| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from ..ml.linear_algebra import distmat
def scatter_2d(orig_df: pd.DataFrame, colx, coly, label_col,
xmin=None, xmax=None, ymin=None, ymax=None):
"""
Return scatter plot of 2 columns in a DataFrame, taking labels as colours.
"""
plt.scatter(orig_df[colx], orig_df[coly],
c=orig_df[label_col].values, cmap='viridis')
plt.colorbar()
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.show()
def visualise_dist(fframe=None, metric='euclidean'):
"""
Plot a distance matrix from a DataFrame containing only feature columns.
    The plot is a heatmap, and the distance metric is specified with `metric`.
"""
plt.figure(figsize=(14, 10))
# ax = plt.gca()
sns.heatmap(distmat(fframe, metric=metric))
    plt.show()
| Seiji-Armstrong/seipy | seipy/plots_/base.py | Python | mit | 875 |
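# A minimal usage sketch (assumes the seipy package is installed so the
# relative import of distmat above resolves; column names are illustrative):
import numpy as np
import pandas as pd
from seipy.plots_.base import scatter_2d, visualise_dist

df = pd.DataFrame({'x': np.random.randn(100),
                   'y': np.random.randn(100),
                   'label': np.random.randint(0, 3, 100)})
scatter_2d(df, 'x', 'y', 'label')                   # colour points by label
visualise_dist(df[['x', 'y']], metric='euclidean')  # pairwise-distance heatmap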
'''
Created on Jan 30, 2011
@author: snail
'''
import logging
import logging.handlers
import os
import sys
from os.path import join
from os import getcwd
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
from pickle import dumps
LogPath = "Logs"
#ensure the logging path exists.
try:
from os import mkdir
mkdir(join(getcwd(), LogPath))
del mkdir
except OSError:
    # Directory already exists (or cannot be created); logging then falls
    # back to whatever is already at LogPath.
    pass
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
def CreateLogger(name, level=None):
l = logging.getLogger(name)
l.setLevel(DEBUG)
if level != None:
l.setLevel(level)
handler = logging.handlers.RotatingFileHandler(join(
LogPath, "%s.log" % name), maxBytes=10240, backupCount=10)
formatter = logging.Formatter("%(asctime)s|%(thread)d|%(levelno)s|%(module)s:%(funcName)s:%(lineno)d|%(message)s")
handler.setFormatter(formatter)
l.addHandler(handler)
return l
class LogFile:
def __init__(self, output, minLevel=WARNING):
self.minLevel = minLevel
self._log = CreateLogger(output)
self._log.findCaller = self.findCaller
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
i = 5
while hasattr(f, "f_code") and i > 0:
i = i - 1
co = f.f_code
rv = (co.co_filename, f.f_lineno, co.co_name)
f = f.f_back
return rv
def debug(self, *vals, **kws):
self.log(DEBUG, *vals, **kws)
def note(self, *vals, **kws):
self.log(INFO, *vals, **kws)
def info(self, *vals, **kws):
self.log(INFO, *vals, **kws)
def warning(self, *vals, **kws):
self.log(WARNING, *vals, **kws)
def error(self, *vals, **kws):
self.log(ERROR, *vals, **kws)
def critical(self, *vals, **kws):
self.log(CRITICAL, *vals, **kws)
def dict(self, d, *vals):
if d:
            lines = [str(k) + " => " + str(v) for k, v in d.items()]
else:
lines = ["None"]
lines+=vals
self.log(DEBUG, *lines)
def exception(self, *vals):
lines = list(vals)
import sys
import traceback
tb = sys.exc_info()
tbLines = (traceback.format_exception(*tb))
for l in tbLines:
lines += l[:-1].split("\n")
self.log(ERROR,*lines)
global ExceptionLog
ExceptionLog.log(ERROR,*lines)
def log(self, level, *vals, **kws):
self._log.log(level, "\t".join(map(str, vals)))
ExceptionLog = LogFile("Exceptions")
if __name__ == "__main__":
import threading
import time
import random
class Worker(threading.Thread):
log = None
def run(self):
for i in range(20):
time.sleep(random.random() * .1)
if self.log:
                    self.foo(i)
self.log.debug("Exception time!")
try:
self.bar()
except:
self.log.exception("Exception while doing math!")
def bar(self):
i = 1 / 0
        def foo(self, i):
            self.log.warning(i, "abc", "123")
logger = LogFile("test")
for i in range(20):
w = Worker()
w.log = logger
w.start()
logger.dict({"a":"a","foo":"bar",1:[1]})
| theepicsnail/SuperBot2 | Logging.py | Python | mit | 3,658 |
#!/usr/bin/env python
import sys, json
from confusionmatrix import ConfusionMatrix as CM
def main():
for line in sys.stdin:
cm = json.loads(line)
print CM(cm["TP"], cm["FP"], cm["FN"], cm["TN"])
if __name__ == '__main__':
main()
| yatmingyatming/LogisticRegressionSGDMapReduce | display_stats.py | Python | mit | 264 |
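# The script expects one JSON object per stdin line carrying the four
# confusion-matrix counts; a sketch of producing such a line (counts made up):
import json
print json.dumps({"TP": 90, "FP": 10, "FN": 5, "TN": 895})
# pipe it in, e.g.:  python gen_line.py | ./display_stats.py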
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-17 20:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_pessoa', '0004_auto_20170817_1727'),
]
operations = [
migrations.AlterField(
model_name='pessoa',
name='profissao',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Profissao'),
),
]
| LEDS/X-data | Xdata/app_pessoa/migrations/0005_auto_20170817_1730.py | Python | mit | 559 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import uuid
from . import utils
from .exceptions import (RevisionNotFound, RevisionAlreadyExists,
NoRevisionsFound, GoingTooFar, AlreadyHere)
def init(directory: str):
print('Creating directory', directory)
os.mkdir(directory)
print('Creating file migro.yaml')
with open('migro.yaml', 'w') as file:
file.write('database_url: driver://user:pass@localhost/dbname\n'
'script_location: %s\n' % directory)
print('Please edit migro.yaml before proceeding.')
def current(config: dict):
conn, curs = utils.connect(config['database_url'])
revisions = utils.parse_dir(config['script_location'])
if not revisions:
raise NoRevisionsFound
curr_rev_id = utils.get_curr_rev_id(conn)
curr_rev = next(
(x for x in revisions if x['revision'] == curr_rev_id), None)
if curr_rev is None:
raise RevisionNotFound(curr_rev_id)
print(curr_rev['revision'], ':', curr_rev['description'])
curs.close()
conn.close()
def revision(config: dict, message: str):
revisions = utils.parse_dir(config['script_location'])
latest_rev_id = revisions[-1]['revision'] if revisions else None
new_rev_id = str(uuid.uuid4())[-12:]
new_rev_filename = os.path.join(
config['script_location'], '%s_%s.yaml' % (
            new_rev_id, re.sub(r'\W', '_', message).lower()
)
)
if os.path.isfile(new_rev_filename):
raise RevisionAlreadyExists
with open(new_rev_filename, 'w') as file:
file.write(
'description: %s\n\nrevision: %s\n'
'down_revision: %s\n\nupgrade:\n\ndowngrade:\n' % (
message, new_rev_id,
latest_rev_id if latest_rev_id is not None else 'null'))
print('Created revision at %s' % new_rev_filename)
def checkout(config: dict, arg: str):
conn, curs = utils.connect(config['database_url'])
revisions = utils.parse_dir(config['script_location'])
if not revisions:
raise NoRevisionsFound
curr_rev_id = utils.get_curr_rev_id(conn)
curr_rev_index = utils.get_index(revisions, curr_rev_id)
if curr_rev_index is None and curr_rev_id is None:
curr_rev_index = -1
elif curr_rev_index is None:
raise RevisionNotFound(curr_rev_id)
if arg == 'head':
next_rev_index = len(revisions) - 1
elif utils.isnum(arg):
next_rev_index = curr_rev_index + int(arg)
if next_rev_index > len(revisions) - 1 or next_rev_index < -1:
raise GoingTooFar
else:
next_rev_index = utils.get_index(revisions, arg)
if next_rev_index is None:
raise RevisionNotFound(arg)
if next_rev_index == curr_rev_index:
        raise AlreadyHere()
if next_rev_index > curr_rev_index: # Upgrading
for rev_index in range(curr_rev_index + 1, next_rev_index + 1):
print('Upgrading to', revisions[rev_index]['revision'], ':',
revisions[rev_index]['description'])
curs.execute(revisions[rev_index]['upgrade'])
curs.execute(
"TRUNCATE TABLE migro_ver; INSERT INTO migro_ver VALUES (%s);",
(revisions[rev_index]['revision'],))
conn.commit()
else: # Downgrading
for rev_index in range(curr_rev_index, next_rev_index, -1):
print('Downgrading from', revisions[rev_index]['revision'], ':',
revisions[rev_index]['description'])
curs.execute(revisions[rev_index]['downgrade'])
curs.execute("TRUNCATE TABLE migro_ver;")
if rev_index > 0:
curs.execute("INSERT INTO migro_ver VALUES (%s);",
(revisions[rev_index - 1]['revision'],))
conn.commit()
curs.close()
conn.close()
def reapply(config: dict):
conn, curs = utils.connect(config['database_url'])
revisions = utils.parse_dir(config['script_location'])
if not revisions:
raise NoRevisionsFound
curr_rev_id = utils.get_curr_rev_id(conn)
curr_rev_index = utils.get_index(revisions, curr_rev_id)
if curr_rev_index is None:
raise RevisionNotFound(curr_rev_id)
print('Reapplying', revisions[curr_rev_index]['revision'], ':',
revisions[curr_rev_index]['description'])
curs.execute(revisions[curr_rev_index]['downgrade'])
curs.execute(revisions[curr_rev_index]['upgrade'])
conn.commit()
def show(config: dict):
conn, curs = utils.connect(config['database_url'])
revisions = utils.parse_dir(config['script_location'])
if not revisions:
raise NoRevisionsFound
curr_rev_id = utils.get_curr_rev_id(conn)
for rev in revisions:
print('[%s]' % ('x' if curr_rev_id == rev['revision'] else ' '),
rev['revision'], ':', rev['description'])
| ALFminecraft/migro | migro/main.py | Python | mit | 4,923 |
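# A typical migro session (sketch; assumes the package is importable and
# migro.yaml points at a reachable database):
from migro.main import init, revision, checkout

init('migrations')                      # scaffold the directory + migro.yaml
config = {'database_url': 'postgresql://user:pass@localhost/dbname',
          'script_location': 'migrations'}
revision(config, 'create users table')  # writes <id>_create_users_table.yaml
checkout(config, 'head')                # apply every pending upgrade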
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _
class EmailAuthenticationForm(AuthenticationForm):
"""Email authentication Form
increase the size of the username field to fit long emails"""
    # TODO: consider changing this to an email-only field
username = forms.CharField(label=_("Username"),
widget=forms.TextInput(attrs={'class': 'text'}))
password = forms.CharField(label=_("Password"),
widget=forms.PasswordInput(
attrs={'class': 'text'}))
remember_me = forms.BooleanField(label='Keep me logged in',
required=False)
| theteam/django-theteamcommon | src/theteamcommon/forms.py | Python | mit | 770 |
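# Minimal view-side usage sketch (view and template names are illustrative,
# not part of this module):
from django.contrib.auth import login
from django.shortcuts import redirect, render
from theteamcommon.forms import EmailAuthenticationForm

def login_view(request):
    form = EmailAuthenticationForm(request, data=request.POST or None)
    if request.method == 'POST' and form.is_valid():
        login(request, form.get_user())
        return redirect('/')
    return render(request, 'login.html', {'form': form})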
from discrete import *
f = factorial(100)
s = str(f)
print sum(int(digit) for digit in s)
| jreese/euler | python/problem20.py | Python | mit | 101 |
import cPickle as pickle
import numpy as np
import os
import sys
sys.path.append('../')
import gp
from uitools import UITools
class Manager(object):
def __init__(self, output_dir):
'''
'''
self._data_path = '/home/d/dojo_xp/data/'
self._output_path = os.path.join(self._data_path, 'ui_out', output_dir)
self._merge_errors = None
self._corrections = []
self._correction_times = []
self._correction_vis = []
self._mode = 'GP'
def start( self, mode, cnn_path='../nets/IPMLB_FULL.p', verbose=True ):
'''
'''
self._cnn = UITools.load_cnn(cnn_path)
self._mode = mode
if self._mode == 'GP*':
# now we use the matlab engine
print 'Using Active Label Suggestion'
import matlab.engine
eng = matlab.engine.start_matlab()
self._merge_errors = self.load_merge_errors()
self._bigM = self.load_split_errors()
            # let's generate our active label features and store a list
elif self._mode == 'GP':
if verbose:
print 'Using GP proper'
self._merge_errors = self.load_merge_errors()
self._bigM = self.load_split_errors()
elif self._mode == 'FP':
print 'Using FP'
self._merge_errors = []
self._bigM = self.load_split_errors(filename='bigM_fp.p')
elif self._mode == 'TEST':
print 'Test mode using FP'
self._merge_errors = []
self._bigM = self.load_split_errors(filename='bigM_fp_test.p')
else:
print 'WRONG MODE, should be GP, GP* or FP'
sys.exit(2)
# load data
if self._mode == 'TEST':
print 'We are using dummy data for testing'
input_image, input_prob, input_gold, input_rhoana, dojo_bbox = gp.Legacy.read_dojo_test_data()
self._mode = 'FP'
else:
input_image, input_prob, input_gold, input_rhoana, dojo_bbox = gp.Legacy.read_dojo_data()
self._input_image = input_image
self._input_prob = input_prob
self._input_gold = input_gold
self._input_rhoana = input_rhoana
self._dojo_bbox = dojo_bbox
if verbose:
print 'VI at start:', UITools.VI(self._input_gold, self._input_rhoana)[1]
print 'aRE at start:', UITools.adaptedRandError(self._input_rhoana, self._input_gold)[1]
def gen_active_label_features(self):
'''
'''
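        # NOTE: the body below is an unfinished draft.  Several of the names
        # it references (Patch, image, prob, segmentation, X_test, net, eng,
        # feature_vector, nItems, nFeatures, x) are placeholders that are
        # never defined in this module and must be wired up before this
        # function can run.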
active_labels_file = os.path.join(self._data_path, 'split_active_labels.p')
if os.path.exists(active_labels_file):
with open(active_labels_file, 'rb') as f:
feature_vector = pickle.load(f)
print 'Feature vector loaded from pickle.'
else:
print 'Calculating active labels...'
# we work on a copy of bigM, let's call it bigD like daddy
bigD = self._bigM.copy()
import theano
import theano.tensor as T
from lasagne.layers import get_output
# go from highest prob to lowest in our bigD
prediction = np.inf
while prediction > -1:
z, labels, prediction = UITools.find_next_split_error(bigD)
# create patch
l,n = labels
patches = []
patches_l, patches_n = Patch.grab(image, prob, segmentation, l, n, sample_rate=10, oversampling=False)
patches += patches_l
patches += patches_n
grouped_patches = Patch.group(patches)
# let CNN without softmax analyze the patch to create features
x = X_test[100].reshape(1,4,75,75)
layer = net.layers_['hidden5']
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
# create feature vector
# store feature vector to pickle
with open(active_labels_file, 'wb') as f:
pickle.dump(feature_vector, f)
print 'Feature vector stored.'
# Now, we need to represent the distance between each of these items using the graph Laplacian matrix 'LGReg'.
# We're going to build this now using a MATLAB function - 'BuildLGRegularizer.m'
# First, we need to set two parameters, as this is an approximation of the true graph laplacian to allow us to
# use this on very large datasets
        manifoldDim = 17
        kNNSize = 20
        # Second, we set the regularization strength of this graph Laplacian
        lambdaRP = 0.005
# Next, we call the function
#LGReg = eng.BuildLGRegularizer( x, manifoldDim, kNNSize, nargout=1 );
# ...but, two problems:
# 1) We need to transform our numpy array x into something MATLAB can handle
        xM = matlab.double(size=[nItems, nFeatures])
        for j in range(nFeatures):
            for i in range(nItems):
                xM[i][j] = x[i][j]
# 2) LGReg is a 'sparse' matrix type, and python doesn't support that.
# Let's leave the output variable in the MATLAB workspace, and until we need to use it.
        eng.workspace['xM'] = xM
        # We also need to pass our function variables
        eng.workspace['nItems'] = nItems
        eng.workspace['nFeatures'] = nFeatures
        eng.workspace['lambdaRP'] = lambdaRP
        eng.workspace['manifoldDim'] = manifoldDim
        eng.workspace['kNNSize'] = kNNSize
# OK, now let's call our function
eng.eval( "LGReg = BuildLGRegularizer( xM, manifoldDim, kNNSize )", nargout=0 )
def load_merge_errors(self, filename='merges_new_cnn.p'):
'''
'''
with open(os.path.join(self._data_path, filename), 'rb') as f:
merge_errors = pickle.load(f)
return sorted(merge_errors, key=lambda x: x[3], reverse=False)
def load_split_errors(self, filename='bigM_new_cnn.p'):
'''
'''
with open(os.path.join(self._data_path, filename), 'rb') as f:
bigM = pickle.load(f)
return bigM
def get_next_merge_error(self):
'''
'''
if len(self._merge_errors) == 0:
return None
return self._merge_errors[0]
def get_next_split_error(self):
'''
'''
if self._mode == 'GP' or self._mode == 'FP':
z, labels, prediction = UITools.find_next_split_error(self._bigM)
elif self._mode == 'GP*':
#
# here, let's check for the next active label suggestion
# but only if we already corrected twice
#
pass
self._split_error = (z, labels, prediction)
return self._split_error
def get_merge_error_image(self, merge_error, number):
border = merge_error[3][number][1]
z = merge_error[0]
label = merge_error[1]
prob = merge_error[2]
input_image = self._input_image
input_prob = self._input_prob
input_rhoana = self._input_rhoana
a,b,c,d,e,f,g,h,i,j,k = gp.Legacy.get_merge_error_image(input_image[z], input_rhoana[z], label, border, returnbb=True)
border_before = b
labels_before = h
border_after = c
labels_after = i
slice_overview = g
cropped_slice_overview = j
bbox = k
return border_before, border_after, labels_before, labels_after, slice_overview, cropped_slice_overview, bbox
def get_split_error_image(self, split_error, number=1):
z = split_error[0]
labels = split_error[1]
input_image = self._input_image
input_prob = self._input_prob
input_rhoana = self._input_rhoana
a,b,c,d,e,f,g,h = gp.Legacy.get_split_error_image(input_image[z], input_rhoana[z], labels, returnbb=True)
labels_before = b
borders_before = c
borders_after = d
labels_after = e
slice_overview = f
cropped_slice_overview = g
bbox = h
return borders_before, borders_after, labels_before, labels_after, slice_overview, cropped_slice_overview, bbox
def correct_merge(self, clicked_correction, do_oracle=False, do_GT=False):
input_image = self._input_image
input_prob = self._input_prob
input_rhoana = self._input_rhoana
#
#
#
oracle_choice = ''
delta_vi = -1
if do_oracle:
# lets check what the oracle would do
merge_error = self._merge_errors[0]
number = 0
border = merge_error[3][number][1]
z = merge_error[0]
label = merge_error[1]
a,b,c,d,e,f,g,h,i,j = gp.Legacy.get_merge_error_image(input_image[z], input_rhoana[z], label, border)
oracle_rhoana = f
# check VI delta
old_vi = gp.Util.vi(self._input_gold[z], self._input_rhoana[z])
new_vi = gp.Util.vi(self._input_gold[z], oracle_rhoana)
delta_vi = old_vi - new_vi
if delta_vi > 0:
oracle_choice = '1'
else:
oracle_choice = 'current'
if not clicked_correction == 'current':
clicked_correction = int(clicked_correction)-1
#
# correct the merge
#
merge_error = self._merge_errors[0]
number = clicked_correction
border = merge_error[3][number][1]
z = merge_error[0]
label = merge_error[1]
a,b,c,d,e,f,g,h,i,j = gp.Legacy.get_merge_error_image(input_image[z], input_rhoana[z], label, border)
new_rhoana = f
self._input_rhoana[z] = new_rhoana
vi = UITools.VI(self._input_gold, input_rhoana)
#print 'New global VI', vi[0]
self._correction_vis.append(vi[2])
#
# and remove the original label from our bigM matrix
#
self._bigM[z][label,:] = -3
self._bigM[z][:,label] = -3
# now add the two new labels
label1 = new_rhoana.max()
label2 = new_rhoana.max()-1
new_m = np.zeros((self._bigM[z].shape[0]+2, self._bigM[z].shape[1]+2), dtype=self._bigM[z].dtype)
new_m[:,:] = -1
new_m[0:-2,0:-2] = self._bigM[z]
#print 'adding', label1, 'to', z
new_m = gp.Legacy.add_new_label_to_M(self._cnn, new_m, input_image[z], input_prob[z], new_rhoana, label1)
new_m = gp.Legacy.add_new_label_to_M(self._cnn, new_m, input_image[z], input_prob[z], new_rhoana, label2)
# re-propapage new_m to bigM
self._bigM[z] = new_m
# remove merge error
del self._merge_errors[0]
mode = 'merge'
if len(self._merge_errors) == 0:
mode = 'split'
return mode, oracle_choice, delta_vi
def correct_split(self, clicked_correction, do_oracle=False):
input_image = self._input_image
input_prob = self._input_prob
input_rhoana = self._input_rhoana
split_error = self._split_error
z = split_error[0]
labels = split_error[1]
m = self._bigM[z]
#
#
#
oracle_choice = ''
delta_vi = -1
if do_oracle:
oracle_m, oracle_rhoana = UITools.correct_split(self._cnn, m, self._mode, input_image[z], input_prob[z], input_rhoana[z], labels[0], labels[1], oversampling=False)
# check VI delta
old_vi = gp.Util.vi(self._input_gold[z], self._input_rhoana[z])
new_vi = gp.Util.vi(self._input_gold[z], oracle_rhoana)
delta_vi = old_vi - new_vi
if delta_vi > 0:
oracle_choice = '1'
else:
oracle_choice = 'current'
if clicked_correction == 'current':
# we skip this split
# print 'FP or current'
new_m = UITools.skip_split(m, labels[0], labels[1])
self._bigM[z] = new_m
else:
# we correct this split
# print 'fixing slice',z,'labels', labels
# vi = gp.Util.vi(self._input_gold[z], self._input_rhoana[z])
# print 'bef vi', vi
new_m, new_rhoana = UITools.correct_split(self._cnn, m, self._mode, input_image[z], input_prob[z], input_rhoana[z], labels[0], labels[1], oversampling=False)
self._bigM[z] = new_m
self._input_rhoana[z] = new_rhoana
# vi = gp.Util.vi(self._input_gold[z], self._input_rhoana[z])
# print 'New VI', vi[0]
vi = UITools.VI(self._input_gold, self._input_rhoana)
#print 'New global VI', vi[0]
self._correction_vis.append(vi[2])
# self.finish()
return 'split', oracle_choice, delta_vi
def store(self):
vi = UITools.VI(self._input_gold, self._input_rhoana)
print 'New VI', vi[1]
are = UITools.adaptedRandError(self._input_rhoana, self._input_gold)
print 'New aRE', are[1]
if not os.path.exists(self._output_path):
os.makedirs(self._output_path)
# store our changed rhoana
with open(os.path.join(self._output_path, 'ui_results.p'), 'wb') as f:
pickle.dump(self._input_rhoana, f)
# store the times
with open(os.path.join(self._output_path, 'times.p'), 'wb') as f:
pickle.dump(self._correction_times, f)
# store the corrections
with open(os.path.join(self._output_path, 'corrections.p'), 'wb') as f:
pickle.dump(self._corrections, f)
with open(os.path.join(self._output_path, 'correction_vis.p'), 'wb') as f:
pickle.dump(self._correction_vis, f)
print 'All stored.'
| VCG/gp | ui/manager.py | Python | mit | 12,894 |
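# A sketch of driving the manager from a session script (the dojo data files
# and CNN pickle referenced above must exist on disk for this to run):
from ui.manager import Manager

manager = Manager('session_01')     # results land in .../ui_out/session_01
manager.start('FP', verbose=True)
split = manager.get_next_split_error()
manager.correct_split('current')    # skip the suggested split correction
manager.store()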
# Rushy Panchal
"""
See https://bittrex.com/Home/Api
"""
import urllib
import time
import requests
import hmac
import hashlib
BUY_ORDERBOOK = 'buy'
SELL_ORDERBOOK = 'sell'
BOTH_ORDERBOOK = 'both'
PUBLIC_SET = ['getmarkets', 'getcurrencies', 'getticker', 'getmarketsummaries', 'getorderbook',
'getmarkethistory']
MARKET_SET = ['getopenorders', 'cancel', 'sellmarket', 'selllimit', 'buymarket', 'buylimit']
ACCOUNT_SET = ['getbalances', 'getbalance', 'getdepositaddress', 'withdraw', 'getorder', 'getorderhistory', 'getwithdrawalhistory', 'getdeposithistory']
class Bittrex(object):
"""
Used for requesting Bittrex with API key and API secret
"""
def __init__(self, api_key, api_secret):
self.api_key = str(api_key) if api_key is not None else ''
self.api_secret = str(api_secret) if api_secret is not None else ''
self.public_set = set(PUBLIC_SET)
self.market_set = set(MARKET_SET)
self.account_set = set(ACCOUNT_SET)
def api_query(self, method, options=None):
"""
Queries Bittrex with given method and options
:param method: Query method for getting info
:type method: str
:param options: Extra options for query
:type options: dict
:return: JSON response from Bittrex
:rtype : dict
"""
if not options:
options = {}
nonce = str(int(time.time() * 1000))
base_url = 'https://bittrex.com/api/v1.1/%s/'
request_url = ''
if method in self.public_set:
request_url = (base_url % 'public') + method + '?'
elif method in self.market_set:
request_url = (base_url % 'market') + method + '?apikey=' + self.api_key + "&nonce=" + nonce + '&'
elif method in self.account_set:
request_url = (base_url % 'account') + method + '?apikey=' + self.api_key + "&nonce=" + nonce + '&'
request_url += urllib.urlencode(options)
signature = hmac.new(self.api_secret, request_url, hashlib.sha512).hexdigest()
headers = {"apisign": signature}
ret = requests.get(request_url, headers=headers)
return ret.json()
def get_markets(self):
"""
Used to get the open and available trading markets
at Bittrex along with other meta data.
:return: Available market info in JSON
:rtype : dict
"""
return self.api_query('getmarkets')
def get_currencies(self):
"""
Used to get all supported currencies at Bittrex
along with other meta data.
:return: Supported currencies info in JSON
:rtype : dict
"""
return self.api_query('getcurrencies')
def get_ticker(self, market):
"""
Used to get the current tick values for a market.
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:return: Current values for given market in JSON
:rtype : dict
"""
return self.api_query('getticker', {'market': market})
def get_market_summaries(self):
"""
Used to get the last 24 hour summary of all active exchanges
:return: Summaries of active exchanges in JSON
:rtype : dict
"""
return self.api_query('getmarketsummaries')
def get_orderbook(self, market, depth_type, depth=20):
"""
        Used to retrieve the orderbook for a given market
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:param depth_type: buy, sell or both to identify the type of orderbook to return.
Use constants BUY_ORDERBOOK, SELL_ORDERBOOK, BOTH_ORDERBOOK
:type depth_type: str
:param depth: how deep of an order book to retrieve. Max is 100, default is 20
:type depth: int
:return: Orderbook of market in JSON
:rtype : dict
"""
return self.api_query('getorderbook', {'market': market, 'type': depth_type, 'depth': depth})
def get_market_history(self, market, count):
"""
        Used to retrieve the latest trades that have occurred for a
specific market.
/market/getmarkethistory
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:param count: Number between 1-100 for the number of entries to return (default = 20)
:type count: int
:return: Market history in JSON
:rtype : dict
"""
return self.api_query('getmarkethistory', {'market': market, 'count': count})
def buy_market(self, market, quantity, rate):
"""
Used to place a buy order in a specific market. Use buymarket to
place market orders. Make sure you have the proper permissions
set on your API keys for this call to work
/market/buymarket
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:param quantity: The amount to purchase
:type quantity: float
:param rate: The rate at which to place the order.
This is not needed for market orders
:type rate: float
:return:
:rtype : dict
"""
return self.api_query('buymarket', {'market': market, 'quantity': quantity, 'rate': rate})
def buy_limit(self, market, quantity, rate):
"""
Used to place a buy order in a specific market. Use buylimit to place
limit orders Make sure you have the proper permissions set on your
API keys for this call to work
/market/buylimit
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:param quantity: The amount to purchase
:type quantity: float
:param rate: The rate at which to place the order.
This is not needed for market orders
:type rate: float
:return:
:rtype : dict
"""
return self.api_query('buylimit', {'market': market, 'quantity': quantity, 'rate': rate})
def sell_market(self, market, quantity, rate):
"""
Used to place a sell order in a specific market. Use sellmarket to place
market orders. Make sure you have the proper permissions set on your
API keys for this call to work
/market/sellmarket
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
        :param quantity: The amount to sell
:type quantity: float
:param rate: The rate at which to place the order.
This is not needed for market orders
:type rate: float
:return:
:rtype : dict
"""
return self.api_query('sellmarket', {'market': market, 'quantity': quantity, 'rate': rate})
def sell_limit(self, market, quantity, rate):
"""
Used to place a sell order in a specific market. Use selllimit to place
limit orders Make sure you have the proper permissions set on your
API keys for this call to work
/market/selllimit
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
        :param quantity: The amount to sell
:type quantity: float
:param rate: The rate at which to place the order.
This is not needed for market orders
:type rate: float
:return:
:rtype : dict
"""
return self.api_query('selllimit', {'market': market, 'quantity': quantity, 'rate': rate})
def cancel(self, uuid):
"""
Used to cancel a buy or sell order
/market/cancel
:param uuid: uuid of buy or sell order
:type uuid: str
:return:
:rtype : dict
"""
return self.api_query('cancel', {'uuid': uuid})
def get_open_orders(self, market):
"""
Get all orders that you currently have opened. A specific market can be requested
/market/getopenorders
:param market: String literal for the market (ie. BTC-LTC)
:type market: str
:return: Open orders info in JSON
:rtype : dict
"""
return self.api_query('getopenorders', {'market': market})
def get_balances(self):
"""
Used to retrieve all balances from your account
/account/getbalances
:return: Balances info in JSON
:rtype : dict
"""
return self.api_query('getbalances', {})
def get_balance(self, currency):
"""
Used to retrieve the balance from your account for a specific currency
/account/getbalance
:param currency: String literal for the currency (ex: LTC)
:type currency: str
:return: Balance info in JSON
:rtype : dict
"""
return self.api_query('getbalance', {'currency': currency})
def get_deposit_address(self, currency):
"""
Used to generate or retrieve an address for a specific currency
/account/getdepositaddress
:param currency: String literal for the currency (ie. BTC)
:type currency: str
:return: Address info in JSON
:rtype : dict
"""
return self.api_query('getdepositaddress', {'currency': currency})
def withdraw(self, currency, quantity, address):
"""
Used to withdraw funds from your account
/account/withdraw
:param currency: String literal for the currency (ie. BTC)
:type currency: str
:param quantity: The quantity of coins to withdraw
:type quantity: float
:param address: The address where to send the funds.
:type address: str
:return:
:rtype : dict
"""
return self.api_query('withdraw', {'currency': currency, 'quantity': quantity, 'address': address})
def get_order(self, uuid):
"""
Used to get an order from your account
/account/getorder
:param uuid: The order UUID to look for
:type uuid: str
:return:
:rtype : dict
"""
return self.api_query('getorder', {'uuid': uuid})
def get_order_history(self, market = ""):
"""
Used to retrieve your order history
/account/getorderhistory
:param market: Bittrex market identifier (i.e BTC-DOGE)
:type market: str
:return:
:rtype : dict
"""
return self.api_query('getorderhistory', {"market": market})
def get_withdrawal_history(self, currency = ""):
"""
Used to retrieve your withdrawal history
/account/getwithdrawalhistory
:param currency: String literal for the currency (ie. BTC) (defaults to all)
:type currency: str
:return:
:rtype : dict
"""
return self.api_query('getwithdrawalhistory', {"currency": currency})
def get_deposit_history(self, currency = ""):
"""
Used to retrieve your deposit history
/account/getdeposithistory
:param currency: String literal for the currency (ie. BTC) (defaults to all)
:type currency: str
:return:
:rtype : dict
"""
return self.api_query('getdeposithistory', {"currency": currency})
| panchr/python-bittrex | bittrex/bittrex.py | Python | mit | 9,819 |
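# Public endpoints need no credentials; authenticated calls require a real
# key/secret pair (the Nones below restrict us to the public set):
from bittrex.bittrex import Bittrex, BOTH_ORDERBOOK

api = Bittrex(None, None)
ticker = api.get_ticker('BTC-LTC')
book = api.get_orderbook('BTC-LTC', BOTH_ORDERBOOK, depth=10)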
import hexchat
import re
__module_name__ = 'BanSearch'
__module_author__ = 'TingPing'
__module_version__ = '2'
__module_description__ = 'Search for bans/quiets matching a user'
banhook = 0
quiethook = 0
endbanhook = 0
endquiethook = 0
banlist = []
quietlist = []
regexescapes = {'[':r'\[', ']':r'\]', '.':r'\.'}
ircreplace = {'{':'[', '}':']', '|':'\\'} # IRC nick substitutions
wildcards = {'?':r'.', '*': r'.*'} # translate wildcards to regex
def print_result(mask, matchlist, _type):
if matchlist:
print('\00318{}\017 had \00320{}\017 {} matches:'.format(mask, len(matchlist), _type))
for match in matchlist:
print('\t\t\t{}'.format(match))
else:
print('No {} matches for \00318{}\017 were found.'.format(_type, mask))
def match_mask(mask, searchmask):
if searchmask is None:
searchmask = ''
# A mask of $a:* can match a user with no account
if searchmask == '' and mask != '*':
return False
# A mask of $a will not match a user with no account
elif mask == '' and searchmask != '':
return True
# These have to be replaced in a very specific order
for match, repl in ircreplace.items():
mask = mask.replace(match, repl)
searchmask = searchmask.replace(match, repl)
for match, repl in regexescapes.items():
mask = mask.replace(match, repl)
for match, repl in wildcards.items():
mask = mask.replace(match, repl)
if '$' in mask and mask[0] != '$': # $#channel is used to forward users, ignore it
mask = mask.split('$')[0]
return bool(re.match(mask, searchmask, re.IGNORECASE))
def match_extban(mask, host, account, realname, usermask):
try:
extban, banmask = mask.split(':')
except ValueError:
extban = mask
banmask = ''
if '~' in extban:
invert = True
else:
invert = False
# Extbans from http://freenode.net/using_the_network.shtml
if ':' in usermask: # Searching for extban
userextban, usermask = usermask.split(':')
if extban == userextban:
ret = match_mask(banmask, usermask)
else:
return False
elif 'a' in extban:
ret = match_mask (banmask, account)
elif 'r' in extban:
ret = match_mask (banmask, realname)
elif 'x' in extban:
ret = match_mask (banmask, '{}#{}'.format(host, realname))
else:
return False
if invert:
return not ret
else:
return ret
def get_user_info(nick):
invalid_chars = ['*', '?', '$', '@', '!']
if any(char in nick for char in invalid_chars):
return (None, None, None) # It's a mask not a nick.
for user in hexchat.get_list('users'):
if user.nick == nick:
host = user.nick + '!' + user.host
account = user.account
realname = user.realname
return (host, account, realname)
return (nick + '!*@*', None, None)
def search_list(list, usermask):
matchlist = []
host, account, realname = get_user_info (usermask)
for mask in list:
# If extban we require userinfo or we are searching for extban
if mask[0] == '$' and (host or usermask[0] == '$'):
if match_extban (mask, host, account, realname, usermask):
matchlist.append(mask)
elif mask[0] != '$':
if host: # Was given a user
if match_mask (mask, host):
matchlist.append(mask)
else: # Was given a mask or no userinfo found
if match_mask (mask, usermask):
matchlist.append(mask)
return matchlist
def banlist_cb(word, word_eol, userdata):
global banlist
banlist.append(word[4])
return hexchat.EAT_HEXCHAT
def endbanlist_cb(word, word_eol, usermask):
global banhook
global endbanhook
global banlist
matchlist = []
hexchat.unhook(banhook)
banhook = 0
hexchat.unhook(endbanhook)
endbanhook = 0
if banlist:
matchlist = search_list(banlist, usermask)
banlist = []
print_result (usermask, matchlist, 'Ban')
return hexchat.EAT_HEXCHAT
def quietlist_cb(word, word_eol, userdata):
global quietlist
quietlist.append(word[5])
return hexchat.EAT_HEXCHAT
def endquietlist_cb(word, word_eol, usermask):
global quiethook
global endquiethook
global quietlist
matchlist = []
hexchat.unhook(quiethook)
quiethook = 0
hexchat.unhook(endquiethook)
endquiethook = 0
if quietlist:
matchlist = search_list(quietlist, usermask)
quietlist = []
print_result (usermask, matchlist, 'Quiet')
return hexchat.EAT_HEXCHAT
def search_cb(word, word_eol, userdata):
global banhook
global quiethook
global endbanhook
global endquiethook
if len(word) == 2:
hooks = (quiethook, banhook, endquiethook, endbanhook)
if not any(hooks):
banhook = hexchat.hook_server ('367', banlist_cb)
quiethook = hexchat.hook_server ('728', quietlist_cb)
endbanhook = hexchat.hook_server ('368', endbanlist_cb, word[1])
endquiethook = hexchat.hook_server ('729', endquietlist_cb, word[1])
hexchat.command('ban')
hexchat.command('quiet')
else:
print('A ban search is already in progress.')
else:
hexchat.command('help bansearch')
return hexchat.EAT_ALL
def unload_cb(userdata):
print(__module_name__ + ' version ' + __module_version__ + ' unloaded.')
hexchat.hook_unload(unload_cb)
hexchat.hook_command('bansearch', search_cb, help='BANSEARCH <mask|nick>')
hexchat.prnt(__module_name__ + ' version ' + __module_version__ + ' loaded.')
| TingPing/plugins | HexChat/bansearch.py | Python | mit | 5,101 |
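# The wildcard-to-regex translation used by match_mask above, restated
# standalone so it can be exercised without a running HexChat:
import re

def mask_to_regex(mask):
    for old, new in {'[': r'\[', ']': r'\]', '.': r'\.'}.items():
        mask = mask.replace(old, new)
    for old, new in {'?': '.', '*': '.*'}.items():
        mask = mask.replace(old, new)
    return mask

assert re.match(mask_to_regex('*!*@*.example.com'),
                'nick!user@host.example.com', re.IGNORECASE)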
import signal
import subprocess
import sys
import time
import numba
import numpy as np
import SharedArray as sa
sys.path.append('./pymunk')
import pymunk as pm
max_creatures = 50
@numba.jit
def _find_first(vec, item):
for i in range(len(vec)):
if vec[i] == item:
return i
return -1
@numba.jit(nopython=True)
def random_circle_point():
theta = np.random.rand()*2*np.pi
x,y = 5*np.cos(theta), 5*np.sin(theta)
return x,y
class Culture(object):
def __init__(self):
try:
self.creature_parts = sa.create('creature_parts', (max_creatures*3, 12), dtype=np.float32)
except FileExistsError:
sa.delete('creature_parts')
self.creature_parts = sa.create('creature_parts', (max_creatures*3, 12), dtype=np.float32)
# X POSITION, Y POSITION
self.creature_parts[:, :2] = (30.0, 30.0)#np.random.random((max_creatures, 2)).astype(np.float32)*20.0 - 10.0
# ROTATION
self.creature_parts[:, 2] = np.random.random(max_creatures*3)*2*np.pi - np.pi
# SCALE
self.creature_parts[:, 3] = 0.5
# TEXTURE INDEX
self.creature_parts[:, 4] = np.random.randint(0, 10, max_creatures*3)
# COLOR ROTATION
self.creature_parts[:, 5] = np.random.randint(0, 4, max_creatures*3)/4.0
# SATURATION
self.creature_parts[:, 6] = 1.0
# ALPHA
self.creature_parts[:, 7] = 1.0
# TIME OFFSET (FOR ANIMATION
self.creature_parts[:, 8] = np.random.random(max_creatures).repeat(3)*2*np.pi
self.creature_parts[1::3, 8] += 0.4
self.creature_parts[2::3, 8] += 0.8
# BEAT ANIMATION FREQUENCY
self.creature_parts[:, 9] = 2.0
# SWIRL ANIMATON RADIUS
self.creature_parts[:, 10] = 2.3
# SWIRL ANIMATION FREQUENCY
self.creature_parts[:, 11] = 1.0
self.creature_data = np.zeros((max_creatures, 4))
self.creature_data[:, 1] = 1.0 # max_age
self.creature_data[:, 3] = 0.5 # creature size
self.pm_space = pm.Space()
self.pm_space.damping = 0.4
# self.pm_space.gravity = 0.0, -1.0
self.pm_body = []
self.pm_body_joint = []
self.pm_target = []
self.pm_target_spring = []
for i in range(max_creatures):
head = pm.Body(10.0, 5.0)
head.position = tuple(self.creature_parts[i, :2])
mid = pm.Body(1.0, 1.0)
mid.position = head.position + (0.0, -1.0)
tail = pm.Body(1.0, 1.0)
tail.position = head.position + (0.0, -2.0)
self.pm_body.append([head, mid, tail])
head_mid_joint1 = pm.constraint.SlideJoint(head, mid, (0.4, -0.3), (0.4, 0.3), 0.1, 0.2)
head_mid_joint2 = pm.constraint.SlideJoint(head, mid, (-0.4, -0.3), (-0.4, 0.3), 0.1, 0.2)
mid_tail_joint = pm.constraint.SlideJoint(mid, tail, (0.0, -0.1), (0.0, 0.1), 0.1, 0.5)
self.pm_body_joint.append([head_mid_joint1, head_mid_joint2, mid_tail_joint])
target = pm.Body(10.0, 10.0)
target.position = tuple(self.creature_parts[i, :2] + (0.0, 5.0))
self.pm_target.append(target)
head_offset = pm.vec2d.Vec2d((0.0, 0.8)) * float(0.5)
target_spring = pm.constraint.DampedSpring(head, target, head_offset, (0.0, 0.0), 0.0, 10.0, 15.0)
self.pm_target_spring.append(target_spring)
self.pm_space.add([head, mid, tail])
self.pm_space.add([head_mid_joint1, head_mid_joint2, mid_tail_joint])
self.pm_space.add([target_spring])
self.prev_update = time.perf_counter()
self.ct = time.perf_counter()
        #self.dt = 0.0
def add_creature(self, type=None):
if type is None:
            type = 0  # np.random.randint(2)
print('adding creature {}'.format(type))
ind = _find_first(self.creature_data[:, 0], 0.0)
if ind != -1:
if type == 0: # Meduusa
new_pos = pm.vec2d.Vec2d(tuple(np.random.random(2)*20.0 - 10.0))
print('at position: ', new_pos)
head_offset = pm.vec2d.Vec2d((0.0, 0.8)) * 0.5
self.pm_target[ind].position = new_pos + head_offset
self.pm_body[ind][0].position = new_pos #creature_data[ind, :2] = new_pos
self.pm_body[ind][1].position = new_pos + (0.0, -0.5)
self.pm_body[ind][2].position = new_pos + (0.0, -1.0)
for i in range(3):
self.pm_body[ind][i].reset_forces()
self.pm_body[ind][i].velocity = 0.0, 0.0
self.creature_parts[ind*3+i, 3] = 0.5 # size/scale
self.creature_parts[ind*3+i, 6] = 1.0
self.creature_parts[ind*3+i, 4] = 2+i
self.creature_data[ind, :] = [1.0, np.random.random(1)*10+10, 0.0, 0.5] # Alive, max_age, age, size
if type == 1: # Ötö
pass
def update(self, dt):
self.ct = time.perf_counter()
if self.ct - self.prev_update > 5.0:
self.add_creature()
#i = np.random.randint(0, max_creatures)
#self.pm_target[i].position = tuple(np.random.random(2)*20.0 - 10.0)
self.prev_update = self.ct
alive = self.creature_data[:, 0]
max_age = self.creature_data[:, 1]
cur_age = self.creature_data[:, 2]
cur_age[:] += dt
self.creature_parts[:, 6] = np.clip(1.0 - (cur_age / max_age), 0.0, 1.0).repeat(3)
# dying_creatures = (alive == 1.0) & (cur_age > max_age)
self.creature_parts[:, 7] = np.clip(1.0 - (cur_age - max_age)/5.0, 0.0, 1.0).repeat(3)
dead_creatures = (alive == 1.0) & (cur_age > max_age + 5.0)
self.creature_data[dead_creatures, 0] = 0.0
self.pm_space.step(dt)
for i in range(max_creatures):
head_offset = pm.vec2d.Vec2d((0.0, 0.8)) * 0.5
if alive[i] == 1.0 and \
(self.pm_body[i][0].position - (self.pm_target[i].position - head_offset)).get_length() < 2.0:
self.pm_target[i].position += random_circle_point()
for j in range(3):
self.creature_parts[3*i+j, :2] = tuple(self.pm_body[i][j].position)
self.creature_parts[3*i+j, 2] = self.pm_body[i][j].angle
#self.creature_data[:, 2] += dt
@staticmethod
def cleanup():
print('Cleaning up')
sa.delete('creature_parts')
def main():
culture = Culture()
gfx_p = subprocess.Popen(['python', 'main.py'])
running = True
def signal_handler(signal_number, frame):
print('Received signal {} in frame {}'.format(signal_number, frame))
nonlocal running
running = False
signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C to quit')
while running:
culture.update(0.01)
time.sleep(0.01)
if gfx_p.poll() == 0:
break
culture.cleanup()
if __name__ == "__main__":
main()
| brains-on-art/culture | culture_logic.py | Python | mit | 7,076 |
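# Any other process (such as the renderer launched as main.py above) can map
# the same shared block; a minimal consumer sketch:
import SharedArray as sa

parts = sa.attach('creature_parts')   # (max_creatures*3, 12) float32 view
positions = parts[:, :2]              # columns 0-1 hold x/y (see __init__)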
import logging
from copy import copy
from six import iteritems, reraise
import sys
from .base import Singleton
from .utils import APP_DIR, rel, merge
class BaseConfigMixin(dict):
def __setitem__(self, key, value):
self.__dict__[key.lower()] = value
def __getitem__(self, key):
return self.__dict__[key.lower()]
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def get(self, key, default=None):
return self.__dict__.get(key.lower(), default)
def init_config(self):
def is_builtin(k, v):
return k.startswith('__') or k.endswith('__')
def is_callable(k, v):
return callable(v)
for k, v in iteritems(Config.__dict__):
if is_builtin(k, v) or is_callable(k, v):
continue
self[k] = v
def override_config(self, **kwargs):
for k, v in iteritems(kwargs):
orig_v = getattr(self, k, getattr(Config, k))
if orig_v is not None:
if isinstance(orig_v, dict):
v = merge(copy(orig_v), v)
self[k] = v
class Config(BaseConfigMixin, Singleton):
def __init__(self, *a, **kw):
Singleton.__init__(self, *a, **kw)
def init(self, **kwargs):
self.init_config()
self.override_config(**kwargs)
app_dir = APP_DIR
environment = 'dev'
verbose = False
application = {
#'key':
#'token'
#"path": 'https://staging-api.translationexchange.com'
#"cdn_path": "http://trex-snapshots.s3-us-west-1.amazonaws.com"
"path": "https://api.translationexchange.com",
"cdn_path": "http://cdn.translationexchange.com"
}
logger = {
'enabled': True,
'path': rel(APP_DIR, 'tml.log'),
'level': logging.DEBUG
}
api_client = 'tml.api.client.Client'
locale = {
'default': 'en',
'method': 'current_locale',
'subdomain': False,
'extension': False,
'query_param': 'locale'
}
locale_mapping = {
'pt-br': 'pt-BR',
'zh-hans-cn': 'zh-Hans-CN'
}
agent = {
'enabled': True,
'type': 'agent',
'cache': 86400, # timeout every 24 hours
'host': "https://tools.translationexchange.com/agent/stable/agent.min.js",
'force_injection': False # force inject js agent as soon as tml is configured
#'host': "https://tools.translationexchange.com/agent/staging/agent.min.js"
}
data_preprocessors = ()
env_generators = ('tml.tools.viewing_user.get_viewing_user',)
cache = {
'enabled': False,
#'adapter': 'file',
#'path': 'a/b/c/snapshot.tar.gz'
}
default_source = "index"
context_class = None # just for testing purpose
context_rules = {
'number': {'variables': {}},
'gender': {
'variables': {
'@gender': 'gender',
'@size': lambda lst: len(lst)
}
},
'genders': {
'variables': {
'@genders': lambda lst: [u['gender'] if hasattr(u, 'items') else getattr(u, 'gender') for u in lst]
}
},
'date': {'variables': {}},
'time': {'variables': {}},
'list': {
'variables': {
'@count': lambda lst: len(lst)
}
}
}
localization = {
'default_day_names' : ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"],
'default_abbr_day_names' : ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
'default_month_names' : ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"],
'default_abbr_month_names': ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"],
'custom_date_formats' : {
'default' : '%m/%d/%Y', # 07/4/2008
'short_numeric' : '%m/%d', # 07/4
'short_numeric_year' : '%m/%d/%y', # 07/4/08
'long_numeric' : '%m/%d/%Y', # 07/4/2008
'verbose' : '%A, %B %d, %Y', # Friday, July 4, 2008
'monthname' : '%B %d', # July 4
'monthname_year' : '%B %d, %Y', # July 4, 2008
'monthname_abbr' : '%b %d', # Jul 4
'monthname_abbr_year' : '%b %d, %Y', # Jul 4, 2008
'date_time' : '%m/%d/%Y at %H:%M', # 01/03/1010 at 5:30
},
'token_mapping': {
'%a': '{short_week_day_name}',
'%A': '{week_day_name}',
'%b': '{short_month_name}',
'%B': '{month_name}',
'%p': '{am_pm}',
'%d': '{days}',
'%e': '{day_of_month}',
'%j': '{year_days}',
'%m': '{months}',
'%W': '{week_num}',
'%w': '{week_days}',
'%y': '{short_years}',
'%Y': '{years}',
'%l': '{trimed_hour}',
'%H': '{full_hours}',
'%I': '{short_hours}',
'%M': '{minutes}',
'%S': '{seconds}',
'%s': '{since_epoch}'
}
}
translator_options = {
'debug': False,
'debug_format_html': "<span style='font-size:20px;color:red;'>{</span> {$0} <span style='font-size:20px;color:red;'>}</span>",
'debug_format': '{{{{$0}}}}',
'split_sentences': False,
'nodes': {
'ignored': [],
'scripts': ["style", "script", "code", "pre"],
'inline': ["a", "span", "i", "b", "img", "strong", "s", "em", "u", "sub", "sup"],
'short': ["i", "b"],
'splitters': ["br", "hr"]
},
'attributes': {
'labels': ["title", "alt"]
},
'name_mapping': {
'b': 'bold',
'i': 'italic',
'a': 'link',
'img': 'picture'
},
'data_tokens': {
'special': {
'enabled': True,
'regex': '(&[^;]*;)'
},
'date': {
'enabled': True,
'formats': [
['((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d+,\s+\d+)', "{month} {day}, {year}"],
['((January|February|March|April|May|June|July|August|September|October|November|December)\s+\d+,\s+\d+)', "{month} {day}, {year}"],
['(\d+\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec),\s+\d+)', "{day} {month}, {year}"],
['(\d+\s+(January|February|March|April|May|June|July|August|September|October|November|December),\s+\d+)', "{day} {month}, {year}"]
],
'name': 'date'
},
'rules': [
{'enabled': True, 'name': 'time', 'regex': '(\d{1,2}:\d{1,2}\s+([A-Z]{2,3}|am|pm|AM|PM)?)'},
{'enabled': True, 'name': 'phone', 'regex': '((\d{1}-)?\d{3}-\d{3}-\d{4}|\d?\(\d{3}\)\s*\d{3}-\d{4}|(\d.)?\d{3}.\d{3}.\d{4})'},
{'enabled': True, 'name': 'email', 'regex': '([-a-z0-9~!$%^&*_=+}{\'?]+(\.[-a-z0-9~!$%^&*_=+}{\'?]+)*@([a-z0-9_][-a-z0-9_]*(\.[-a-z0-9_]+)*\.(aero|arpa|biz|com|coop|edu|gov|info|int|mil|museum|name|net|org|pro|travel|io|mobi|[a-z][a-z])|([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}))(:[0-9]{1,5})?)'},
{'enabled': True, 'name': 'price', 'regex': '(\$\d*(,\d*)*(\.\d*)?)'},
{'enabled': True, 'name': 'fraction', 'regex': '(\d+\/\d+)'},
{'enabled': True, 'name': 'num', 'regex': '\\b(\d+(,\d*)*(\.\d*)?%?)\\b'}
]
}
}
# memcached
#'cache': {
#'enabled': True,
#'adapter': 'memcached',
#'backend': 'default',
# 'namespace': 'foody'
#},
version_check_interval = 3600
source_separator = '@:@'
strict_mode = False
supported_tr_opts = ('source',
'target_locale',)
tml_cookie = 'trex_%s'
decorator_class = "html"
@property
def default_locale(self):
return self.locale['default']
def get_locale(self, locale):
if not locale:
return self.default_locale
return self.locale_mapping.get(locale, locale)
def cache_enabled(self):
return self['cache'].get('enabled', False)
def application_key(self):
return self['application'].get('key', 'current')
def access_token(self, default=None):
return self['application'].get('access_token', default)
    def api_host(self):
        return self.application['path']
def cdn_host(self):
return self.application['cdn_path']
def agent_host(self):
return self.agent['host']
def is_interactive_mode(self):
return False
def get_custom_date_format(self, format):
return self.localization['custom_date_formats'][format]
def strftime_symbol_to_token(self, symbol):
return self.localization['token_mapping'].get(symbol, None)
def get_abbr_day_name(self, index):
return self.localization['default_abbr_day_names'][index]
def get_day_name(self, index):
return self.localization['default_day_names'][index]
def get_abbr_month_name(self, index):
return self.localization['default_abbr_month_names'][index]
def get_month_name(self, index):
return self.localization['default_month_names'][index]
def handle_exception(self, exc):
if self.strict_mode:
reraise(exc.__class__, exc, sys.exc_info()[2])
else:
pass # silent (logged in tml.py)
def nested_value(self, hash_value, key, default_value=None):
parts = key.split('.')
for part in parts:
if not hash_value.get(part, None):
return default_value
hash_value = hash_value.get(part)
return hash_value
def translator_option(self, key):
return self.nested_value(self.translator_options, key)
CONFIG = Config.instance()
def configure(**kwargs):
global CONFIG
if kwargs:
CONFIG.override_config(**kwargs)
return CONFIG
| translationexchange/tml-python | tml/config.py | Python | mit | 10,511 |
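# Minimal configuration sketch (the application key is a placeholder):
from tml.config import configure

conf = configure(application={'key': 'YOUR-APP-KEY'},
                 locale={'default': 'fr'})
# overrides are merged into the defaults, so:
assert conf.default_locale == 'fr'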
from ..osid import records as osid_records
class HierarchyRecord(osid_records.OsidRecord):
"""A record for a ``Hierarchy``.
The methods specified by the record type are available through the
underlying object.
"""
class HierarchyQueryRecord(osid_records.OsidRecord):
"""A record for a ``HierarchyQuery``.
The methods specified by the record type are available through the
underlying object.
"""
class HierarchyFormRecord(osid_records.OsidRecord):
"""A record for a ``HierarchyForm``.
The methods specified by the record type are available through the
underlying object.
"""
class HierarchySearchRecord(osid_records.OsidRecord):
"""A record for a ``HierarchySearch``.
The methods specified by the record type are available through the
underlying object.
"""
| birdland/dlkit-doc | dlkit/hierarchy/records.py | Python | mit | 848 |
"""
homeassistant.components.device_tracker.tplink
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a TP-Link router for device
presence.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.tplink.html
"""
import base64
import logging
from datetime import timedelta
import re
import threading
import requests
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
def get_scanner(hass, config):
""" Validates config and returns a TP-Link scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = Tplink3DeviceScanner(config[DOMAIN])
if not scanner.success_init:
scanner = Tplink2DeviceScanner(config[DOMAIN])
if not scanner.success_init:
scanner = TplinkDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class TplinkDeviceScanner(object):
"""
This class queries a wireless router running TP-Link firmware
for connected devices.
"""
def __init__(self, config):
host = config[CONF_HOST]
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_macs = re.compile('[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}-' +
'[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}')
self.host = host
self.username = username
self.password = password
self.last_results = {}
self.lock = threading.Lock()
self.success_init = self._update_info()
def scan_devices(self):
"""
        Scans for new devices and returns a list containing found device ids.
"""
self._update_info()
return self.last_results
# pylint: disable=no-self-use
def get_device_name(self, device):
"""
The TP-Link firmware doesn't save the name of the wireless device.
"""
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
Ensures the information from the TP-Link router is up to date.
Returns boolean if scanning successful.
"""
with self.lock:
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/userRpm/WlanStationRpm.htm'.format(self.host)
referer = 'http://{}'.format(self.host)
page = requests.get(url, auth=(self.username, self.password),
headers={'referer': referer})
result = self.parse_macs.findall(page.text)
if result:
self.last_results = [mac.replace("-", ":") for mac in result]
return True
return False
class Tplink2DeviceScanner(TplinkDeviceScanner):
"""
    This class queries a wireless router running a newer version of TP-Link
firmware for connected devices.
"""
def scan_devices(self):
"""
        Scans for new devices and returns a list containing found device ids.
"""
self._update_info()
return self.last_results.keys()
# pylint: disable=no-self-use
def get_device_name(self, device):
"""
        Returns the name of the given device, or None if it is unknown.
"""
return self.last_results.get(device)
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
Ensures the information from the TP-Link router is up to date.
Returns boolean if scanning successful.
"""
with self.lock:
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/data/map_access_wireless_client_grid.json' \
.format(self.host)
referer = 'http://{}'.format(self.host)
# Router uses Authorization cookie instead of header
# Let's create the cookie
username_password = '{}:{}'.format(self.username, self.password)
b64_encoded_username_password = base64.b64encode(
username_password.encode('ascii')
).decode('ascii')
cookie = 'Authorization=Basic {}' \
.format(b64_encoded_username_password)
response = requests.post(url, headers={'referer': referer,
'cookie': cookie})
try:
result = response.json().get('data')
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct.")
return False
if result:
self.last_results = {
device['mac_addr'].replace('-', ':'): device['name']
for device in result
}
return True
return False
class Tplink3DeviceScanner(TplinkDeviceScanner):
"""
This class queries the Archer C9 router running version 150811 or higher
of TP-Link firmware for connected devices.
"""
def __init__(self, config):
self.stok = ''
self.sysauth = ''
super(Tplink3DeviceScanner, self).__init__(config)
def scan_devices(self):
"""
        Scans for new devices and returns a list containing found device ids.
"""
self._update_info()
return self.last_results.keys()
# pylint: disable=no-self-use
def get_device_name(self, device):
"""
The TP-Link firmware doesn't save the name of the wireless device.
We are forced to use the MAC address as name here.
"""
return self.last_results.get(device)
def _get_auth_tokens(self):
"""
Retrieves auth tokens from the router.
"""
_LOGGER.info("Retrieving auth tokens...")
url = 'http://{}/cgi-bin/luci/;stok=/login?form=login' \
.format(self.host)
referer = 'http://{}/webpages/login.html'.format(self.host)
# if possible implement rsa encryption of password here
response = requests.post(url,
params={'operation': 'login',
'username': self.username,
'password': self.password},
headers={'referer': referer})
try:
self.stok = response.json().get('data').get('stok')
_LOGGER.info(self.stok)
regex_result = re.search('sysauth=(.*);',
response.headers['set-cookie'])
self.sysauth = regex_result.group(1)
_LOGGER.info(self.sysauth)
return True
except ValueError:
_LOGGER.error("Couldn't fetch auth tokens!")
return False
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
Ensures the information from the TP-Link router is up to date.
Returns boolean if scanning successful.
"""
with self.lock:
if (self.stok == '') or (self.sysauth == ''):
self._get_auth_tokens()
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/cgi-bin/luci/;stok={}/admin/wireless?form=statistics' \
.format(self.host, self.stok)
referer = 'http://{}/webpages/index.html'.format(self.host)
response = requests.post(url,
params={'operation': 'load'},
headers={'referer': referer},
cookies={'sysauth': self.sysauth})
try:
json_response = response.json()
if json_response.get('success'):
result = response.json().get('data')
else:
if json_response.get('errorcode') == 'timeout':
_LOGGER.info("Token timed out. "
"Relogging on next scan.")
self.stok = ''
self.sysauth = ''
return False
else:
_LOGGER.error("An unknown error happened "
"while fetching data.")
return False
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct.")
return False
if result:
self.last_results = {
device['mac'].replace('-', ':'): device['mac']
for device in result
}
return True
return False
| pottzer/home-assistant | homeassistant/components/device_tracker/tplink.py | Python | mit | 9,226 |
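# Home Assistant normally builds this config from YAML; invoked directly the
# factory looks roughly like this (host and credentials are placeholders):
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.components.device_tracker.tplink import get_scanner

config = {DOMAIN: {CONF_HOST: '192.168.0.1',
                   CONF_USERNAME: 'admin',
                   CONF_PASSWORD: 'password'}}
scanner = get_scanner(None, config)
if scanner is not None:
    print(scanner.scan_devices())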
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2019 The Bitcoin Core developers
# Copyright 2015-2019 The Auroracoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for auroracoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "auroracoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, auroracoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
| aurarad/auroracoin | test/util/auroracoin-util-test.py | Python | mit | 6,651 |
from ..models import Job
import datetime
class JobContainer():
def __init__(self):
self.organization = None
self.title = None
self.division = None
self.date_posted = None
self.date_closing = None
self.date_collected = None
self.url_detail = None
self.salary_waged = None
self.salary_amount = None
self.region = None
def is_unique(self):
""" Checks whether job (denoted by URL) already exists in DB.
Remember to use this function before doing any intense parsing operations.
"""
if not self.url_detail:
raise KeyError(
"Queried record uniqueness before detail URL set: {}".format(self))
else:
if len(Job.objects.filter(url_detail=self.url_detail)) == 0:
return True
else:
# print("Job already exists in DB: {}".format(self.url_detail))
return False
def cleanup(self):
self.title = self.title.title() if self.title.isupper() else self.title
        self.salary_amount = 0 if self.salary_amount is None else self.salary_amount
        # totally arbitrary amount
        self.salary_waged = self.salary_amount < 5000
self.date_collected = datetime.date.today()
def validate(self):
field_dict = self.__dict__
attributes = {
k: v for k, v in field_dict.items() if not k.startswith("_")}
for k, v in attributes.items():
            if v is None:
raise KeyError(
"Job {} was missing {}".format(self.url_detail, k))
def save(self):
""" Save job to DB, after final checks.
"""
if not self.is_unique(): # failsafe in case we forgot to check this earlier.
print(
"{} tried to save a job hat is not unique!".format(self.organization))
return
self.cleanup()
try:
self.validate()
except KeyError as err:
print("|| EXCEPTION ", err)
return
print("Saved job to DB: {}".format(self))
        j = Job(organization=self.organization, title=self.title,
                division=self.division, date_posted=self.date_posted,
                date_closing=self.date_closing, url_detail=self.url_detail,
                salary_waged=self.salary_waged, salary_amount=self.salary_amount,
                region=self.region, date_collected=self.date_collected)
try:
j.save()
except Exception as err:
print("|| Exception ", err)
def __str__(self):
return "{} at {}".format(self.title, self.organization)
| rgscherf/gainful2 | parsing/parsinglib/jobcontainer.py | Python | mit | 2,667 |
import unittest
from tweet_dns.sources.source_base import SourceBase
class SourceBaseTest(unittest.TestCase):
def test_regex(self):
text = """
1.1.1.1
192.168.0.1
127.0.0.1
255.255.255.255
256.
1.1..1
1.200.3.4
"""
correct_ips = ['1.1.1.1', '192.168.0.1', '127.0.0.1', '255.255.255.255', '1.200.3.4']
self.assert_(list(SourceBase._search_for_ips(text)) == correct_ips)
with self.assertRaises(RuntimeError):
SourceBase().get()
| AstromechZA/TweetDNS | tests/sources/source_base_test.py | Python | mit | 579 |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet resends transactions periodically."""
from collections import defaultdict
import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import ToHex
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until
class P2PStoreTxInvs(P2PInterface):
def __init__(self):
super().__init__()
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
# Store how many times invs have been received for each tx.
for i in message.inv:
if i.type == 1:
# save txid
self.tx_invs_received[i.hash] += 1
class ResendWalletTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0] # alias
node.add_p2p_connection(P2PStoreTxInvs())
self.log.info("Create a new transaction and wait until it's broadcast")
txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
# Wallet rebroadcast is first scheduled 1 sec after startup (see
# nNextResend in ResendWalletTransactions()). Sleep for just over a
# second to be certain that it has been called before the first
# setmocktime call below.
time.sleep(1.1)
# Can take a few seconds due to transaction trickling
wait_until(lambda: node.p2p.tx_invs_received[txid] >= 1, lock=mininode_lock)
# Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
node.add_p2p_connection(P2PStoreTxInvs())
self.log.info("Create a block")
# Create and submit a block without the transaction.
# Transactions are only rebroadcast if there has been a block at least five minutes
# after the last time we tried to broadcast. Use mocktime and give an extra minute to be sure.
block_time = int(time.time()) + 6 * 60
node.setmocktime(block_time)
block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockchaininfo()['blocks']), block_time)
block.nVersion = 3
block.rehash()
block.solve()
node.submitblock(ToHex(block))
# Transaction should not be rebroadcast
node.p2ps[1].sync_with_ping()
assert_equal(node.p2ps[1].tx_invs_received[txid], 0)
self.log.info("Transaction should be rebroadcast after 30 minutes")
# Use mocktime and give an extra 5 minutes to be sure.
rebroadcast_time = int(time.time()) + 41 * 60
node.setmocktime(rebroadcast_time)
wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock)
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
| afk11/bitcoin | test/functional/wallet_resendwallettransactions.py | Python | mit | 3,194 |
from BeautifulSoup import BeautifulSoup as b
from collections import Counter
import urllib2, numpy
import matplotlib.pyplot as plt
response = urllib2.urlopen('http://en.wikipedia.org/wiki/List_of_Question_Time_episodes')
html = response.read()
soup = b(html)
people = []
tables = soup.findAll('table','wikitable')[2:] #First two tables are other content
year_headers = soup.findAll('h2')[2:-4] # Likewise with headers
years = []
for year in year_headers:
spans = year.findAll('span')
years.append(int(spans[0].text))
for i, table in enumerate(tables[-10:]):
print i
for row in table.findAll('tr'):
cols = row.findAll('td')
if len(cols) >= 3:
names = cols[2]
nstring = names.getText().split(',')
for name in nstring:
people.append(name)
else:
continue
counts = Counter(people)
order = numpy.argsort(counts.values())
names = numpy.array(counts.keys())[order][::-1]
appearances = numpy.array(counts.values())[order][::-1]
N = 20
app_percentage = (appearances[:N] / float(numpy.sum(appearances[:N]))) * 100
index = numpy.arange(N)+0.25
bar_width = 0.5
"""
PLOT THAT SHIT
"""
Fig = plt.figure(figsize=(10,6))
Ax = Fig.add_subplot(111)
Apps = Ax.bar(index,app_percentage,bar_width, color='dodgerblue',alpha=0.8,linewidth=0)
Ax.set_xticks(index+ 0.5*bar_width)
Ax.set_xticklabels(names[:N],rotation=90)
Ax.set_ylabel('Appearance Percentage')
amin,amax = numpy.min(app_percentage), numpy.max(app_percentage)
def autolabel(Bars):
# attach some text labels
for Bar in Bars:
height = Bar.get_height()
Ax.text(Bar.get_x()+Bar.get_width()/2., 1.03*height, '%.1f'%float(height),
ha='center', va='bottom',fontsize=9)
autolabel(Apps)
Ax.set_ylim([amin-1,amax+1])
Ax.set_title('Top '+str(N)+' QT guests')
Fig.subplots_adjust(bottom=0.26,right=0.95,left=0.07)
Fig.savefig('QTappearances.png',fmt='png')
plt.show() | dunkenj/DimbleData | QTstats.py | Python | mit | 1,981 |
# -*- coding: utf-8 -*-
"""
Utils has nothing to do with models and views.
"""
from datetime import datetime
from flask import current_app
def get_current_time():
return datetime.utcnow()
def format_date(value, format='%Y-%m-%d %H:%M:%S'):
return value.strftime(format)
def get_resource_as_string(name, charset='utf-8'):
with current_app.open_resource(name) as f:
return f.read().decode(charset)
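# Hedged usage sketch (values below are illustrative assumptions, not
# from the source):
#   format_date(get_current_time())              # -> e.g. '2016-05-01 09:30:00'
#   get_resource_as_string('static/js/app.js')   # inline a bundled asset,
#   # e.g. registered as a Jinja global so templates can embed the file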
| vovantics/flask-bluebone | app/utils.py | Python | mit | 427 |
# Copyright (c) 2016 kamyu. All rights reserved.
#
# Google Code Jam 2016 Round 1A - Problem B. Rank and File
# https://code.google.com/codejam/contest/4304486/dashboard#s=p1
#
# Time: O(N^2)
# Space: O(N^2), at most N^2 numbers in the Counter
#
from collections import Counter
def rank_and_file():
N = input()
cnt = Counter()
for _ in xrange(2 * N - 1):
cnt += Counter(list(raw_input().strip().split()))
file = []
for k, v in cnt.iteritems():
# The count of the missing number must be odd.
if v % 2 == 1:
file.append(k)
# The order of the missing numbers must be sorted.
file.sort(key=int)
return " ".join(file)
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, rank_and_file())
| kamyu104/GoogleCodeJam-2016 | Round 1A/rank-and-file.py | Python | mit | 774 |
import os
import numpy as np
import pandas as pd
import patools.packing as pck
class Trial:
    def __init__(self, directory=None, nbs=False):
        # resolve the default at call time, not at import time
        if directory is None:
            directory = os.getcwd()
        self.packings = self.loadTrials(directory, nbs)
self.df = pd.DataFrame(index=self.packings.keys())
self._getSphereRads()
self._getParticleRads()
def __len__(self):
return sum([len(packing) for packing in self.packings.values()])
def _getSphereRads(self):
rads = {}
for key, val in self.packings.items():
rads[key] = val.sphereRad
radS = pd.Series(rads, name='rad')
self.df['rad'] = radS
def _getParticleRads(self):
        # use a distinct local name so the module alias ``pck`` isn't shadowed
        packing = self.packings[list(self.packings.keys())[0]]
        self.bigRad = packing.bigRad
        self.littleRad = packing.littleRad
def loadTrials(self, directory, nbs=False):
"""
Loads a set of packings corresponding to one parameter set. The
subdirectories should contain packings files themselves.
ParamDirectory -> Trial Subdirs -> Packing Files
"""
subdirs = os.listdir(directory)
trialPackings = {}
for trial in subdirs:
trialDir = os.path.join(directory, trial)
if not os.path.isdir(trialDir): # Only look in dirs
continue
            newPacking = pck.Packing(trialDir, nbs=nbs)
if len(newPacking) <= 1: # Remove broken packings
continue
trialPackings[trial] = newPacking
return trialPackings
def calculatePF_all(self):
"""
Calculates the packing fraction for each packing in a trial. The packing
fractions for each trial are stored in a dataframe. This function is
most often accessed from the dataset object, where different statistics
of the packing fractions are reported, such as the mean, median, max, or
min.
"""
pfs = {}
for key, val in self.packings.items():
pfs[key] = val.calculatePF()
pfS = pd.Series(pfs, name='pf')
self.df['pf'] = pfS
return pfs
def calculateOP_all(self, n=6):
"""
Calculates the n-atic order parameter for each packing in a trial. The
average order parameter for a packing is stored in a dataframe. This
function is most often accessed from the dataset object, where different
        statistics of the order parameters are reported, such as the mean,
median, max, or min.
"""
ops = {}
for key, val in self.packings.items():
val.calculateOP(n)
ops[key] = val.op[n]
opS = pd.Series(ops, name=str(n) + '-atic')
self.df[str(n) + '-atic'] = opS
return opS
def calculateSegOP_all(self, n=6):
"""
Calculates the segregated n-atic order parameter for each packing in a
trial. The average order parameter for big-big and little-little for a
packing is stored in a dataframe. This function is most often accessed
        from the dataset object, where different statistics of the order
        parameters are reported, such as the mean, median, max, or min.
"""
opsBig = {}
opsLittle = {}
for key, val in self.packings.items():
val.calculateSegOP(n)
opsBig[key] = val.ops[str(n) + 'B']
opsLittle[key] = val.ops[str(n) + 'L']
opSBig = pd.Series(opsBig, name=str(n) + '-aticBig')
opSLittle = pd.Series(opsLittle, name=str(n) + '-aticLittle')
self.df[str(n) + '-aticBig'] = opSBig
self.df[str(n) + '-aticLittle'] = opSLittle
def calculateDFG_all(self):
"""
Calculuates the defect graph size for all of the packings in a given
trial.
"""
dfgs = {}
for key, val in self.packings.items():
val.calculateDefectGraphSize()
dfgs[key] = val.dfG
dfgS = pd.Series(dfgs, name='DFGS')
self.df['DFGS'] = dfgS
return dfgS
def calculateDFF_all(self):
"""
Calculates the defect fraction for all of the packings in a given trial.
"""
dffs = {}
for key, val in self.packings.items():
val.calculateDefectFraction()
dffs[key] = val.dfN
dffS = pd.Series(dffs, name='DFF')
self.df['DFF'] = dffS
return dffS
def calculateCM_all(self):
"""
Calculates the coordination matrix for all of the packings in a given
trial.
"""
cmsbb = {}; cmsbl = {}; cmslb = {}; cmsll = {}
for key, val in self.packings.items():
val.calculateCoordinationMatrix()
cmsbb[key] = val.cm[0]
cmsbl[key] = val.cm[1]
cmslb[key] = val.cm[2]
cmsll[key] = val.cm[3]
self.df['BB'] = pd.Series(cmsbb)
self.df['BL'] = pd.Series(cmsbl)
self.df['LB'] = pd.Series(cmslb)
self.df['LL'] = pd.Series(cmsll)
def calculateRDF_all(self, nBins=50, nTestPts=0):
"""
Computes the RDF over a full trial of one parameter.
"""
self.gsBB = np.zeros([len(self.df), nBins])
self.gsBL = np.zeros([len(self.df), nBins])
self.gsLB = np.zeros([len(self.df), nBins])
self.gsLL = np.zeros([len(self.df), nBins])
empty = []
for i, val in enumerate(self.packings.values()):
val.calculateRadialDF(nBins, nTestPts)
try:
self.gsBB[i] = val.gsBB
self.gsBL[i] = val.gsBL
self.gsLB[i] = val.gsLB
self.gsLL[i] = val.gsLL
except AttributeError:
empty.append(i)
pass
self.gsBB = np.delete(self.gsBB, empty, axis=0)
self.gsBL = np.delete(self.gsBL, empty, axis=0)
self.gsLB = np.delete(self.gsLB, empty, axis=0)
self.gsLL = np.delete(self.gsLL, empty, axis=0)
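# Hedged usage sketch (the directory layout and column names are assumed
# from the methods above, not stated in the source):
#   trial = Trial('/path/to/param_dir')   # param dir -> trial subdirs -> packings
#   trial.calculatePF_all()
#   print(trial.df['pf'].mean())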
| amas0/patools | patools/trial.py | Python | mit | 5,966 |
txt = "the quick brown fox jumped over thethe lazy dog"
txt2 = txt.replace("the","a")
print txt
print txt2
| treeform/pystorm | tests/strings/replace.py | Python | mit | 110 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
]
ZMQ_SCRIPTS = [
# ZMQ test can only be run if bitcoin was built with zmq-enabled.
# call test_runner.py with -nozmq to explicitly exclude these tests.
'zmq_test.py']
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
ALL_SCRIPTS = BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude. Do not include the .py extension in the name.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
args, unknown_args = parser.parse_known_args()
# Create a set to store arguments and create the passon string
tests = set(arg for arg in unknown_args if arg[:2] != "--")
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
config.read_file(open(os.path.dirname(__file__) + "/config.ini"))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if enable_zmq:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Use -nozmq to run without the ZMQ tests."
"To run zmq tests, see dependency info in /test/README.md.")
raise
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
test_list = [t for t in ALL_SCRIPTS if
(t in tests or re.sub(".py$", "", t) in tests)]
else:
# No individual tests have been specified. Run base tests, and
# optionally ZMQ tests and extended tests.
test_list = BASE_SCRIPTS
if enable_zmq:
test_list += ZMQ_SCRIPTS
if args.extended:
test_list += EXTENDED_SCRIPTS
# TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime
        # (for parallel running efficiency). This combined list is no
        # longer sorted.
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
for exclude_test in args.exclude.split(','):
if exclude_test + ".py" in test_list:
test_list.remove(exclude_test + ".py")
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]):
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s/test/cache" % build_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
#Run Tests
job_queue = TestHandler(jobs, tests_dir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
all_passed = all(map(lambda test_result: test_result.status == "Passed", test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.status != "Failed"
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("The following scripts are not being run:" + str(missed_tests))
print("Check the test lists in test_runner.py")
sys.exit(1)
class RPCCoverage(object):
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| jimmysong/bitcoin | test/functional/test_runner.py | Python | mit | 17,927 |
class Solution(object):
    def findSubstring(self, s, words):
        """
        :type s: str
        :type words: List[str]
        :rtype: List[int]
        """
        # Minimal brute-force sketch completing the empty stub (assumes
        # all words share one length, as the problem statement says).
        from collections import Counter
        if not s or not words:
            return []
        n, k = len(words[0]), len(words)
        need = Counter(words)
        return [i for i in range(len(s) - n * k + 1)
                if Counter(s[j:j + n] for j in range(i, i + n * k, n)) == need]
| xingjian-f/Leetcode-solution | 30. Substring with Concatenation of All Words.py | Python | mit | 173 |
from functools import wraps
import os
from flask import request
from werkzeug.utils import redirect
ssl_required_flag = os.environ.get('SSL_REQUIRED', False) == 'True'
def ssl_required(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
if ssl_required_flag and not request.is_secure:
return redirect(request.url.replace("http://", "https://"))
return fn(*args, **kwargs)
return decorated_view
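# Hedged usage sketch (the route and ``app`` object are assumptions):
#   @app.route('/checkout')
#   @ssl_required
#   def checkout():
#       return 'redirected to https when SSL_REQUIRED=True'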
| hectorbenitez/flask-heroku | flas/decorators.py | Python | mit | 408 |
# -*- coding: utf-8 -*-
"""
Setuptools script for the xbee-helper project.
"""
import os
from textwrap import fill, dedent
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
def required(fname):
return open(
os.path.join(
os.path.dirname(__file__), fname
)
).read().split('\n')
setup(
name="xbee-helper",
version="0.0.7",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"*.ez_setup",
"*.ez_setup.*",
"ez_setup.*",
"ez_setup",
"*.examples",
"*.examples.*",
"examples.*",
"examples"
]
),
scripts=[],
entry_points={},
include_package_data=True,
setup_requires='pytest-runner',
tests_require='pytest',
install_requires=required('requirements.txt'),
test_suite='pytest',
zip_safe=False,
# Metadata for upload to PyPI
author='Ellis Percival',
author_email="[email protected]",
description=fill(dedent("""\
This project offers a high level API to an XBee device running an
up-to-date version of the ZigBee firmware. It builds upon the existing
XBee project by abstracting more functionality into methods.
""")),
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Communications",
"Topic :: Home Automation",
"Topic :: Software Development :: Embedded Systems",
"Topic :: System :: Networking"
],
license="MIT",
keywords="",
url="https://github.com/flyte/xbee-helper"
)
| flyte/xbee-helper | setup.py | Python | mit | 1,972 |
from .base import KaffeError
from .core import GraphBuilder, DataReshaper, NodeMapper
from . import tensorflow
| polltooh/FineGrainedAction | nn/kaffe/__init__.py | Python | mit | 111 |
#!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
    try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
    # Log in with the given credentials
try:
con.login(credential)
except:
print('Login failed')
def add_network_set(net, name, networks, minbw, maxbw):
nset = []
maxbw = int(maxbw * 1000)
minbw = int(minbw * 1000)
if networks:
enets = net.get_enet_networks()
for enet in enets:
if enet['name'] in networks:
nset.append(enet['uri'])
nset = net.create_networkset(name, networkUris=nset,
typicalBandwidth=minbw,
maximumBandwidth=maxbw)
if 'connectionTemplateUri' in nset:
print('\n\nName: ', nset['name'])
print('Type: ', nset['type'])
print('Description: ', nset['description'])
print('State: ', nset['state'])
print('Status: ', nset['status'])
print('Created: ', nset['created'])
print('Uri: ', nset['uri'])
print('networkUris: ')
        for net_uri in nset['networkUris']:
            print('\t\t', net_uri)
else:
pprint(nset)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Define new Network Set
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
    Proxy (host:port) format''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HP OneView Authorized Login Domain''')
parser.add_argument('-n', dest='network_set_name', required=True,
help='''
Name of the network set''')
parser.add_argument('-l', dest='list_of_networks', required=False,
nargs='+',
help='''
    List of network names to add to the network set, separated by spaces.
    For example:
    -l "Net One" "Net Two" "Net Three"''')
parser.add_argument('-b', dest='prefered_bandwidth', type=float,
required=False, default=2.5,
help='''
Typical bandwidth between .1 and 20 Gb/s''')
parser.add_argument('-m', dest='max_bandwidth', type=float, required=False,
default=10,
help='''
Maximum bandwidth between .1 and 20 Gb/s''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
net = hpov.networking(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
if args.prefered_bandwidth < .1 or args.prefered_bandwidth > 20:
        print('Error, preferred bandwidth must be between .1 and 20 Gb/s')
sys.exit()
if args.max_bandwidth < .1 or args.max_bandwidth > 20:
print('Error, max bandwidth must be between .1 and 20 Gb/s')
sys.exit()
if args.prefered_bandwidth > args.max_bandwidth:
        print('Error, preferred bandwidth must be less than or equal '
              'to the maximum bandwidth')
sys.exit()
add_network_set(net, args.network_set_name, args.list_of_networks,
args.prefered_bandwidth, args.max_bandwidth)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| ufcg-lsd/python-hpOneView | examples/scripts/define-network-set.py | Python | mit | 6,316 |
import argparse
import io
import unittest
import mock
import time
from imagemounter.cli import AppendDictAction
class AppendDictActionTest(unittest.TestCase):
def test_with_comma(self):
parser = argparse.ArgumentParser()
parser.add_argument('--test', action=AppendDictAction)
self.assertDictEqual(parser.parse_args(["--test", "x"]).test, {"*": 'x'})
self.assertDictEqual(parser.parse_args(["--test", "y=x"]).test, {"y": 'x'})
self.assertDictEqual(parser.parse_args(["--test", "y=x,z=a"]).test, {"y": 'x', "z": 'a'})
self.assertDictEqual(parser.parse_args(["--test", "*=x"]).test, {"*": 'x'})
self.assertDictEqual(parser.parse_args(["--test", "*=x", "--test", "y=a"]).test, {"*": 'x', 'y': 'a'})
self.assertDictEqual(parser.parse_args(["--test", "x=x", "--test", "x=y"]).test, {'x': 'y'})
self.assertDictEqual(parser.parse_args(["--test", "*=x", "--test", "y"]).test, {"*": 'y'})
self.assertDictEqual(parser.parse_args(["--test", "y", "--test", "*=3"]).test, {"*": '3'})
with self.assertRaises(SystemExit):
parser.parse_args(["--test", "y=x,z"])
def test_with_comma_multiple_times(self):
parser = argparse.ArgumentParser()
parser.add_argument('--test', action=AppendDictAction)
self.assertDictEqual(parser.parse_args(["--test", "*=x", "--test", "y=a"]).test, {"*": 'x', 'y': 'a'})
self.assertDictEqual(parser.parse_args(["--test", "x=x", "--test", "x=y"]).test, {'x': 'y'})
self.assertDictEqual(parser.parse_args(["--test", "*=x", "--test", "y"]).test, {"*": 'y'})
self.assertDictEqual(parser.parse_args(["--test", "y", "--test", "*=3"]).test, {"*": '3'})
with self.assertRaises(SystemExit):
parser.parse_args(["--test", "y=x,z", "--test", "x"])
def test_without_comma(self):
parser = argparse.ArgumentParser()
parser.add_argument('--test', action=AppendDictAction, allow_commas=False)
self.assertDictEqual(parser.parse_args(["--test", "x"]).test, {"*": 'x'})
self.assertDictEqual(parser.parse_args(["--test", "y=x"]).test, {"y": 'x'})
self.assertDictEqual(parser.parse_args(["--test", "y=x,z=a"]).test, {"y": 'x,z=a'})
self.assertDictEqual(parser.parse_args(["--test", "*=x"]).test, {"*": 'x'})
self.assertDictEqual(parser.parse_args(["--test", "y=x,z"]).test, {"y": 'x,z'})
def test_without_comma_multiple_times(self):
parser = argparse.ArgumentParser()
parser.add_argument('--test', action=AppendDictAction, allow_commas=False)
self.assertDictEqual(parser.parse_args(["--test", "x", "--test", "y"]).test, {"*": 'y'})
self.assertDictEqual(parser.parse_args(["--test", "y=x", "--test", "x=y"]).test, {"y": 'x', 'x': 'y'})
self.assertDictEqual(parser.parse_args(["--test", "y=x,z=a", "--test", "b=c"]).test, {"y": 'x,z=a', 'b': 'c'})
def test_with_default(self):
parser = argparse.ArgumentParser()
parser.add_argument('--test', action=AppendDictAction, default={"aa": "bb"})
self.assertDictEqual(parser.parse_args(["--test", "x"]).test, {"*": 'x', 'aa': 'bb'}) | jdossett/imagemounter | tests/cli_test.py | Python | mit | 3,178 |
# Import the necessary packages and modules
import matplotlib.pyplot as plt
import numpy as np
# Prepare the data
x = np.linspace(0, 10, 100)
# Plot the data
plt.plot(x, x, label='linear')
# Add a legend
plt.legend()
# Show the plot
plt.show()
print("done")
| vadim-ivlev/STUDY | coding/plot.py | Python | mit | 262 |
#!/usr/bin/env python
##################################################################
# Imports
from __future__ import print_function
from random import random
import codecs
import numpy as np
import sys
##################################################################
# Variables and Constants
ENCODING = "utf-8"
POS = {"gut": None, "gute": None}
NEG = {"schlecht": None}
##################################################################
# Methods
def _get_vec_len(a_vec):
"""Return length of the vector
@param a_vec - vector whose length should be computed
@return vector's length
"""
return np.sqrt(sum([i**2 for i in a_vec]))
def _compute_eucl_distance(a_vec1, a_vec2):
"""Compute Euclidean distance between two vectors
@param a_vec1 - first vector
@param a_vec2 - second vector
@return squared Euclidean distance between two vectors
"""
return sum((a_vec1 - a_vec2)**2)
def compute_distance(a_vecs1, a_vecs2):
"""Compute Euclidean distance between all pairs of vectors
Compute \sum_{p^{+} \in P^{+}}\sum_{p^{-} \in P^{-}}||p^{+} - p^{-}||^{2}
@param a_vecs1 - set of positive vectors
@param a_vecs2 - set of negative vectors
@return squared Euclidean distance between all pairs of vectors
"""
return sum([_compute_eucl_distance(ivec1, ivec2) for ivec1 in a_vecs1 \
for ivec2 in a_vecs2])
def _project_vec(a_vec, a_norm, a_prj_line):
"""Project original vector on projection line
    @param a_vec - vector which should be projected
@param a_norm - square length of projection line
@param a_prj_line - projection line
@return a_vec's projection on a_prj_line
"""
# print("_project_vec: a_vec =", repr(a_vec), file = sys.stderr)
# print("_project_vec: a_prj_line =", repr(a_prj_line), file = sys.stderr)
# print("_project_vec: a_norm =", repr(a_norm), file = sys.stderr)
# print("_project_vec: np.dot() =", repr(np.dot(a_vec, a_prj_line)), file = sys.stderr)
# print("_project_vec: projection =", \
# repr((np.dot(a_vec, a_prj_line) / a_norm) * a_prj_line), file = sys.stderr)
return (np.dot(a_vec, a_prj_line) / a_norm) * a_prj_line
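# Illustrative check of the formula above: projecting [1, 1] onto the
# x-axis [1, 0] (a_norm = 1) gives (1 / 1) * [1, 0] = [1, 0].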
def _project(a_pos_set, a_neg_set, a_prj_line):
"""Project original vector sets on the projection line
@param a_pos_set - set of positive vectors
@param a_neg_set - set of negative vectors
@param a_prj_line - projection line
@return 2-tuple with sets of projected positive and negative vectors
"""
idiv = sum(a_prj_line ** 2)
assert idiv != 0, "Projection vector cannot be zero vector."
vecs1 = [_project_vec(ivec, idiv, a_prj_line) for ivec in a_pos_set]
vecs2 = [_project_vec(ivec, idiv, a_prj_line) for ivec in a_neg_set]
return (vecs1, vecs2)
def _compute_gradient(a_pos_vecs, a_neg_vecs, a_prj_line):
"""Compute gradient of distance function wrt projection line
@param a_pos_vecs - set of positive vectors
@param a_neg_vecs - set of negative vectors
@param a_prj_line - current projection line
@return gradient vector
"""
print("a_prj_line = ", repr(a_prj_line), file=sys.stderr)
# zero-out the gradient vector
dot_prod = diff_vec = None
# prj_squared = a_prj_line ** 2
idiv = 1. # np.float128(sum(a_prj_line ** 2))
idiv_squared = 1. # idiv ** 2
# normalized_prj = a_prj_line / idiv
assert idiv != 0, "Projection vector cannot be zero vector."
gradient = np.array([0 for _ in a_prj_line])
for pos_vec in a_pos_vecs:
for neg_vec in a_neg_vecs:
diff_vec = pos_vec - neg_vec
dot_prod = np.dot(a_prj_line, diff_vec)
print("dot_prod = ", repr(dot_prod), file=sys.stderr)
print("idiv = ", repr(idiv), file=sys.stderr)
print("idiv_squared = ", repr(idiv_squared), file=sys.stderr)
            gradient += dot_prod * (diff_vec - dot_prod * a_prj_line)
# update = multi
# print("0) update =", repr(update), file = sys.stderr)
# update *= (pos_vec * idiv - 2 * np.dot(pos_vec, a_prj_line) * a_prj_line) / idiv_squared + \
# np.dot(pos_vec, a_prj_line) / idiv - \
# (neg_vec * idiv - 2 * np.dot(neg_vec, a_prj_line) * a_prj_line) / idiv_squared - \
# np.dot(neg_vec, a_prj_line) / idiv
# update *= (diff_vec * idiv - 2 * np.dot(a_prj_line, diff_vec) * a_prj_line) * a_prj_line / \
# idiv_squared + np.dot(a_prj_line, diff_vec)/idiv * ones
# print("1) update =", repr(update), file = sys.stderr)
# gradient += update
# since we have a quadratic function, the gradient has coefficient
# two
print("gradient =", repr(gradient), file=sys.stderr)
return 2 * gradient
def find_optimal_prj(a_dim):
"""Find projection line that maximizes distance between vectors
@param a_dim - dimension of vectors
@return 2-tuple with projection line and cost
"""
DELTA = 1e-10 # cost difference
ALPHA = 0.00001 # learning rate
n = 0 # current iteration
max_n = 100000 # maximum number of iterations
inf = float("inf")
ipos = ineg = None
prev_dist = dist = float(inf)
prj_line = np.array([np.float128(1.) for _ in xrange(a_dim)])
# prj_line = np.array([random() for i in xrange(a_dim)])
# gradient = np.array([1 for i in xrange(a_dim)])
while (prev_dist == inf or dist - prev_dist > DELTA) and n < max_n:
prev_dist = dist
# normalize length of projection line
prj_line /= _get_vec_len(prj_line)
# project word vectors on the guessed polarity line
# print("POS = ", repr(POS), file = sys.stderr)
# print("NEG = ", repr(NEG), file = sys.stderr)
ipos, ineg = _project(POS.itervalues(), NEG.itervalues(), prj_line)
# compute distance between posiive and negative vectors
# print("ipos = ", repr(ipos), file = sys.stderr)
# print("ineg = ", repr(ineg), file = sys.stderr)
dist = compute_distance(ipos, ineg)
print("prj_line before = ", prj_line, file = sys.stderr)
print("prev_dist = {:f}".format(prev_dist), file = sys.stderr)
print("dist = {:f}".format(dist), file = sys.stderr)
# update polarity line
prj_line += ALPHA * _compute_gradient(POS.itervalues(), NEG.itervalues(), \
prj_line)
print("prj_line after = ", prj_line, file = sys.stderr)
n += 1
if dist - prev_dist < DELTA:
print("Model converged: delta = {}".format(dist - prev_dist), file = sys.stderr)
return (prj_line, dist)
def parse_vecfile(a_fname):
"""Parse files containing word vectors
@param a_fname - name of the wordvec file
@return \c dimension of the vectors
"""
global POS, NEG
ivec = None
with codecs.open(a_fname, 'r', ENCODING) as ifile:
fnr = True
toks = None
for iline in ifile:
iline = iline.strip()
if fnr:
ndim = int(iline.split()[-1])
fnr = False
continue
elif not iline:
continue
toks = iline.split()
assert (len(toks) - 1) == ndim, "Wrong vector dimension: {:d}".format(\
len(toks) - 1)
if toks[0] in POS:
ivec = np.array([np.float128(i) for i in toks[1:]])
# ivec /= _get_vec_len(ivec)
POS[toks[0]] = ivec
elif toks[0] in NEG:
ivec = np.array([np.float128(i) for i in toks[1:]])
# ivec /= _get_vec_len(ivec)
NEG[toks[0]] = ivec
# prune words for which there were no vectors
POS = {iword: ivec for iword, ivec in POS.iteritems() if ivec is not None}
NEG = {iword: ivec for iword, ivec in NEG.iteritems() if ivec is not None}
return ndim
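# Expected file layout, inferred from the parsing above (values are
# illustrative): a header line ending in the dimension, then one
# "<word> <dim floats>" line per word, e.g.
#   2 3
#   gut 0.1 0.2 0.3
#   schlecht -0.1 0.0 0.2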
def main():
"""Main method for finding optimal projection line
    Prints the squared sum of the distances between positive and
    negative words projected on the line
"""
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument("vec_file", help = "file containing vectors")
args = argparser.parse_args()
ndim = parse_vecfile(args.vec_file)
prj_line, ret = find_optimal_prj(ndim)
print("ret =", str(ret))
##################################################################
# Main
if __name__ == "__main__":
main()
| WladimirSidorenko/SentiLex | scripts/find_prj_line.py | Python | mit | 8,625 |
import _plotly_utils.basevalidators
class ArrayminussrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="arrayminussrc", parent_name="bar.error_y", **kwargs
):
super(ArrayminussrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/bar/error_y/_arrayminussrc.py | Python | mit | 429 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MeiziItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
page_num = scrapy.Field()
img_url = scrapy.Field()
img_md5 = scrapy.Field()
up_vote = scrapy.Field()
down_vote = scrapy.Field()
image_urls = scrapy.Field()
images = scrapy.Field()
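# Hedged population sketch inside a spider callback (values assumed;
# image_urls/images are the conventional pair for scrapy's ImagesPipeline):
#   item = MeiziItem()
#   item['img_url'] = 'http://example.com/pic.jpg'
#   item['image_urls'] = [item['img_url']]
#   yield item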
| JMwill/wiki | notebook/tool/spider/meizi_spider/python_spider/meizi/meizi/items.py | Python | mit | 514 |
""" Command to set maintenance status. """
from django.core.management.base import BaseCommand
import sys
import json
import os
BASE_DIR = os.path.dirname(__file__)
JSON_FILE = os.path.join(BASE_DIR, '../../maintenance_settings.json')
class Command(BaseCommand):
""" Set maintenance status """
@classmethod
def set_maintenance(cls, option):
""" Set maintenance status """
json_data = json.dumps({"DJANGO_MAINTENANCE": option},
indent=4, separators=(',', ': '))
json_file = open(JSON_FILE, 'w')
json_file.write(json_data)
json_file.close()
def handle(self, *args, **kwargs):
""" Handle maintenance command """
# sys.argv mut be equal to 3
if len(sys.argv) != 3:
print 'Invalid number of arguments: {0}'.format(len(sys.argv) - 2)
# sys.argv[2] must have on or off
elif sys.argv[2] != 'on' and sys.argv[2] != 'off':
print 'Invalid maintenance option: {0}'.format(sys.argv[2])
print 'Valid options: on/off'
else:
self.set_maintenance(sys.argv[2])
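# Hedged invocation sketch (command name follows from this module living
# under management/commands/maintenance.py):
#   python manage.py maintenance on
#   python manage.py maintenance off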
| mccricardo/django_maintenance | django_maintenance/management/commands/maintenance.py | Python | mit | 1,052 |
"""Functionality to interact with Google Cloud Platform.
"""
| chapmanb/bcbio-nextgen-vm | bcbiovm/gcp/__init__.py | Python | mit | 61 |
import sys
import pdb
import svgfig
import json
import os
import math
import random
def show_help():
print("Usage: main.py input_file [--silent] [--output=<out.svg>]" +
" [--order=order.txt]")
print("Input file is either a text file containing t u v," +
"or a JSON file where the following properties are available:")
print(" from")
print(" to")
print(" time")
print(" color: to be chosen in " +
"http://www.december.com/html/spec/colorsvg.html")
print("The orderFile contains a list of all nodes to display " +
"in the order of appearance in orderFile.")
def read_argv(argv):
for arg in sys.argv:
if "=" in arg:
content = arg.split("=")
arg_name = content[0].replace("--", "")
argv[arg_name] = content[1]
elif "--" in arg:
arg_name = arg.replace("--", "")
argv[arg_name] = True
def version():
sys.stderr.write("\tLinkStreamViz 1.0 -- Jordan Viard 2015\n")
class idGenerator:
"""generates id"""
def __init__(self):
self.lookUp = dict() # dict[Node] = id
self.idCount = 0
self.reverse = dict() # dict[id] = node
def impose(self, node, id_):
self.lookUp[node] = id_
self.reverse[id_] = node
def contains(self, element):
return element in self.lookUp
def get(self, element):
if element not in self.lookUp:
while self.idCount in self.reverse and self.reverse[self.idCount] != element:
self.idCount += 1
self.lookUp[element] = self.idCount
self.reverse[self.idCount] = element
return self.lookUp[element]
def size(self):
return len(self.lookUp)
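# Illustrative behaviour of the generator above (return values verified
# against the logic, not taken from the source):
#   gen = idGenerator()
#   gen.get('a')   # -> 0
#   gen.get('b')   # -> 1
#   gen.get('a')   # -> 0, cached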
class Link:
def __init__(self, t, u, v, color="black", direction=0, duration=0, duration_color="black"):
self.t = float(t)
self.u = int(min(u, v))
self.v = int(max(u, v))
self.color = color
self.direction = direction
self.duration = duration
self.duration_color = duration_color
@staticmethod
def from_dict(link):
obj = Link(link["time"],
link["from"],
link["to"])
obj.color = link.get("color", "black")
obj.direction = link.get("direction", 0)
obj.duration = float(link.get("duration", 0))
obj.duration_color = link.get("duration_color", "black")
return obj
class LinkStream:
def __init__(self, inputFile, orderFile=""):
self.links = []
self.max_time = 0
self.nodeID = idGenerator()
self.max_label_len = 0
self.g = svgfig.SVG("g")
        self.ppux = 10  # pixels per unit time
if "json" in inputFile:
with open(inputFile, 'r') as inFile:
json_struct = json.loads(inFile.read())
for link_json in json_struct:
link = Link.from_dict(link_json)
self.addNode(link.u)
self.addNode(link.v)
if (link.t + link.duration) > self.max_time:
self.max_time = link.t + link.duration
self.links.append(link)
else:
with open(inputFile, 'r') as inFile:
for line in inFile:
contents = line.split(" ")
t = float(contents[0])
u = int(contents[1])
v = int(contents[2])
d = 0
if len(contents) > 3:
d = float(contents[3])
self.addNode(u)
self.addNode(v)
if t > self.max_time:
self.max_time = t
self.links.append(Link(t, u, v, duration=d))
if orderFile != "":
tmp_nodes = set()
with open(orderFile, 'r') as order:
for i, n in enumerate(order):
node = int(n)
tmp_nodes.add(node)
if self.nodeID.contains(node):
self.nodeID.impose(node, i)
self.nodes.append(node)
else:
print('The node', node, "is not present in the stream")
exit()
for node in self.nodeID.lookUp:
if node not in tmp_nodes:
print('The node', node, "is not present in", orderFile)
exit()
def addNode(self, node):
self.nodeID.get(node)
if self.max_label_len < len(str(node)):
self.max_label_len = len(str(node))
def evaluateOrder(self, order):
distance = 0
for link in self.links:
distance += abs(order[link.u]-order[link.v])
return distance
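
    # findOrder (below) is a simple randomized local search: it repeatedly
    # swaps two rows and keeps a swap only when the total link span measured
    # by evaluateOrder decreases.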
def findOrder(self):
cur_solution = self.nodeID.lookUp
cur_reverse = self.nodeID.reverse
dist = self.evaluateOrder(cur_solution)
sys.stderr.write("Order improved from "+str(dist))
for i in range(0, 10000):
i = random.randint(0, len(cur_solution) - 1)
j = random.randint(0, len(cur_solution) - 1)
cur_reverse[j], cur_reverse[i] = cur_reverse[i], cur_reverse[j]
cur_solution[cur_reverse[j]] = j
cur_solution[cur_reverse[i]] = i
tmp = self.evaluateOrder(cur_solution)
if tmp >= dist:
# re swap to go back.
cur_reverse[j], cur_reverse[i] = cur_reverse[i], cur_reverse[j]
cur_solution[cur_reverse[j]] = j
cur_solution[cur_reverse[i]] = i
else:
dist = tmp
self.nodeID.lookUp = cur_solution
new_order = "new_order.txt"
with open(new_order, "w") as out:
for node in self.nodeID.reverse:
out.write(str(self.nodeID.reverse[node]) + "\n")
sys.stderr.write(" to "+str(dist)+". Order saved in:"+new_order+"\n")
def addDuration(self, origin, duration, color, amplitude=1):
freq = 0.8 # angular frequency
duration = duration * self.ppux
self.g.append(svgfig.SVG("line",
stroke=color,
stroke_opacity=0.8,
stroke_width=1.1,
x1=origin["x"],
y1=origin["y"],
x2=origin["x"]+duration,
y2=origin["y"]))
def draw(self, outputFile):
self.findOrder()
offset = 1.5 * self.ppux
# Define dimensions
label_margin = 5 * self.max_label_len
origleft = label_margin + 1 * self.ppux
right_margin = self.ppux
width = origleft + self.ppux * math.ceil(self.max_time) + right_margin
svgfig._canvas_defaults["width"] = str(width) + 'px'
arrow_of_time_height = 5
height = 5 + 10 * int(self.nodeID.size() + 1) + arrow_of_time_height
svgfig._canvas_defaults["height"] = str(height) + 'px'
origtop = 10
################
# Draw background lines
        for node in self.nodeID.lookUp:
            horizontal_axis = self.ppux * self.nodeID.get(node) + origtop
            self.g.append(svgfig.SVG("text", str(node),
                                     x=str(label_margin),
                                     y=horizontal_axis + 2,
                                     fill="black", stroke_width=0,
                                     text_anchor="end",
                                     font_size="6"))
            self.g.append(svgfig.SVG("line", stroke_dasharray="2,2",
                                     stroke_width=0.5,
                                     x1=str(origleft-5),
                                     y1=horizontal_axis,
                                     x2=width - right_margin,
                                     y2=horizontal_axis))
# Add timearrow
self.g.append(svgfig.SVG("line",
stroke_width=0.5,
x1=self.ppux ,
y1=10*(self.nodeID.size()+1),
x2=width-5,
y2=10*(self.nodeID.size()+1)))
self.g.append(svgfig.SVG("line", stroke_width=0.5,
x1=width-8,
y1=10*(self.nodeID.size()+1)-3,
x2=width-5,
y2=10*(self.nodeID.size()+1)))
self.g.append(svgfig.SVG("line", stroke_width=0.5,
x1=width-8,
y1=10*(self.nodeID.size()+1)+3,
x2=width-5,
y2=10*(self.nodeID.size()+1)))
self.g.append(svgfig.SVG("text", str("Time"),
x=width-19,
y=10*(self.nodeID.size()+1)-3,
fill="black", stroke_width=0,
font_size="6"))
#
# Add time ticks
for i in range(0, int(math.ceil(self.max_time)+1), 5):
x_tick = i * self.ppux + origleft
self.g.append(svgfig.SVG("line",
stroke_width=0.5,
x1=str(x_tick),
y1=10*(self.nodeID.size()+1)-3,
x2=str(x_tick),
y2=10*(self.nodeID.size()+1)+3))
self.g.append(svgfig.SVG("text", str(i),
x=str(x_tick), y=10*(self.nodeID.size()+1)+7,
fill="black", stroke_width=0,
font_size="6"))
for link in self.links:
ts = link.t
node_1 = min(self.nodeID.get(link.u), self.nodeID.get(link.v))
node_2 = max(self.nodeID.get(link.u), self.nodeID.get(link.v))
offset = ts * self.ppux + origleft
y_node1 = 10 * node_1 + origtop
y_node2 = 10 * node_2 + origtop
# Add nodes
self.g.append(svgfig.SVG("circle",
cx=offset, cy=y_node1,
r=1, fill=link.color))
self.g.append(svgfig.SVG("circle",
cx=offset, cy=y_node2,
r=1, fill=link.color))
x = 0.2 * ((10 * node_2 - 10 * node_1) / math.tan(math.pi / 3)) + offset
y = (y_node1 + y_node2) / 2
param_d = "M" + str(offset) + "," + str(y_node1) +\
" C" + str(x) + "," + str(y) + " " + str(x) + "," + str(y) +\
" " + str(offset) + "," + str(y_node2)
self.g.append(svgfig.SVG("path", stroke=link.color,
d=param_d))
self.addDuration({"x": x, "y": (y_node1+y_node2)/2}, link.duration, link.duration_color)
# Save to svg file
viewBoxparam = "0 0 " + str(width) + " " + str(height)
svgfig.canvas(self.g, viewBox=viewBoxparam).save(outputFile)
if __name__ == '__main__':
if len(sys.argv) < 2 or "--help" in sys.argv or "-h" in sys.argv:
show_help()
sys.exit()
if "-v" in sys.argv or "--version" in sys.argv:
version()
exit()
argv = {"order": "", "silent": False}
read_argv(argv)
Links = LinkStream(sys.argv[1], argv["order"])
default_output = os.path.basename(sys.argv[1]).split(".")[0]+".svg"
argv["output"] = argv.get("output", default_output)
Links.draw(argv["output"])
if not argv["silent"]:
sys.stderr.write("Output generated to " + argv["output"] + ".\n")
| JordanV/LinkStreamViz | main.py | Python | mit | 11,963 |
from django.contrib import admin
from player.models import Room, PlaylistTrack
from ordered_model.admin import OrderedModelAdmin
class RoomAdmin(admin.ModelAdmin):
list_display = ('name', 'shuffle', 'current_music', 'can_adjust_volume')
class ItemAdmin(OrderedModelAdmin):
list_display = ('move_up_down_links', 'order', 'track', 'room')
admin.site.register(Room, RoomAdmin)
admin.site.register(PlaylistTrack, ItemAdmin)
| Amoki/Amoki-Music | player/admin.py | Python | mit | 434 |
#!/usr/bin/env python
# Copyright (c) 2002-2003 ActiveState Corp.
# Author: Trent Mick ([email protected])
"""Test suite for which.py."""
import sys
import os
import re
import tempfile
import unittest
import testsupport
#XXX:TODO
# - def test_registry_success(self): ...App Paths setting
# - def test_registry_noexist(self):
# - test all the other options
# - test on linux
# - test the module API
class WhichTestCase(unittest.TestCase):
def setUp(self):
"""Create a temp directory with a couple test "commands".
The temp dir can be added to the PATH, etc, for testing purposes.
"""
# Find the which.py to call.
whichPy = os.path.join(os.path.dirname(__file__),
os.pardir, "which.py")
self.which = sys.executable + " " + whichPy
# Setup the test environment.
self.tmpdir = tempfile.mktemp()
os.makedirs(self.tmpdir)
if sys.platform.startswith("win"):
self.testapps = ['whichtestapp1.exe',
'whichtestapp2.exe',
'whichtestapp3.wta']
else:
self.testapps = ['whichtestapp1', 'whichtestapp2']
for app in self.testapps:
path = os.path.join(self.tmpdir, app)
f = open(path, 'wb')
f.write('\n'.encode('ascii'))
f.close()
os.chmod(path, 0o755)
def tearDown(self):
testsupport.rmtree(self.tmpdir)
def test_opt_h(self):
output, error, retval = testsupport.run(self.which+' --h')
token = 'Usage:'.encode('ascii')
self.failUnless(output.find(token) != -1,
"'%s' was not found in 'which -h' output: '%s' "\
% (token, output))
self.failUnless(retval == 0,
"'which -h' did not return 0: retval=%d" % retval)
def test_opt_help(self):
output, error, retval = testsupport.run(self.which+' --help')
token = 'Usage:'.encode('ascii')
self.failUnless(output.find(token) != -1,
"'%s' was not found in 'which --help' output: '%s' "\
% (token, output))
self.failUnless(retval == 0,
"'which --help' did not return 0: retval=%d" % retval)
def test_opt_version(self):
output, error, retval = testsupport.run(self.which+' --version')
        versionRe = re.compile(r"^which \d+\.\d+\.\d+$".encode('ascii'))
versionMatch = versionRe.search(output.strip())
self.failUnless(versionMatch,
"Version, '%s', from 'which --version' does not "\
"match pattern, '%s'."\
% (output.strip(), versionRe.pattern))
self.failUnless(retval == 0,
"'which --version' did not return 0: retval=%d"\
% retval)
def test_no_args(self):
output, error, retval = testsupport.run(self.which)
self.failUnless(retval == -1,
"'which' with no args should return -1: retval=%d"\
% retval)
def test_one_failure(self):
output, error, retval = testsupport.run(
self.which+' whichtestapp1')
self.failUnless(retval == 1,
"One failure did not return 1: retval=%d" % retval)
def test_two_failures(self):
output, error, retval = testsupport.run(
self.which+' whichtestapp1 whichtestapp2')
self.failUnless(retval == 2,
"Two failures did not return 2: retval=%d" % retval)
def _match(self, path1, path2):
#print "_match: %r =?= %r" % (path1, path2)
if sys.platform.startswith('win'):
path1 = os.path.normpath(os.path.normcase(path1))
path2 = os.path.normpath(os.path.normcase(path2))
path1 = os.path.splitext(path1)[0]
path2 = os.path.splitext(path2)[0]
return path1 == path2
else:
return os.path.samefile(path1, path2)
def test_one_success(self):
os.environ["PATH"] += os.pathsep + self.tmpdir
output, error, retval = testsupport.run(self.which+' -q whichtestapp1')
expectedOutput = os.path.join(self.tmpdir, "whichtestapp1")
self.failUnless(self._match(output.strip(), expectedOutput),
"Output, %r, and expected output, %r, do not match."\
% (output.strip(), expectedOutput))
self.failUnless(retval == 0,
"'which ...' should have returned 0: retval=%d" % retval)
def test_two_successes(self):
os.environ["PATH"] += os.pathsep + self.tmpdir
apps = ['whichtestapp1', 'whichtestapp2']
output, error, retval = testsupport.run(
self.which + ' -q ' + ' '.join(apps))
lines = output.strip().split("\n".encode('ascii'))
for app, line in zip(apps, lines):
expected = os.path.join(self.tmpdir, app)
self.failUnless(self._match(line, expected),
"Output, %r, and expected output, %r, do not match."\
% (line, expected))
self.failUnless(retval == 0,
"'which ...' should have returned 0: retval=%d" % retval)
if sys.platform.startswith("win"):
def test_PATHEXT_failure(self):
os.environ["PATH"] += os.pathsep + self.tmpdir
output, error, retval = testsupport.run(self.which+' whichtestapp3')
self.failUnless(retval == 1,
"'which ...' should have returned 1: retval=%d" % retval)
def test_PATHEXT_success(self):
os.environ["PATH"] += os.pathsep + self.tmpdir
os.environ["PATHEXT"] += os.pathsep + '.wta'
output, error, retval = testsupport.run(self.which+' whichtestapp3')
expectedOutput = os.path.join(self.tmpdir, "whichtestapp3")
self.failUnless(self._match(output.strip(), expectedOutput),
"Output, %r, and expected output, %r, do not match."\
% (output.strip(), expectedOutput))
self.failUnless(retval == 0,
"'which ...' should have returned 0: retval=%d" % retval)
def test_exts(self):
os.environ["PATH"] += os.pathsep + self.tmpdir
output, error, retval = testsupport.run(self.which+' -e .wta whichtestapp3')
expectedOutput = os.path.join(self.tmpdir, "whichtestapp3")
self.failUnless(self._match(output.strip(), expectedOutput),
"Output, %r, and expected output, %r, do not match."\
% (output.strip(), expectedOutput))
self.failUnless(retval == 0,
"'which ...' should have returned 0: retval=%d" % retval)
def suite():
"""Return a unittest.TestSuite to be used by test.py."""
return unittest.makeSuite(WhichTestCase)
if __name__ == "__main__":
unittest.main()
| fsys/which | test/test_which.py | Python | mit | 6,969 |
# Made by Christian Oliveros on 04/10/2017 for MMKF15
# Imports Used
import decimal as d
try:
from .constants import VL, EPSILON, STEP_MAX
except SystemError as e:
from constants import VL, EPSILON, STEP_MAX
class Vector3(object):
"""Class that represents a Vector with 3 coordinates"""
def __init__(self, x=0.0, y=0.0, z=0.0):
super(Vector3, self).__init__()
self.x = d.Decimal(x)
self.y = d.Decimal(y)
self.z = d.Decimal(z)
"""Class Representation"""
def __repr__(self):
return "Vector3(x=%r,y=%r,z=%r)" % (self.x, self.y, self.z)
"""Class String Representation"""
def __str__(self):
return "(%s, %s, %s)" % (str(self.x), str(self.y), str(self.z))
"""Copy this vector in new instance"""
def copy(self):
return Vector3(self.x, self.y, self.z)
"""Get Square Magnitude of vector"""
def sqrMagnitude(self):
dec2 = d.Decimal(2.0)
return self.x**dec2 + self.y**dec2 + self.z**dec2
"""Get Magnitude of vector"""
def magnitude(self):
return self.sqrMagnitude().sqrt()
"""Unary minus"""
def __neg__(self):
return self * -1
"""Unary addition"""
def __pos__(self):
return self
"""Absolute Value, Equivalent to Magnitude"""
def __abs__(self):
return self.magnitude()
"""In place addition"""
def __iadd__(self, other):
if not isinstance(other, Vector3):
raise TypeError("Expected Vector3 but got '%s'" % str(type(other).__name__))
self.x += other.x
self.y += other.y
self.z += other.z
return self
"""Addition"""
def __add__(self, other):
v = Vector3()
v += other
v += self
return v
"""Reverse Addition"""
def __radd__(self, other):
return self + other
"""In place Scalar Multiplication"""
def __imul__(self, other):
rhs = d.Decimal(other)
self.x *= rhs
self.y *= rhs
self.z *= rhs
return self
"""Scalar Multiplication"""
def __mul__(self, other):
v = self.copy()
v *= other
return v
"""Reverse Addition"""
def __rmul__(self, other):
return self * other
"""In place Substraction"""
def __isub__(self, other):
if not isinstance(other, Vector3):
msg = "Expected Vector3 but got '%s'" % str(type(other).__name__)
raise TypeError(msg)
self += (other * -1)
return self
"""Substraction"""
def __sub__(self, other):
v = self.copy()
v -= other
return v
"""Normalize this vector"""
def normalize(self):
length = self.magnitude()
if length < EPSILON:
self *= 0
return
self *= d.Decimal(1) / length
"""Return this vector normalized"""
def normalized(self):
v = self.copy()
v.normalize()
return v
# Set constant start position
def _setConstants():
try:
from .constants import __name__ as constants_module_name
from .constants import START_POSITION_HEIGHT_OFFSET
except SystemError as e:
from constants import __name__ as constants_module_name
from constants import START_POSITION_HEIGHT_OFFSET
import sys
module = sys.modules[constants_module_name]
setattr(module, 'START_POSITION', Vector3(0, 0, VL - START_POSITION_HEIGHT_OFFSET))
_setConstants()
"""
Generates Lineary Interpolated points from p0 to p1 with steps of size maxStep or less.
If step lesser than EPSILON then no step is done
"""
def interpolatePoints(p0, p1, maxStep=STEP_MAX):
direction = p1 - p0
length_sqr = direction.sqrMagnitude()
if length_sqr < EPSILON**2:
yield p0
return
dist = d.Decimal(0)
one = d.Decimal(1)
segments = int(length_sqr.sqrt() / maxStep)
if segments <= 1:
yield p0
yield p1
return
step = one / d.Decimal(segments)
while True:
yield p0 + (direction * dist)
dist += step
if dist > one:
break
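
# Illustrative walk with a hypothetical maxStep of 0.25 (Decimal arithmetic,
# as used throughout this module):
#   pts = list(interpolatePoints(Vector3(0, 0, 0), Vector3(1, 0, 0),
#                                maxStep=d.Decimal("0.25")))
#   # -> (0,0,0), (0.25,0,0), (0.5,0,0), (0.75,0,0), (1,0,0)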
if __name__ == '__main__':
print("Init test")
v = Vector3()
print(v)
v2 = Vector3(1,2,3.3)
print(v2)
print("Square Magnitude test")
print(v.sqrMagnitude())
print(v2.sqrMagnitude())
print("Magnitude test")
print(v.magnitude())
print(v2.magnitude())
print("In place Addition Tests")
try:
v += 1
except Exception as e:
print(e.args)
try:
v3 = Vector3(1,1,1)
v3 += v2
print(v3)
except Exception as e:
print(e.args)
print("Addition Tests")
try:
a = v + 1
except Exception as e:
print(e.args)
try:
v3 = Vector3(1,1,1)
a = v3 + v2
print(a)
except Exception as e:
print(e.args)
print("In place Scalar Multiplication Tests")
try:
v *= Vector3()
except Exception as e:
print(e.args)
try:
v3 = Vector3(1,1,1)
v3 *= 2
print(v3)
except Exception as e:
print(e.args)
print("Scalar Multiplication Tests")
try:
a = v * Vector3()
except Exception as e:
print(e.args)
try:
v3 = Vector3(1,1,1)
a = v3 * 4
print(a)
print("v3=%s" % str(v3))
except Exception as e:
print(e.args)
print("Unary minus test")
v3 = Vector3(1,2,3)
print("v=%s" % str(v3))
print("-v=%s" % str(-v3))
print("Substraction test")
v2 = Vector3(1,0,1)
v3 = Vector3(1,1,0)
print("v2=%s" % str(v2))
print("v3=%s" % str(v3))
print("v3-v2=%s" % str(v3 - v2))
print("v3=%s" % str(v3))
v3 -= v2
print("(v3-=v2)=%s" % str(v3))
print("Normalization Tests")
v3 = Vector3(1,1,1)
print(v3.normalized())
print(v3)
v3.normalize()
print(v3)
v3 = Vector3(0,0,0)
v3.normalize()
print(v3)
print("Interpolation Tests")
p0 = Vector3(0, 0, 0)
p1 = Vector3(1, 1, 1)
a = [v for v in interpolatePoints(p0, p1)]
print("Too long to print but it is here, uncomment if want you to see")
#print(a)
print("Interpolation test for points too close")
print([v for v in interpolatePoints(p0, Vector3(0, EPSILON / d.Decimal(2), 0))])
print("Interpolation test for points really close")
print([v for v in interpolatePoints(p0, Vector3(0, EPSILON, 0))])
print("Interpolation test for points almost really close")
print([v for v in interpolatePoints(p0, Vector3(0, EPSILON * d.Decimal(2), 0))])
| maniatic0/rasppi-printer | Utilities/vector.py | Python | mit | 5,714 |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
from sys import platform as _platform
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++0x',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-I./',
'-I../common/',
]
# Xcode for std stuff on OS X
if _platform == "darwin":
flags.append('-isystem/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1')
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
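
# Illustrative behaviour (hypothetical working directory '/proj'):
#   MakeRelativePathsInFlagsAbsolute( [ '-I', 'include', '-Isrc' ], '/proj' )
#   -> [ '-I', '/proj/include', '-I/proj/src' ]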
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| astrellon/cotsb | server/.ycm_extra_conf.py | Python | mit | 5,739 |
# -*- coding: utf-8 -*-
#
# py-uwerr documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 24 19:07:07 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py-uwerr'
copyright = u'2012, Dirk Hesse'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py-uwerrdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'py-uwerr.tex', u'py-uwerr Documentation',
u'Dirk Hesse', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py-uwerr', u'py-uwerr Documentation',
[u'Dirk Hesse'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'py-uwerr', u'py-uwerr Documentation',
u'Dirk Hesse', 'py-uwerr', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| dhesse/py-uwerr | doc/conf.py | Python | mit | 7,830 |
'''
Example which moves objects around in a 2D world.
This example requires matplotlib. The ros package doesnt have this as a
rosdep though, since nothing else needs it. Just do a system install
of matplotlib.
'''
import roslib; roslib.load_manifest('hierarchical_interactive_planning')
import numpy as np
from scipy import linalg
import hierarchical_interactive_planning as hip
######################################################################################################################
# World
######################################################################################################################
class MoveStuffWorld:
def __init__(self, obs_map, objects, eps=1.0):
'''
Args:
objects (dict): Mapping from string object names to numpy arrays of their positions.
'''
self.obs_map = obs_map
self.objects = objects
self.eps = eps
def execute(self, op_instance):
op_name = op_instance.operator_name
if op_name == 'MoveTo':
x_des = op_instance.target.args[0].val
self.objects['robot'].position = x_des
else:
            raise ValueError('Unknown operator: %s' % op_name)
def entails(self, f):
if isinstance(f, hip.ConjunctionOfFluents):
return np.all([self.entails(fluent) for fluent in f.fluents])
if f.pred == RobotAtLoc:
x_robot = self.objects['robot'].position
x_des = f.args[0].val
if linalg.norm(x_robot - x_des) < self.eps:
return True
else:
return False
else:
raise ValueError('Unknown predicate: %s' % str(f.pred))
def contradicts(self, f):
return False
def plan_path(self, x_start, x_end, n_graph_points=1000, graph_conn_dist=1.0):
'''Plan a collision free path from x_start to x_end.
Returns:
path (list of np.array): Path (or None if no path found).
'''
# build a random graph
        # NOTE: ObstacleMap.extent() returns (x_min, y_min, x_max, y_max)
        x_min, y_min, x_max, y_max = self.obs_map.extent()
        points = np.zeros((n_graph_points, 2))
        points[:,0] = np.random.uniform(x_min, x_max, n_graph_points)
        points[:,1] = np.random.uniform(y_min, y_max, n_graph_points)
        # search states are indices into `points`; start/goal are taken to be
        # the graph points nearest to x_start/x_end (assumed convention)
        start = int(np.argmin(np.sum((points - x_start) ** 2, axis=1)))
        goal = int(np.argmin(np.sum((points - x_end) ** 2, axis=1)))
        def action_generator(state):
            for neighbor in range(len(points)):
                d = linalg.norm(points[state] - points[neighbor])
                if d < graph_conn_dist:
                    yield neighbor, neighbor, d  # action, next_state, cost
        # `a_star` is assumed to be provided by the surrounding package;
        # it is not imported in this snippet
        p = a_star.a_star(
            start,
            lambda s: s == goal,
            action_generator,
            lambda s: linalg.norm(points[goal] - points[s])
        )
        return p
class Robot:
def __init__(self, position):
self.position = position
class Box:
def __init__(self, center, r):
self.center = np.array(center)
self.r = r
######################################################################################################################
# Predicates
######################################################################################################################
RobotAtLoc = hip.Predicate('RobotAtLoc', ['loc'])
RobotLocFree = hip.Predicate('RobotLocFree', ['robot', 'loc'])
######################################################################################################################
# Suggesters
######################################################################################################################
def robot_path_suggester(world, current_state, goal):
for f in goal.fluents:
if f.pred == RobotAtLoc:
x_des = f.args[0].val
yield []
######################################################################################################################
# Operators
######################################################################################################################
loc = hip.Variable('loc')
path = hip.Variable('path')
MoveTo = hip.Operator(
'MoveTo',
target = RobotAtLoc((loc,)),
suggesters = {path:robot_path_suggester},
preconditions = [],
side_effects = hip.ConjunctionOfFluents([]),
primitive = False,
)
operators = [MoveTo]
######################################################################################################################
# Main
######################################################################################################################
def draw_objects(objects):
for obj_name, obj in objects.items():
if isinstance(obj, Box):
plt.plot([obj.center[0]], [obj.center[1]], 'x')
vertices = []
r = obj.r
for offset in [(r, r), (r, -r), (-r, -r), (-r, r)]:
vertices.append(obj.center + np.array(offset))
vertices = np.array(vertices)
plt.fill(vertices[:,0], vertices[:,1], 'm')
elif isinstance(obj, Robot):
plt.plot([obj.position[0]], [obj.position[1]], 'go', markersize=40)
class ObstacleMap:
def __init__(self, obs_array, res):
'''2D obstacle map class.
Args:
obs_array (np.array of np.bool): Occupancy array.
res (float): Size (height and width) of each cell in the occupancy array.
'''
self.obs_array = obs_array
self.res = res
def pos_to_ind(self, p):
'''Return array index for cell that contains x,y position.
'''
ii, jj = np.array(p) / self.res
return int(ii), int(jj)
def ind_to_pos(self, ind):
'''Return x,y position of center point of cell specified by index.
'''
return np.array(ind) * self.res + 0.5 * self.res
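
    # Illustrative round trip at res=1.0 (example values, not a test):
    #   pos_to_ind((2.7, 3.2)) -> (2, 3)
    #   ind_to_pos((2, 3))     -> array([2.5, 3.5]), the cell's center point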
def is_occupied(self, p):
ii, jj = self.pos_to_ind(p)
return self.obs_array[ii,jj]
def any_occupied(self, x0, x1, y0, y1):
'''Return true if any cells within the bounding box are occupied.
'''
i0, j0 = self.pos_to_ind((x0, y0))
i1, j1 = self.pos_to_ind((x1, y1))
i0 = max(0, i0)
i1 = max(0, i1)
j0 = max(0, j0)
j1 = max(0, j1)
return self.obs_array[i0:i1,j0:j1].any()
def points(self):
points = []
for ii in range(self.obs_array.shape[0]):
for jj in range(self.obs_array.shape[1]):
p = self.ind_to_pos((ii, jj))
points.append(p)
return np.array(points)
def occupied_points(self):
points = []
for ii in range(self.obs_array.shape[0]):
for jj in range(self.obs_array.shape[1]):
p = self.ind_to_pos((ii, jj))
if self.is_occupied(p):
points.append(p)
return np.array(points)
def extent(self):
x_min, y_min = self.ind_to_pos((0, 0))
s = self.obs_array.shape
x_max, y_max = self.ind_to_pos((s[0]-1, s[1]-1))
return x_min, y_min, x_max, y_max
if __name__ == '__main__':
import sys
from matplotlib import pyplot as plt
# load world map
res = 1.0
obs_arr = plt.imread(sys.argv[1])[::-1,:].T
obs_arr = obs_arr < obs_arr.max() / 2.0
obs_map = ObstacleMap(obs_arr, res)
objects = {
'robot': Robot(np.array((50., 50.))),
'box1': Box((5., 5.), 4.),
}
start_state = hip.ConjunctionOfFluents([])
world = MoveStuffWorld(obs_map, objects)
goal_symbol = hip.Symbol(np.array((10., 10.)))
goal = hip.ConjunctionOfFluents([RobotAtLoc((goal_symbol,))])
# run HPN to generate plan
tree = hip.HPlanTree()
hip.hpn(operators, start_state, goal, world, tree=tree)
fig = plt.figure()
points_occ = obs_map.occupied_points()
plt.plot(points_occ[:,0], points_occ[:,-1], 'bo')
if 0:
w = 5
occ = []
free = []
for x, y in obs_map.points():
if obs_map.any_occupied(x-w, x+w, y-w, y+w):
occ.append((x, y))
else:
free.append((x, y))
occ = np.array(occ)
free = np.array(free)
plt.plot(occ[:,0], occ[:,1], 'r.')
plt.plot(free[:,0], free[:,1], 'g.')
draw_objects(objects)
    x_min, y_min, x_max, y_max = obs_map.extent()
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
| jonbinney/python-planning | python_task_planning/examples/move_stuff/move_stuff.py | Python | mit | 8,400 |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def mainRoute():
return render_template('hello.html')
@app.route('/jujitsu')
def jujitsu():
return render_template('jujitsu.html')
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=8080)
| CrazyDiamond567/docker-cloud-test | unh698.py | Python | mit | 302 |
#!/usr/bin/env python
# encoding: utf-8
"""MoodleFUSE initialization
"""
import os
import errno
from moodlefuse.filesystem import Filesystem
from moodlefuse.core import setup, config
from moodlefuse.services import USERS
MOODLEFUSE_DATABASE_FILE = 'moodlefuse.sqlite'
MOODLEFUSE_CONFIG_FILE = 'moodlefuse.conf'
MOODLEFUSE_HIDDEN_FOLDER = '.moodlefuse'
DATABASE_CONF = 'alembic.ini'
class MoodleFuse(object):
def __init__(self, settings=None, testing=None):
setup(settings)
self._create_filesystem_root()
Filesystem(config['LOCAL_MOODLE_FOLDER'], testing)
def _create_filesystem_root(self):
moodle_fs_path = config['LOCAL_MOODLE_FOLDER']
try:
os.makedirs(moodle_fs_path)
except OSError as e:
            if e.errno != errno.EEXIST:
raise e
| BroganD1993/MoodleFUSE | moodlefuse/__init__.py | Python | mit | 836 |
import sys
import time as tmod
import warnings
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pandas as pd
warnings.simplefilter("ignore")
sys.path.insert(0, "../FATS/")
import FATS
iterations = 100000
lc_size = 1000
random = np.random.RandomState(42)
results = {
"StetsonK": np.empty(iterations),
"StetsonJ": np.empty(iterations),
"AndersonDarling": np.empty(iterations)}
for it in range(iterations):
fs = FATS.FeatureSpace(featureList=list(results.keys()))
# a simple time array from 0 to 99 with steps of 0.01
time = np.arange(0, 100, 100./lc_size).shape
# create 1000 magnitudes with mu 0 and std 1
mags = random.normal(size=lc_size)
# create 1000 magnitudes with difference <= 0.1% than mags
mags2 = mags * random.uniform(0, 0.01, mags.size)
# create two errors for the magnitudes equivalent to the 0.001%
# of the magnitudes
errors = random.normal(scale=0.00001, size=lc_size)
errors2 = random.normal(scale=0.00001, size=lc_size)
lc = np.array([
mags, # magnitude
time, # time
errors, # error
mags, # magnitude2
mags, # aligned_magnitude
mags, # aligned_magnitude2
time, # aligned_time
errors, # aligned_error
errors # aligned_error2
])
fs.calculateFeature(lc)
for k, v in fs.result("dict").items():
results[k][it] = v
df = pd.DataFrame(results).describe()
print df
df.to_latex("features_montecarlo.tex", float_format='%.4f')
| carpyncho/feets | res/paper/reports/features_montecarlo.py | Python | mit | 1,591 |
import pymc3 as pm
from lasagne.layers.helper import *
from lasagne.layers.helper import __all__ as __helper__all__
__all__ = [
"find_parent",
"find_root",
] + __helper__all__
def find_parent(layer):
candidates = get_all_layers(layer)[::-1]
found = None
for candidate in candidates:
if isinstance(candidate, pm.Model):
found = candidate
break
return found
def find_root(layer):
model = find_parent(layer)
if model is not None:
return model.root
else:
return None
| ferrine/gelato | gelato/layers/helper.py | Python | mit | 551 |
import inspect
__all__ = ['GenericVisitor']
class GenericVisitor(object):
"""
A generic visitor.
To define handlers, subclasses should define :data:`visit_Foo`
methods for each class :data:`Foo` they want to handle.
If a specific method for a class :data:`Foo` is not found, the MRO
of the class is walked in order until a matching method is found.
The method signature is:
.. code-block::
def visit_Foo(self, o, [*args, **kwargs]):
pass
The handler is responsible for visiting the children (if any) of
the node :data:`o`. :data:`*args` and :data:`**kwargs` may be
used to pass information up and down the call stack. You can also
pass named keyword arguments, e.g.:
.. code-block::
def visit_Foo(self, o, parent=None, *args, **kwargs):
pass
"""
def __init__(self):
handlers = {}
# visit methods are spelt visit_Foo.
prefix = "visit_"
# Inspect the methods on this instance to find out which
# handlers are defined.
for (name, meth) in inspect.getmembers(self, predicate=inspect.ismethod):
if not name.startswith(prefix):
continue
# Check the argument specification
# Valid options are:
# visit_Foo(self, o, [*args, **kwargs])
argspec = inspect.getfullargspec(meth)
if len(argspec.args) < 2:
raise RuntimeError("Visit method signature must be "
"visit_Foo(self, o, [*args, **kwargs])")
handlers[name[len(prefix):]] = meth
self._handlers = handlers
"""
:attr:`default_args`. A dict of default keyword arguments for the visitor.
These are not used by default in :meth:`visit`, however, a caller may pass
them explicitly to :meth:`visit` by accessing :attr:`default_args`.
For example::
.. code-block::
v = FooVisitor()
v.visit(node, **v.default_args)
"""
default_args = {}
@classmethod
def default_retval(cls):
"""
A method that returns an object to use to populate return values.
If your visitor combines values in a tree-walk, it may be useful to
provide a object to combine the results into. :meth:`default_retval`
may be defined by the visitor to be called to provide an empty object
of appropriate type.
"""
return None
def lookup_method(self, instance):
"""
Look up a handler method for a visitee.
Parameters
----------
instance : object
The instance to look up a method for.
"""
cls = instance.__class__
try:
# Do we have a method handler defined for this type name
return self._handlers[cls.__name__]
except KeyError:
# No, walk the MRO.
for klass in cls.mro()[1:]:
entry = self._handlers.get(klass.__name__)
if entry:
# Save it on this type name for faster lookup next time
self._handlers[cls.__name__] = entry
return entry
            raise RuntimeError("No handler found for class %s" % cls.__name__)
def visit(self, o, *args, **kwargs):
"""
Apply this Visitor to an object.
Parameters
----------
o : object
The object to be visited.
*args
Optional arguments to pass to the visit methods.
**kwargs
Optional keyword arguments to pass to the visit methods.
"""
ret = self._visit(o, *args, **kwargs)
ret = self._post_visit(ret)
return ret
def _visit(self, o, *args, **kwargs):
"""Visit ``o``."""
meth = self.lookup_method(o)
return meth(o, *args, **kwargs)
def _post_visit(self, ret):
"""Postprocess the visitor output before returning it to the caller."""
return ret
def visit_object(self, o, **kwargs):
return self.default_retval()
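
# Minimal usage sketch (hypothetical subclass, not part of this module):
#
#   class NodeCounter(GenericVisitor):
#       def visit_object(self, o, **kwargs):
#           return 1
#
# A visit_Foo method on the subclass would shadow visit_object for Foo
# instances; unhandled types fall back through the MRO to visit_object.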
| opesci/devito | devito/tools/visitors.py | Python | mit | 4,134 |
import base64
import demistomock as demisto
from WildFireReports import main
import requests_mock
def test_wildfire_report(mocker):
"""
Given:
A sha256 represents a file uploaded to WildFire.
When:
internal-wildfire-get-report command is running.
Then:
Ensure that the command is running as expected.
"""
mock_sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'
mocker.patch.object(demisto, 'command', return_value='internal-wildfire-get-report')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': '123456'})
mocker.patch.object(demisto, 'args', return_value={'sha256': mock_sha256})
with open('test_data/response.pdf', 'rb') as file:
file_content = b''
while byte := file.read(1):
file_content += byte
mocker.patch('WildFireReports.fileResult', return_value=file_content) # prevent file creation
demisto_mock = mocker.patch.object(demisto, 'results')
with requests_mock.Mocker() as m:
m.post(f'https://test.com/publicapi/get/report?format=pdf&hash={mock_sha256}', content=file_content)
main()
assert demisto_mock.call_args_list[0][0][0]['data'] == base64.b64encode(file_content).decode()
def test_report_not_found(mocker):
"""
Given:
A sha256 represents a file not uploaded to WildFire.
When:
internal-wildfire-get-report command is running.
Then:
Ensure that the command is running as expected.
"""
mock_sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567891'
mocker.patch.object(demisto, 'command', return_value='internal-wildfire-get-report')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': '123456'})
mocker.patch.object(demisto, 'args', return_value={'sha256': mock_sha256})
demisto_mock = mocker.patch.object(demisto, 'results')
with requests_mock.Mocker() as m:
m.post(f'https://test.com/publicapi/get/report?format=pdf&hash={mock_sha256}', status_code=404)
main()
assert demisto_mock.call_args[0][0] == {'status': 'not found'}
def test_incorrect_sha256(mocker):
"""
Given:
An incorrect sha256.
When:
internal-wildfire-get-report command is running.
Then:
Ensure that the command is running as expected.
"""
mock_sha256 = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef123456789' # The length is 63 insteadof 64
mocker.patch.object(demisto, 'command', return_value='internal-wildfire-get-report')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': '123456'})
mocker.patch.object(demisto, 'args', return_value={'sha256': mock_sha256})
demisto_mock = mocker.patch.object(demisto, 'results')
expected_description_error = 'Failed to download report.\nError:\nInvalid hash. Only SHA256 are supported.'
main()
assert demisto_mock.call_args_list[0].args[0].get('error', {}).get('description') == expected_description_error
def test_incorrect_authorization(mocker):
"""
Given:
An incorrect API token.
When:
test-module command is running.
Then:
Ensure that the command is running as expected.
"""
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': 'incorrect api token'})
demisto_mock = mocker.patch.object(demisto, 'results')
expected_description_error = 'Authorization Error: make sure API Key is correctly set'
url = 'https://test.com/publicapi/get/report'
params = '?apikey=incorrect+api+token&format=pdf&hash=dca86121cc7427e375fd24fe5871d727'
with requests_mock.Mocker() as m:
m.post(url + params, status_code=401)
main()
assert demisto_mock.call_args_list[0].args[0] == expected_description_error
def test_empty_api_token(mocker):
"""
Given:
An empty API token.
When:
test-module command is running.
Then:
Ensure that the command is running as expected.
"""
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'params', return_value={'server': 'https://test.com/', 'token': ''})
mocker.patch.object(demisto, 'getLicenseCustomField', return_value=None)
demisto_mock = mocker.patch('WildFireReports.return_error')
expected_description_error = 'Authorization Error: It\'s seems that the token is empty and you have not a ' \
'TIM license that is up-to-date, Please fill the token or update your TIM license ' \
'and try again.'
main()
assert demisto_mock.call_args_list[0].args[0] == expected_description_error
| VirusTotal/content | Packs/Palo_Alto_Networks_WildFire/Integrations/WildFireReports/WildFireReports_test.py | Python | mit | 4,906 |
#!/usr/bin/env python2
import argparse
import xml.etree.ElementTree as ET
import subprocess
import os.path as path
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import time
OUT_DIR = './_out'
HAXE_PATH = 'haxe'
def get_main_info(meta_root):
server_main, client_main = None, None
node = meta_root.find('server_main')
if node is not None:
server_main = {'src': '', 'class': 'Main', 'main_dir': ''}
src = node.attrib['src']
if src is not None:
server_main['main_dir'] = path.dirname(src)
server_main['src'] = path.relpath(src, server_main['main_dir'])
node = meta_root.find('client_main')
if node is not None:
client_main = {'src': '', 'class': 'Main', 'main_dir': ''}
src = node.attrib['src']
if src is not None:
client_main['main_dir'] = path.dirname(src)
client_main['src'] = path.relpath(src, client_main['main_dir'])
return server_main, client_main
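
# Illustrative meta.xml fragment this parser expects (hypothetical paths):
#   <meta>
#     <server_main src="server/Main.hx"/>
#     <client_main src="client/Main.hx"/>
#   </meta>
# For the server entry, get_main_info would report main_dir='server' and
# src='Main.hx' (and analogously for the client entry).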
def build_meta(meta_root):
meta_root.append(ET.Element('script', {'src': 'server.lua', 'type': 'server'}))
meta_root.append(ET.Element('script', {'src': 'client.lua', 'type': 'client'}))
for node in meta_root.findall('server_main'):
meta_root.remove(node)
for node in meta_root.findall('client_main'):
meta_root.remove(node)
ET.ElementTree(meta_root).write(path.join(OUT_DIR, 'meta.xml'))
def build_resource():
print('Building...')
# Parse meta.xml
tree = ET.parse('meta.xml')
root = tree.getroot()
# Get information about entry point
server_main, client_main = get_main_info(root)
server_out_path = path.join('..', OUT_DIR, 'server.lua')
client_out_path = path.join('..', OUT_DIR, 'client.lua')
# Invoke the compiler
if server_main is not None:
ret = subprocess.call([HAXE_PATH, '-main', server_main['class'], '-lua', server_out_path, server_main['src'], '-lib', 'mtasa-typings'], cwd=server_main['main_dir'])
if client_main is not None:
ret = subprocess.call([HAXE_PATH, '-main', client_main['class'], '-lua', client_out_path, client_main['src'], '-lib', 'mtasa-typings'], cwd=client_main['main_dir'])
# Build new meta
build_meta(root)
if __name__ == '__main__':
# Parse args
parser = argparse.ArgumentParser(description='haxe-mta build tools')
sub_parsers = parser.add_subparsers(help='sub commands')
build_parser = sub_parsers.add_parser('build', help='build resource')
run_parser = sub_parsers.add_parser('run', help='wait and recompile on changes')
build_parser.set_defaults(build_parser=True, run_parser=False)
run_parser.set_defaults(run_parser=True, build_parser=False)
args = parser.parse_args()
# Handle commands
if args.build_parser:
# Build resource once
build_resource()
elif args.run_parser:
# Build resource again on any change
class FileSystemListener(PatternMatchingEventHandler):
def __init__(self):
super(FileSystemListener, self).__init__(ignore_patterns=['**/_out*'])
def on_any_event(self, event):
build_resource()
event_handler = FileSystemListener()
observer = Observer()
observer.schedule(event_handler, '.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| Jusonex/haxe-mtasa-typings | example/build.py | Python | mit | 3,302 |
# -*- coding: utf-8 -*-
from google.appengine.api import apiproxy_stub_map
from google.appengine.ext import db
from django.core.urlresolvers import resolve
from django.http import HttpRequest, QueryDict
from ragendja.testutils import ModelTestCase
from search.core import SearchIndexProperty
import base64
class Indexed(db.Model):
# Test normal and prefix index
one = db.StringProperty()
two = db.StringProperty()
one_two_index = SearchIndexProperty(('one', 'two'))
check = db.BooleanProperty()
# Test relation index
value = db.StringProperty()
value_index = SearchIndexProperty('value', integrate=('one', 'check'))
def run_tasks():
stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
tasks = stub.GetTasks('default')
for task in tasks:
view, args, kwargs = resolve(task['url'])
request = HttpRequest()
request.POST = QueryDict(base64.b64decode(task['body']))
view(request)
stub.DeleteTask('default', task['name'])
class TestIndexed(ModelTestCase):
model = Indexed.value_index._relation_index_model
def setUp(self):
apiproxy_stub_map.apiproxy.GetStub('taskqueue').FlushQueue('default')
for i in range(3):
Indexed(one=u'OneOne%d' % i).put()
for i in range(3):
Indexed(one=u'one%d' % i, two='two%d' % i).put()
for i in range(3):
Indexed(one=(None, u'ÜÄÖ-+!#><|', 'blub')[i],
check=bool(i%2), value=u'value%d test-word' % i).put()
run_tasks()
def test_setup(self):
self.assertEqual(len(Indexed.one_two_index.search('one2')), 1)
self.assertEqual(len(Indexed.one_two_index.search('two')), 0)
self.assertEqual(len(Indexed.one_two_index.search('two1')), 1)
self.assertEqual(len(Indexed.value_index.search('word')), 3)
self.assertEqual(len(Indexed.value_index.search('test-word')), 3)
self.assertEqual(len(Indexed.value_index.search('value0',
filters=('check =', False))), 1)
self.assertEqual(len(Indexed.value_index.search('value1',
filters=('check =', True, 'one =', u'ÜÄÖ-+!#><|'))), 1)
self.assertEqual(len(Indexed.value_index.search('value2',
filters=('check =', False, 'one =', 'blub'))), 1)
def test_change(self):
value = Indexed.value_index.search('value0').get()
value.value = 'value1 test-word'
value.put()
value.one = 'shidori'
value.value = 'value3 rasengan/shidori'
value.put()
run_tasks()
self.assertEqual(len(Indexed.value_index.search('rasengan')), 1)
self.assertEqual(len(Indexed.value_index.search('value3')), 1)
value = Indexed.value_index.search('value3').get()
value.delete()
run_tasks()
self.assertEqual(len(Indexed.value_index.search('value3')), 0)
| nurey/disclosed | app2/search/tests.py | Python | mit | 2,896 |
# classifier.py
#
# This module contains code to support the classifier notebook
import numpy as np
import pylab as plt
def p_correct_given_pos(sens, fpr, b):
""" Returns a simple Bayesian probability for the probability
that a prediction is correct, given that the prediction
was positive, given the prevailing sensitivity (sens),
false positive rate (fpr) and base rate of positive
examples.
"""
assert 0 <= sens <= 1, "Sensitivity must be in range [0,1]"
assert 0 <= fpr <= 1, "FPR must be in range [0,1]"
return sens * b / (sens * b + fpr * (1 - b))
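
# Worked example with the defaults used below (illustrative numbers):
#   p_correct_given_pos(0.85, 0.17, 0.1)
#   = 0.85*0.1 / (0.85*0.1 + 0.17*0.9) ~= 0.357
# i.e. only ~36% of positive calls are true effectors at a 10% base rate.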
def plot_prob_effector(sens=0.85, fpr=0.17, baserate=0.1, xmax=1):
""" Plots a line graph of P(effector|positive test) against
the baserate of effectors in the input set to the classifier.
The baserate argument draws an annotation arrow
indicating P(pos|+ve) at that baserate
"""
assert 0.1 <= xmax <= 1, "Max x axis value must be in range [0,1]"
assert 0.01 <= baserate <= 1, "Baserate annotation must be in range [0,1]"
baserates = np.arange(0, 1.05,
xmax * 0.005) # Range of +ve example base rate
probs = [p_correct_given_pos(sens, fpr, b) for b in baserates]
plt.figure(figsize=(10, 6))
plt.plot(baserates, probs, 'r')
plt.title("P(eff|pos) vs baserate; sens: %.2f, fpr: %.2f" % (sens, fpr))
plt.ylabel("P(effector|positive)")
plt.xlabel("effector baserate")
plt.xlim(0, xmax)
plt.ylim(0, 1)
# Add annotation arrow
xpos, ypos = (baserate, p_correct_given_pos(sens, fpr, baserate))
if baserate < xmax:
if xpos > 0.7 * xmax:
xtextpos = 0.05 * xmax
else:
xtextpos = xpos + (xmax-xpos)/5.
if ypos > 0.5:
ytextpos = ypos - 0.05
else:
ytextpos = ypos + 0.05
plt.annotate('baserate: %.2f, P(pos|+ve): %.3f' % (xpos, ypos),
xy=(xpos, ypos),
xytext=(xtextpos, ytextpos),
arrowprops=dict(facecolor='black', shrink=0.05))
else:
plt.text(0.05 * xmax, 0.95, 'baserate: %.2f, P(pos|+ve): %.3f' %
(xpos, ypos))
plt.show()
| widdowquinn/Teaching-SfAM-ECS | workshop/tools/classifier.py | Python | mit | 2,237 |
"""manglocreative URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
# ============================================================================================================ #
# kkk kkk ooooo dddddddd eeeeeeeeee kkk kkk ooooo ooooo #
# kkk kkk ooooooooo ddddddddddd eeeeeeeeee kkk kkk ooooooooo ooooooooo #
# kkk kkk ooo ooo ddd ddd eee kkk kkk ooo ooo ooo ooo #
# kkk kkk oooo oooo ddd ddd eee kkk kkk oooo oooo oooo oooo #
# kkk kkk oooo oooo ddd ddd eee kkk kkk oooo oooo oooo oooo #
# kkkkkkkk oooo oooo ddd ddd eeeeeeeeee kkkkkkkk oooo oooo oooo oooo #
# kkk kkk oooo oooo ddd ddd eee kkk kkk oooo oooo oooo oooo #
# kkk kkk oooo oooo ddd ddd eee kkk kkk oooo oooo oooo oooo #
# kkk kkk ooo ooo ddd ddd eee kkk kkk ooo ooo ooo ooo #
# kkk kkk ooooooooo dddddddddd eeeeeeeeee kkk kkk ooooooooo ooooooooo #
# kkk kkk ooooo ddddddd eeeeeeeeee kkk kkk ooooo ooooo #
# ============================================================================================================ #
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
sitemaps = {
'posts': PostSitemap,
}
urlpatterns = [
url(r'^manage/', admin.site.urls),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
# url(r'^blog/', include('blog.urls', namespace='blog', app_name='blog')),
url(r'^', include('blog.urls', namespace='blog', app_name='blog')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
]
| rizkiwisnuaji/django-newbie-cms | manglocreative/urls.py | Python | mit | 2,703 |
import numpy as np
import scipy.special  # scipy.misc.factorial was removed; it now lives in scipy.special
import matplotlib.pyplot as plt
x = np.linspace(0, 5, 100)
y1 = np.power(2, x)
y2 = scipy.special.factorial(x)
plt.plot(x, y1)
plt.plot(x, y2)
plt.grid(True)
plt.savefig('../../img/question_4_plots/g.png')
| ammiranda/CS325 | week1/plots/question_4/g.py | Python | mit | 243 |
import random
color_names=[
'aliceblue',
'antiquewhite',
'aqua',
'aquamarine',
'azure',
'beige',
'bisque',
'blanchedalmond',
'blue',
'blueviolet',
'brown',
'burlywood',
'cadetblue',
'chartreuse',
'chocolate',
'coral',
'cornflowerblue',
'cornsilk',
'crimson',
'cyan',
'darkblue',
'darkcyan',
'darkgoldenrod',
'darkgray',
'darkgreen',
'darkkhaki',
'darkmagenta',
'darkolivegreen',
'darkorange',
'darkorchid',
'darkred',
'darksalmon',
'darkseagreen',
'darkslateblue',
'darkturquoise',
'darkviolet',
'deeppink',
'deepskyblue',
'dimgray',
'dodgerblue',
'firebrick',
'floralwhite',
'forestgreen',
'fuchsia',
'gainsboro',
'ghostwhite',
'gold',
'goldenrod',
'gray',
'green',
'greenyellow',
'honeydew',
'hotpink',
'indianred',
'indigo',
'ivory',
'khaki',
'lavender',
'lavenderblush',
'lawngreen',
'lemonchiffon',
'lightblue',
'lightcoral',
'lightcyan',
'lightgoldenrodyellow',
'lightgreen',
'lightgray',
'lightpink',
'lightsalmon',
'lightseagreen',
'lightskyblue',
'lightslategray',
'lightsteelblue',
'lightyellow',
'lime',
'limegreen',
'linen',
'magenta',
'maroon',
'mediumaquamarine',
'mediumblue',
'mediumorchid',
'mediumpurple',
'mediumseagreen',
'mediumslateblue',
'mediumspringgreen',
'mediumturquoise',
'mediumvioletred',
'mintcream',
'mistyrose',
'moccasin',
'navajowhite',
'navy',
'oldlace',
'olive',
'olivedrab',
'orange',
'orangered',
'orchid',
'palegoldenrod',
'palegreen',
'paleturquoise',
'palevioletred',
'papayawhip',
'peachpuff',
'peru',
'pink',
'plum',
'powderblue',
'purple',
'red',
'rosybrown',
'royalblue',
'saddlebrown',
'salmon',
'sandybrown',
'seagreen',
'seashell',
'sienna',
'silver',
'skyblue',
'slateblue',
'slategray',
'snow',
'springgreen',
'steelblue',
'tan',
'teal',
'thistle',
'tomato',
'turquoise',
'violet',
'wheat',
'white',
'whitesmoke',
'yellow',
'yellowgreen',
]
def getRandomColors(num):
    """Return num distinct random color names (all of them if num is too large)."""
    if num > len(color_names):
        return color_names
    return random.sample(color_names, num)
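# Example (output varies per call): getRandomColors(3) -> ['navy', 'gold', 'teal']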
| largetalk/tenbagger | draw/colors.py | Python | mit | 2,176 |
"""
Compare the regions predicted to be prophages to the regions that are marked as prophages in our testing set
Probably the hardest part of this is the identifiers!
"""
import os
import sys
import argparse
import gzip
from Bio import SeqIO
from PhiSpyModules import message, is_gzip_file
__author__ = 'Rob Edwards'
__copyright__ = 'Copyright 2020, Rob Edwards'
__credits__ = ['Rob Edwards']
__license__ = 'MIT'
__maintainer__ = 'Rob Edwards'
__email__ = '[email protected]'
def genbank_seqio(gbkf):
if is_gzip_file(gbkf):
handle = gzip.open(gbkf, 'rt')
else:
handle = open(gbkf, 'r')
return SeqIO.parse(handle, "genbank")
def actual_phage_cds(gbkf, verbose=False):
"""
Read the genbank file and return a list of features that are actually phage regions
:param gbkf: the test genbank file with CDS marked with is_phage
:param verbose: more output
:return: a set of phage features
"""
if verbose:
message(f"Reading {gbkf}", "GREEN", "stderr")
phage = {}
nonphage = {}
for seq in genbank_seqio(gbkf):
for feat in seq.features:
if feat.type == 'CDS':
if 'product' not in feat.qualifiers:
feat.qualifiers['product'] = [f"Hypothetical protein (not annotated in {gbkf})"]
if 'is_phage' in feat.qualifiers:
phage[str(feat.translate(seq, cds=False).seq).upper()] = feat.qualifiers['product'][0]
else:
nonphage[str(feat.translate(seq, cds=False).seq).upper()] = feat.qualifiers['product'][0]
return phage, nonphage
def predicted_genbank(predf, verbose=False):
"""
Read the predictions from the genbank file and return
a set of features
:param predf: the predictions file
:param verbose: more output
:return: a set of predicted phage genes
"""
if verbose:
message(f"Reading {predf}", "GREEN", "stderr")
predicted = {}
for seq in genbank_seqio(predf):
for feat in seq.features:
if feat.type == 'CDS':
if 'product' in feat.qualifiers:
predicted[str(feat.translate(seq, cds=False).seq).upper()] = feat.qualifiers['product'][0]
else:
predicted[str(feat.translate(seq, cds=False).seq).upper()] = f"Hypothetical protein (not annotated in {predf})"
if verbose:
message(f"Found {len(predicted)} predicted prophage features", "BLUE", "stderr")
return predicted
def predicted_regions(regf, gbkf, verbose):
"""
Pull the phage genes from the regions
:param regf: the regions file with contigs/start/stop
:param gbkf: the genbank file used to make those predictions
:param verbose: more output
:return: a set of predicted phage genes
"""
regions = {}
if verbose:
message(f"Reading {regf}", "GREEN", "stderr")
with open(regf, 'r') as f:
for l in f:
p = l.strip().split("\t")
            assert(len(p) == 3), f"Expected a tuple of [contig, start, stop] in {regf}"
p[1] = int(p[1])
p[2] = int(p[2])
if p[0] not in regions:
regions[p[0]] = []
if p[2] < p[1]:
regions[p[0]].append([p[2], p[1]])
else:
regions[p[0]].append([p[1], p[2]])
if verbose:
message(f"Reading {gbkf} again to get the phage regions", "GREEN", "stderr")
predicted = {}
for seq in genbank_seqio(gbkf):
if seq.id in regions:
for loc in regions[seq.id]:
if verbose:
message(f"Getting from {loc[0]} to {loc[1]}", "PINK", "stderr")
for feat in seq[loc[0]:loc[1]].features:
if feat.type == 'CDS':
if 'product' in feat.qualifiers:
predicted[str(feat.translate(seq[loc[0]:loc[1]], cds=False).seq).upper()] = feat.qualifiers['product'][0]
else:
predicted[str(feat.translate(seq[loc[0]:loc[1]], cds=False).seq).upper()] = f"Hypothetical protein (not annotated in {gbkf})"
if verbose:
message(f"Found {len(predicted)} predicted prophage features", "BLUE", "stderr")
return predicted
def compare_real_predicted(phage: dict, nonphage: dict, predicted: dict, print_fp: bool, print_fn: bool, verbose: bool):
"""
Compare the features that are real and predicted
:param print_fn: print out the false negative matches
:param print_fp: print out the false positive matches
:param phage: actual phage features
:param nonphage: actual non phage features
:param predicted: predicted phage features
:param verbose: more output
:return:
"""
if verbose:
message(f"Comparing real and predicted", "GREEN", "stderr")
# TP = phage intersection predicted
# TN = nonphage intersection [not in predicted]
# FP = nonphage intersection predicted
# FN = phage intersection [not in predicted]
# convert the keys to sets
phage_set = set(phage.keys())
nonphage_set = set(nonphage.keys())
predicted_set = set(predicted.keys())
# calculate not in predicted
not_predicted = set()
for s in phage_set.union(nonphage):
if s not in predicted:
not_predicted.add(s)
print(f"Found:\nTest set:\n\tPhage: {len(phage)} Not phage: {len(nonphage)}")
print(f"Predictions:\n\tPhage: {len(predicted)} Not phage: {len(not_predicted)}")
tp = len(phage_set.intersection(predicted_set))
tn = len(nonphage_set.intersection(not_predicted))
fp = len(nonphage_set.intersection(predicted_set))
fn = len(phage_set.intersection(not_predicted))
print(f"TP: {tp} FP: {fp} TN: {tn} FN: {fn}")
try:
accuracy = (tp+tn)/(tp + tn + fp + fn)
except ZeroDivisionError:
accuracy = "NaN"
try:
precision = tp/(tp+fp)
except ZeroDivisionError:
precision = "NaN"
try:
recall = tp/(tp+fn)
except ZeroDivisionError:
recall = "NaN"
try:
specificity = tn/(tn+fp)
except ZeroDivisionError:
specificity = "NaN"
f1_score = "NaN"
if accuracy != "NaN" and precision != "NaN" and recall != "NaN" and specificity != "NaN":
try:
f1_score = 2*(recall * precision) / (recall + precision)
except ZeroDivisionError:
f1_score = "NaN"
if accuracy != "NaN":
print(f"Accuracy: {accuracy:.3f}\t(this is the ratio of the correctly labeled phage genes to the whole pool of genes")
else:
print("Accuracy: NaN")
if precision != "NaN":
print(f"Precision: {precision:.3f}\t(This is the ratio of correctly labeled phage genes to all predictions)")
else:
print("Precision: NaN")
if recall != "NaN":
print(f"Recall: {recall:.3f}\t(This is the fraction of actual phage genes we got right)")
else:
print("Recall: NaN")
if specificity != "NaN":
print(f"Specificity: {specificity:.3f}\t(This is the fraction of non phage genes we got right)")
else:
print("Specificity: NaN")
if f1_score != "NaN":
print(f"f1 score: {f1_score:.3f}\t(this is the harmonic mean of precision and recall, and is the best measure when, as in this case, there is a big difference between the number of phage and non-phage genes)")
else:
print("f1 score: NaN")
if print_fp:
for i in nonphage_set.intersection(predicted_set):
print(f"FP\t{i}\t{nonphage[i]}")
if print_fn:
for i in phage_set.intersection(not_predicted):
print(f"FN\t{i}\t{[phage[i]]}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Compare predictions to reality")
parser.add_argument('-t', '--testfile', help='test file that has phage proteins marked with the is_phage qualifier', required=True)
parser.add_argument('-p', '--predictfile', help='predictions genbank file that has each prophage as a sequence entry')
parser.add_argument('-r', '--regionsfile', help='predictions regions file that has tuple of [contig, start, end]')
parser.add_argument('--fp', help='print out the false positives', action='store_true')
parser.add_argument('--fn', help='print out the false negatives', action='store_true')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
pred = None
if args.predictfile:
pred = predicted_genbank(args.predictfile, args.v)
elif args.regionsfile:
pred = predicted_regions(args.regionsfile, args.testfile, args.v)
    else:
        message("FATAL: Please provide either a predictions genbank or tsv file", "RED", "stderr")
        sys.exit(1)
phage, nonphage = actual_phage_cds(args.testfile, args.v)
compare_real_predicted(phage, nonphage, pred, args.fp, args.fn, args.v)
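# Example invocation (illustrative file names, not shipped with the repo):
#   python compare_predictions_to_phages.py -t test_set.gbk.gz -p predictions.gbk --fp -v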
| linsalrob/PhiSpy | scripts/compare_predictions_to_phages.py | Python | mit | 8,988 |
from pymarkdownlint.tests.base import BaseTestCase
from pymarkdownlint.config import LintConfig, LintConfigError
from pymarkdownlint import rules
class LintConfigTests(BaseTestCase):
def test_get_rule_by_name_or_id(self):
config = LintConfig()
# get by id
expected = rules.MaxLineLengthRule()
rule = config.get_rule_by_name_or_id('R1')
self.assertEqual(rule, expected)
# get by name
expected = rules.TrailingWhiteSpace()
rule = config.get_rule_by_name_or_id('trailing-whitespace')
self.assertEqual(rule, expected)
# get non-existing
rule = config.get_rule_by_name_or_id('foo')
self.assertIsNone(rule)
def test_default_rules(self):
config = LintConfig()
expected_rule_classes = [rules.MaxLineLengthRule, rules.TrailingWhiteSpace, rules.HardTab]
expected_rules = [rule_cls() for rule_cls in expected_rule_classes]
self.assertEqual(config.default_rule_classes, expected_rule_classes)
self.assertEqual(config.rules, expected_rules)
def test_load_config_from_file(self):
# regular config file load, no problems
LintConfig.load_from_file(self.get_sample_path("markdownlint"))
# bad config file load
foo_path = self.get_sample_path("foo")
with self.assertRaisesRegexp(LintConfigError, "Invalid file path: {0}".format(foo_path)):
LintConfig.load_from_file(foo_path)
# error during file parsing
        bad_markdownlint_path = self.get_sample_path("badmarkdownlint")
        expected_error_msg = "Error during config file parsing: File contains no section headers."
        with self.assertRaisesRegexp(LintConfigError, expected_error_msg):
            LintConfig.load_from_file(bad_markdownlint_path)
| jorisroovers/pymarkdownlint | pymarkdownlint/tests/test_config.py | Python | mit | 1,809 |
"""
homeassistant.components.binary_sensor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Component to interface with binary sensors (sensors which only know two states)
that can be monitored.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
DOMAIN = 'binary_sensor'
DEPENDENCIES = []
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
def setup(hass, config):
""" Track states and offer events for binary sensors. """
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
component.setup(config)
return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
""" Represents a binary sensor. """
@property
def is_on(self):
""" True if the binary sensor is on. """
return None
@property
def state(self):
""" Returns the state of the binary sensor. """
return STATE_ON if self.is_on else STATE_OFF
@property
def friendly_state(self):
""" Returns the friendly state of the binary sensor. """
return None
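# Example subclass (a minimal sketch; DemoSensor is an illustrative name,
# not part of Home Assistant):
class DemoSensor(BinarySensorDevice):
    """ A demo binary sensor that always reports on. """
    @property
    def is_on(self):
        """ True if the binary sensor is on. """
        return True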
| badele/home-assistant | homeassistant/components/binary_sensor/__init__.py | Python | mit | 1,321 |
#! /usr/bin/env python
import sys
sys.setrecursionlimit(150000)
import time
count = 0
def my_yield():
global count
count = count + 1
yield
def run_co(c):
global count
count = 0
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    for r in c: pass  # drive the coroutine to completion
    t = time.perf_counter()
dt = t-t0
print(dt,count,count / dt)
return r
def parallel_(p1,p2):
r1 = None
r2 = None
while(r1 == None or r2 == None):
if(r1 == None):
r1 = next(p1)
if(r2 == None):
r2 = next(p2)
for x in my_yield() :
yield
for x in my_yield() :
yield r1,r2
def parallel_many_aux(ps,l,u):
if(l < u):
for x in parallel_(ps[l],parallel_many_aux(ps,l+1,u)):
for y in my_yield():
yield x
yield 0
def parallel_many_(ps):
return parallel_many_aux(ps,0,len(ps))
def parallel_first_(p1,p2):
r1 = None
r2 = None
while(r1 == None and r2 == None):
if(r1 == None):
r1 = next(p1)
if(r2 == None):
r2 = next(p2)
for x in my_yield() :
yield
for x in my_yield() :
yield r1,r2
def wait(max_dt):
    t0 = time.perf_counter()
    t = time.perf_counter()
while(t - t0 < max_dt):
for x in my_yield() :
yield
#print(t - t0)
        t = time.perf_counter()
for x in my_yield() :
yield 0
def fibo_co(n):
if(n==0):
for x in my_yield() :
yield 0
else:
if(n==1):
for x in my_yield() :
yield 1
else:
for x in my_yield() :
yield
for n1 in fibo_co(n-1):
for x in my_yield() :
yield
for n2 in fibo_co(n-2):
for x in my_yield() :
yield
for x in my_yield() :
yield n1+n2
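# Example: run_co(fibo_co(10)) prints (elapsed, yield count, yields/sec)
# and returns fib(10) == 55.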
def log(i):
#print("log ", i)
for x in wait(2.0):
for x in my_yield() :
yield
for x in log(i+1):
for x in my_yield() :
yield
def fibo_test():
run_co(parallel_first_(fibo_co(25),log(0)))
def many_fibs(n):
#yield return log(0);
for i in range(0,n+1):
yield fibo_co(i + 5)
def many_fibs_test():
run_co(parallel_many_(list(many_fibs(15))))
#fibo_test()
#many_fibs_test()
class Entity:
pass
class State:
pass
def simple_mk_ship(n):
s = Entity()
s.Position = 100.0 - n
return s
def add_ship(new_ship, run_ship, state):
state.Entities.append(new_ship)
for result in run_ship(new_ship):
for x in my_yield():
yield
state.Entities.remove(new_ship)
for x in my_yield():
yield result
def simple_run_ship(self):
while(self.Position > 0.0):
self.Position = self.Position - 0.1
for x in my_yield():
yield
yield 0
def many_ships(n,state):
if(n > 0):
for x in parallel_(add_ship(simple_mk_ship(n), simple_run_ship, state), many_ships(n-1,state)):
for x in my_yield():
yield
yield 0
def log_ships(state):
while(True):
print("there are ", len(state.Entities), " ships")
for x in wait(2.0):
for x in my_yield():
yield
def state_access_test():
state = State()
state.Entities = []
#test = parallel_(many_ships(200,state),log_ships(state))
test = many_ships(200,state)
run_co(test)
state_access_test()
| vs-team/Papers | 0. MonadicCoroutines/Src/MonadicCoroutines/CSharp/main.py | Python | mit | 3,063 |
from django import forms
class SearchForm(forms.Form):
criteria = forms.CharField(label='Criteria', max_length=100, required=True) | chaocodes/playlist-manager-django | manager/search/forms.py | Python | mit | 135 |
import itertools
class Solution:
def combine(self, n, k):
return [list(elem) for elem in itertools.combinations(xrange(1, n + 1), k)]
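# Example: Solution().combine(4, 2) -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]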
| rahul-ramadas/leetcode | combinations/Solution.6808610.py | Python | mit | 132 |
import os
import unittest
from erettsegit import argparse, yearify, monthify, levelify
from erettsegit import MessageType, message_for
class TestErettsegit(unittest.TestCase):
    def test_yearify_raises_out_of_bounds_years(self):
        # Each call needs its own context manager; in a single `with` block
        # the calls after the first raise would never execute.
        for bad_year in (2003, 1999, 2, 2999):
            with self.assertRaises(argparse.ArgumentTypeError):
                yearify(bad_year)
def test_yearify_pads_short_year(self):
self.assertEqual(yearify(12), 2012)
def test_monthify_handles_textual_dates(self):
self.assertEqual(monthify('Feb'), 2)
self.assertEqual(monthify('majus'), 5)
self.assertEqual(monthify('ősz'), 10)
def test_levelify_handles_multi_lang(self):
self.assertEqual(levelify('mid-level'), 'k')
self.assertEqual(levelify('advanced'), 'e')
def test_messages_get_interpolated_with_extra(self):
os.environ['ERETTSEGIT_LANG'] = 'EN'
self.assertEqual(message_for(MessageType.e_input, MessageType.c_year),
'incorrect year')
def test_messages_ignore_unnecessary_extra(self):
self.assertNotIn('None', message_for(MessageType.i_quit, extra=None))
| z2s8/erettsegit | test_erettsegit.py | Python | mit | 1,186 |
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.paginator import InvalidPage, Paginator
from django.db import models
from django.http import HttpResponseRedirect
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from nadmin.util import lookup_field, display_for_field, label_for_field, boolean_icon
from base import ModelAdminView, filter_hook, inclusion_tag, csrf_protect_m
# List settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
PAGE_VAR = 'p'
TO_FIELD_VAR = 't'
COL_LIST_VAR = '_cols'
ERROR_FLAG = 'e'
DOT = '.'
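# Example ordering query string parsed below (field names are illustrative):
#   ?o=-pub_date.title  ->  order by pub_date descending, then title ascending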
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = _('Null')
class FakeMethodField(object):
"""
This class used when a column is an model function, wrap function as a fake field to display in select columns.
"""
def __init__(self, name, verbose_name):
# Initial comm field attrs
self.name = name
self.verbose_name = verbose_name
self.primary_key = False
class ResultRow(dict):
pass
class ResultItem(object):
def __init__(self, field_name, row):
self.classes = []
self.text = ' '
self.wraps = []
self.tag = 'td'
self.tag_attrs = []
self.allow_tags = False
self.btns = []
self.menus = []
self.is_display_link = False
self.row = row
self.field_name = field_name
self.field = None
self.attr = None
self.value = None
@property
def label(self):
text = mark_safe(
self.text) if self.allow_tags else conditional_escape(self.text)
if force_unicode(text) == '':
text = mark_safe(' ')
for wrap in self.wraps:
text = mark_safe(wrap % text)
return text
@property
def tagattrs(self):
return mark_safe(
'%s%s' % ((self.tag_attrs and ' '.join(self.tag_attrs) or ''),
(self.classes and (' class="%s"' % ' '.join(self.classes)) or '')))
class ResultHeader(ResultItem):
def __init__(self, field_name, row):
super(ResultHeader, self).__init__(field_name, row)
self.tag = 'th'
self.tag_attrs = ['scope="col"']
self.sortable = False
self.allow_tags = True
self.sorted = False
self.ascending = None
self.sort_priority = None
self.url_primary = None
self.url_remove = None
self.url_toggle = None
class ListAdminView(ModelAdminView):
"""
    Display model objects in a list view. This class has ordering and simple filter features.
"""
list_display = ('__str__',)
list_display_links = ()
list_display_links_details = False
list_select_related = None
list_per_page = 50
list_max_show_all = 200
list_exclude = ()
search_fields = ()
paginator_class = Paginator
ordering = None
# Change list templates
object_list_template = None
def init_request(self, *args, **kwargs):
if not self.has_view_permission():
raise PermissionDenied
request = self.request
request.session['LIST_QUERY'] = (self.model_info, self.request.META['QUERY_STRING'])
self.pk_attname = self.opts.pk.attname
self.lookup_opts = self.opts
self.list_display = self.get_list_display()
self.list_display_links = self.get_list_display_links()
# Get page number parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
# Get params from request
self.show_all = ALL_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
@filter_hook
def get_list_display(self):
"""
Return a sequence containing the fields to be displayed on the list.
"""
self.base_list_display = (COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR] != "" and \
self.request.GET[COL_LIST_VAR].split('.')) or self.list_display
return list(self.base_list_display)
@filter_hook
def get_list_display_links(self):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or not self.list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(self.list_display)[:1]
def make_result_list(self):
# Get search parameters from the query string.
self.base_queryset = self.queryset()
self.list_queryset = self.get_list_queryset()
self.ordering_field_columns = self.get_ordering_field_columns()
self.paginator = self.get_paginator()
# Get the number of objects, with admin filters applied.
self.result_count = self.paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization: Check to see whether any filters were
# given. If not, use paginator.hits to calculate the number of objects,
# because we've already done paginator.hits and the value is cached.
if not self.list_queryset.query.where:
self.full_result_count = self.result_count
else:
self.full_result_count = self.base_queryset.count()
self.can_show_all = self.result_count <= self.list_max_show_all
self.multi_page = self.result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and self.can_show_all) or not self.multi_page:
self.result_list = self.list_queryset._clone()
else:
try:
self.result_list = self.paginator.page(
self.page_num + 1).object_list
except InvalidPage:
if ERROR_FLAG in self.request.GET.keys():
return SimpleTemplateResponse('nadmin/views/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(self.request.path + '?' + ERROR_FLAG + '=1')
self.has_more = self.result_count > (
self.list_per_page * self.page_num + len(self.result_list))
@filter_hook
def get_result_list(self):
return self.make_result_list()
@filter_hook
def post_result_list(self):
return self.make_result_list()
@filter_hook
def get_list_queryset(self):
"""
        Get the model queryset. The query has been filtered and ordered.
"""
# First, get queryset from base class.
queryset = self.queryset()
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not queryset.query.select_related:
if self.list_select_related:
queryset = queryset.select_related()
elif self.list_select_related is None:
related_fields = []
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
related_fields.append(field_name)
if related_fields:
queryset = queryset.select_related(*related_fields)
else:
pass
# Then, set queryset ordering.
queryset = queryset.order_by(*self.get_ordering())
# Return the queryset.
return queryset
# List ordering
def _get_default_ordering(self):
ordering = []
if self.ordering:
ordering = self.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
@filter_hook
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self, field_name):
attr = getattr(self, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
@filter_hook
def get_ordering(self):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
ordering = list(super(ListAdminView, self).get_ordering()
or self._get_default_ordering())
if ORDER_VAR in self.params and self.params[ORDER_VAR]:
# Clear ordering and used params
ordering = [pfx + self.get_ordering_field(field_name) for n, pfx, field_name in
map(
lambda p: p.rpartition('-'),
self.params[ORDER_VAR].split('.'))
if self.get_ordering_field(field_name)]
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
@filter_hook
def get_ordering_field_columns(self):
"""
Returns a SortedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = SortedDict()
if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for attr in self.list_display:
if self.get_ordering_field(attr) == field:
ordering_fields[field] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, field_name = p.rpartition('-')
ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_check_field_url(self, f):
"""
        Return the select column menu item links.
        We must use base_list_display, because list_display may be changed by plugins.
"""
fields = [fd for fd in self.base_list_display if fd != f.name]
if len(self.base_list_display) == len(fields):
if f.primary_key:
fields.insert(0, f.name)
else:
fields.append(f.name)
return self.get_query_string({COL_LIST_VAR: '.'.join(fields)})
def get_model_method_fields(self):
"""
        Return the fields info defined in the model. Use the FakeMethodField class to wrap a method as a fake db field.
"""
methods = []
for name in dir(self):
try:
if getattr(getattr(self, name), 'is_column', False):
methods.append((name, getattr(self, name)))
except:
pass
return [FakeMethodField(name, getattr(method, 'short_description', capfirst(name.replace('_', ' '))))
for name, method in methods]
@filter_hook
def get_context(self):
"""
Prepare the context for templates.
"""
self.title = _('%s List') % force_unicode(self.opts.verbose_name)
model_fields = [(f, f.name in self.list_display, self.get_check_field_url(f))
for f in (self.opts.fields + tuple(self.get_model_method_fields())) if f.name not in self.list_exclude]
new_context = {
'model_name': force_unicode(self.opts.verbose_name_plural),
'title': self.title,
'cl': self,
'model_fields': model_fields,
'clean_select_field_url': self.get_query_string(remove=[COL_LIST_VAR]),
'has_add_permission': self.has_add_permission(),
'app_label': self.app_label,
'brand_name': self.opts.verbose_name_plural,
'brand_icon': self.get_model_icon(self.model),
'add_url': self.model_admin_url('add'),
'result_headers': self.result_headers(),
'results': self.results()
}
context = super(ListAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_response(self, context, *args, **kwargs):
pass
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
"""
The 'change list' admin view for this model.
"""
response = self.get_result_list()
if response:
return response
context = self.get_context()
context.update(kwargs or {})
response = self.get_response(context, *args, **kwargs)
return response or TemplateResponse(request, self.object_list_template or
self.get_template_list('views/model_list.html'), context, current_app=self.admin_site.name)
@filter_hook
def post_response(self, *args, **kwargs):
pass
@csrf_protect_m
@filter_hook
def post(self, request, *args, **kwargs):
return self.post_result_list() or self.post_response(*args, **kwargs) or self.get(request, *args, **kwargs)
@filter_hook
def get_paginator(self):
return self.paginator_class(self.list_queryset, self.list_per_page, 0, True)
@filter_hook
def get_page_number(self, i):
if i == DOT:
return mark_safe(u'<span class="dot-page">...</span> ')
elif i == self.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i + 1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(self.get_query_string({PAGE_VAR: i})), (i == self.paginator.num_pages - 1 and ' class="end"' or ''), i + 1))
# Result List methods
@filter_hook
def result_header(self, field_name, row):
ordering_field_columns = self.ordering_field_columns
item = ResultHeader(field_name, row)
text, attr = label_for_field(field_name, self.model,
model_admin=self,
return_attr=True
)
item.text = text
item.attr = attr
if attr and not getattr(attr, "admin_order_field", None):
return item
# OK, it is sortable if we got this far
th_classes = ['sortable']
order_type = ''
new_order_type = 'desc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if field_name in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(field_name).lower()
sort_priority = ordering_field_columns.keys().index(field_name) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
        o_list_asc = []  # URL for making this field the primary sort (ascending)
        o_list_desc = []  # URL for making this field the primary sort (descending)
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == field_name: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_asc.insert(0, j)
o_list_desc.insert(0, '-' + j)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_asc.append(param)
o_list_desc.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if field_name not in ordering_field_columns:
o_list_asc.insert(0, field_name)
o_list_desc.insert(0, '-' + field_name)
item.sorted = sorted
item.sortable = True
item.ascending = (order_type == "asc")
item.sort_priority = sort_priority
menus = [
('asc', o_list_asc, 'caret-up', _(u'Sort ASC')),
('desc', o_list_desc, 'caret-down', _(u'Sort DESC')),
]
if sorted:
row['num_sorted_fields'] = row['num_sorted_fields'] + 1
menus.append((None, o_list_remove, 'times', _(u'Cancel Sort')))
item.btns.append('<a class="toggle" href="%s"><i class="fa fa-%s"></i></a>' % (
self.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}), 'sort-up' if order_type == "asc" else 'sort-down'))
item.menus.extend(['<li%s><a href="%s" class="active"><i class="fa fa-%s"></i> %s</a></li>' %
(
(' class="active"' if sorted and order_type == i[
0] else ''),
self.get_query_string({ORDER_VAR: '.'.join(i[1])}), i[2], i[3]) for i in menus])
item.classes.extend(th_classes)
return item
@filter_hook
def result_headers(self):
"""
Generates the list column headers.
"""
row = ResultRow()
row['num_sorted_fields'] = 0
row.cells = [self.result_header(
field_name, row) for field_name in self.list_display]
return row
@filter_hook
def result_item(self, obj, field_name, row):
"""
Generates the actual list of data.
"""
item = ResultItem(field_name, row)
try:
f, attr, value = lookup_field(field_name, obj, self)
except (AttributeError, ObjectDoesNotExist):
item.text = mark_safe("<span class='text-muted'>%s</span>" % EMPTY_CHANGELIST_VALUE)
else:
if f is None:
item.allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
item.allow_tags = True
item.text = boolean_icon(value)
else:
item.text = smart_unicode(value)
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(obj, f.name)
if field_val is None:
item.text = mark_safe("<span class='text-muted'>%s</span>" % EMPTY_CHANGELIST_VALUE)
else:
item.text = field_val
else:
item.text = display_for_field(value, f)
if isinstance(f, models.DateField)\
or isinstance(f, models.TimeField)\
or isinstance(f, models.ForeignKey):
item.classes.append('nowrap')
item.field = f
item.attr = attr
item.value = value
# If list_display_links not defined, add the link tag to the first field
if (item.row['is_display_first'] and not self.list_display_links) \
or field_name in self.list_display_links:
item.row['is_display_first'] = False
item.is_display_link = True
if self.list_display_links_details:
item_res_uri = self.model_admin_url("detail", getattr(obj, self.pk_attname))
if item_res_uri:
if self.has_change_permission(obj):
edit_url = self.model_admin_url("change", getattr(obj, self.pk_attname))
else:
edit_url = ""
item.wraps.append('<a data-res-uri="%s" data-edit-uri="%s" class="details-handler" rel="tooltip" title="%s">%%s</a>'
% (item_res_uri, edit_url, _(u'Details of %s') % str(obj)))
else:
url = self.url_for_result(obj)
item.wraps.append(u'<a href="%s">%%s</a>' % url)
return item
@filter_hook
def result_row(self, obj):
row = ResultRow()
row['is_display_first'] = True
row['object'] = obj
row.cells = [self.result_item(
obj, field_name, row) for field_name in self.list_display]
return row
@filter_hook
def results(self):
results = []
for obj in self.result_list:
results.append(self.result_row(obj))
return results
@filter_hook
def url_for_result(self, result):
return self.get_object_url(result)
# Media
@filter_hook
def get_media(self):
media = super(ListAdminView, self).get_media() + self.vendor('nadmin.page.list.js', 'nadmin.page.form.js')
if self.list_display_links_details:
media += self.vendor('nadmin.plugin.details.js', 'nadmin.form.css')
return media
# Blocks
@inclusion_tag('nadmin/includes/pagination.html')
def block_pagination(self, context, nodes, page_type='normal'):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = self.paginator, self.page_num
pagination_required = (
not self.show_all or not self.can_show_all) and self.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = {'normal': 5, 'small': 3}.get(page_type, 3)
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
            # Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(
range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(
range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(
paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = self.can_show_all and not self.show_all and self.multi_page
return {
'cl': self,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and self.get_query_string({ALL_VAR: ''}),
'page_range': map(self.get_page_number, page_range),
'ALL_VAR': ALL_VAR,
'1': 1,
}
| A425/django-nadmin | nadmin/views/list.py | Python | mit | 25,811 |
import json
import re
from pygeocoder import Geocoder
from pygeolib import GeocoderError
import requests
# from picasso.index.models import Tag
from picasso.index.models import Address, Listing, Tag
__author__ = 'tmehta'
url = 'http://www.yellowpages.ca/ajax/search/music+teachers/Toronto%2C+ON?sType=si&sort=rel&pg=1&' + \
'skipNonPaids=56&trGeo=43.797452066539165,-79.15031040820315&blGeo=43.55112164714018,-79.6419485917969'
base_url = 'http://www.yellowpages.ca/bus/'
city = 'Toronto'
def extract_cats(p):
try:
p_s = p.split('Products and Services</h3>')[1].split('</span>')[0].replace('<span>', '')
except IndexError:
return []
p_s = p_s.split("'>")[1:]
cats = []
for line in p_s:
cats.append(line.split('</li>')[0])
return cats
def extract_phone(p):
try:
phone = p.split('class="phone"')[1].split('<span >')[1].split('</span>')[0]
except IndexError:
phone = ''
return phone
r = requests.get(url)
listings = json.loads(r.text)['features']
for l in listings:
name = l['properties']['name']
scraped_url = base_url + str(l['properties']['id']) + '.html'
try:
lst = Listing.objects.get(scraped_url=scraped_url)
page = requests.get(scraped_url).text
try:
location = page.split('itemprop="streetAddress">')[1].split('</span>')[0]
except IndexError:
location = ''
try:
postalCode = page.split('itemprop="postalCode">')[1].split('</span>')[0]
except IndexError:
postalCode = ''
lat = l["geometry"]["coordinates"][0]
lon = l["geometry"]["coordinates"][1]
point = "POINT(%s %s)" % (lon, lat)
lst.address.point = point
lst.save()
except Listing.DoesNotExist:
active = True
place = 'Sch'
email = ''
page = requests.get(scraped_url).text
categories = extract_cats(page)
tags = []
        for cat in categories:
            # get_or_create returns a (Tag, created) tuple; keep the Tag so it
            # can be attached to the listing below.
            t, _ = Tag.objects.get_or_create(tag_name=cat)
            tags.append(t)
phone_number = extract_phone(page)
try:
location = page.split('itemprop="streetAddress">')[1].split('</span>')[0]
except IndexError:
location = ''
try:
postalCode = page.split('itemprop="postalCode">')[1].split('</span>')[0]
except IndexError:
postalCode = ''
try:
description = page.split('itemprop="description">')[1].split('</article>')[0].split('<a href')[0].replace(
'<span', '').replace('</span>', '')
except IndexError:
description = ''
lat = l["geometry"]["coordinates"][0]
lon = l["geometry"]["coordinates"][1]
point = "POINT(%s %s)" % (lon, lat)
add = Address.objects.create(location=location, postal_code=postalCode, city=city, point=point)
lst = Listing.objects.create(address=add, listing_name=name, scraped_url=scraped_url, description=description,
phone=phone_number)
for t in tags:
lst.tags.add(t)
lst.save()
| TejasM/picasso | picasso/yellow_pages.py | Python | mit | 3,125 |
import subprocess
with open('names.txt') as f:
names = f.read().splitlines()
with open('portraits.txt') as f:
portraits = f.read().splitlines()
for i, name in enumerate(names):
portrait = portraits[i]
if portrait.endswith('.png'):
subprocess.call(['cp', 'minor/{}'.format(portrait), '{}.png'.format(name.lower())])
| dcripplinger/rotj | data/images/portraits/copy_portraits.py | Python | mit | 341 |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from distutils.core import setup, Extension
setup(name='sample',
ext_modules=[
Extension('sample',
['pysample.c'],
include_dirs=['/some/dir'],
define_macros=[('FOO', '1')],
undef_macros=['BAR'],
library_dirs=['/usr/local/lib'],
libraries=['sample']
)
]
)
| xu6148152/Binea_Python_Project | PythonCookbook/interaction_c/setup.py | Python | mit | 474 |
import logging
import time
import os
from fuocore.models import (
BaseModel,
SongModel,
LyricModel,
PlaylistModel,
AlbumModel,
ArtistModel,
SearchModel,
UserModel,
)
from .provider import provider
logger = logging.getLogger(__name__)
MUSIC_LIBRARY_PATH = os.path.expanduser('~') + '/Music'
class NBaseModel(BaseModel):
    # FIXME: move _detail_fields and _api into Meta
_api = provider.api
class Meta:
allow_get = True
provider = provider
class NSongModel(SongModel, NBaseModel):
@classmethod
def get(cls, identifier):
data = cls._api.song_detail(int(identifier))
song, _ = NeteaseSongSchema(strict=True).load(data)
return song
@classmethod
def list(cls, identifiers):
song_data_list = cls._api.songs_detail(identifiers)
songs = []
for song_data in song_data_list:
song, _ = NeteaseSongSchema(strict=True).load(song_data)
songs.append(song)
return songs
def _refresh_url(self):
"""刷新获取 url,失败的时候返回空而不是 None"""
songs = self._api.weapi_songs_url([int(self.identifier)])
if songs and songs[0]['url']:
self.url = songs[0]['url']
else:
self.url = ''
def _find_in_local(self):
# TODO: make this a API in SongModel
path = os.path.join(MUSIC_LIBRARY_PATH, self.filename)
if os.path.exists(path):
logger.debug('find local file for {}'.format(self))
return path
return None
# NOTE: if we want to override model attribute, we must
# implement both getter and setter.
@property
def url(self):
"""
We will always check if this song file exists in local library,
if true, we return the url of the local file.
.. note::
As netease song url will be expired after a period of time,
we can not use static url here. Currently, we assume that the
expiration time is 20 minutes, after the url expires, it
will be automaticly refreshed.
"""
local_path = self._find_in_local()
if local_path:
return local_path
if not self._url:
self._refresh_url()
elif time.time() > self._expired_at:
logger.info('song({}) url is expired, refresh...'.format(self))
self._refresh_url()
return self._url
@url.setter
def url(self, value):
self._expired_at = time.time() + 60 * 20 * 1 # 20 minutes
self._url = value
@property
def lyric(self):
if self._lyric is not None:
assert isinstance(self._lyric, LyricModel)
return self._lyric
data = self._api.get_lyric_by_songid(self.identifier)
lrc = data.get('lrc', {})
lyric = lrc.get('lyric', '')
self._lyric = LyricModel(
identifier=self.identifier,
content=lyric
)
return self._lyric
@lyric.setter
def lyric(self, value):
self._lyric = value
class NAlbumModel(AlbumModel, NBaseModel):
@classmethod
def get(cls, identifier):
album_data = cls._api.album_infos(identifier)
if album_data is None:
return None
album, _ = NeteaseAlbumSchema(strict=True).load(album_data)
return album
@property
def desc(self):
if self._desc is None:
self._desc = self._api.album_desc(self.identifier)
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
class NArtistModel(ArtistModel, NBaseModel):
@classmethod
def get(cls, identifier):
artist_data = cls._api.artist_infos(identifier)
artist = artist_data['artist']
artist['songs'] = artist_data['hotSongs'] or []
artist, _ = NeteaseArtistSchema(strict=True).load(artist)
return artist
@property
def desc(self):
if self._desc is None:
self._desc = self._api.artist_desc(self.identifier)
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
class NPlaylistModel(PlaylistModel, NBaseModel):
class Meta:
        fields = ('uid', )
@classmethod
def get(cls, identifier):
data = cls._api.playlist_detail(identifier)
playlist, _ = NeteasePlaylistSchema(strict=True).load(data)
return playlist
def add(self, song_id, allow_exist=True):
rv = self._api.op_music_to_playlist(song_id, self.identifier, 'add')
if rv == 1:
song = NSongModel.get(song_id)
self.songs.append(song)
return True
elif rv == -1:
return True
return False
def remove(self, song_id, allow_not_exist=True):
rv = self._api.op_music_to_playlist(song_id, self.identifier, 'del')
if rv != 1:
return False
# XXX: make it O(1) if you want
for song in self.songs:
if song.identifier == song_id:
self.songs.remove(song)
return True
class NSearchModel(SearchModel, NBaseModel):
pass
class NUserModel(UserModel, NBaseModel):
class Meta:
fields = ('cookies', )
fields_no_get = ('cookies', )
@classmethod
def get(cls, identifier):
user = {'id': identifier}
user_brief = cls._api.user_brief(identifier)
user.update(user_brief)
playlists = cls._api.user_playlists(identifier)
user['playlists'] = []
user['fav_playlists'] = []
for pl in playlists:
if pl['userId'] == identifier:
user['playlists'].append(pl)
else:
user['fav_playlists'].append(pl)
user, _ = NeteaseUserSchema(strict=True).load(user)
return user
def search(keyword, **kwargs):
_songs = provider.api.search(keyword)
id_song_map = {}
songs = []
if _songs:
for song in _songs:
id_song_map[str(song['id'])] = song
schema = NeteaseSongSchema(strict=True)
s, _ = schema.load(song)
songs.append(s)
return NSearchModel(q=keyword, songs=songs)
# imported at the bottom to avoid a circular import
from .schemas import (
NeteaseSongSchema,
NeteaseAlbumSchema,
NeteaseArtistSchema,
NeteasePlaylistSchema,
NeteaseUserSchema,
) # noqa
| cosven/feeluown-core | fuocore/netease/models.py | Python | mit | 6,421 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kirppu_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| jlaunonen/kirppu | manage.py | Python | mit | 257 |
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
import properties
from datetime import datetime, date
from ..base_model import BaseModel
class Domain(properties.HasProperties):
"""This represents a full search query."""
OR = 'OR'
AND = 'AND'
def __init__(self, queries=None, join_with=AND):
"""Initialize a domain, with optional queries."""
self.query = []
if queries is not None:
for query in queries:
self.add_query(query, join_with)
@classmethod
def from_tuple(cls, queries):
"""Create a ``Domain`` given a set of complex query tuples.
Args:
queries (iter): An iterator of complex queries. Each iteration
should contain either:
* A data-set compatible with :func:`~domain.Domain.add_query`
* A string to switch the join type
Example::
[('subject', 'Test1'),
'OR',
('subject', 'Test2')',
('subject', 'Test3')',
]
# The above is equivalent to:
# subject:'Test1' OR subject:'Test2' OR subject:'Test3'
[('modified_at', datetime(2017, 01, 01)),
('status', 'active'),
]
# The above is equivalent to:
# modified_at:[2017-01-01T00:00:00Z TO *]
# AND status:"active"
Returns:
Domain: A domain representing the input queries.
"""
domain = cls()
join_with = cls.AND
for query in queries:
if query in [cls.OR, cls.AND]:
join_with = query
else:
domain.add_query(query, join_with)
return domain
def add_query(self, query, join_with=AND):
"""Join a new query to existing queries on the stack.
Args:
query (tuple or list or DomainCondition): The condition for the
query. If a ``DomainCondition`` object is not provided, the
input should conform to the interface defined in
:func:`~.domain.DomainCondition.from_tuple`.
join_with (str): The join string to apply, if other queries are
already on the stack.
"""
if not isinstance(query, DomainCondition):
query = DomainCondition.from_tuple(query)
if len(self.query):
self.query.append(join_with)
self.query.append(query)
def __str__(self):
"""Return a string usable as the query in an API request."""
if not self.query:
return '*'
return '(%s)' % ' '.join([str(q) for q in self.query])
class DomainCondition(properties.HasProperties):
"""This represents one condition of a domain query."""
field = properties.String(
'Field to search on',
required=True,
)
value = properties.String(
'String Value',
required=True,
)
@property
def field_name(self):
"""Return the name of the API field."""
return BaseModel._to_camel_case(self.field)
def __init__(self, field, value, **kwargs):
"""Initialize a new generic query condition.
Args:
field (str): Field name to search on. This should be the
Pythonified name as in the internal models, not the
name as provided in the API e.g. ``first_name`` for
the Customer's first name instead of ``firstName``.
value (mixed): The value of the field.
"""
return super(DomainCondition, self).__init__(
field=field, value=value, **kwargs
)
@classmethod
def from_tuple(cls, query):
"""Create a condition from a query tuple.
Args:
query (tuple or list): Tuple or list that contains a query domain
in the format of ``(field_name, field_value,
field_value_to)``. ``field_value_to`` is only applicable in
the case of a date search.
Returns:
DomainCondition: An instance of a domain condition. The specific
type will depend on the data type of the first value provided
in ``query``.
"""
field, query = query[0], query[1:]
try:
cls = TYPES[type(query[0])]
except KeyError:
# We just fallback to the base class if unknown type.
pass
return cls(field, *query)
def __str__(self):
"""Return a string usable as a query part in an API request."""
return '%s:"%s"' % (self.field_name, self.value)
class DomainConditionBoolean(DomainCondition):
"""This represents an integer query."""
value = properties.Bool(
'Boolean Value',
required=True,
)
def __str__(self):
"""Return a string usable as a query part in an API request."""
value = 'true' if self.value else 'false'
return '%s:%s' % (self.field_name, value)
class DomainConditionInteger(DomainCondition):
"""This represents an integer query."""
value = properties.Integer(
'Integer Value',
required=True,
)
def __str__(self):
"""Return a string usable as a query part in an API request."""
return '%s:%d' % (self.field_name, self.value)
class DomainConditionDateTime(DomainCondition):
"""This represents a date time query."""
value = properties.DateTime(
'Date From',
required=True,
)
value_to = properties.DateTime(
'Date To',
)
def __init__(self, field, value_from, value_to=None):
"""Initialize a new datetime query condition.
Args:
field (str): Field name to search on. This should be the
Pythonified name as in the internal models, not the
name as provided in the API e.g. ``first_name`` for
the Customer's first name instead of ``firstName``.
value_from (date or datetime): The start value of the field.
value_to (date or datetime, optional): The ending value for
the field. If omitted, will search to now.
"""
return super(DomainConditionDateTime, self).__init__(
field=field, value=value_from, value_to=value_to,
)
def __str__(self):
"""Return a string usable as a query part in an API request."""
value_to = self.value_to.isoformat() if self.value_to else '*'
return '%s:[%sZ TO %sZ]' % (
self.field_name,
self.value.isoformat(),
value_to,
)
TYPES = {
bool: DomainConditionBoolean,
int: DomainConditionInteger,
date: DomainConditionDateTime,
datetime: DomainConditionDateTime,
}
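# Example usage (a minimal sketch; the field name and values are illustrative):
#
#   domain = Domain.from_tuple([
#       ('subject', 'Test1'),
#       Domain.OR,
#       ('subject', 'Test2'),
#   ])
#   str(domain)  # -> '(subject:"Test1" OR subject:"Test2")'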
__all__ = [
'Domain',
'DomainCondition',
'DomainConditionBoolean',
'DomainConditionDateTime',
'DomainConditionInteger',
]
| LasLabs/python-helpscout | helpscout/domain/__init__.py | Python | mit | 7,131 |
"""
Pylot command line tool
manage.py
Command line tool to manage your application
"""
import argparse
from application import get_config
import application.model as model
from pylot import utils
config = get_config()
NAME = "Pylot Manager"
__version__ = config.APP_VERSION
def setup():
# Create all db
model.db.create_all()
roles = ["user", "admin", "superadmin"]
# Setup the SUPERADMIN
email = config.ADMIN_EMAIL
name = config.ADMIN_NAME
user = model.User.get_by_email(email)
if not user:
        model.User.new(email=email,
                       name=name,
                       role="SUPERADMIN")
def main():
parser = argparse.ArgumentParser(description="%s v.%s" % (NAME, __version__))
parser.add_argument("--setup", help="Setup the system", action="store_true")
parser.add_argument("--upload-static-to-s3", help="Upload all static files to S3", action="store_true")
arg = parser.parse_args()
if arg.setup:
# Default setup
print("Setting up...")
setup()
if arg.upload_static_to_s3:
# Upload static files to s3
import flask_s3
import run_www # Or the main application run file
print("Upload static files to S3")
flask_s3.create_all(run_www.app)
if __name__ == "__main__":
main()
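# Example invocations (flags defined above):
#   python manage.py --setup
#   python manage.py --upload-static-to-s3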
| mardix/pylot | pylot/app_templates/manage.py | Python | mit | 1,343 |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP3, OP_DROP
from binascii import hexlify, unhexlify
from io import BytesIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(unhexlify(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(hexlify(tx.serialize()))
tx = CTransaction()
f = BytesIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
    def generate_blocks(self, number, version, test_blocks = None):
        # A mutable default argument ([]) would be shared across calls,
        # leaking blocks from one test stage into the next.
        if test_blocks is None:
            test_blocks = []
        for i in xrange(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
        raise IndexError('key:"%s" not found' % key)
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature):
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
        self.tip = int("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
        # Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 4
        # 143 more non-signalling version-4 blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
stop_nodes(self.nodes)
wait_bitcoinds()
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 536870913, self.sequence_lock_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.mtp_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.donothing, self.csv_invalidate)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
'''Modify the signature in vin 0 of the tx to fail CSV
        Prepends -1 CSV DROP to the scriptSig itself.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP3, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
        '''Modify the nSequence to make it fail once the sequence lock rule is activated (high timespan)
'''
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
        '''Modify the nLockTime to make it fail once the MTP rule is activated
'''
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
| Kangmo/bitcoin | qa/rpc-tests/bip9-softforks.py | Python | mit | 8,775 |
while True:
input_number = int(raw_input())
if input_number == 42:
break
print input_number,
exit() | sandy-8925/codechef | test.py | Python | mit | 112 |
import json
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
def jsonfilter(value):
return mark_safe(json.dumps(value))
register.filter('json', jsonfilter)
| Open511/open511-server | open511_server/templatetags/open511.py | Python | mit | 220 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from django.contrib.auth.models import (
User
)
from pastryio.models.mixins import ArchiveMixin
class BaseProfile(ArchiveMixin):
user = models.OneToOneField(User)
avatar = models.ImageField(_("avatar"), blank=True)
def __unicode__(self):
return self.user.username
@property
def slug(self):
return slugify(self.user.first_name)
| octaflop/pastryio | apps/profiles/models.py | Python | mit | 496 |
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django import forms
from django.conf import settings
import warnings
try:
from keyczar import keyczar
except ImportError:
raise ImportError('Using an encrypted field requires the Keyczar module. '
'You can obtain Keyczar from http://www.keyczar.org/.')
class EncryptionWarning(RuntimeWarning):
pass
class BaseEncryptedField(models.Field):
prefix = 'enc_str:::'
def __init__(self, *args, **kwargs):
if not hasattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR'):
raise ImproperlyConfigured('You must set the ENCRYPTED_FIELD_KEYS_DIR setting to your Keyczar keys directory.')
self.crypt = keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
# Encrypted size is larger than unencrypted
self.unencrypted_length = max_length = kwargs.get('max_length', None)
if max_length:
max_length = len(self.prefix) + len(self.crypt.Encrypt('x' * max_length))
# TODO: Re-examine if this logic will actually make a large-enough
# max-length for unicode strings that have non-ascii characters in them.
kwargs['max_length'] = max_length
super(BaseEncryptedField, self).__init__(*args, **kwargs)
def to_python(self, value):
if isinstance(self.crypt.primary_key, keyczar.keys.RsaPublicKey):
retval = value
elif value and (value.startswith(self.prefix)):
retval = self.crypt.Decrypt(value[len(self.prefix):])
if retval:
retval = retval.decode('utf-8')
else:
retval = value
return retval
def get_db_prep_value(self, value, connection, prepared=False):
if value and not value.startswith(self.prefix):
# We need to encode a unicode string into a byte string, first.
# keyczar expects a bytestring, not a unicode string.
            if isinstance(value, unicode):
value = value.encode('utf-8')
# Truncated encrypted content is unreadable,
# so truncate before encryption
max_length = self.unencrypted_length
if max_length and len(value) > max_length:
warnings.warn("Truncating field %s from %d to %d bytes" % (
self.name, len(value), max_length), EncryptionWarning
)
value = value[:max_length]
value = self.prefix + self.crypt.Encrypt(value)
return value
class EncryptedTextField(BaseEncryptedField):
__metaclass__ = models.SubfieldBase
def get_internal_type(self):
return 'TextField'
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(EncryptedTextField, self).formfield(**defaults)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
class EncryptedCharField(BaseEncryptedField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
super(EncryptedCharField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "CharField"
def formfield(self, **kwargs):
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(EncryptedCharField, self).formfield(**defaults)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.CharField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
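# Usage sketch (illustrative; the model and field names below are hypothetical,
# and settings.ENCRYPTED_FIELD_KEYS_DIR must point at an existing Keyczar key set):
#
# from django.db import models
# from django_extensions.db.fields.encrypted import EncryptedCharField
#
# class Patient(models.Model):
#     name = models.CharField(max_length=100)
#     diagnosis = EncryptedCharField(max_length=200, blank=True)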
| orbitvu/django-extensions | django_extensions/db/fields/encrypted.py | Python | mit | 4,102 |
try:
print 'Importing ....'
from base import * # noqa
from local import * # noqa
except ImportError:
import traceback
print traceback.format_exc()
print 'Unable to find moderation/settings/local.py'
try:
from post_env_commons import * # noqa
except ImportError:
pass
| CareerVillage/slack-moderation | src/moderation/settings/__init__.py | Python | mit | 305 |
from distutils.core import setup, Extension
setup(
name="tentacle_pi.TSL2561",
version="1.0",
packages = ["tentacle_pi"],
ext_modules = [
Extension("tentacle_pi.TSL2561",
sources = ["src/tsl2561.c", "src/tsl2561_ext.c"])
]
)
| lexruee/tsl2561 | setup.py | Python | mit | 237 |
from flask import Flask, jsonify, request, redirect, url_for
import subprocess
import os
import json
from cross_domain import *
app = Flask(__name__)
ALLOWED_EXTENSIONS = set(['mol', 'smi'])
try:
TARGET = os.environ['TARGET']
except Exception:
print 'export TARGET=<path to data>'
exit(1)
try:
AlGDock = os.environ['AlGDock_Pref']
except Exception:
print 'export AlGDock_Pref=<path to BindingPMF_arguments.py>'
exit(1)
import sys
sys.path.insert(0, AlGDock)
from BindingPMF_arguments import *
#File system functions
@app.route('/api/v1.0/proteins', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_protein_names():
proteins = os.walk(TARGET).next()[1]
protein_lst = [{"filename": protein} for protein in sorted(proteins) if protein != "scripts"]
return jsonify({"files": protein_lst})
@app.route('/api/v1.0/ligands/<protein>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_ligand_names(protein):
ligands = os.walk(os.path.join(TARGET, protein, "ligand")).next()[2]
ligand_lst = [{"filename": ligand} for ligand in sorted(ligands) if ".ism" in ligand]
return jsonify({"files": ligand_lst})
@app.route('/api/v1.0/ligandSelection/<protein>/<library>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_ligand_selections(protein, library):
trimmed_library = library.split(".ism")[0] + ".A__"
try:
ligand_selections = sorted(os.walk(os.path.join(TARGET, protein, "AlGDock/cool", trimmed_library)).next()[1])
except Exception:
ligand_selections = None
return jsonify({"ligandSelections": ligand_selections})
@app.route('/api/v1.0/ligandLine/<protein>/<library>/<lineNumber>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_ligand_line(protein, library, lineNumber):
    try:
        path = os.path.join(TARGET, protein, "ligand", library)
        # `with` guarantees the file is closed even when we return mid-loop
        with open(path, 'r') as library_f:
            for i, line in enumerate(library_f):
                if i == int(lineNumber) - 1:
                    return line.split()[0]
    except Exception:
        return None
@app.route('/api/v1.0/addToLibrary/<protein>/', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def add_to_library(protein):
fileJson = request.get_json()
libraryName = fileJson["libraryName"]
smiles = fileJson["smiles"]
path = os.path.join(TARGET, protein, "ligand", libraryName)
library_f = open(path, 'a')
library_f.write(smiles)
library_f.write("\n")
library_f.close()
return "Added ligand to library."
#Gets for preference dropdowns
@app.route('/api/v1.0/protocols', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_protocols():
choices = arguments['protocol']['choices']
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"protocol": choice_lst})
@app.route('/api/v1.0/samplers', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_samplers():
choices = arguments['sampler']['choices']
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"sampler": choice_lst})
@app.route('/api/v1.0/sites', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_sites():
choices = arguments['site']['choices']
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"site": choice_lst})
@app.route('/api/v1.0/phases', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_phases():
choices = allowed_phases
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"phase": choice_lst})
@app.route('/api/v1.0/runtypes', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_runtype():
choices = arguments['run_type']['choices']
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"runtype": choice_lst})
#Saving preferences file
@app.route('/api/v1.0/run/<protein>/<protocol>/<runtype>/<cthermspeed>/<dthermspeed>/<sampler>/<mcmc>/<seedsperstate>/<stepsperseed>/<sweepspercycle>/<attemptspersweep>/<stepspersweep>/<crepxcycles>/<drepxcycles>/<site>/<sxcenter>/<sycenter>/<szcenter>/<sradius>/<sdensity>/<phase>/<cores>/<score>/<from_reps>/<to_reps>/<rmsd>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def save_preferences(protein, protocol, runtype, cthermspeed, dthermspeed, sampler, mcmc, seedsperstate, stepsperseed, sweepspercycle, attemptspersweep, stepspersweep, crepxcycles, drepxcycles, site, sxcenter, sycenter, szcenter, sradius, sdensity, phase, cores, score, from_reps, to_reps, rmsd):
rmsd_n = " "
score_n = " "
if rmsd == "false":
rmsd_n = "#"
if score == "Score" or score == "None":
score_n = "#"
args = ["./create_saved_args.sh", runtype, protocol, cthermspeed, dthermspeed, sampler, mcmc, seedsperstate, stepsperseed, sweepspercycle, attemptspersweep, stepspersweep, crepxcycles, drepxcycles, site, sxcenter, sycenter, szcenter, sradius, sdensity, phase, cores, score, from_reps, to_reps, rmsd, score_n, rmsd_n]
p = subprocess.Popen(args, stdout=subprocess.PIPE)
f = open(os.path.join(TARGET, protein, "AlGDock/saved_arguments.py"), 'w')
f.write(p.stdout.read())
f.close()
return "Preferences File Saved"
#Run button
@app.route('/api/v1.0/run/<protein>/<ligand>/<email>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def run(protein, ligand, email):
run_string = "python " + os.path.join(AlGDock, "../Pipeline/run_anchor_and_grow.py") + " --max_jobs 20 --email " + email + " --ligand " + os.path.join(TARGET, protein, "ligand/dock_in", ligand.split(".ism")[0] + ".A__")
os.chdir(os.path.join(TARGET, protein, "dock6"))
print run_string
os.system(run_string)
return "Job Sent to Cluster"
#Prepare ligands button
@app.route('/api/v1.0/prepLigandLibrary/<protein>/<ligand>/<email>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def prepareLigandLibrary(protein, ligand, email):
run_string = "python " + os.path.join(AlGDock, "../Pipeline/run_prep_ligand_for_dock.py") + " " + ligand + " --email " + email
os.chdir(os.path.join(TARGET, protein, "ligand"))
print run_string
os.system(run_string)
return "Ligand is being prepared."
if __name__ == '__main__':
app.run(debug=True)
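# Quick smoke test (illustrative; assumes the server is running locally on
# Flask's default port 5000 and that TARGET/AlGDock_Pref are exported):
#
#   curl http://localhost:5000/api/v1.0/proteins
#   curl http://localhost:5000/api/v1.0/ligands/<protein>
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"libraryName": "test.ism", "smiles": "CCO"}' \
#        http://localhost:5000/api/v1.0/addToLibrary/<protein>/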
| gkumar7/AlGDock | gui/api/REST.py | Python | mit | 6,256 |
# -*- encoding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014-2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import csv
import codecs
from optparse import make_option
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
from django.core.exceptions import ObjectDoesNotExist
from authdata.models import User, Role, Attribute, UserAttribute, Municipality, School, Attendance, Source
class Command(BaseCommand):
help = """Imports data from CSV file to the database.
Do not put any header to the CSV file. Only provide data separated by commas and quoted with \".
You need to provide at least two arguments: the name of the input file and list of attributes for the User.
For example: manage.py csv_import file.csv dreamschool,facebook,twitter,linkedin,mepin
"""
args = '<csvfile> <attr1,attr2...>'
option_list = BaseCommand.option_list + (
make_option('--source',
action='store',
dest='source',
default='manual',
help='Source value for this run'),
make_option('--municipality',
action='store',
dest='municipality',
default='-',
                    help='Municipality name for this run'),
make_option('--run',
action='store_true',
dest='really_do_this',
default=False,
help='Really run the command'),
make_option('--verbose',
action='store_true',
dest='verbose',
default=False,
help='Verbose'),
)
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError('Wrong parameters, try reading --help')
self.verbose = options['verbose']
self.municipality = options['municipality']
# Create needed Attribute objects to the database
# These are the attributes which can be used to query for User objects in the API
# attribute names are defined in the commandline as the second parameter
# for example: manage.py csv_import file.csv dreamschool,facebook,twitter,linkedin,mepin
self.attribute_names = OrderedDict()
for key in args[1].split(','):
self.attribute_names[key], _ = Attribute.objects.get_or_create(name=key)
self.source, _ = Source.objects.get_or_create(name=options['source'])
# If you need more roles, add them here
self.role_names = OrderedDict()
for r in ['teacher', 'student']:
self.role_names[r], _ = Role.objects.get_or_create(name=r)
csv_data = csv.reader(codecs.open(args[0], 'rb'), delimiter=',', quotechar='"')
for r in csv_data:
# These are the fixed fields for the User. These are returned from the API.
data = {
'username': r[0], # OID
'school': r[1], # School
'group': r[2], # Class
'role': r[3], # Role
'first_name': r[4], # First name
'last_name': r[5], # Last name
}
# This is not mandatory, but it would be nice. Can be changed to error by terminating the script here.
if data['role'] not in self.role_names.keys():
print 'WARNING, role not in:', repr(self.role_names.keys())
attributes = {}
i = 6 # Next csv_data row index is 6 :)
for a in self.attribute_names:
attributes[a] = r[i]
i = i + 1
try:
if self.verbose:
print repr(data)
print repr(attributes)
if options['really_do_this']:
self.really_do_this(data.copy(), attributes.copy())
except IntegrityError, e:
print "ERR IE", e
print repr(data)
print repr(attributes)
except ObjectDoesNotExist, e:
print "ERR ODN", e
print repr(data)
print repr(attributes)
def really_do_this(self, d, a):
# Create User
# User is identified from username and other fields are updated
user, _ = User.objects.get_or_create(username=d['username'])
user.first_name = d['first_name']
user.last_name = d['last_name']
user.save()
# Assign attributes for User
# There can be multiple attributes with the same name and different value.
# This is one of the reasons we have the source parameter to tell where the data came from.
for k, v in a.iteritems():
UserAttribute.objects.get_or_create(user=user, attribute=self.attribute_names[k], value=v, source=self.source)
# Create Municipality
# If you leave this empty on the CLI it will default to '-'
municipality, _ = Municipality.objects.get_or_create(name=self.municipality)
# Create School
# School data is not updated after it is created. Data can be then changed in the admin.
school, _ = School.objects.get_or_create(school_id=d['school'], defaults={'municipality': municipality, 'name': d['school']})
# Create Attendance object for User. There can be more than one Attendance per User.
Attendance.objects.get_or_create(user=user, school=school, role=self.role_names[d['role']], group=d['group'], source=self.source)
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
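# Example (illustrative) input row for `manage.py csv_import file.csv dreamschool`:
#
#   "1.2.246.562.24.123","00123","7B","student","Erkki","Esimerkki","erkki.e"
#
# i.e. username/OID, school id, group, role, first name, last name, then one
# trailing column per attribute named on the command line; pass --run to
# actually write to the database.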
| educloudalliance/eca-auth-data | authdata/management/commands/csv_import.py | Python | mit | 6,066 |
import asyncio
import rocat.message
import rocat.actor
import rocat.globals
class BaseActorRef(object):
def tell(self, m, *, sender=None):
raise NotImplementedError
def ask(self, m, *, timeout=None):
raise NotImplementedError
def error(self, e):
raise NotImplementedError
class LocalActorRef(BaseActorRef):
def __init__(self, q, loop):
self._q = q
self._loop = loop
def _send(self, envel):
self._loop.call_soon_threadsafe(self._q.put_nowait, envel)
def tell(self, m, *, sender=None):
if sender is None:
sender = _guess_current_sender()
self._send(rocat.message.Envelope.for_tell(m, sender=sender))
async def ask(self, m, *, timeout=None):
fut = asyncio.get_event_loop().create_future()
sender = FunctionRef(fut, asyncio.get_event_loop())
self._send(rocat.message.Envelope.for_ask(m, sender=sender))
if timeout is None:
timeout = _guess_default_timeout()
if timeout > 0:
reply = await asyncio.wait_for(fut, timeout)
else:
reply = await fut
if reply.is_error:
raise reply.msg
return reply.msg
def error(self, e):
raise NotImplementedError('You can tell error only when you reply')
class FunctionRef(BaseActorRef):
def __init__(self, fut, loop):
self._fut = fut
self._loop = loop
def _send(self, envel):
self._loop.call_soon_threadsafe(self._try_set_future, envel)
def _try_set_future(self, result):
if not self._fut.done():
self._fut.set_result(result)
def tell(self, m, *, sender=None):
if sender is None:
sender = _guess_current_sender()
self._send(rocat.message.Envelope.for_ask(m, sender=sender))
def ask(self, m, *, sender=None, timeout=None):
raise NotImplementedError('You cannot ask back to ask request')
def error(self, e):
self._send(rocat.message.Envelope.for_error(e))
def _guess_current_sender():
current_ctx = rocat.actor.ActorContext.current()
if current_ctx is not None:
return current_ctx.sender
def _guess_default_timeout():
current_ctx = rocat.actor.ActorContext.current()
if current_ctx is not None:
return current_ctx.default_timeout or -1
return -1
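# Usage sketch (illustrative; in practice the actor system builds the mailbox
# queue and owns the event loop -- see rocat.actor for the real wiring):
#
# q = asyncio.Queue()
# ref = LocalActorRef(q, asyncio.get_event_loop())
# ref.tell('ping')                            # fire-and-forget
# reply = await ref.ask('ping', timeout=5)    # from inside a coroutine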
| chongkong/rocat | rocat/ref.py | Python | mit | 2,370 |
#!/usr/bin/env python3.7
# coding=utf-8
"""Jerod Gawne, 2018.06.28
https://github.com/jerodg/hackerrank
"""
import sys
import traceback
if __name__ == '__main__':
try:
        n, m = map(int, input().split())
        for i in range(1, n, 2):
            # .center must wrap the string, not the (None) result of print
            print(('.|.' * i).center(m, '-'))
        print('WELCOME'.center(m, '-'))
        for i in range(n - 2, -1, -2):
            print(('.|.' * i).center(m, '-'))
except Exception:
print(traceback.print_exception(*sys.exc_info()))
| jerodg/hackerrank-python | python/02.Strings/08.DesignerDoorMat/solution1.py | Python | mit | 510 |
import math
d = int(input())
for _ in range(d):
t, a, b, c = [int(x) for x in input().split()]
l = [a, b, c]
l.sort()
if l[0] + l[1] < l[2]:
l[2] = l[0] + l[1]
print(min(t, math.floor((l[0]+l[1]+l[2])/2)))
| madeinqc/IEEEXtreme9.0 | 06-tacostand-moderate/main.py | Python | mit | 238 |
#!/usr/bin/env python
#file cogent.parse.mothur.py
"""Parses Mothur otu list"""
from record_finder import is_empty
__author__ = "Kyle Bittinger"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Kyle Bittinger"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Kyle Bittinger"
__email__ = "[email protected]"
__status__ = "Prototype"
def parse_otu_list(lines, precision=0.0049):
"""Parser for mothur *.list file
To ensure all distances are of type float, the parser returns a
distance of 0.0 for the unique groups. However, if some sequences
are very similar, mothur may return a grouping at zero distance.
What Mothur really means by this, however, is that the clustering
is at the level of Mothur's precision. In this case, the parser
returns the distance explicitly.
If you are parsing otu's with a non-default precision, you must
specify the precision here to ensure that the parsed distances are
in order.
Returns an iterator over (distance, otu_list)
"""
for line in lines:
if is_empty(line):
continue
tokens = line.strip().split('\t')
distance_str = tokens.pop(0)
if distance_str.lstrip().lower().startswith('u'):
distance = 0.0
elif distance_str == '0.0':
distance = float(precision)
else:
distance = float(distance_str)
num_otus = int(tokens.pop(0))
otu_list = [t.split(',') for t in tokens]
yield (distance, otu_list)
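# Example (illustrative), exercising the 'unique' label and an explicit distance:
#
# >>> lines = ['unique\t2\ta,b\tc', '0.01\t1\ta,b,c']
# >>> list(parse_otu_list(lines))
# [(0.0, [['a', 'b'], ['c']]), (0.01, [['a', 'b', 'c']])]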
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/parse/mothur.py | Python | mit | 1,558 |
from django.contrib import admin
# Register your models here.
from .models import Question, Choice
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date']}),
]
inlines = [ChoiceInline]
list_display = ('question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
| singh-pratyush96/voter | polls/admin.py | Python | mit | 593 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import todo.mixins
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='TodoItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('done', models.BooleanField(default=False)),
('text', models.CharField(max_length=100)),
],
bases=(todo.mixins.SelfPublishModel, models.Model),
),
migrations.CreateModel(
name='TodoList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('description', models.TextField()),
],
bases=(todo.mixins.SelfPublishModel, models.Model),
),
migrations.AddField(
model_name='todoitem',
name='todo_list',
field=models.ForeignKey(to='todo.TodoList'),
),
]
| aaronbassett/djangocon-pusher | talk/todo/migrations/0001_initial.py | Python | mit | 1,191 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
m = {}
def findFrequentTreeSum(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
self.m = {}
self.traverse(root)
freq = max(self.m.values())
return [e for e in self.m if self.m[e] == freq]
def traverse(self, root):
if not root:
return
self.traverse(root.left)
self.traverse(root.right)
if root.left:
root.val += root.left.val
if root.right:
root.val += root.right.val
self.m[root.val] = self.m.get(root.val, 0) + 1
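# Worked example (illustrative): for the tree [5, 2, -3] the subtree sums are
# 2, -3 and 4 (= 5 + 2 - 3); each occurs once, so all three are returned.
# Note that traverse() accumulates sums in-place by mutating node.val.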
| Jspsun/LEETCodePractice | Python/MostFrequentSubtreeSum.py | Python | mit | 849 |
import threading
import wx
from styled_text_ctrl import StyledTextCtrl
class ThreadOutputCtrl(StyledTextCtrl):
def __init__(self, parent, env, auto_scroll=False):
StyledTextCtrl.__init__(self, parent, env)
self.auto_scroll = auto_scroll
self.__lock = threading.Lock()
self.__queue = []
self.__timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.__OnTimer, self.__timer)
def __OnTimer(self, evt):
self.flush()
def flush(self):
with self.__lock:
queue, self.__queue = self.__queue, []
lines = "".join(queue)
if lines:
with self.ModifyReadOnly():
self.AppendText(lines)
self.EmptyUndoBuffer()
if self.auto_scroll:
self.ScrollToLine(self.GetLineCount() - 1)
def start(self, interval=100):
self.SetReadOnly(True)
self.__timer.Start(interval)
def stop(self):
self.__timer.Stop()
self.flush()
self.SetReadOnly(False)
def write(self, s):
with self.__lock:
self.__queue.append(s)
def ClearAll(self):
with self.ModifyReadOnly():
StyledTextCtrl.ClearAll(self)
self.EmptyUndoBuffer()
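# Usage sketch (illustrative; `parent` and `env` come from the host UI):
#
# ctrl = ThreadOutputCtrl(parent, env, auto_scroll=True)
# ctrl.start(interval=100)        # poll the queue every 100 ms on the UI thread
# ctrl.write("build started\n")   # safe to call from any worker thread
# ctrl.stop()                     # flush what's left and re-enable editing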
| shaurz/devo | thread_output_ctrl.py | Python | mit | 1,266 |
import cgi
from http.server import HTTPServer, BaseHTTPRequestHandler
from database_setup import Base, Restaurant
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from test.restaurant_renderer import RestaurantRenderer
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class WebServerHandler(BaseHTTPRequestHandler):
def _find_restaurant(self):
parsed_path = self.path.split('/')
        if len(parsed_path) < 3:
print('Error 400:', parsed_path)
self.send_error(400)
self.end_headers()
return None
result = session.query(Restaurant).filter(Restaurant.id == parsed_path[-2]).first()
if not result:
print(result)
print(parsed_path[-2])
self.send_error(404)
self.end_headers()
return None
return result
def do_GET(self):
try:
if self.path.endswith("/restaurants"):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
renderer = RestaurantRenderer(title='Restaurant List')
renderer.generate_partial_body(
preface="<h3><a href='restaurants/new'>Make a new restaurant</a></h3><br>\n")
restaurants = session.query(Restaurant).all()
page = renderer.generate_page(renderer.render_restaurants(restaurants))
self.wfile.write(page.encode())
if self.path.endswith("/restaurants/new"):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
renderer = RestaurantRenderer(title='New Restaurant Creator')
renderer.generate_partial_body(preface='<H1>Make a new Restaurant</h1><br>\n')
form_code = '<input name="restaurant" type="text" placeHolder="New Restaurant Name"><input type="submit" value="Create" > '
page = renderer.generate_page(renderer.render_simple_form(form_code, action='/restaurants/new'))
self.wfile.write(page.encode())
if self.path.endswith("/edit"):
restaurant = self._find_restaurant()
if not restaurant:
return
renderer = RestaurantRenderer(title='Modify ' + restaurant.name)
renderer.generate_partial_body(preface='<h2>' + restaurant.name + '</h2>')
form_code = '<input name="edit" type="text" placeHolder="' + restaurant.name + '"><input type="submit" value="Rename" > '
page = renderer.generate_page(
renderer.render_simple_form(form_code, action='/restaurants/' + str(restaurant.id) + '/edit'))
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(page.encode())
if self.path.endswith("/delete"):
restaurant = self._find_restaurant()
if not restaurant:
return
renderer = RestaurantRenderer(title='Remove ' + restaurant.name)
renderer.generate_partial_body(
preface='<h2>Are you sure you wish to remove {}<h/2>'.format(restaurant.name))
submit_code = '<input type="submit" value="Delete">\n'
page = renderer.generate_page(
renderer.render_simple_form(submit_code, action='/restaurants/' + str(restaurant.id) + '/delete'))
self.wfile.write(page.encode())
if self.path.endswith("/whoareyou"):
self.send_error(418, message="I am a teapot, running on the Hyper Text Coffee Pot Control Protocol")
self.end_headers()
except IOError:
self.send_error(404, "File Not Found {}".format(self.path))
def do_POST(self):
try:
# HEADERS are now in dict/json style container
ctype, pdict = cgi.parse_header(
self.headers['content-type'])
# boundary data needs to be encoded in a binary format
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
if self.path.endswith("/restaurants/new"):
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
messagecontent = fields.get('restaurant')
session.add(Restaurant(name=messagecontent[0].decode()))
session.commit()
                    self.send_response(302)
                    self.send_header('Content-type', 'text/html')
                    self.send_header('Location', '/restaurants')
                    self.end_headers()
if self.path.endswith("/edit"):
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
messagecontent = fields.get('edit')
restaurant = self._find_restaurant()
if not restaurant:
return
restaurant.name = messagecontent[0].decode()
session.commit()
                    self.send_response(302)
                    self.send_header('Content-type', 'text/html')
                    self.send_header('Location', '/restaurants')
                    self.end_headers()
if self.path.endswith('/delete'):
restaurant = self._find_restaurant()
if not restaurant:
return
session.delete(restaurant)
session.commit()
                self.send_response(302)
                self.send_header('Content-type', 'text/html')
                self.send_header('Location', '/restaurants')
                self.end_headers()
except:
raise
def main():
try:
port = 8080
server = HTTPServer(('', port), WebServerHandler)
print("Web server is running on port {}".format(port))
server.serve_forever()
except KeyboardInterrupt:
print("^C entered, stopping web server...")
finally:
if server:
server.socket.close()
if __name__ == '__main__':
main()
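# Quick smoke test (illustrative), with the server running on port 8080:
#   curl http://localhost:8080/restaurants
#   curl -F "restaurant=New Place" http://localhost:8080/restaurants/new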
| MFry/pyItemCatalog | vagrant/itemCatalog/test/webserver.py | Python | mit | 6,549 |
# -*- coding: utf-8 -*-
from .store import Store
from ..tagged_cache import TaggedCache
from ..tag_set import TagSet
class TaggableStore(Store):
def tags(self, *names):
"""
Begin executing a new tags operation.
:param names: The tags
:type names: tuple
:rtype: cachy.tagged_cache.TaggedCache
"""
if len(names) == 1 and isinstance(names[0], list):
names = names[0]
return TaggedCache(self, TagSet(self, names))
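# Usage sketch (illustrative; assumes a concrete Store subclass and cachy's
# Laravel-style tagged-cache API, e.g. put(key, value, minutes) / get / flush):
#
# store.tags('people', 'authors').put('john', john, 10)
# store.tags('people', 'authors').get('john')
# store.tags('people').flush()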
| sdispater/cachy | cachy/contracts/taggable_store.py | Python | mit | 497 |
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.showbase.PythonUtil import PriorityCallbacks
from toontown.safezone import PublicWalk
from toontown.launcher import DownloadForceAcknowledge
import TrialerForceAcknowledge
import ZoneUtil
from toontown.friends import FriendsListManager
from toontown.toonbase import ToontownGlobals
from toontown.toon.Toon import teleportDebug
from toontown.estate import HouseGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.avatar import Emote
from otp.avatar.Avatar import teleportNotify
from direct.task import Task
import QuietZoneState
from toontown.distributed import ToontownDistrictStats
class Place(StateData.StateData, FriendsListManager.FriendsListManager):
notify = DirectNotifyGlobal.directNotify.newCategory('Place')
def __init__(self, loader, doneEvent):
StateData.StateData.__init__(self, doneEvent)
FriendsListManager.FriendsListManager.__init__(self)
self.loader = loader
self.dfaDoneEvent = 'dfaDoneEvent'
self.trialerFADoneEvent = 'trialerFADoneEvent'
self.zoneId = None
self.trialerFA = None
self._tiToken = None
self._leftQuietZoneLocalCallbacks = PriorityCallbacks()
self._leftQuietZoneSubframeCall = None
self._setZoneCompleteLocalCallbacks = PriorityCallbacks()
self._setZoneCompleteSubframeCall = None
return
def load(self):
StateData.StateData.load(self)
FriendsListManager.FriendsListManager.load(self)
self.walkDoneEvent = 'walkDone'
self.walkStateData = PublicWalk.PublicWalk(self.fsm, self.walkDoneEvent)
self.walkStateData.load()
self._tempFSM = self.fsm
def unload(self):
StateData.StateData.unload(self)
FriendsListManager.FriendsListManager.unload(self)
self.notify.info('Unloading Place (%s). Fsm in %s' % (self.zoneId, self._tempFSM.getCurrentState().getName()))
if self._leftQuietZoneSubframeCall:
self._leftQuietZoneSubframeCall.cleanup()
self._leftQuietZoneSubframeCall = None
if self._setZoneCompleteSubframeCall:
self._setZoneCompleteSubframeCall.cleanup()
self._setZoneCompleteSubframeCall = None
self._leftQuietZoneLocalCallbacks = None
self._setZoneCompleteLocalCallbacks = None
del self._tempFSM
taskMgr.remove('goHomeFailed')
del self.walkDoneEvent
self.walkStateData.unload()
del self.walkStateData
del self.loader
if self.trialerFA:
self.trialerFA.exit()
del self.trialerFA
return
def _getQZState(self):
if hasattr(base, 'cr') and hasattr(base.cr, 'playGame'):
if hasattr(base.cr.playGame, 'quietZoneStateData') and base.cr.playGame.quietZoneStateData:
return base.cr.playGame.quietZoneStateData
return None
def addLeftQuietZoneCallback(self, callback, priority = None):
qzsd = self._getQZState()
if qzsd:
return qzsd.addLeftQuietZoneCallback(callback, priority)
else:
token = self._leftQuietZoneLocalCallbacks.add(callback, priority=priority)
if not self._leftQuietZoneSubframeCall:
self._leftQuietZoneSubframeCall = SubframeCall(self._doLeftQuietZoneCallbacks, taskMgr.getCurrentTask().getPriority() - 1)
return token
def removeLeftQuietZoneCallback(self, token):
if token is not None:
if token in self._leftQuietZoneLocalCallbacks:
self._leftQuietZoneLocalCallbacks.remove(token)
qzsd = self._getQZState()
if qzsd:
qzsd.removeLeftQuietZoneCallback(token)
return
def _doLeftQuietZoneCallbacks(self):
self._leftQuietZoneLocalCallbacks()
self._leftQuietZoneLocalCallbacks.clear()
self._leftQuietZoneSubframeCall = None
return
def addSetZoneCompleteCallback(self, callback, priority = None):
qzsd = self._getQZState()
if qzsd:
return qzsd.addSetZoneCompleteCallback(callback, priority)
else:
token = self._setZoneCompleteLocalCallbacks.add(callback, priority=priority)
if not self._setZoneCompleteSubframeCall:
self._setZoneCompleteSubframeCall = SubframeCall(self._doSetZoneCompleteLocalCallbacks, taskMgr.getCurrentTask().getPriority() - 1)
return token
def removeSetZoneCompleteCallback(self, token):
if token is not None:
if token in self._setZoneCompleteLocalCallbacks:
self._setZoneCompleteLocalCallbacks.remove(token)
qzsd = self._getQZState()
if qzsd:
qzsd.removeSetZoneCompleteCallback(token)
return
def _doSetZoneCompleteLocalCallbacks(self):
self._setZoneCompleteSubframeCall = None
localCallbacks = self._setZoneCompleteLocalCallbacks
self._setZoneCompleteLocalCallbacks()
localCallbacks.clear()
return
def setState(self, state):
if hasattr(self, 'fsm'):
curState = self.fsm.getName()
if state == 'pet' or curState == 'pet':
self.preserveFriendsList()
self.fsm.request(state)
def getState(self):
if hasattr(self, 'fsm'):
curState = self.fsm.getCurrentState().getName()
return curState
def getZoneId(self):
return self.zoneId
def getTaskZoneId(self):
return self.getZoneId()
def isPeriodTimerEffective(self):
return 1
def handleTeleportQuery(self, fromAvatar, toAvatar):
if base.config.GetBool('want-tptrack', False):
if toAvatar == localAvatar:
toAvatar.doTeleportResponse(fromAvatar, toAvatar, toAvatar.doId, 1, toAvatar.defaultShard, base.cr.playGame.getPlaceId(), self.getZoneId(), fromAvatar.doId)
else:
                self.notify.warning('handleTeleportQuery toAvatar.doId (%s) != localAvatar.doId (%s)' % (toAvatar.doId, localAvatar.doId))
else:
fromAvatar.d_teleportResponse(toAvatar.doId, 1, toAvatar.defaultShard, base.cr.playGame.getPlaceId(), self.getZoneId())
def enablePeriodTimer(self):
if self.isPeriodTimerEffective():
if base.cr.periodTimerExpired:
taskMgr.doMethodLater(5, self.redoPeriodTimer, 'redoPeriodTimer')
self.accept('periodTimerExpired', self.periodTimerExpired)
def disablePeriodTimer(self):
taskMgr.remove('redoPeriodTimer')
self.ignore('periodTimerExpired')
def redoPeriodTimer(self, task):
messenger.send('periodTimerExpired')
return Task.done
def periodTimerExpired(self):
self.fsm.request('final')
if base.localAvatar.book.isEntered:
base.localAvatar.book.exit()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.__handlePeriodTimerBookClose)
else:
base.localAvatar.b_setAnimState('TeleportOut', 1, self.__handlePeriodTimerExitTeleport)
def exitPeriodTimerExpired(self):
pass
def __handlePeriodTimerBookClose(self):
base.localAvatar.b_setAnimState('TeleportOut', 1, self.__handlePeriodTimerExitTeleport)
def __handlePeriodTimerExitTeleport(self):
base.cr.loginFSM.request('periodTimeout')
def detectedPhoneCollision(self):
self.fsm.request('phone')
def detectedFishingCollision(self):
self.fsm.request('fishing')
def enterStart(self):
pass
def exitStart(self):
pass
def enterFinal(self):
pass
def exitFinal(self):
pass
def enterWalk(self, teleportIn = 0):
self.enterFLM()
self.walkStateData.enter()
if teleportIn == 0:
self.walkStateData.fsm.request('walking')
self.acceptOnce(self.walkDoneEvent, self.handleWalkDone)
if base.cr.productName in ['DisneyOnline-US', 'ES'] and not base.cr.isPaid() and base.localAvatar.tutorialAck:
base.localAvatar.chatMgr.obscure(0, 0)
base.localAvatar.chatMgr.normalButton.show()
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.questPage.acceptOnscreenHooks()
base.localAvatar.invPage.acceptOnscreenHooks()
base.localAvatar.questMap.acceptOnscreenHooks()
self.walkStateData.fsm.request('walking')
self.enablePeriodTimer()
def exitWalk(self):
self.exitFLM()
if base.cr.productName in ['DisneyOnline-US', 'ES'] and not base.cr.isPaid() and base.localAvatar.tutorialAck and not base.cr.whiteListChatEnabled:
base.localAvatar.chatMgr.obscure(1, 0)
self.disablePeriodTimer()
messenger.send('wakeup')
self.walkStateData.exit()
self.ignore(self.walkDoneEvent)
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
if base.cr.playGame.hood != None:
base.cr.playGame.hood.hideTitleText()
base.localAvatar.questPage.hideQuestsOnscreen()
base.localAvatar.questPage.ignoreOnscreenHooks()
base.localAvatar.invPage.ignoreOnscreenHooks()
base.localAvatar.invPage.hideInventoryOnscreen()
base.localAvatar.questMap.hide()
base.localAvatar.questMap.ignoreOnscreenHooks()
return
def handleWalkDone(self, doneStatus):
mode = doneStatus['mode']
if mode == 'StickerBook':
self.last = self.fsm.getCurrentState().getName()
self.fsm.request('stickerBook')
elif mode == 'Options':
self.last = self.fsm.getCurrentState().getName()
self.fsm.request('stickerBook', [base.localAvatar.optionsPage])
elif mode == 'Sit':
self.last = self.fsm.getCurrentState().getName()
self.fsm.request('sit')
else:
Place.notify.error('Invalid mode: %s' % mode)
def enterSit(self):
self.enterFLM()
base.localAvatar.laffMeter.start()
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.b_setAnimState('SitStart', 1)
self.accept('arrow_up', self.fsm.request, extraArgs=['walk'])
def exitSit(self):
self.exitFLM()
base.localAvatar.laffMeter.stop()
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
self.ignore('arrow_up')
def enterDrive(self):
self.enterFLM()
base.localAvatar.laffMeter.start()
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.b_setAnimState('SitStart', 1)
def exitDrive(self):
self.exitFLM()
base.localAvatar.laffMeter.stop()
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
def enterPush(self):
self.enterFLM()
base.localAvatar.laffMeter.start()
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.attachCamera()
base.localAvatar.startUpdateSmartCamera()
base.localAvatar.startPosHprBroadcast()
base.localAvatar.b_setAnimState('Push', 1)
def exitPush(self):
self.exitFLM()
base.localAvatar.laffMeter.stop()
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.stopUpdateSmartCamera()
base.localAvatar.detachCamera()
base.localAvatar.stopPosHprBroadcast()
self.ignore('teleportQuery')
def enterStickerBook(self, page = None):
self.enterFLM()
base.localAvatar.laffMeter.start()
target = base.cr.doFind('DistributedTarget')
if target:
target.hideGui()
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
if page:
base.localAvatar.book.setPage(page)
base.localAvatar.b_setAnimState('OpenBook', 1, self.enterStickerBookGUI)
base.localAvatar.obscureMoveFurnitureButton(1)
def enterStickerBookGUI(self):
base.localAvatar.collisionsOn()
base.localAvatar.book.showButton()
base.localAvatar.book.enter()
base.localAvatar.setGuiConflict(1)
base.localAvatar.startSleepWatch(self.__handleFallingAsleep)
self.accept('bookDone', self.__handleBook)
base.localAvatar.b_setAnimState('ReadBook', 1)
self.enablePeriodTimer()
def __handleFallingAsleep(self, task):
base.localAvatar.book.exit()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.__handleFallingAsleepBookClose)
return Task.done
def __handleFallingAsleepBookClose(self):
if hasattr(self, 'fsm'):
self.fsm.request('walk')
base.localAvatar.forceGotoSleep()
def exitStickerBook(self):
base.localAvatar.stopSleepWatch()
self.disablePeriodTimer()
self.exitFLM()
base.localAvatar.laffMeter.stop()
base.localAvatar.setGuiConflict(0)
base.localAvatar.book.exit()
base.localAvatar.book.hideButton()
base.localAvatar.collisionsOff()
self.ignore('bookDone')
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.obscureMoveFurnitureButton(-1)
target = base.cr.doFind('DistributedTarget')
if target:
target.showGui()
def __handleBook(self):
base.localAvatar.stopSleepWatch()
base.localAvatar.book.exit()
bookStatus = base.localAvatar.book.getDoneStatus()
if bookStatus['mode'] == 'close':
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.handleBookClose)
elif bookStatus['mode'] == 'teleport':
zoneId = bookStatus['hood']
base.localAvatar.collisionsOff()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.handleBookCloseTeleport, extraArgs=[zoneId, zoneId])
elif bookStatus['mode'] == 'exit':
self.exitTo = bookStatus.get('exitTo')
base.localAvatar.collisionsOff()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.__handleBookCloseExit)
elif bookStatus['mode'] == 'gohome':
zoneId = bookStatus['hood']
base.localAvatar.collisionsOff()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.goHomeNow, extraArgs=[zoneId])
elif bookStatus['mode'] == 'startparty':
firstStart = bookStatus['firstStart']
hostId = bookStatus['hostId']
base.localAvatar.collisionsOff()
base.localAvatar.b_setAnimState('CloseBook', 1, callback=self.startPartyNow, extraArgs=[firstStart, hostId])
def handleBookCloseTeleport(self, hoodId, zoneId):
if localAvatar.hasActiveBoardingGroup():
rejectText = TTLocalizer.BoardingCannotLeaveZone
localAvatar.elevatorNotifier.showMe(rejectText)
return
self.requestLeave({'loader': ZoneUtil.getBranchLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': 'teleportIn',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'avId': -1})
return
def __handleBookCloseExit(self):
base.localAvatar.b_setAnimState('TeleportOut', 1, self.__handleBookExitTeleport, [0])
def __handleBookExitTeleport(self, requestStatus):
if base.cr.timeManager:
base.cr.timeManager.setDisconnectReason(ToontownGlobals.DisconnectBookExit)
base.transitions.fadeScreen(1.0)
base.cr.gameFSM.request(self.exitTo)
def goHomeNow(self, curZoneId):
if localAvatar.hasActiveBoardingGroup():
rejectText = TTLocalizer.BoardingCannotLeaveZone
localAvatar.elevatorNotifier.showMe(rejectText)
return
hoodId = ToontownGlobals.MyEstate
self.requestLeave({'loader': 'safeZoneLoader',
'where': 'estate',
'how': 'teleportIn',
'hoodId': hoodId,
'zoneId': -1,
'shardId': None,
'avId': -1})
return
def startPartyNow(self, firstStart, hostId):
if localAvatar.hasActiveBoardingGroup():
rejectText = TTLocalizer.BoardingCannotLeaveZone
localAvatar.elevatorNotifier.showMe(rejectText)
return
base.localAvatar.creatingNewPartyWithMagicWord = False
base.localAvatar.aboutToPlanParty = False
hoodId = ToontownGlobals.PartyHood
if firstStart:
zoneId = 0
ToontownDistrictStats.refresh('shardInfoUpdated')
curShardTuples = base.cr.listActiveShards()
lowestPop = 100000000000000000L
shardId = None
for shardInfo in curShardTuples:
pop = shardInfo[2]
if pop < lowestPop:
lowestPop = pop
shardId = shardInfo[0]
if shardId == base.localAvatar.defaultShard:
shardId = None
base.cr.playGame.getPlace().requestLeave({'loader': 'safeZoneLoader',
'where': 'party',
'how': 'teleportIn',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': shardId,
'avId': -1})
else:
if hostId is None:
hostId = base.localAvatar.doId
base.cr.partyManager.sendAvatarToParty(hostId)
return
return
def handleBookClose(self):
if hasattr(self, 'fsm'):
self.fsm.request('walk')
if hasattr(self, 'toonSubmerged') and self.toonSubmerged == 1:
if hasattr(self, 'walkStateData'):
self.walkStateData.fsm.request('swimming', [self.loader.swimSound])
def requestLeave(self, requestStatus):
teleportDebug(requestStatus, 'requestLeave(%s)' % (requestStatus,))
if hasattr(self, 'fsm'):
self.doRequestLeave(requestStatus)
def doRequestLeave(self, requestStatus):
teleportDebug(requestStatus, 'requestLeave(%s)' % (requestStatus,))
self.fsm.request('DFA', [requestStatus])
def enterDFA(self, requestStatus):
teleportDebug(requestStatus, 'enterDFA(%s)' % (requestStatus,))
self.acceptOnce(self.dfaDoneEvent, self.enterDFACallback, [requestStatus])
self.dfa = DownloadForceAcknowledge.DownloadForceAcknowledge(self.dfaDoneEvent)
self.dfa.enter(base.cr.hoodMgr.getPhaseFromHood(requestStatus['hoodId']))
def exitDFA(self):
self.ignore(self.dfaDoneEvent)
def handleEnterTunnel(self, requestStatus, collEntry):
if localAvatar.hasActiveBoardingGroup():
rejectText = TTLocalizer.BoardingCannotLeaveZone
localAvatar.elevatorNotifier.showMe(rejectText)
dummyNP = NodePath('dummyNP')
dummyNP.reparentTo(render)
tunnelOrigin = requestStatus['tunnelOrigin']
dummyNP.setPos(localAvatar.getPos())
dummyNP.setH(tunnelOrigin.getH())
dummyNP.setPos(dummyNP, 0, 4, 0)
localAvatar.setPos(dummyNP.getPos())
dummyNP.removeNode()
del dummyNP
return
self.requestLeave(requestStatus)
def enterDFACallback(self, requestStatus, doneStatus):
teleportDebug(requestStatus, 'enterDFACallback%s' % ((requestStatus, doneStatus),))
self.dfa.exit()
del self.dfa
if doneStatus['mode'] == 'complete':
if requestStatus.get('tutorial', 0):
out = {'teleportIn': 'tunnelOut'}
requestStatus['zoneId'] = 22000
requestStatus['hoodId'] = 22000
else:
out = {'teleportIn': 'teleportOut',
'tunnelIn': 'tunnelOut',
'doorIn': 'doorOut'}
teleportDebug(requestStatus, 'requesting %s, requestStatus=%s' % (out[requestStatus['how']], requestStatus))
self.fsm.request(out[requestStatus['how']], [requestStatus])
elif doneStatus['mode'] == 'incomplete':
self.fsm.request('DFAReject')
else:
            Place.notify.error('Unknown done status for DownloadForceAcknowledge: ' + repr(doneStatus))
def enterDFAReject(self):
self.fsm.request('walk')
def exitDFAReject(self):
pass
def enterTrialerFA(self, requestStatus):
teleportDebug(requestStatus, 'enterTrialerFA(%s)' % requestStatus)
self.acceptOnce(self.trialerFADoneEvent, self.trialerFACallback, [requestStatus])
self.trialerFA = TrialerForceAcknowledge.TrialerForceAcknowledge(self.trialerFADoneEvent)
self.trialerFA.enter(requestStatus['hoodId'])
def exitTrialerFA(self):
pass
def trialerFACallback(self, requestStatus, doneStatus):
if doneStatus['mode'] == 'pass':
self.fsm.request('DFA', [requestStatus])
elif doneStatus['mode'] == 'fail':
self.fsm.request('trialerFAReject')
else:
Place.notify.error('Unknown done status for TrialerForceAcknowledge: %s' % doneStatus)
def enterTrialerFAReject(self):
self.fsm.request('walk')
def exitTrialerFAReject(self):
pass
def enterDoorIn(self, requestStatus):
NametagGlobals.setMasterArrowsOn(0)
door = base.cr.doId2do.get(requestStatus['doorDoId'])
door.readyToExit()
base.localAvatar.obscureMoveFurnitureButton(1)
base.localAvatar.startQuestMap()
def exitDoorIn(self):
NametagGlobals.setMasterArrowsOn(1)
base.localAvatar.obscureMoveFurnitureButton(-1)
def enterDoorOut(self):
base.localAvatar.obscureMoveFurnitureButton(1)
def exitDoorOut(self):
base.localAvatar.obscureMoveFurnitureButton(-1)
base.localAvatar.stopQuestMap()
def handleDoorDoneEvent(self, requestStatus):
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
def handleDoorTrigger(self):
self.fsm.request('doorOut')
def enterTunnelIn(self, requestStatus):
self.notify.debug('enterTunnelIn(requestStatus=' + str(requestStatus) + ')')
tunnelOrigin = base.render.find(requestStatus['tunnelName'])
self.accept('tunnelInMovieDone', self.__tunnelInMovieDone)
base.localAvatar.reconsiderCheesyEffect()
base.localAvatar.tunnelIn(tunnelOrigin)
base.localAvatar.startQuestMap()
def __tunnelInMovieDone(self):
self.ignore('tunnelInMovieDone')
self.fsm.request('walk')
def exitTunnelIn(self):
pass
def enterTunnelOut(self, requestStatus):
hoodId = requestStatus['hoodId']
zoneId = requestStatus['zoneId']
how = requestStatus['how']
tunnelOrigin = requestStatus['tunnelOrigin']
fromZoneId = ZoneUtil.getCanonicalZoneId(self.getZoneId())
tunnelName = requestStatus.get('tunnelName')
if tunnelName == None:
tunnelName = base.cr.hoodMgr.makeLinkTunnelName(self.loader.hood.id, fromZoneId)
self.doneStatus = {'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': how,
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'tunnelName': tunnelName}
self.accept('tunnelOutMovieDone', self.__tunnelOutMovieDone)
base.localAvatar.tunnelOut(tunnelOrigin)
base.localAvatar.stopQuestMap()
return
def __tunnelOutMovieDone(self):
self.ignore('tunnelOutMovieDone')
messenger.send(self.doneEvent)
def exitTunnelOut(self):
pass
def enterTeleportOut(self, requestStatus, callback):
base.localAvatar.laffMeter.start()
base.localAvatar.b_setAnimState('TeleportOut', 1, callback, [requestStatus])
base.localAvatar.obscureMoveFurnitureButton(1)
def exitTeleportOut(self):
base.localAvatar.laffMeter.stop()
base.localAvatar.stopQuestMap()
base.localAvatar.obscureMoveFurnitureButton(-1)
def enterDied(self, requestStatus, callback = None):
if callback == None:
callback = self.__diedDone
base.localAvatar.laffMeter.start()
camera.wrtReparentTo(render)
base.localAvatar.b_setAnimState('Died', 1, callback, [requestStatus])
base.localAvatar.obscureMoveFurnitureButton(1)
return
def __diedDone(self, requestStatus):
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
def exitDied(self):
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
def getEstateZoneAndGoHome(self, requestStatus):
self.doneStatus = requestStatus
avId = requestStatus['avId']
self.acceptOnce('setLocalEstateZone', self.goHome)
if avId > 0:
base.cr.estateMgr.getLocalEstateZone(avId)
else:
base.cr.estateMgr.getLocalEstateZone(base.localAvatar.getDoId())
if HouseGlobals.WANT_TELEPORT_TIMEOUT:
taskMgr.doMethodLater(HouseGlobals.TELEPORT_TIMEOUT, self.goHomeFailed, 'goHomeFailed')
def goHome(self, ownerId, zoneId):
self.notify.debug('goHome ownerId = %s' % ownerId)
taskMgr.remove('goHomeFailed')
if ownerId > 0 and ownerId != base.localAvatar.doId and not base.cr.isFriend(ownerId):
self.doneStatus['failed'] = 1
self.goHomeFailed(None)
return
if ownerId == 0 and zoneId == 0:
            if self.doneStatus['shardId'] is None or self.doneStatus['shardId'] == base.localAvatar.defaultShard:
self.doneStatus['failed'] = 1
self.goHomeFailed(None)
return
else:
self.doneStatus['hood'] = ToontownGlobals.MyEstate
self.doneStatus['zone'] = base.localAvatar.lastHood
self.doneStatus['loaderId'] = 'safeZoneLoader'
self.doneStatus['whereId'] = 'estate'
self.doneStatus['how'] = 'teleportIn'
messenger.send(self.doneEvent)
return
if self.doneStatus['zoneId'] == -1:
self.doneStatus['zoneId'] = zoneId
elif self.doneStatus['zoneId'] != zoneId:
self.doneStatus['where'] = 'house'
self.doneStatus['ownerId'] = ownerId
messenger.send(self.doneEvent)
messenger.send('localToonLeft')
return
def goHomeFailed(self, task):
self.notify.debug('goHomeFailed')
self.notifyUserGoHomeFailed()
self.ignore('setLocalEstateZone')
self.doneStatus['hood'] = base.localAvatar.lastHood
self.doneStatus['zone'] = base.localAvatar.lastHood
self.fsm.request('teleportIn', [self.doneStatus])
return Task.done
def notifyUserGoHomeFailed(self):
self.notify.debug('notifyUserGoHomeFailed')
failedToVisitAvId = self.doneStatus.get('avId', -1)
avName = None
if failedToVisitAvId != -1:
avatar = base.cr.identifyAvatar(failedToVisitAvId)
if avatar:
avName = avatar.getName()
if avName:
message = TTLocalizer.EstateTeleportFailedNotFriends % avName
else:
message = TTLocalizer.EstateTeleportFailed
base.localAvatar.setSystemMessage(0, message)
return
def enterTeleportIn(self, requestStatus):
self._tiToken = self.addSetZoneCompleteCallback(Functor(self._placeTeleportInPostZoneComplete, requestStatus), 100)
def _placeTeleportInPostZoneComplete(self, requestStatus):
teleportDebug(requestStatus, '_placeTeleportInPostZoneComplete(%s)' % (requestStatus,))
NametagGlobals.setMasterArrowsOn(0)
base.localAvatar.laffMeter.start()
base.localAvatar.startQuestMap()
base.localAvatar.reconsiderCheesyEffect()
base.localAvatar.obscureMoveFurnitureButton(1)
avId = requestStatus.get('avId', -1)
if avId != -1:
            if avId in base.cr.doId2do:
teleportDebug(requestStatus, 'teleport to avatar')
avatar = base.cr.doId2do[avId]
avatar.forceToTruePosition()
base.localAvatar.gotoNode(avatar)
base.localAvatar.b_teleportGreeting(avId)
else:
friend = base.cr.identifyAvatar(avId)
                if friend is not None:
teleportDebug(requestStatus, 'friend not here, giving up')
base.localAvatar.setSystemMessage(avId, OTPLocalizer.WhisperTargetLeftVisit % (friend.getName(),))
friend.d_teleportGiveup(base.localAvatar.doId)
base.transitions.irisIn()
self.nextState = requestStatus.get('nextState', 'walk')
base.localAvatar.attachCamera()
base.localAvatar.startUpdateSmartCamera()
base.localAvatar.startPosHprBroadcast()
globalClock.tick()
base.localAvatar.b_setAnimState('TeleportIn', 1, callback=self.teleportInDone)
base.localAvatar.d_broadcastPositionNow()
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
return
def teleportInDone(self):
if hasattr(self, 'fsm'):
teleportNotify.debug('teleportInDone: %s' % self.nextState)
self.fsm.request(self.nextState, [1])
def exitTeleportIn(self):
self.removeSetZoneCompleteCallback(self._tiToken)
self._tiToken = None
NametagGlobals.setMasterArrowsOn(1)
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
base.localAvatar.stopUpdateSmartCamera()
base.localAvatar.detachCamera()
base.localAvatar.stopPosHprBroadcast()
return
def requestTeleport(self, hoodId, zoneId, shardId, avId):
if avId > 0:
            teleportNotify.debug('requestTeleport%s' % ((hoodId, zoneId, shardId, avId),))
if localAvatar.hasActiveBoardingGroup():
if avId > 0:
teleportNotify.debug('requestTeleport: has active boarding group')
rejectText = TTLocalizer.BoardingCannotLeaveZone
localAvatar.elevatorNotifier.showMe(rejectText)
return
loaderId = ZoneUtil.getBranchLoaderName(zoneId)
whereId = ZoneUtil.getToonWhereName(zoneId)
if hoodId == ToontownGlobals.MyEstate:
loaderId = 'safeZoneLoader'
whereId = 'estate'
if hoodId == ToontownGlobals.PartyHood:
loaderId = 'safeZoneLoader'
whereId = 'party'
self.requestLeave({'loader': loaderId,
'where': whereId,
'how': 'teleportIn',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': shardId,
'avId': avId})
def enterQuest(self, npcToon):
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.laffMeter.start()
base.localAvatar.obscureMoveFurnitureButton(1)
def exitQuest(self):
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
def enterPurchase(self):
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.laffMeter.start()
base.localAvatar.obscureMoveFurnitureButton(1)
def exitPurchase(self):
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
def enterFishing(self):
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.laffMeter.start()
def exitFishing(self):
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
def enterBanking(self):
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.laffMeter.start()
base.localAvatar.obscureMoveFurnitureButton(1)
base.localAvatar.startSleepWatch(self.__handleFallingAsleepBanking)
self.enablePeriodTimer()
def __handleFallingAsleepBanking(self, arg):
if hasattr(self, 'fsm'):
messenger.send('bankAsleep')
self.fsm.request('walk')
base.localAvatar.forceGotoSleep()
def exitBanking(self):
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
base.localAvatar.stopSleepWatch()
self.disablePeriodTimer()
def enterPhone(self):
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.laffMeter.start()
base.localAvatar.obscureMoveFurnitureButton(1)
base.localAvatar.startSleepWatch(self.__handleFallingAsleepPhone)
self.enablePeriodTimer()
def __handleFallingAsleepPhone(self, arg):
if hasattr(self, 'fsm'):
self.fsm.request('walk')
messenger.send('phoneAsleep')
base.localAvatar.forceGotoSleep()
def exitPhone(self):
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
base.localAvatar.stopSleepWatch()
self.disablePeriodTimer()
def enterStopped(self):
base.localAvatar.b_setAnimState('neutral', 1)
Emote.globalEmote.disableBody(base.localAvatar, 'enterStopped')
self.accept('teleportQuery', self.handleTeleportQuery)
if base.localAvatar.isDisguised:
base.localAvatar.setTeleportAvailable(0)
else:
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.laffMeter.start()
base.localAvatar.obscureMoveFurnitureButton(1)
base.localAvatar.startSleepWatch(self.__handleFallingAsleepStopped)
self.enablePeriodTimer()
def __handleFallingAsleepStopped(self, arg):
if hasattr(self, 'fsm'):
self.fsm.request('walk')
base.localAvatar.forceGotoSleep()
messenger.send('stoppedAsleep')
def exitStopped(self):
Emote.globalEmote.releaseBody(base.localAvatar, 'exitStopped')
base.localAvatar.setTeleportAvailable(0)
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
base.localAvatar.obscureMoveFurnitureButton(-1)
base.localAvatar.stopSleepWatch()
self.disablePeriodTimer()
messenger.send('exitingStoppedState')
def enterPet(self):
base.localAvatar.b_setAnimState('neutral', 1)
Emote.globalEmote.disableBody(base.localAvatar, 'enterPet')
self.accept('teleportQuery', self.handleTeleportQuery)
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.setTeleportAllowed(0)
base.localAvatar.laffMeter.start()
self.enterFLM()
def exitPet(self):
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.setTeleportAllowed(1)
Emote.globalEmote.releaseBody(base.localAvatar, 'exitPet')
self.ignore('teleportQuery')
base.localAvatar.laffMeter.stop()
self.exitFLM()
def enterQuietZone(self, requestStatus):
self.quietZoneDoneEvent = uniqueName('quietZoneDone')
self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
self.quietZoneStateData = QuietZoneState.QuietZoneState(self.quietZoneDoneEvent)
self.quietZoneStateData.load()
self.quietZoneStateData.enter(requestStatus)
def exitQuietZone(self):
self.ignore(self.quietZoneDoneEvent)
del self.quietZoneDoneEvent
self.quietZoneStateData.exit()
self.quietZoneStateData.unload()
self.quietZoneStateData = None
return
def handleQuietZoneDone(self):
how = base.cr.handlerArgs['how']
self.fsm.request(how, [base.cr.handlerArgs])
| ksmit799/Toontown-Source | toontown/hood/Place.py | Python | mit | 37,078 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-09 16:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=100)),
('created', models.DateTimeField(editable=False)),
('modified', models.DateTimeField()),
],
),
]
| internship2016/sovolo | app/tag/migrations/0001_initial.py | Python | mit | 666 |
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
from .models import Course
from .models import Step
class CourseModelTests(TestCase):
def test_course_creation(self):
course = Course.objects.create(
title="Python Regular Expressions",
description="Learn to write regular expressions in Python"
)
now = timezone.now()
self.assertLess(course.created_at, now)
class StepModelTests(TestCase):
def setUp(self):
self.course = Course.objects.create(
title="Python testing",
description="Learn to write tests in python."
)
def test_step_creation(self):
step = Step.objects.create(
title="A Lovely step title",
description="Nice step description",
course=self.course
)
self.assertIn(step, self.course.step_set.all())
class CourseViewsTest(TestCase):
def setUp(self):
self.course = Course.objects.create(
title="Python testing",
description="Learn to write tests in Python."
)
self.course2 = Course.objects.create(
title="New Course",
description="A new course"
)
self.step = Step.objects.create(
title="Introduction to Doctests",
description="Learn to write tests in your Docstrings",
course=self.course
)
def test_course_list_view(self):
resp = self.client.get(reverse('courses:list'))
self.assertEqual(resp.status_code, 200)
self.assertIn(self.course, resp.context['courses'])
self.assertIn(self.course2, resp.context['courses'])
self.assertTemplateUsed(resp, 'courses/course_list.html')
self.assertContains(resp, self.course.title)
def test_course_detail_view(self):
resp = self.client.get(reverse('courses:course',
kwargs={'pk': self.course.pk}))
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.course, resp.context['course'])
self.assertTemplateUsed(resp, 'courses/course_detail.html')
self.assertContains(resp, self.course.title)
def test_step_detail_view(self):
resp = self.client.get(reverse('courses:step',
kwargs={'course_pk': self.course.pk,
'step_pk': self.step.pk}))
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.step, resp.context['step'])
self.assertTemplateUsed(resp, 'courses/step_detail.html')
self.assertContains(resp, self.course.title)
self.assertContains(resp, self.step.title)
| lorimccurry/learning_site | courses/tests.py | Python | mit | 2,724 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('django_graph', '0001_initial'),
('cyborg_identity', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='IsContactEmailAddress',
fields=[
('relationship_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_graph.Relationship')),
('valid_from', models.DateTimeField(null=True, blank=True)),
('valid_to', models.DateTimeField(null=True, blank=True)),
],
options={
'abstract': False,
},
bases=('django_graph.relationship', models.Model),
),
migrations.CreateModel(
name='IsContactPhoneNumber',
fields=[
('relationship_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_graph.Relationship')),
('valid_from', models.DateTimeField(null=True, blank=True)),
('valid_to', models.DateTimeField(null=True, blank=True)),
('phone_number_type', models.CharField(blank=True, max_length=30, null=True, choices=[(b'HOME', b'Home'), (b'MOBILE', b'Mobile'), (b'WORK', b'Work'), (b'SCHOOL', b'School'), (b'OTHER', b'Other')])),
],
options={
'abstract': False,
},
bases=('django_graph.relationship', models.Model),
),
migrations.CreateModel(
name='PhoneNumber',
fields=[
('node_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_graph.Node')),
('us_phone_number', models.CharField(unique=True, max_length=10, validators=[django.core.validators.RegexValidator(regex=b'[0-9]{10}')])),
],
options={
'abstract': False,
},
bases=('django_graph.node',),
),
]
| shawnhermans/cyborg-identity-manager | cyborg_identity/migrations/0002_iscontactemailaddress_iscontactphonenumber_phonenumber.py | Python | mit | 2,204 |
from . import _version
__version__ = _version.__version__
import os, math, pysam
from clint.textui import progress, puts_err
import sqlite3 as lite
import tempfile, warnings, pickle
def example_reads():
"""
returns the path to the example BAM file
"""
return os.path.join(os.path.join(os.path.dirname(__file__), "data"),"example.bam")
def example_regions():
"""
returns the path to the example BED file
"""
return os.path.join(os.path.join(os.path.dirname(__file__), "data"),"example.bed")
class BAMHandler(object):
"""
    The object that provides the interface to DNase-seq data held in a BAM file
"""
def __init__(self, filePath, caching=True, chunkSize=1000, ATAC=False):
"""Initializes the BAMHandler with a BAM file
Args:
filePath (str): the path of a sorted, indexed BAM file from a DNase-seq experiment
Kwargs:
            chunkSize (int): an int of the size of the regions to load if caching (default: 1000)
            caching (bool): enables or disables read caching (default: True)
            ATAC (bool): shift cut sites for ATAC-seq data (-5 forward / +4 reverse) (default: False)
Raises:
IOError
"""
try:
self.samfile = pysam.Samfile(filePath)
except IOError:
errorString = "Unable to load BAM file:{0}".format(filePath)
raise IOError(errorString)
#Initialise the empty DNase cut cache with the chromosome names from the BAM file
self.purge_cache()
#Do not change the CHUNK_SIZE after object instantiation!
self.CHUNK_SIZE = chunkSize
self.CACHING = caching
if ATAC:
self.loffset = -5
self.roffset = +4
else:
self.loffset = 0
self.roffset = 0
def __addCutsToCache(self,chrom,start,end):
"""Loads reads from the BAM file into the cutCache. Will not check if reads are already there.
Args:
chrom (str): The chromosome
start (int): The start of the interval
end (int): The end of the interval
"""
for alignedread in self.samfile.fetch(chrom, max(start, 0), end):
if not alignedread.is_unmapped:
if alignedread.is_reverse:
a = int(alignedread.reference_end)
if a <= end +1:
self.cutCache[chrom]["-"][a] = self.cutCache[chrom]["-"].get(a, 0) + 1
else:
a = int(alignedread.reference_start) -1
if a >= start:
self.cutCache[chrom]["+"][a] = self.cutCache[chrom]["+"].get(a, 0) + 1
self.lookupCache[chrom].append(start)
def __lookupReadsUsingCache(self,startbp,endbp,chrom):
"""Looks up the DNase cut information from the cutCache and returns as a dictionary (private method)
Args:
startbp (int): The start of the interval
endbp (int): The end of the interval
chrom (str): The chromosome
"""
#Expand the region to the nearest CHUNK_SIZE and load these reads if they aren't in the cache
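        # e.g. with CHUNK_SIZE=1000, a query for startbp=1234, endbp=2200
        # loads the chunks [1000, 2000) and [2000, 3000) on first lookup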
lbound = int(math.floor(startbp / float(self.CHUNK_SIZE)) * float(self.CHUNK_SIZE))
ubound = int(math.ceil(endbp / float(self.CHUNK_SIZE)) * float(self.CHUNK_SIZE))
for i in range(lbound,ubound,self.CHUNK_SIZE):
if i not in self.lookupCache[chrom]:
self.__addCutsToCache(chrom,i,i+self.CHUNK_SIZE)
#Fills in with zeroes where the hash table contains no information for each strand.
fwCutArray = [self.cutCache[chrom]["+"].get(i, 0) for i in range(startbp + self.loffset ,endbp + self.loffset)]
revCutArray = [self.cutCache[chrom]["-"].get(i, 0) for i in range(startbp + self.roffset, endbp + self.roffset)]
return {"+":fwCutArray,"-":revCutArray}
def __lookupReadsWithoutCache(self,startbp,endbp,chrom):
"""Loads reads from the BAM file directly from disk, ignoring the cache (private method)
Args:
startbp (int): The start of the interval
endbp (int): The end of the interval
chrom (str): The chromosome
"""
tempcutf = {}
tempcutr = {}
for alignedread in self.samfile.fetch(chrom, max(startbp, 0), endbp):
if not alignedread.is_unmapped:
if alignedread.is_reverse:
a = int(alignedread.reference_end)
if a <= endbp +1:
tempcutr[a] = tempcutr.get(a, 0) + 1
else:
a = int(alignedread.reference_start) - 1
if a >= startbp:
tempcutf[a] = tempcutf.get(a, 0) + 1
fwCutArray = [tempcutf.get(i, 0) for i in range(startbp + self.loffset ,endbp + self.loffset)]
revCutArray = [tempcutr.get(i, 0) for i in range(startbp + self.roffset, endbp + self.roffset)]
return {"+":fwCutArray,"-":revCutArray}
def purge_cache(self):
"""
Wipes the internal cache - useful if you need finer grain control over caching.
"""
#Initialise the empty DNase cut cache with the chromosome names from the BAM file
self.cutCache = {}
#This helps us differentiate between what's been looked up and when there's just no reads
self.lookupCache = {}
for i in self.samfile.references:
self.cutCache[i] = {"+":{},"-":{}}
self.lookupCache[i] = []
def get_cut_values(self,vals):
"""Return a dictionary with the cut counts. Can be used in two different ways:
You can either use a string or a GenomicInterval to query for cuts.
        Returns a dict in which "+" holds the +ve strand data and "-" holds the -ve strand data (rotated 180 degrees)
Args:
vals: either a string with the format "chr18,500:600,+" or a GenomicInterval object
        >>> BAMHandler(example_reads())["chr6,170863142,170863150,+"]
        {'+': [1, 0, 0, 0, 1, 11, 1, 0], '-': [0, 1, 0, 0, 1, 0, 0, 1]}
        >>> BAMHandler(example_reads())["chr6,170863142,170863150,-"]
        {'+': [1, 0, 0, 1, 0, 0, 1, 0], '-': [0, 1, 11, 1, 0, 0, 0, 1]}
"""
if isinstance(vals, GenomicInterval):
chrom = vals.chromosome
startbp = vals.startbp
endbp = vals.endbp
flip = vals.strand
elif isinstance(vals, str):
try:
chrom,startbp,endbp,flip = vals.split(",")
startbp = int(startbp)
endbp = int(endbp)
assert(flip in ["+", "-"])
except:
raise ValueError("Malformed query string")
else:
raise TypeError("Lookup must be a string or a GenomicInterval")
if self.CACHING:
retval = self.__lookupReadsUsingCache(startbp,endbp,chrom)
else:
retval = self.__lookupReadsWithoutCache(startbp,endbp,chrom)
        if flip == "-":
retval["+"], retval["-"] = retval["-"][::-1], retval["+"][::-1]
return retval
def __getitem__(self,vals):
"""
Wrapper for get_cut_values
"""
return self.get_cut_values(vals)
    def FOS(self,interval,bgsize=35):
        """Calculates the Footprint Occupancy Score (FOS) for a GenomicInterval. See Neph et al. 2012 (Nature) for full details.
Args:
interval (GenomicInterval): The interval that you want the FOS for
Kwargs:
bgsize (int): The size of the flanking region to use when calculating the FOS (default: 35)
Returns:
A float with the FOS - returns -1 if it can't calculate it
"""
cuts = self["{0},{1},{2},{3}".format(interval.chromosome,interval.startbp-bgsize,interval.endbp+bgsize,interval.strand)]
forwardArray, backwardArray = cuts["+"], cuts["-"]
cutArray = [forwardArray[i] + backwardArray[i] for i in range(len(forwardArray))]
leftReads = float(sum(cutArray[:bgsize]))
centreReads = float(sum(cutArray[bgsize:-bgsize]))
rightReads = float(sum(cutArray[-bgsize:]))
#Here we normalise by region length
leftReads /= (bgsize * 1.0)
centreReads /= (len(interval) * 1.0)
rightReads /= (bgsize * 1.0)
try:
return ( (centreReads+1.0) / (leftReads + 1.0) ) + ( (centreReads+1.0)/(rightReads + 1.0))
except BaseException:
#If it can't calculate the score, return a sentinel value
return -1
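    # Worked FOS sketch (hypothetical counts, purely illustrative): with
    # bgsize=35, a 100bp interval, 70 cuts in each flank and 10 in the centre,
    # left = right = 70/35 = 2.0 and centre = 10/100 = 0.1, so
    # FOS = (0.1+1)/(2.0+1) + (0.1+1)/(2.0+1) ~= 0.73; depleted centres
    # (deep footprints) therefore yield low FOS values.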
class GenomicIntervalSet(object):
"""Container class which stores and allow manipulations of large numbers of GenomicInterval objects.
Essentially a way of storing and sorting BED files.
"""
def __init__(self,filename = None):
"""
Inits GenomicIntervalSet. You can also specify a BED file path to load the intervals from
Kwargs:
filename (str): the path to a BED file to initialize the intervals with
If no ``filename`` provided, then the set will be empty
"""
self.intervals = {}
if filename:
self.loadBEDFile(filename)
def loadBEDFile(self,filename):
"""
Adds all the intervals in a BED file to this GenomicIntervalSet.
We're quite naughty here and allow some non-standard BED formats (along with the official one):
chrom chromStart chromEnd
chrom chromStart chromEnd strand
chrom chromStart chromEnd name score strand
Any whitespace (tabs or spaces) will be considered separators, so spaces in names cause a problem!
.. note::
If you don't supply a strand, we infer that it's +ve.
Args:
filename: the path to a BED file to load
Raises:
IOError
"""
try:
BEDfile = open(filename, 'r')
except IOError:
errorString = "Cannot load BED file: {0}".format(filename)
raise IOError(errorString)
puts_err("Reading BED File...")
#This is done so that if a malformed BED record is detected, no intervals are loaded.
records = []
        intervalCount = sum(1 for _ in open(filename))
for _ in progress.bar(list(range(intervalCount))):
line = BEDfile.readline()
if line:
#Skip lines in the bed files which are UCSC track metadata or comments
if not self.__isBEDHeader(line):
records.append(self.__parseBEDString(line))
for i in records:
self.__addInterval(GenomicInterval(i[0], i[1], i[2], i[3], i[4], i[5]))
BEDfile.close()
def __malformedBEDline(self,BEDString):
"""
Raises an exception and prints the offending BED string
Raises:
Exception
"""
#TODO: Make a new exception class, something like malformedBEDException?
exceptionString = "Malformed BED line: {0}".format(BEDString)
raise Exception(exceptionString)
def __isBEDHeader(self,string):
"""
Returns True/False whether a line in a bed file should be ignored according to
http://genome.ucsc.edu/goldenPath/help/customTrack.html#TRACK
"""
if string[0] == "#":
return True
headers = ["name","description","type","visibility","color","itemRgb","useScore","group",
"priority","db","offset","maxItems","url","htmlUrl","bigDataUrl","track","browser"]
for each in headers:
if string.startswith(each):
return True
return False
def __parseBEDString(self,BEDString):
"""
Parses the following BED formats
We're quite naughty here and allow some non-standard BED formats (along with the official one):
chrom chromStart chromEnd
chrom chromStart chromEnd strand
chrom chromStart chromEnd name score strand
Returns:
(chrom,startbp,endbp,label,score,strand)
Raises:
Exception
"""
BEDSplit = BEDString.split()
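        # e.g. "chr1 100 200 peak1 3.5 -" splits into 6 fields and parses to
        # ("chr1", 100, 200, "peak1", 3.5, "-")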
#Sanity check
if len(BEDSplit) not in [3,4,6]:
self.__malformedBEDline(BEDString)
#Default if only Chrom Start End is detected
try:
chrom = BEDSplit[0]
startbp = int(BEDSplit[1])
endbp = int(BEDSplit[2])
except:
self.__malformedBEDline(BEDString)
label = 0
score = 0
strand = "+"
        if len(BEDSplit) == 4:
if BEDSplit[3] in ["+", "-"]:
strand = BEDSplit[3]
else:
self.__malformedBEDline(BEDString)
        if len(BEDSplit) == 6:
label = BEDSplit[3]
try:
score = float(BEDSplit[4])
except ValueError:
self.__malformedBEDline(BEDString)
if BEDSplit[5] in ["+", "-"]:
strand = BEDSplit[5]
else:
self.__malformedBEDline(BEDString)
return chrom,startbp,endbp,label,score,strand
def __len__(self):
"""
Return the number of intervals in the set
"""
intervals = 0
for each in list(self.intervals.values()):
intervals += len(each)
return intervals
def __iter__(self):
"""
Iterates over the intervals in the order that the intervals were generated
"""
for each in sorted(sum(list(self.intervals.values()),[]), key=lambda peak: peak.importorder):
yield each
def __getitem__(self, i):
"""
Indexes the intervals in the order that the intervals were generated
"""
return sorted(sum(list(self.intervals.values()),[]), key=lambda peak: peak.importorder)[i]
def __delitem__(self,i):
"""
Deletes an interval from the set using the position i
"""
pos = self.intervals[self[i].chromosome].index(self[i])
del self.intervals[self[i].chromosome][pos]
def __iadd__(self, other):
"""
Adds all the intervals from an other GenomicIntervalSet or GenomicInterval to this one.
Args:
other: either a GenomicInterval or GenomicIntervalSet to be added
Raises:
TypeError: A GenomicInterval or GenomicIntervalSet wasn't supplied.
"""
if isinstance(other,GenomicIntervalSet):
for i in other:
self.__addInterval(i)
elif isinstance(other,GenomicInterval):
self.__addInterval(other)
else:
raise TypeError("Can only add GenomicInterval or GenomicIntervalSet objects to existing GenomicIntervalSet")
return self
def __addInterval(self, other):
"""
Adds a GenomicInterval to the set
Args:
other (GenomicInterval): The GenomicInterval to be added.
"""
if other.chromosome not in self.intervals:
self.intervals[other.chromosome] = []
self.intervals[other.chromosome].append(other)
def resizeRegions(self,toSize):
"""
        Resizes all GenomicIntervals around their midpoints so that each
        interval ends up 2*toSize bp wide (toSize is treated as a half-width)
        Args:
            toSize: an int of the half-width to resize all intervals to
        """
        if not type(toSize) == int:
            raise ValueError("Can only resize intervals to integers")
        for i in self:
            xamount = toSize-(i.endbp-i.startbp)//2
            i.startbp -= xamount
            i.endbp += xamount
            if (i.endbp-i.startbp) > toSize*2:
                i.endbp -= 1
def __str__(self):
return ''.join(str(i) +"\n" for i in self)
class GenomicInterval(object):
"""
    Basic Object which describes a region of the genome
"""
#This counts how many GenomicInterval objects have been created
counter = 0
def __init__(self, chrom, start, stop, label = 0,score = 0,strand="+"):
"""
Initialization routine
Args:
chrom (str): the chromosome
start (int): the start of the interval
stop (int): the end of the interval
Kwargs:
label: The name of the interval (will be given an automatic name if none entered)
score (float): the score of the interval (default: 0)
strand (str): the strand the interval is on (default: "+")
"""
self.__class__.counter += 1
self.importorder = self.__class__.counter
self.chromosome = str(chrom)
self.startbp = int(start)
self.endbp = int(stop)
self.strand = str(strand)
self.score = float(score)
if self.startbp > self.endbp:
raise Exception("Start location of GenomicInterval is larger than end location!")
        # This is from a bygone era where we ordered the intervals by import order
        # self.score = self.__class__.counter
        #This makes up a fake name if one doesn't exist in the BED file
if label:
self.label = str(label)
else:
self.label = "Unnamed{0}".format(self.__class__.counter)
#This contains anything else you want to store about the interval
self.metadata = {}
def __str__(self):
"""
BED representation of the interval
"""
return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(self.chromosome, self.startbp, self.endbp, self.label, self.score, self.strand)
def __len__(self):
"""
Returns the length of the GenomicInterval in basepairs
"""
return self.endbp - self.startbp
def __lt__(self, other):
"""
Implements foo < bar
"""
if self.chromosome == other.chromosome:
if self.startbp < other.startbp:
return True
elif self.startbp == other.startbp:
if self.endbp < other.endbp:
return True
else:
return False
else:
return False
elif self.chromosome < other.chromosome:
return True
else:
return False
def __le__(self, other):
"""
Implements foo <= bar
"""
if self.chromosome == other.chromosome:
if self.startbp < other.startbp:
return True
elif self.startbp == other.startbp:
if self.endbp <= other.endbp:
return True
else:
return False
else:
return False
elif self.chromosome < other.chromosome:
return True
else:
return False
def __eq__(self, other):
"""
Implements foo == bar
"""
if self.chromosome == other.chromosome and \
self.startbp == other.startbp and \
self.endbp == other.endbp:
return True
return False
def __gt__(self, other):
"""
Implements foo > bar
"""
if self.chromosome == other.chromosome:
if self.startbp > other.startbp:
return True
elif self.startbp == other.startbp:
if self.endbp > other.endbp:
return True
else:
return False
else:
return False
elif self.chromosome > other.chromosome:
return True
else:
return False
def __ge__(self, other):
"""
Implements foo >= bar
"""
if self.chromosome == other.chromosome:
if self.startbp > other.startbp:
return True
elif self.startbp == other.startbp:
if self.endbp >= other.endbp:
return True
else:
return False
else:
return False
elif self.chromosome > other.chromosome:
return True
else:
return False
class FASTAHandler(object):
def __init__(self, fasta_file, vcf_file = None):
self.ffile = pysam.Fastafile(fasta_file)
self.conn = None
if vcf_file:
self.conn = lite.connect(tempfile.NamedTemporaryFile().name)
with open(vcf_file, 'r') as f:
records = [(x[0],x[1],x[3],x[4]) for x in (x.split() for x in f if x[0] != "#")]
with self.conn:
cur = self.conn.cursor()
cur.execute("CREATE TABLE SNPS(chr TEXT,pos INT, ref TEXT, mut TEXT)")
cur.executemany("INSERT INTO SNPS VALUES(?,?,?,?)",records)
#Manually remove these, as they potentially could be large in memory
del(records)
def sequence(self,interval):
sequence_string = self.ffile.fetch(interval.chromosome,interval.startbp,interval.endbp).upper()
if not self.conn:
# if interval.strand != "+":
# sequence_string = sequence_string[::-1]
return str(sequence_string)
else:
query_string = "SELECT chr, pos - ? - 1 as offset,ref,mut FROM SNPS WHERE chr=? and pos BETWEEN ? and ?"
snps = self.conn.cursor().execute(query_string,(interval.startbp,interval.chromosome,interval.startbp,interval.endbp)).fetchall()
sequence_list = [i for i in sequence_string]
for i in snps:
if sequence_list[i[1]] != i[2]:
warnings.warn("MISMATCH IN REF TO SNP - WHAT HAVE YOU DONE?")
else:
#str needed as sqlite returns unicode
sequence_list[i[1]] = str(i[3])
return "".join(sequence_list)
class BiasCalculator(object):
def __init__(self,bias_file=None):
if bias_file is None:
#Load the genomic IMR90 bias from Shirley
            # pickled data must be opened in binary mode under Python 3
            bias_file = open(os.path.join(os.path.join(os.path.dirname(__file__), "data"),"IMR90_6mer.pickle"), 'rb')
self.biasdict = pickle.load(bias_file)
def bias(self,sequence):
"""
NOTE: Because bias is calculated from the centre of a 6-mer,
the data will be missing the values for the first and last 3 bases
"""
        #Split sequence into constituent 6-mers
sequence_chunks = [sequence[i:i+6] for i in range(len(sequence)-5)]
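        # e.g. an 8bp sequence "ACGTACGT" yields the 6-mers "ACGTAC", "CGTACG" and "GTACGT"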
#Look up values of these 6-mers in the precomputed bias database
fw_bias = [float(self.biasdict[i]["forward"])for i in sequence_chunks]
rv_bias = [float(self.biasdict[i]["reverse"])for i in sequence_chunks]
#FIXME: Pickled data should use "+" and "-" and not forward and reverse to prevent confusion here
#FIXME: Fix the pickled data - the reverse positions are off by one!
return {"+": fw_bias, "-":rv_bias}
class BAMHandlerWithBias(BAMHandler):
def __init__(self, sequence_object, *args, **kwargs):
super(BAMHandlerWithBias, self).__init__(*args, **kwargs)
self.sequence_data = sequence_object
self.bias_data = BiasCalculator()
def __getitem__(self,interval):
if not isinstance(interval,GenomicInterval):
raise TypeError("Sorry, but we only support GenomicInterval querying for the Bias Handler at the moment")
#Note: This is pretty Hacky!
interval.startbp -= 3
interval.endbp += 3
#Get the sequence data
bias_values = self.bias_data.bias(self.sequence_data.sequence(interval))
interval.startbp += 3
interval.endbp -= 3
bias_values["+"] = bias_values["+"][1:]
bias_values["-"] = bias_values["-"][:-1]
cuts = self.get_cut_values(interval)
#Nomenclature used below is that in the He. et al Nature Methods Paper
#These are N_i^s - note we are using an entire hypersensitive site, and not 50bp like the paper
N_i = {"+":sum(cuts["+"]) ,"-":sum(cuts["-"])}
#bias_values are y_i
for dir in ("-","+"):
bias_values[dir] = [float(i)/sum(bias_values[dir]) for i in bias_values[dir]]
#Stupid pass-by-reference
predicted_cuts = {"+":cuts["+"][:],"-":cuts["-"][:]}
#Calculate the predicted counts (nhat_i, which is N_i * y_i) based on n-mer cutting preference
for dir in ("-","+"):
#For each base
for num, val in enumerate(predicted_cuts[dir]):
#Multiply the total number of observed cuts by the bias value
predicted_cuts[dir][num] = bias_values[dir][num] * N_i[dir]
#Now we normalised the observed cuts by the expected
for dir in ("-","+"):
#For each base
for num, val in enumerate(predicted_cuts[dir]):
#Divide the number of observed cuts by the bias value
pass
#predicted_cuts[dir][num] = (cuts[dir][num] + 1.0) / (val + 1.0)
if interval.strand == "-":
# That's numberwang, let's rotate the board!
predicted_cuts["+"], predicted_cuts["-"] = predicted_cuts["-"][::-1], predicted_cuts["+"][::-1]
return predicted_cuts
| jpiper/pyDNase | pyDNase/__init__.py | Python | mit | 25,452 |
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
import math
from Utilities import Primes
from Problem import Problem
class N10001stPrime(Problem):
def __init__(self):
self.answer = 104743
def do(self):
return Primes.primes_up_to_n(self.find_upper_bound(10001))[10001 - 1]
# Lower bound on the number of primes below x
# there is at least : x/log(x) * (1 + 1 / log(x)) primes below x
# (https://en.wikipedia.org/wiki/Prime_number_theorem#Bounds_on_the_prime-counting_function)
def prime_count(self, x):
return (x / math.log(x)) * (1 + (1 / math.log(x)))
# finds an upper bound on the number of numbers we need to find n primes
def find_upper_bound(self, n):
inc = 1000
num = inc
while self.prime_count(num) <= n:
num += inc
return num
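# Minimal usage sketch (assumption: the Problem base class requires no setup
# beyond what __init__ does above):
if __name__ == '__main__':
    problem = N10001stPrime()
    print(problem.do())  # expected to equal problem.answer, i.e. 104743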
| hperreault/ProjectEuler | 007_N10001stPrime.py | Python | mit | 932 |
#ignore this file
| Fire-Hound/Linear-Regression-APK | ignore.py | Python | mit | 18 |
import unittest
from functools import partialmethod
from muk.sexp import *
class sexp_tests(unittest.TestCase):
def test_null_list(self):
self.isomorphism(l=[], c=[])
def test_singleton_proper_list_to_cons(self):
self.isomorphism(l=[1], c=cons(1, []))
def test_plain_proper_list_to_cons(self):
self.isomorphism(l=[1,2,3], c=cons(1, cons(2, cons(3, []))))
def test_plain_improper_list_to_cons(self):
self.isomorphism(l=(1,2,3), c=cons(1, cons(2, 3)))
def test_nested_improper_list_to_cons(self):
self.isomorphism(l=(1,[2,3], 4), c=cons(1, cons(cons(2, cons(3, [])), 4)))
def test_more_nested_improper_list_to_cons(self):
self.isomorphism(l=([3],(4,5), 6), c=cons(cons(3, []), cons(cons(4, 5), 6)))
def test_shadow_proper_list_using_improper_list_notation(self):
# pay attention, this is not an isomorphism, the next test shows the
# natural way of writing, without shadowing. The broken direction is
# represented by function `cons_to_list` which doesn't shadow objs it
# produces.
self.assertEqual(list_to_cons(([3],(4,5), [6])), cons(cons(3, []), cons(cons(4, 5), cons(6, []))))
def test_more_nested_improper_lists_into_proper_list_to_cons(self):
self.isomorphism(l=[[3],(4,5), 6], c=cons(cons(3, []), cons(cons(4, 5), cons(6, []))))
def test_invalid_improper_list(self):
with self.assertRaises(ImproperListError):
list_to_cons(l=(3,))
def test_invalid_improper_cons(self):
with self.assertRaises(ImproperListError):
cons_to_list(c=cons(3, ()))
def isomorphism(self, l, c):
self.assertEqual(c, list_to_cons(l))
self.assertEqual(l, cons_to_list(c))
def test_tuple_wrapping_and_ctor_call(self):
class A(tuple):
__int__ = partialmethod(sum)
a = (1,2,3,4) # vanilla tuple obj
self.assertEqual(tuple, type(a))
self.assertEqual(A, type(A(a)))
self.assertEqual(10, int(A(a)))
| massimo-nocentini/on-python | microkanren/sexp_test.py | Python | mit | 2,061 |
import json
from operator import itemgetter
import os
import random
import string
import sys
from datetime import datetime
from devtools import debug
from functools import partial
from pathlib import Path
from statistics import StatisticsError, mean
from statistics import stdev as stdev_
from test_pydantic import TestPydantic
try:
from test_trafaret import TestTrafaret
except Exception:
print('WARNING: unable to import TestTrafaret')
TestTrafaret = None
try:
from test_drf import TestDRF
except Exception:
print('WARNING: unable to import TestDRF')
TestDRF = None
try:
from test_marshmallow import TestMarshmallow
except Exception:
print('WARNING: unable to import TestMarshmallow')
TestMarshmallow = None
try:
from test_valideer import TestValideer
except Exception:
print('WARNING: unable to import TestValideer')
TestValideer = None
try:
from test_cattrs import TestCAttrs
except Exception:
print('WARNING: unable to import TestCAttrs')
TestCAttrs = None
try:
from test_cerberus import TestCerberus
except Exception:
print('WARNING: unable to import TestCerberus')
TestCerberus = None
try:
from test_voluptuous import TestVoluptuous
except Exception:
print('WARNING: unable to import TestVoluptuous')
TestVoluptuous = None
try:
from test_schematics import TestSchematics
except Exception:
print('WARNING: unable to import TestSchematics')
TestSchematics = None
PUNCTUATION = ' \t\n!"#$%&\'()*+,-./'
LETTERS = string.ascii_letters
UNICODE = '\xa0\xad¡¢£¤¥¦§¨©ª«¬ ®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ'
ALL = PUNCTUATION * 5 + LETTERS * 20 + UNICODE
random = random.SystemRandom()
# in order of performance for csv
other_tests = [
TestCAttrs,
TestValideer,
TestMarshmallow,
TestVoluptuous,
TestTrafaret,
TestSchematics,
TestDRF,
TestCerberus,
]
active_other_tests = [t for t in other_tests if t is not None]
class GenerateData:
def __init__(self):
pass
def rand_string(min_length, max_length, corpus=ALL):
return ''.join(random.choices(corpus, k=random.randrange(min_length, max_length)))
MISSING = object()
def null_missing_v(f, null_chance=0.2, missing_chance=None):
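    # returns None with probability null_chance, MISSING with probability
    # missing_chance (defaulting to null_chance), and otherwise the value f()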
r = random.random()
    if r < null_chance:
return None
missing_chance = null_chance if missing_chance is None else missing_chance
if r < (null_chance + missing_chance):
return MISSING
return f()
def null_missing_string(*args, **kwargs):
f = partial(rand_string, *args)
return null_missing_v(f, **kwargs)
def rand_email():
if random.random() < 0.2:
c1, c2 = UNICODE, LETTERS
else:
c1, c2 = LETTERS, LETTERS
return f'{rand_string(10, 50, corpus=c1)}@{rand_string(10, 50, corpus=c2)}.{rand_string(2, 5, corpus=c2)}'
def null_missing_email():
return null_missing_v(rand_email)
def rand_date():
r = random.randrange
return f'{r(1900, 2020)}-{r(0, 12)}-{r(0, 32)}T{r(0, 24)}:{r(0, 60)}:{r(0, 60)}'
def remove_missing(d):
if isinstance(d, dict):
return {k: remove_missing(v) for k, v in d.items() if v is not MISSING}
elif isinstance(d, list):
return [remove_missing(d_) for d_ in d]
else:
return d
def generate_case():
return remove_missing(dict(
id=random.randrange(1, 2000),
client_name=null_missing_string(10, 280, null_chance=0.05, missing_chance=0.05),
sort_index=random.random() * 200,
# client_email=null_missing_email(), # email checks differ with different frameworks
client_phone=null_missing_string(5, 15),
location=dict(
latitude=random.random() * 180 - 90,
longitude=random.random() * 180,
),
contractor=str(random.randrange(-100, 2000)),
upstream_http_referrer=null_missing_string(10, 1050),
grecaptcha_response=null_missing_string(10, 1050, null_chance=0.05, missing_chance=0.05),
last_updated=rand_date(),
skills=[dict(
subject=null_missing_string(5, 20, null_chance=0.01, missing_chance=0),
subject_id=i,
category=rand_string(5, 20),
qual_level=rand_string(5, 20),
qual_level_id=random.randrange(2000),
qual_level_ranking=random.random() * 20
) for i in range(random.randrange(1, 5))]
))
THIS_DIR = Path(__file__).parent.resolve()
def stdev(d):
try:
return stdev_(d)
except StatisticsError:
return 0
def run_tests(classes, cases, repeats, json=False):
if json:
classes = [c for c in classes if hasattr(c, 'to_json')]
lpad = max(len(t.package) for t in classes) + 4
print(f'testing {", ".join(t.package for t in classes)}, {repeats} times each')
results = []
csv_results = []
for test_class in classes:
times = []
p = test_class.package
for i in range(repeats):
count, pass_count = 0, 0
test = test_class(True)
models = []
if json:
models = [m for passed, m in (test.validate(c) for c in cases) if passed]
start = datetime.now()
for j in range(3):
if json:
for model in models:
test.to_json(model)
pass_count += 1
count += 1
else:
for case in cases:
passed, result = test.validate(case)
pass_count += passed
count += 1
time = (datetime.now() - start).total_seconds()
success = pass_count / count * 100
print(f'{p:>{lpad}} ({i+1:>{len(str(repeats))}}/{repeats}) time={time:0.3f}s, success={success:0.2f}%')
times.append(time)
print(f'{p:>{lpad}} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s')
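        # every repeat makes 3 validation passes over the cases, so timings
        # are normalised by 3 * len(cases) models below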
model_count = 3 * len(cases)
avg = mean(times) / model_count * 1e6
sd = stdev(times) / model_count * 1e6
results.append(f'{p:>{lpad}} best={min(times) / model_count * 1e6:0.3f}μs/iter '
f'avg={avg:0.3f}μs/iter stdev={sd:0.3f}μs/iter version={test_class.version}')
csv_results.append([p, test_class.version, avg])
print()
return results, csv_results
def main():
json_path = THIS_DIR / 'cases.json'
if not json_path.exists():
print('generating test cases...')
cases = [generate_case() for _ in range(2000)]
with json_path.open('w') as f:
json.dump(cases, f, indent=2, sort_keys=True)
else:
with json_path.open() as f:
cases = json.load(f)
tests = [TestPydantic]
if 'pydantic-only' not in sys.argv:
tests += active_other_tests
repeats = int(os.getenv('BENCHMARK_REPEATS', '5'))
test_json = 'TEST_JSON' in os.environ
results, csv_results = run_tests(tests, cases, repeats, test_json)
for r in results:
print(r)
if 'SAVE' in os.environ:
save_md(csv_results)
def save_md(data):
headings = 'Package', 'Version', 'Relative Performance', 'Mean validation time'
rows = [headings, ['---' for _ in headings]]
first_avg = None
for package, version, avg in sorted(data, key=itemgetter(2)):
if first_avg:
relative = f'{avg / first_avg:0.1f}x slower'
else:
relative = ''
first_avg = avg
rows.append([package, f'`{version}`', relative, f'{avg:0.1f}μs'])
table = '\n'.join(' | '.join(row) for row in rows)
text = f"""\
[//]: <> (Generated with benchmarks/run.py, DO NOT EDIT THIS FILE DIRECTLY, instead run `SAVE=1 python ./run.py`.)
{table}
"""
(Path(__file__).parent / '..' / 'docs' / '.benchmarks_table.md').write_text(text)
def diff():
json_path = THIS_DIR / 'cases.json'
with json_path.open() as f:
cases = json.load(f)
allow_extra = True
pydantic = TestPydantic(allow_extra)
others = [t(allow_extra) for t in active_other_tests]
for case in cases:
pydantic_passed, pydantic_result = pydantic.validate(case)
for other in others:
other_passed, other_result = other.validate(case)
if other_passed != pydantic_passed:
print(f'⨯ pydantic {pydantic_passed} != {other.package} {other_passed}')
debug(case, pydantic_result, other_result)
return
print('✓ data passes match for all packages')
if __name__ == '__main__':
if 'diff' in sys.argv:
diff()
else:
main()
# if None in other_tests:
# print('not all libraries could be imported!')
# sys.exit(1)
| samuelcolvin/pydantic | benchmarks/run.py | Python | mit | 8,926 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import datetime
import numpy as np
from copy import deepcopy
import sys
import logging
class System:
def __init__(self, _start_time, start_power=0, test_plan=None):
self.start_power = start_power
self.power_left = self.start_power
self.last_event = _start_time
self.last_decision = _start_time
self.decision_interval = (24*3600) / test_plan.state_number
self.reward = 0
self.time_to_read_input = 0
self.learning_a = deepcopy(test_plan.learning_module)
self.current_clock = self.learning_a.choose_action(0)
self.first_start = True
self.event_count = 0
self.awake_count = 0
self.ml_parameters = {'d_penalty': test_plan.delay_award, 'd_award': test_plan.delay_award,
's_penalty': test_plan.awake_penalty, 's_award': test_plan.awake_award,
'delay_threshold': test_plan.delay_threshold, 'delay_scale': 2}
self.power_consumption = {'sleep': 4.42, 'active': 63.68}
self.button_push_counter = 0
self.voltage = 6.54
self.b_event_action = False
print("d_penalty = ", self.ml_parameters['d_penalty'], "s_penalty", self.ml_parameters['s_penalty'],
's_award', self.ml_parameters['s_award'], "delay_threshold", self.ml_parameters['delay_threshold'])
def simulation_step(self, _current_time):
if self.time_to_read_input <= 0:
if self.b_event_action:
self.reward += self.ml_parameters['s_award']
self.b_event_action = False
else:
self.reward -= self.ml_parameters['s_penalty']
self.awake_count += 1
self.power_left += self.power_consumption['active'] * self.voltage / 1000.
self.time_to_read_input = self.current_clock
else:
self.power_left += self.power_consumption['sleep'] * self.voltage / 1000.
self.time_to_read_input -= 1
if _current_time - self.last_decision >= datetime.timedelta(seconds=self.decision_interval)\
or _current_time < self.last_decision:
self.last_decision = _current_time
state = (_current_time.hour * 3600 + _current_time.minute * 60 +
_current_time.second) / self.decision_interval
if not self.first_start:
# update (Q table)/knowledge
self.learning_a.update_knowledge(self.calculate_reward(), int(state))
self.first_start = False
# Learning - choose action
self.current_clock = self.learning_a.choose_action(int(state))
def reset_epoch(self):
self.power_left = self.start_power
self.button_push_counter = 0
self.event_count = 0
self.awake_count = 0
try:
self.learning_a.e_greedy *= .9
except AttributeError:
pass
def event_action(self, _):
self.event_count += 1
self.b_event_action = True
if self.time_to_read_input > np.random.normal(loc=self.ml_parameters['delay_threshold'],
scale=self.ml_parameters['delay_scale']):
self.reward -= self.ml_parameters['d_penalty']
self.button_push_counter += 1
else:
self.reward += self.ml_parameters['d_award']
self.reward -= self.time_to_read_input
return self.time_to_read_input
def calculate_reward(self):
reward = self.reward
self.reward = 0
self.awake_count = 0
self.event_count = 0
return reward
class SystemTransmission:
def __init__(self, _start_time, start_power=0, test_plan=None):
self.start_power = start_power
self.power_left = self.start_power
self.last_event = _start_time
self.last_decision = _start_time
self.decision_interval = 1200
self.reward = 0
self.power_reward = 0
self.time_to_read_input = 0
self.learning_a = deepcopy(test_plan.learning_module)
self.update_knowledge = False
self.event_count = 0
self.awake_count = 0
self.power_consumption = {'idle': 63.68, 'sleep': 4.42, 'off': None,
'gprs': [32.75, 36.39, 37.26, 38.82, 41.09, 42.99, 44.9, 46.81, 48.71, 50.62],
'sms': 16.37}
self.timings = {'sms': 18.16, 'gprs': [23.41, 25.13, 26.06, 27.19, 28.52, 29.74, 30.97, 32.3, 33.42, 34.65],
'deregister_sms': 2., 'deregister_gprs': 2.02}
self.rewards = {'delay_award': 150, 'delay_penalty': 150, 'buff_overflow': 10000, 'lost_message': 1000}
self.rewards_const = {'message_threshold': 500, 'power_scale': .2}
self.current_state = 'off'
self.max_buff_size = 9
self.buff = []
self.state_number = int(((24 * 60 * 60) / self.decision_interval) * (self.max_buff_size + 1))
self.delays = [[] for _ in range(self.state_number)]
self.button_push_counter = 0
self.action_time = None
self.last_action = None
self.aggregation_count = 0
self.mode = test_plan.mode
def simulation_step(self, _current_time):
# self.power_left += self.power_consumption['idle']
buff_timeout = 3600
if self.mode == 'timeout':
buff_timeout = 1200
for mgs in self.buff:
delay = self.datatime2seconds(_current_time) - self.datatime2seconds(mgs)
if delay < 0:
delay = 86400 - self.datatime2seconds(mgs) + self.datatime2seconds(_current_time)
# logging.error("rtime = " + str(_current_time) + "mgs = " + str(mgs))
if delay > buff_timeout:
# print(self.buff)
# logging.error("TIMEOUT! rtime = " + str(_current_time) + "mgs = " + str(mgs))
self.send_gprs(_current_time, add2buff=False)
self.learning_a.update_knowledge(-200, self.time2state(_current_time))
self.update_knowledge = False
def reset_epoch(self):
self.delays = [[] for _ in range(self.state_number)]
self.power_left = self.start_power
self.button_push_counter = 0
try:
self.learning_a.e_greedy *= .9
except AttributeError:
pass
def event_action(self, _current_time):
# logging.info("LEARNING MODE = " + str(self.mode))
state = self.time2state(_current_time)
if self.update_knowledge:
self.learning_a.update_knowledge(self.calculate_reward(_current_time), int(state))
self.update_knowledge = True
if self.mode == 'ml':
if len(self.buff) == 0:
action = self.learning_a.choose_action(state, restricted_state=[2])
elif len(self.buff) > 0:
action = self.learning_a.choose_action(state, restricted_state=[0])
elif self.mode == 'sms':
action = 0
elif self.mode == 'timeout':
action = 1
else:
logging.error("MODE NOT SET!")
sys.exit()
# logging.info("ACTION == " + str(action))
self.execute_action(action, _current_time)
def execute_action(self, action, time):
"""
        # 0 - send an SMS
        # 1 - add the message to the buffer
        # 2 - send the buffered messages via GPRS
:param action:
:param time:
"""
if action == 0:
self.send_sms(time)
elif action == 1:
if len(self.buff) > self.max_buff_size - 2:
self.send_gprs(time)
else:
self.buffer_data(time)
elif action == 2:
self.send_gprs(time)
def buffer_data(self, r_time):
self.last_action = 'buff'
if len(self.buff) == 0:
self.action_time = r_time
self.buff.append(r_time)
else:
self.buff.append(r_time)
def send_sms(self, r_time):
self.last_action = 'sms'
self.action_time = r_time
time = self.timings['sms'] - self.timings['deregister_sms']
energy = self.power_consumption['sms'] # power consumed for sending sms
self.power_left += energy
if time < 0:
sys.exit()
self.delays[self.time2state(r_time)].append(time)
def send_gprs(self, r_time, add2buff=True):
self.aggregation_count = len(self.buff)
self.last_action = 'gprs'
if add2buff:
self.buff.append(r_time)
messages_number = len(self.buff)
time = self.timings['gprs'][messages_number] - self.timings['deregister_gprs']
energy = self.power_consumption['gprs'][messages_number]
self.power_left += energy
for mgs in self.buff:
delay = self.datatime2seconds(r_time) - self.datatime2seconds(mgs) + time
if delay > 14400:
logging.error("rtime = " + str(r_time) + "mgs = " + str(mgs))
if delay < 0:
delay = 86400 - self.datatime2seconds(mgs) + self.datatime2seconds(r_time) + time
self.delays[self.time2state(r_time)].append(delay)
self.buff.clear()
def calculate_reward(self, r_time):
if self.dates2seconds(r_time, self.action_time) > 350:
if self.last_action != 'buff':
reward = 100
else:
reward = -100
else:
if self.last_action == 'buff':
reward = 100
else:
reward = -100
if self.last_action == 'gprs':
if self.aggregation_count < 4:
reward -= 400
else:
reward += 200
# self.action_time = r_time
return reward
def delay2reward(self, seconds):
if seconds > self.rewards_const['message_threshold']:
self.button_push_counter += 1
return self.rewards['delay_penalty']
else:
return self.rewards['delay_award']
def time2state(self, _current_time):
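        # a day holds 86400 / 1200 = 72 time slots; the buffer bucket selects
        # a block of 72 states, hence time_state + 72 * buff2state(...)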
time_state = int((_current_time.hour * 3600 + _current_time.minute * 60 + _current_time.second) /
self.decision_interval)
val = time_state + 72 * self.buff2state(len(self.buff))
return val
def datatime2seconds(self, _current_time):
return _current_time.hour * 3600 + _current_time.minute * 60 + _current_time.second
def dates2seconds(self, first_time, second_time):
res_time = self.datatime2seconds(first_time) - self.datatime2seconds(second_time)
if res_time < 0:
res_time = 86400 - self.datatime2seconds(second_time) + self.datatime2seconds(first_time)
return res_time
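    # buff2state buckets the buffer length logarithmically to keep the state
    # space small: 0 -> 0, 1 -> 1, 2-3 -> 2, 4-8 -> 3, 9 -> 4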
def buff2state(self, n):
if n == 0:
return 0
elif n == 1:
return 1
elif n < 8:
return int(np.log2(n)+1)
else:
return int(np.log2(n-1) + 1)
| pwcz/ml_embedded | simulation_tool/symulator.py | Python | mit | 11,083 |
# Accelerator for pip, the Python package manager.
#
# Author: Peter Odding <[email protected]>
# Last Change: November 16, 2014
# URL: https://github.com/paylogic/pip-accel
"""
:py:mod:`pip_accel.caches.local` - Local cache backend
======================================================
This module implements the local cache backend which stores distribution
archives on the local file system. This is a very simple cache backend, all it
does is create directories and write local files. The only trick here is that
new binary distribution archives are written to temporary files which are
then moved into place atomically using :py:func:`os.rename()` to avoid partial
reads caused by running multiple invocations of pip-accel at the same time
(which happened in `issue 25`_).
.. _issue 25: https://github.com/paylogic/pip-accel/issues/25
"""
# Standard library modules.
import logging
import os
import shutil
# Modules included in our package.
from pip_accel.caches import AbstractCacheBackend
from pip_accel.utils import makedirs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class LocalCacheBackend(AbstractCacheBackend):
"""The local cache backend stores Python distribution archives on the local file system."""
PRIORITY = 10
def get(self, filename):
"""
Check if a distribution archive exists in the local cache.
:param filename: The filename of the distribution archive (a string).
:returns: The pathname of a distribution archive on the local file
system or ``None``.
"""
pathname = os.path.join(self.config.binary_cache, filename)
if os.path.isfile(pathname):
logger.debug("Distribution archive exists in local cache (%s).", pathname)
return pathname
else:
logger.debug("Distribution archive doesn't exist in local cache (%s).", pathname)
return None
def put(self, filename, handle):
"""
Store a distribution archive in the local cache.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
"""
file_in_cache = os.path.join(self.config.binary_cache, filename)
logger.debug("Storing distribution archive in local cache: %s", file_in_cache)
makedirs(os.path.dirname(file_in_cache))
# Stream the contents of the distribution archive to a temporary file
# to avoid race conditions (e.g. partial reads) between multiple
# processes that are using the local cache at the same time.
temporary_file = '%s.tmp-%i' % (file_in_cache, os.getpid())
logger.debug("Using temporary file to avoid partial reads: %s", temporary_file)
with open(temporary_file, 'wb') as temporary_file_handle:
shutil.copyfileobj(handle, temporary_file_handle)
# Atomically move the distribution archive into its final place
# (again, to avoid race conditions between multiple processes).
logger.debug("Moving temporary file into place ..")
os.rename(temporary_file, file_in_cache)
logger.debug("Finished caching distribution archive in local cache.")
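# Generic sketch of the atomic-write pattern used by put() above; this helper
# is illustrative only (an assumption for exposition, not part of the
# pip-accel API):
def _atomic_write_sketch(pathname, handle):
    """Stream `handle` to `pathname` without exposing partial reads."""
    temporary_file = '%s.tmp-%i' % (pathname, os.getpid())
    with open(temporary_file, 'wb') as temporary_file_handle:
        shutil.copyfileobj(handle, temporary_file_handle)
    # os.rename() is atomic on POSIX when source and target share a filesystem.
    os.rename(temporary_file, pathname)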
| theyoprst/pip-accel | pip_accel/caches/local.py | Python | mit | 3,310 |