code stringlengths 2-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2-1.05M |
---|---|---|---|---|---
# -*- coding:utf-8 -*-
import json
import sys
from importlib import import_module
from importlib.util import find_spec
from owlmixin import OwlMixin, TOption
from owlmixin.util import load_json
from jumeaux.addons.res2res import Res2ResExecutor
from jumeaux.logger import Logger
from jumeaux.models import Res2ResAddOnPayload, Response, Request
from jumeaux.utils import when_filter
logger: Logger = Logger(__name__)
LOG_PREFIX = "[res2res/json]"
def wrap(anything: bytes, encoding: str) -> str:
"""Use for example of Transformer.function
"""
return json.dumps({"wrap": load_json(anything.decode(encoding))}, ensure_ascii=False)
class Transformer(OwlMixin):
module: str
function: str = "transform"
class Config(OwlMixin):
transformer: Transformer
default_encoding: str = "utf8"
when: TOption[str]
class Executor(Res2ResExecutor):
def __init__(self, config: dict) -> None:
self.config: Config = Config.from_dict(config or {})
t: Transformer = self.config.transformer
try:
if not find_spec(t.module):
raise ModuleNotFoundError
except ModuleNotFoundError as e:
logger.error(f"{LOG_PREFIX} Module {t.module} is not existed.")
sys.exit(1)
try:
self.module = getattr(import_module(t.module), t.function)
except AttributeError as e:
logger.error(f"{LOG_PREFIX} {t.function} is not existed in {t.module} module")
sys.exit(1)
def exec(self, payload: Res2ResAddOnPayload) -> Res2ResAddOnPayload:
req: Request = payload.req
res: Response = payload.response
if not self.config.when.map(lambda x: when_filter(x, {"req": req, "res": res})).get_or(
True
):
return payload
json_str: str = self.module(res.body, res.encoding.get())
new_encoding: str = res.encoding.get_or(self.config.default_encoding)
return Res2ResAddOnPayload.from_dict(
{
"response": {
"body": json_str.encode(new_encoding, errors="replace"),
"type": "json",
"encoding": new_encoding,
"headers": res.headers,
"url": res.url,
"status_code": res.status_code,
"elapsed": res.elapsed,
"elapsed_sec": res.elapsed_sec,
},
"req": req,
"tags": payload.tags,
}
)
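# Illustrative add-on configuration (a sketch, not part of this module). The
# module path "my_addons.transformers" is hypothetical; any importable function
# with the same signature as `wrap` above (bytes, encoding -> JSON string) can
# be plugged in as the transformer.
#
#   executor = Executor({
#       "transformer": {"module": "my_addons.transformers", "function": "transform"},
#       "default_encoding": "utf8",
#   })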
| tadashi-aikawa/gemini | jumeaux/addons/res2res/json.py | Python | mit | 2,536 |
#!/usr/bin/env python
"""
Prototype to DOT (Graphviz) converter by Dario Gomez
Table format from django-extensions
"""
from protoExt.utils.utilsBase import Enum, getClassName
from protoExt.utils.utilsConvert import slugify2
class GraphModel():
def __init__(self):
self.tblStyle = False
self.dotSource = 'digraph Sm {'
self.dotSource += 'fontname="Helvetica";fontsize = 8;'
self.GRAPH_LEVEL = Enum(['all', 'essential', 'required' , 'primary', 'title'])
self.GRAPH_FORM = Enum(['orf', 'erf', 'drn'])
if self.tblStyle:
self.dotSource += 'node [shape="plaintext"];\n'
self.tblTitle = '\n{0} [label=<<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0" style="width:100px"><TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"> <FONT FACE="Helvetica Bold" COLOR="white">{1}</FONT> </TD></TR>'
self.tblField = '\n<TR><TD ALIGN="LEFT" BORDER="0"><FONT FACE="Helvetica {2}">{0}</FONT></TD><TD ALIGN="LEFT"><FONT FACE="Helvetica {2}">{1}</FONT></TD></TR>'
else:
# Animal [label = "{{{1}|+ name : string\l+ age : int\l|+ die() : void\l}"]
self.dotSource += 'rankdir = BT;node [shape=record,width=0,height=0,concentrate=true];\n'
self.tblRecord = '\n{0} [label = "{{{1}|'
self.lnkComposition = '[dir=both,arrowhead=diamond,arrowtail=none]\n'
self.lnkAgregation = '[dir=both,arrowhead=ediamond,arrowtail=none]\n'
self.lnkNoCascade = '[dir=both,arrowhead=diamondtee,arrowtail=none]\n'
self.lnkHeritage = '[dir=both,arrowhead=empty,arrowtail=none]\n'
self.lnkER = '[dir=both,arrowhead=none,arrowtail=invempty]\n'
def getDiagramDefinition(self, diagramSet):
self.diagrams = []
self.entities = []
for pDiag in diagramSet:
gDiagram = {
'code': getClassName(pDiag.code) ,
'label': slugify2( pDiag.code ),
'clusterName': slugify2( getattr(pDiag, 'title', pDiag.code)),
'graphLevel' : getattr(pDiag, 'graphLevel' , self.GRAPH_LEVEL.all),
'graphForm' : getattr(pDiag, 'graphForm' , self.GRAPH_FORM.orf),
'showPrpType': getattr(pDiag, 'showPrpType' , False),
'showBorder' : getattr(pDiag, 'showBorder' , False),
'showFKey' : getattr(pDiag, 'showFKey' , False),
'prefix' : slugify2( getattr(pDiag, 'prefix' , '')),
'entities': []
}
for pDiagEntity in pDiag.diagramentity_set.all():
pEntity = pDiagEntity.entity
enttCode = self.getEntityCode(pEntity.code, gDiagram.get('prefix'))
                # If this entity already appears in another diagram, do not draw it again
if enttCode in self.entities:
continue
self.entities.append(enttCode)
gEntity = {
'code': enttCode,
'fields': [],
'relations': []
}
for pProperty in pEntity.property_set.all():
pptCode = slugify2(pProperty.code, '_')
if pProperty.isForeign:
pLinkTo = self.getEntityCode(pProperty.relationship.refEntity.code, gDiagram.get('prefix'))
gEntity['relations'].append({
'code': pptCode,
'linkTo': pLinkTo,
'primary': pProperty.isPrimary,
'required': pProperty.isRequired,
'essential': pProperty.isEssential,
'foreign': True
})
else:
pType = slugify2(pProperty.baseType , '_')
gEntity['fields'].append({
'code': pptCode,
'type': pType or 'string',
'primary': pProperty.isPrimary,
'required': pProperty.isRequired,
'essential': pProperty.isEssential,
'foreign': False
})
gDiagram['entities'].append(gEntity)
self.diagrams.append(gDiagram)
def generateDotModel(self):
        # Draw the entities
for gDiagram in self.diagrams:
if gDiagram.get('graphLevel') < self.GRAPH_LEVEL.title :
self.dotSource += '\nsubgraph cluster_{0} {{'.format(gDiagram.get('code'))
if not gDiagram.get('showBorder', False) :
self.dotSource += 'style=dotted;'
if len(gDiagram.get('label', '')) > 0:
self.dotSource += 'label="{}";'.format(gDiagram.get('label', ''))
for gEntity in gDiagram['entities']:
self.entity2dot(gDiagram, gEntity)
self.dotSource += '}\n'
        # Draw the links
for gDiagram in self.diagrams:
for gEntity in gDiagram['entities']:
self.link2dot(gEntity, gDiagram.get( 'showFKey'))
self.dotSource += '}'
        # Draw the relations
# for gDiagram in self.diagrams:
# for relation in gEntity['relations']:
# if relation['target'] in nodes:
# relation['needs_node'] = False
return self.dotSource
def link2dot(self, gEntity, showFKey):
for gLink in gEntity['relations']:
pEntity = gEntity.get('code')
pLinkTo = gLink.get('linkTo')
if ( not showFKey ) and ( pLinkTo not in self.entities ):
continue
self.dotSource += '{0} -> {1} '.format(pEntity, pLinkTo) + self.lnkComposition
def entity2dot(self, gDiagram, gEntity):
if self.tblStyle:
enttTable = self.tblTitle.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code')))
else:
enttRecord = self.tblRecord.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code')))
# 0 : colName; 1 : baseType; 2 : Bold / Italic
for gField in gEntity['fields'] + gEntity['relations'] :
if gDiagram.get('showPrpType') :
sPrpType = gField.get('type', ' ')
else : sPrpType = ' '
sPk = ''
fildLv = 0
diagLv = gDiagram.get('graphLevel')
if gField.get('primary') :
fildLv = self.GRAPH_LEVEL.primary
sPk = 'Bold'
elif gField.get('required'):
fildLv = self.GRAPH_LEVEL.required
elif gField.get('essential'):
fildLv = self.GRAPH_LEVEL.essential
            # Only include the field if it reaches the diagram's detail level
if fildLv >= diagLv:
sFk = ''
if gField.get('foreign'):
sFk = ' Italic'
if self.tblStyle:
enttTable += self.tblField.format(gField.get('code'), sPrpType, sPk + sFk)
else:
if len(sPk) > 0:
sPk = '*'
if len(sFk) > 0:
sPk += '+'
if len(sPk) > 0:
sPk += ' '
if len(sPrpType) > 1:
sPrpType = ': ' + sPrpType
enttRecord += '{2}{0}{1}\l'.format(gField.get('code'), sPrpType, sPk)
if self.tblStyle:
enttTable += '</TABLE>>]\n'
else:
enttRecord += '}"]\n'
# self.dotSource += enttTable
self.dotSource += enttRecord
def getEntityCode(self, code, prefix):
        # Format the entity name
enttCode = code.lower()
prefix = prefix or ''
if len(prefix) and enttCode.startswith(prefix.lower()):
enttCode = enttCode[len(prefix):]
return getClassName(enttCode)
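# Illustrative usage (a sketch; `diagram_queryset` stands for a queryset of
# protoExt diagram objects and is assumed here):
#
#   gm = GraphModel()
#   gm.getDiagramDefinition(diagram_queryset)
#   dot_source = gm.generateDotModel()
#   # dot_source can then be rendered with Graphviz, e.g. `dot -Tpng`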
| DarioGT/docker-carra | src/prototype/actions/graphModel.py | Python | mit | 8,442 |
#!/usr/bin/python
import json
# Load existing treasures if the file exists and parses; otherwise start fresh.
try:
    with open('treasures.json', 'r') as f:
        json_contents = json.load(f)
except (IOError, ValueError):
    json_contents = dict()
print 'Type \'q\' to [q]uit'
while True:
name = raw_input('Treasure Name: ')
if name == 'q':
break
print 'Type \'n\' to stop entering heroes and go to [n]ext treasure'
set_contents = dict()
hero = ''
while True:
hero = raw_input('Hero name: ')
if hero == 'n' or hero == 'q':
break
else:
bundle_rating = raw_input('Item set rating [1-3]: ')
set_contents[hero] = bundle_rating
json_contents[name] = set_contents
if hero == 'q':
break
f = open('treasures.json', 'w')
json.dump(json_contents, f, indent=4)
f.close()
| mosbasik/dotatreasures | create_treasures_json.py | Python | mit | 794 |
import json
from tornado import httpclient as hc
from tornado import gen
from graphite_beacon.handlers import LOGGER, AbstractHandler
class HipChatHandler(AbstractHandler):
name = 'hipchat'
# Default options
defaults = {
'url': 'https://api.hipchat.com',
'room': None,
'key': None,
}
colors = {
'critical': 'red',
'warning': 'yellow',
'normal': 'green',
}
def init_handler(self):
self.room = self.options.get('room')
self.key = self.options.get('key')
assert self.room, 'Hipchat room is not defined.'
assert self.key, 'Hipchat key is not defined.'
self.client = hc.AsyncHTTPClient()
@gen.coroutine
def notify(self, level, *args, **kwargs):
LOGGER.debug("Handler (%s) %s", self.name, level)
data = {
'message': self.get_short(level, *args, **kwargs).decode('UTF-8'),
'notify': True,
'color': self.colors.get(level, 'gray'),
'message_format': 'text',
}
yield self.client.fetch('{url}/v2/room/{room}/notification?auth_token={token}'.format(
url=self.options.get('url'), room=self.room, token=self.key), headers={
'Content-Type': 'application/json'}, method='POST', body=json.dumps(data))
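# Illustrative graphite-beacon configuration for this handler (a sketch; the
# room name and API token are placeholders):
#
#   "hipchat": {
#       "room": "alerts",
#       "key": "<hipchat-api-token>"
#   }
#
# "url" only needs to be overridden for self-hosted HipChat servers; the
# default https://api.hipchat.com is used otherwise.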
| klen/graphite-beacon | graphite_beacon/handlers/hipchat.py | Python | mit | 1,328 |
"""
Task description (translated from Estonian):
3. Matrix reduction (6p)
Write a function vähenda ("reduce") that takes a numeric matrix with an even
number of rows and columns as its argument and returns a new matrix with half
as many rows and half as many columns, in which each element is the average of
four elements of the original matrix, according to the scheme shown in the
following example:
This means that
vähenda([[1,5,2,6,3,6], [1,3,2,7,3,3], [4,8,5,1,1,6], [4,4,9,5,6,1]])
must return
[[2.5, 4.25, 3.75], [5.0, 5.0, 3.5]].
"""
from grader import *
from KT2_util import make_checker
def vähenda(maatriks):
tulemus = []
for r in range(0, len(maatriks), 2):
rida = []
for c in range(0, len(maatriks[r]), 2):
tul = 0
for i in range(4):
tul += maatriks[r+i%2][c+i//2]
rida.append(tul / 4.0)
tulemus.append(rida)
return tulemus
checker = make_checker(vähenda)
checker([[1, 2], [3, 4]],
description="Ruudukujuline 2x2 maatriks- {function}({args}) == {expected}")
checker([[1, 2, 3, 4], [5, 6, 7, 8]],
description="Mitte-ruudukujuline maatriks - {function}({args}) == {expected}")
checker([[1,5,2,6,3,6], [1,3,2,7,3,3], [4,8,5,1,1,6], [4,4,9,5,6,1]])
checker([[1,5,2,6,3,6], [1,3,2,7,3,3], [4,8,5,1,1,6], [4,4,9,5,6,1]])
checker([],
description="Erijuht, tühi maatriks- {function}({args}) == {expected}")
random_tests = [
[[7, 5, 2, 6, 6, 9], [2, 8, 6, 3, 8, 7]],
[[3, 1, 0, 9], [0, 5, 1, 7]],
[[4, 4], [0, 8], [4, 9], [3, 0], [3, 6], [8, 2]],
[[9, 4, 6, 5, 4, 6],
[3, 8, 7, 1, 2, 5],
[8, 9, 8, 5, 0, 2],
[2, 7, 2, 4, 3, 5],
[2, 6, 8, 0, 2, 9],
[7, 4, 6, 4, 8, 2]],
[[-1, -3], [-6, 6], [5, -6], [1, 0]],
[[-5, -10, 6, -1], [-8, -10, -5, 7], [-7, 9, -5, -5], [-8, -7, -10, 8]],
[[-3, 6, -3, 6], [4, -6, 3, 8], [-9, -6, 7, -6], [6, 6, 4, -3]],
[[1, 6], [2, -6]]
]
for test_case in random_tests:
    checker(test_case)
| macobo/python-grader | tasks/MTAT.03.100/2013/Midterm_1_resit/KT2_J1_vahenda_tester.py | Python | mit | 1,967 |
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import xgboost as xgb
import argparse
from os import path
import os
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from utils import *
import pickle
np.random.seed(345345)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
# functions for xgboost training
def evalF1(preds, dtrain):
from sklearn.metrics import f1_score
labels = dtrain.get_label()
return 'f1-score', f1_score(labels, preds > 0.5)
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y):
self.dtrain = xgb.DMatrix(X, label=y)
def get_score(self, params):
params['max_depth'] = int(params['max_depth'])
params['min_child_weight'] = int(params['min_child_weight'])
params['num_boost_round'] = int(params['num_boost_round'])
print('Training with params:')
print(params)
cv_result = xgb.cv(params=params,
dtrain=self.dtrain,
num_boost_round=params['num_boost_round'],
nfold=5,
stratified=True,
feval=evalF1,
maximize=True,
fpreproc=fpreproc,
verbose_eval=True)
score = cv_result.ix[params['num_boost_round'] - 1, 0]
print(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, max_evals):
space = {
'num_boost_round': hp.quniform('num_boost_round', 10, 200, 10),
'eta': hp.quniform('eta', 0.1, 0.3, 0.1),
'gamma': hp.quniform('gamma', 0, 1, 0.2),
'max_depth': hp.quniform('max_depth', 1, 6, 1),
'min_child_weight': hp.quniform('min_child_weight', 1, 3, 1),
'subsample': hp.quniform('subsample', 0.8, 1, 0.1),
'silent': 1,
'objective': 'binary:logistic'
}
s = Score(X, y)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
best['max_depth'] = int(best['max_depth'])
best['min_child_weight'] = int(best['min_child_weight'])
best['num_boost_round'] = int(best['num_boost_round'])
del s
return best
def out_fold_pred(params, X, y, reps):
preds = np.zeros((y.shape[0]))
params['silent'] = 1
params['objective'] = 'binary:logistic'
params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1)
for train_ix, test_ix in makeKFold(5, y, reps):
X_train, X_test = X[train_ix, :], X[test_ix, :]
y_train = y[train_ix]
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)
bst = xgb.train(params=params,
dtrain=dtrain,
num_boost_round=params['num_boost_round'],
evals=[(dtrain, 'train')],
feval=evalF1,
maximize=True,
verbose_eval=None)
preds[test_ix] = bst.predict(dtest)
return preds
def get_model(params, X, y):
dtrain = xgb.DMatrix(X, label=y)
params['silent'] = 1
params['objective'] = 'binary:logistic'
params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1)
bst = xgb.train(params=params,
dtrain=dtrain,
num_boost_round=params['num_boost_round'],
evals=[(dtrain, 'train')],
feval=evalF1,
maximize=True,
verbose_eval=None)
return bst
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
trials = Trials()
params = optimize(trials, X_train, y_train, 100)
out_fold = out_fold_pred(params, X_train, y_train, 1)
clf = get_model(params, X_train, y_train)
dtest = xgb.DMatrix(X_test)
preds = clf.predict(dtest)
save_dir = '../level3-model-final/' + str(args.yix)
print(save_dir)
if not path.exists(save_dir):
os.makedirs(save_dir)
# save model, parameter, outFold_pred, pred
with open(path.join(save_dir, 'model.pkl'), 'wb') as f_model:
pickle.dump(clf, f_model)
with open(path.join(save_dir, 'param.pkl'), 'wb') as f_param:
pickle.dump(params, f_param)
np.save(path.join(save_dir, 'pred.npy'), preds)
np.save(path.join(save_dir, 'outFold.npy'), out_fold)
| jingxiang-li/kaggle-yelp | model/level3_model.py | Python | mit | 5,368 |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
import numpy as np
from chainerrl.envs.abc import ABC
from chainerrl.explorers.epsilon_greedy import LinearDecayEpsilonGreedy
from chainerrl.links import Sequence
from chainerrl import policies
from chainerrl import q_function
from chainerrl import replay_buffer
from basetest_training import _TestTraining
class _TestPGTOnABC(_TestTraining):
def make_agent(self, env, gpu):
model = self.make_model(env)
policy = model['policy']
q_func = model['q_function']
actor_opt = optimizers.Adam(alpha=1e-4)
actor_opt.setup(policy)
critic_opt = optimizers.Adam(alpha=1e-3)
critic_opt.setup(q_func)
explorer = self.make_explorer(env)
rbuf = self.make_replay_buffer(env)
return self.make_pgt_agent(env=env, model=model,
actor_opt=actor_opt, critic_opt=critic_opt,
explorer=explorer, rbuf=rbuf, gpu=gpu)
def make_pgt_agent(self, env, model, actor_opt, critic_opt, explorer,
rbuf, gpu):
raise NotImplementedError()
def make_explorer(self, env):
def random_action_func():
a = env.action_space.sample()
if isinstance(a, np.ndarray):
return a.astype(np.float32)
else:
return a
return LinearDecayEpsilonGreedy(1.0, 0.2, 1000, random_action_func)
def make_replay_buffer(self, env):
return replay_buffer.ReplayBuffer(10 ** 5)
class _TestPGTOnContinuousPOABC(_TestPGTOnABC):
def make_model(self, env):
n_dim_obs = env.observation_space.low.size
n_dim_action = env.action_space.low.size
n_hidden_channels = 50
policy = Sequence(
L.Linear(n_dim_obs, n_hidden_channels),
F.relu,
L.Linear(n_hidden_channels, n_hidden_channels),
F.relu,
L.LSTM(n_hidden_channels, n_hidden_channels),
policies.FCGaussianPolicy(
n_input_channels=n_hidden_channels,
action_size=n_dim_action,
min_action=env.action_space.low,
max_action=env.action_space.high)
)
q_func = q_function.FCLSTMSAQFunction(
n_dim_obs=n_dim_obs,
n_dim_action=n_dim_action,
n_hidden_layers=2,
n_hidden_channels=n_hidden_channels)
return chainer.Chain(policy=policy, q_function=q_func)
def make_env_and_successful_return(self, test):
return ABC(discrete=False, partially_observable=True,
deterministic=test), 1
def make_replay_buffer(self, env):
return replay_buffer.EpisodicReplayBuffer(10 ** 5)
class _TestPGTOnContinuousABC(_TestPGTOnABC):
def make_model(self, env):
n_dim_obs = env.observation_space.low.size
n_dim_action = env.action_space.low.size
n_hidden_channels = 50
policy = policies.FCGaussianPolicy(
n_input_channels=n_dim_obs,
n_hidden_layers=2,
n_hidden_channels=n_hidden_channels,
action_size=n_dim_action,
min_action=env.action_space.low,
max_action=env.action_space.high)
q_func = q_function.FCSAQFunction(
n_dim_obs=n_dim_obs,
n_dim_action=n_dim_action,
n_hidden_layers=2,
n_hidden_channels=n_hidden_channels)
return chainer.Chain(policy=policy, q_function=q_func)
def make_env_and_successful_return(self, test):
return ABC(discrete=False, deterministic=test), 1
| toslunar/chainerrl | tests/agents_tests/basetest_pgt.py | Python | mit | 3,966 |
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'esite.settings')
import django
django.setup()
from auto.models import Car
#
def add_car(make, model, km, year, color, eng, drive,trans, icolor):
c = Car.objects.get_or_create(make=make, model=model, kilometers=km, year=year, color=color, engine_size=eng, drivetrain=drive, transmition=trans, interanl_color=icolor)
def populate():
# car = Car(make='Acura',model='TL', kilometers=74673, year=2012, color='White', engine_size=3.7, drivetrain='AWD', transmition='MA')
add_car('Acura', 'TL', 74673, 2012, 'White', 3.7, 'AWD','MA','White')
add_car('Volkswagen', 'Touareg', 5344, 2015, 'Silver', 3.6, 'AWD','AU','White')
if __name__ == '__main__':
print "Starting Car population script..."
populate()
# def populate():
# python_cat = add_cat('Python')
#
# add_page(cat=python_cat,
# title="Official Python Tutorial",
# url="http://docs.python.org/2/tutorial/")
#
# add_page(cat=python_cat,
# title="How to Think like a Computer Scientist",
# url="http://www.greenteapress.com/thinkpython/")
#
# add_page(cat=python_cat,
# title="Learn Python in 10 Minutes",
# url="http://www.korokithakis.net/tutorials/python/")
#
# django_cat = add_cat("Django")
#
# add_page(cat=django_cat,
# title="Official Django Tutorial",
# url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/")
#
# add_page(cat=django_cat,
# title="Django Rocks",
# url="http://www.djangorocks.com/")
#
# add_page(cat=django_cat,
# title="How to Tango with Django",
# url="http://www.tangowithdjango.com/")
#
# frame_cat = add_cat("Other Frameworks")
#
# add_page(cat=frame_cat,
# title="Bottle",
# url="http://bottlepy.org/docs/dev/")
#
# add_page(cat=frame_cat,
# title="Flask",
# url="http://flask.pocoo.org")
#
# # Print out what we have added to the user.
# for c in Category.objects.all():
# for p in Page.objects.filter(category=c):
# print "- {0} - {1}".format(str(c), str(p))
#
# def add_page(cat, title, url, views=0):
# p = Page.objects.get_or_create(category=cat, title=title)[0]
# p.url=url
# p.views=views
# p.save()
# return p
#
# def add_cat(name):
# c = Category.objects.get_or_create(name=name)[0]
# return c
# Start execution here!
# if __name__ == '__main__':
# print "Starting Rango population script..."
# populate()
| vollov/django-template | django/esite/populate_auto.py | Python | mit | 2,560 |
import aioamqp
import asyncio
import umsgpack as msgpack
import logging
from functools import wraps
from uuid import uuid4
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class RemoteException(Exception):
pass
class Client(object):
def __init__(self, queue='', host='localhost', port=None, ssl=False):
self._transport = None
self._protocol = None
self._channel = None
self._callback_queue = None
self._queue = queue
self._host = host
self._port = port
self._ssl = ssl
self._waiter = asyncio.Event()
async def _connect(self, *args, **kwargs):
""" an `__init__` method can't be a coroutine"""
self._transport, self._protocol = await aioamqp.connect(*args, **kwargs)
host = kwargs.get('host', 'localhost')
port = kwargs.get('port')
ssl = kwargs.get('ssl', False)
if port is None:
port = 5671 if ssl else 5672
logger.info(f'Connected to amqp://{host}:{port}/')
self._channel = await self._protocol.channel()
result = await self._channel.queue_declare(queue_name='', exclusive=True)
self._callback_queue = result['queue']
logger.info(f'Created callback queue: {self._callback_queue}')
await self._channel.basic_consume(
self._on_response,
no_ack=True,
queue_name=self._callback_queue,
)
async def _on_response(self, channel, body, envelope, properties):
if self._corr_id == properties.correlation_id:
self._response = body
logger.info(f'Received response for {self._corr_id}')
self._waiter.set()
async def __call__(self, method, *args, **kwargs):
if not self._protocol:
await self._connect(host=self._host, port=self._port, ssl=self._ssl)
self._response = None
self._corr_id = str(uuid4())
payload = msgpack.packb((method, args, kwargs))
logger.info(f'Publishing to {self._queue}: {method} ({self._corr_id})')
await self._channel.basic_publish(
payload=payload,
exchange_name='',
routing_key=self._queue,
properties={
'reply_to': self._callback_queue,
'correlation_id': self._corr_id,
},
)
logger.info(f'Waiting for response on queue {self._callback_queue} ({self._corr_id})')
await self._waiter.wait()
await self._protocol.close()
try:
exc, result = msgpack.unpackb(self._response)
except Exception as err:
logger.error(f'Could not unpack response: {err}')
return None
if exc is not None:
raise RemoteException(exc)
return result
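    # Illustrative usage (a sketch; the queue name and method are placeholders):
    #
    #   client = Client(queue='rpc_queue', host='localhost')
    #   result = await client.add(1, 2)   # dispatched through __getattr__ below
    #
    # The remote worker is expected to reply on the callback queue with a
    # msgpack-encoded (exception, result) pair, as unpacked in __call__ above.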
def __getattr__(self, method):
@wraps(self.__call__)
async def wrapper(*args, **kwargs):
return await self(method, *args, **kwargs)
        return wrapper
| jjacobson93/aiorpc | aiorpc/client.py | Python | mit | 3,036 |
def my_max(a, b):
if a > b:
return a
elif b > a:
return b
else:
return None
x = my_max(1, 2)
print x
print my_max(3, 2)
| Akagi201/learning-python | pyramid/Pyramid Web开发入门/2. Python语言基础/fun_return.py | Python | mit | 172 |
import pandas as pd
import pytest
from athletic_pandas.algorithms import heartrate_models
def test_heartrate_model():
heartrate = pd.Series(range(50))
power = pd.Series(range(0, 100, 2))
model, predictions = heartrate_models.heartrate_model(heartrate, power)
assert model.params['hr_rest'].value == 0.00039182374117378518
assert model.params['hr_max'].value == 195.75616175654685
assert model.params['dhr'].value == 0.49914432620946803
assert model.params['tau_rise'].value == 0.98614419733274383
assert model.params['tau_fall'].value == 22.975975612579408
assert model.params['hr_drift'].value == 6.7232899323328612 * 10**-5
assert len(predictions) == 50
| AartGoossens/athletic_pandas | tests/algorithms/test_heartrate_models.py | Python | mit | 701 |
#!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import *
from .letv import letvcloud_download_by_vu
from .qq import qq_download_by_vid
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
url = 'http://danmu.aixifan.com/V2/%s' % id
return get_html(url)
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
info = json.loads(get_html('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
a = 'http://api.aixifan.com/plays/%s/realSource' % vid
s = json.loads(get_content(a, headers={'deviceType': '1'}))
urls = s['data']['files'][-1]['url']
size = urls_size(urls)
print_info(site_info, title, 'mp4', size)
if not info_only:
download_urls(urls, title, 'mp4', size,
output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except:
pass
def acfun_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
assert re.match(r'http://[^\.]+.acfun.[^\.]+/\D/\D\D(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\".*href=\"[^\"]+\".*title=\"([^\"]+)\"", html)
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1] if video[1] != '删除标签' else title
acfun_download_by_vid(p_vid, p_title,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
site_info = "AcFun.tv"
download = acfun_download
download_playlist = playlist_not_supported('acfun')
| lilydjwg/you-get | src/you_get/extractors/acfun.py | Python | mit | 3,211 |
import math
from autoprotocol import UserError
from modules.utils import *
def transform(protocol, params):
# general parameters
constructs = params['constructs']
num_constructs = len(constructs)
plates = list(set([construct.container for construct in constructs]))
if len(plates) != 1:
raise UserError('You can only transform aliquots from one common container.')
# **** need to be able to check if plate is sealed to add run-chaining ****
mm_mult = 1.3
transformation_plate = protocol.ref("transformation_plate", None, "96-pcr", discard=True)
protocol.incubate(transformation_plate, "cold_20", "10:minute")
transformation_wells = transformation_plate.wells_from(0, num_constructs)
for i in range(num_constructs):
protocol.provision("rs16pbjc4r7vvz", transformation_wells[i], "50:microliter")
for i, well in enumerate(constructs):
protocol.transfer(well, transformation_wells[i], "2.0:microliter",
dispense_speed="10:microliter/second",
mix_after=False,
new_group=det_new_group(i))
if well.name:
transformation_wells[i].set_name(well.name)
else:
transformation_wells[i].set_name('construct_%s' % (i+1))
# NEED to confirm second de-seal is working OR move to cover/uncover 96-flat
protocol.seal(transformation_plate)
protocol.incubate(transformation_plate, "cold_4", "20:minute", shaking=False, co2=0)
protocol.unseal(transformation_plate)
protocol.dispense_full_plate( transformation_plate, 'soc', '50:microliter' )
protocol.seal(transformation_plate)
protocol.incubate(transformation_plate, "warm_37", "10:minute", shaking=True)
protocol.unseal(transformation_plate)
# spread on agar plates
# kan "ki17rs7j799zc2"
# amp "ki17sbb845ssx9"
# specto "ki17sbb9r7jf98"
# cm "ki17urn3gg8tmj"
# "noAB" "ki17reefwqq3sq"
agar_plates = []
agar_wells = WellGroup([])
for well in range(0, len(transformation_wells), 6):
agar_name = "agar-%s_%s" % (len(agar_plates), printdatetime(time=False))
agar_plate = ref_kit_container(protocol, agar_name, "6-flat", "ki17rs7j799zc2", discard=False, store='cold_4')
agar_plates.append(agar_plate)
for i, w in enumerate(transformation_wells[well:well + 6]):
protocol.spread(w, agar_plate.well(i), "100:microliter")
agar_wells.append(agar_plate.well(i).set_name(w.name))
for agar_p in agar_plates:
protocol.incubate( agar_p, 'warm_37', '12:hour' )
protocol.image_plate( agar_p, mode='top', dataref=agar_p.name )
# return agar plates to end protocol
return agar_plates
if __name__ == '__main__':
from autoprotocol.harness import run
run(transform, 'Transform')
| dacarlin/TSkunkel | transform.py | Python | mit | 2,849 |
#!/usr/bin/env python3
##
# Copyright (c) 2007 Apple Inc.
#
# This is the MIT license. This software may also be distributed under the
# same terms as Python (the PSF license).
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
##
import sys
import os
import getopt
import xattr
import zlib
def usage(e=None):
if e:
print(e)
print("")
name = os.path.basename(sys.argv[0])
print("usage: %s [-lz] file [file ...]" % (name,))
print(" %s -p [-lz] attr_name file [file ...]" % (name,))
print(" %s -w [-z] attr_name attr_value file [file ...]" % (name,))
print(" %s -d attr_name file [file ...]" % (name,))
print("")
print("The first form lists the names of all xattrs on the given file(s).")
print("The second form (-p) prints the value of the xattr attr_name.")
print("The third form (-w) sets the value of the xattr attr_name to attr_value.")
print("The fourth form (-d) deletes the xattr attr_name.")
print("")
print("options:")
print(" -h: print this help")
print(" -l: print long format (attr_name: attr_value)")
print(" -z: compress or decompress (if compressed) attribute value in zip format")
if e:
sys.exit(64)
else:
sys.exit(0)
class NullsInString(Exception):
"""Nulls in string."""
_FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
def _dump(src, length=16):
result=[]
for i in range(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(_FILTER)
result.append("%04X %-*s %s\n" % (i, length*3, hexa, printable))
return ''.join(result)
def main():
try:
(optargs, args) = getopt.getopt(sys.argv[1:], "hlpwdz", ["help"])
except getopt.GetoptError as e:
usage(e)
attr_name = None
long_format = False
read = False
write = False
delete = False
compress = lambda x: x
decompress = compress
status = 0
for opt, arg in optargs:
if opt in ("-h", "--help"):
usage()
elif opt == "-l":
long_format = True
elif opt == "-p":
read = True
if write or delete:
usage("-p not allowed with -w or -d")
elif opt == "-w":
write = True
if read or delete:
usage("-w not allowed with -p or -d")
elif opt == "-d":
delete = True
if read or write:
usage("-d not allowed with -p or -w")
elif opt == "-z":
compress = zlib.compress
decompress = zlib.decompress
if write or delete:
if long_format:
usage("-l not allowed with -w or -p")
if read or write or delete:
if not args:
usage("No attr_name")
attr_name = args.pop(0)
if write:
if not args:
usage("No attr_value")
attr_value = args.pop(0)
if len(args) > 1:
multiple_files = True
else:
multiple_files = False
for filename in args:
        def onError(e):
            nonlocal status
if not os.path.exists(filename):
sys.stderr.write("No such file: %s\n" % (filename,))
else:
sys.stderr.write(str(e) + "\n")
status = 1
try:
attrs = xattr.xattr(filename)
except (IOError, OSError) as e:
onError(e)
continue
if write:
try:
attrs[attr_name] = compress(attr_value)
except (IOError, OSError) as e:
onError(e)
continue
elif delete:
try:
del attrs[attr_name]
except (IOError, OSError) as e:
onError(e)
continue
except KeyError:
onError("No such xattr: %s" % (attr_name,))
continue
else:
try:
if read:
attr_names = (attr_name,)
else:
attr_names = list(attrs.keys())
except (IOError, OSError) as e:
onError(e)
continue
if multiple_files:
file_prefix = "%s: " % (filename,)
else:
file_prefix = ""
for attr_name in attr_names:
try:
try:
attr_value = decompress(attrs[attr_name])
except zlib.error:
attr_value = attrs[attr_name]
except KeyError:
onError("%sNo such xattr: %s" % (file_prefix, attr_name))
continue
if long_format:
try:
if attr_value.find('\0') >= 0:
raise NullsInString;
print("".join((file_prefix, "%s: " % (attr_name,), attr_value)))
except (UnicodeDecodeError, NullsInString):
print("".join((file_prefix, "%s:" % (attr_name,))))
print(_dump(attr_value))
else:
if read:
print("".join((file_prefix, attr_value)))
else:
print("".join((file_prefix, attr_name)))
sys.exit(status)
if __name__ == "__main__":
main()
| brainysmurf/xattr3 | xattr/tool.py | Python | mit | 6,510 |
# -*- coding: UTF-8 -*-
from datetime import datetime
from threading import Timer
from queue import Queue
import uuid
import logging
#Fallbacl for python < 3.3
try:
from time import perf_counter
except ImportError:
from time import clock as perf_counter
log = logging.getLogger(__name__)
class _Task:
_processing_time = 10
_scheduler = None
def __init__(self, function, due=None, interval=None, repeat=0):
self._function = function
if hasattr(due, '__iter__'):
self._due_iter = iter(due)
self._due = self._due_iter.__next__()
else:
self._due_iter = None
self._due = due
self._interval = interval
self._repeat = repeat
if not (self._due or self._interval):
raise ValueError
def __call__(self, *args, job_uuid=None, **kwargs):
start = perf_counter()
result = self._function(*args, **kwargs)
self._processing_time = perf_counter() - start
if self._scheduler:
del self._scheduler._scheduled[job_uuid]
if self._interval and self._repeat != 1:
if self._repeat > 0:
self._repeat -= 1
self._scheduler.schedule(self, *args, job_uuid=job_uuid, **kwargs)
if self._due_iter:
self._due = self._due_iter.__next__()
if self._due:
self._scheduler.schedule(self, *args, job_uuid=job_uuid, **kwargs)
return result
def __get__(self, obj, type=None):
if obj is None:
return self
new_func = self._function.__get__(obj, type)
return self.__class__(new_func, self._due_iter or self._due, self._interval, self._repeat)
class Task:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, function):
return _Task(function, *self.args, **self.kwargs)
class Scheduler:
_queue = Queue()
_scheduled = dict()
def __init__(self):
pass
def schedule(self, function, *args, job_uuid=None, **kwargs):
if isinstance(function, _Task):
if not job_uuid:
job_uuid = uuid.uuid4()
kwargs['job_uuid'] = job_uuid
function._scheduler = self
if function._interval:
timer = Timer(function._interval, function, args, kwargs)
else:
remainder = (function._due - datetime.now()).total_seconds()
timer = Timer(remainder - function._processing_time, function, args, kwargs)
self._scheduled[job_uuid] = timer
timer.start()
return job_uuid
else:
            self._queue.put((function, args, kwargs))
def cancel(self, job_uuid=None):
if job_uuid:
self._scheduled[job_uuid].cancel()
del self._scheduled[job_uuid]
else:
            for job_uuid in list(self._scheduled):
self._scheduled[job_uuid].cancel()
del self._scheduled[job_uuid]
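# Illustrative usage (a sketch, not part of the module): run a function every
# 5 seconds, three times in total, using the Task decorator defined above.
#
#   @Task(interval=5, repeat=3)
#   def ping():
#       print('ping')
#
#   scheduler = Scheduler()
#   job_id = scheduler.schedule(ping)
#   # ... later, to stop it early:
#   scheduler.cancel(job_id)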
| weddige/moneypenny | pywcl/scheduler/__init__.py | Python | mit | 3,088 |
# These tests are all based on the tutorial at http://killer-web-development.com/
# if registration is successful this may work but lets
# try and get user logged in first
from functional_tests import FunctionalTest, ROOT, USERS
import time
from selenium.webdriver.support.ui import WebDriverWait
class AddBasicAction (FunctionalTest):
def setUp(self):
self.url = ROOT + '/default/user/login'
get_browser=self.browser.get(self.url)
username = WebDriverWait(self, 10).until(lambda self : self.browser.find_element_by_name("username"))
username.send_keys(USERS['USER2'])
password = self.browser.find_element_by_name("password")
password.send_keys(USERS['PASSWORD2'])
submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
submit_button.click()
time.sleep(1)
self.url = ROOT + '/submit/new_question/action'
get_browser=self.browser.get(self.url)
time.sleep(2)
def test_can_view_submit_page(self):
response_code = self.get_response_code(self.url)
self.assertEqual(response_code, 200)
def test_has_right_title(self):
title = self.browser.title
self.assertEqual('Networked Decision Making', title)
def test_has_right_heading(self):
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Submit Action', body.text)
def test_question(self):
time.sleep(2) #still getting blank category for some reason but not if loaded manually
#questiontext = self.browser.find_element_by_name('questiontext')
questiontext = WebDriverWait(self, 10).until(lambda self : self.browser.find_element_by_name('questiontext'))
questiontext.send_keys("Lets get this done")
submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
submit_button.click()
time.sleep(1)
welcome_message = self.browser.find_element_by_css_selector(".flash")
self.assertEqual(u'Details Submitted\n\xd7', welcome_message.text)
| NewGlobalStrategy/NetDecisionMaking | fts/test_addaction.py | Python | mit | 2,203 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-21 01:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0002_contact'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='alternate_email',
field=models.EmailField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='contact',
name='alternate_phone',
field=models.CharField(blank=True, max_length=50),
),
migrations.AlterField(
model_name='contact',
name='phone',
field=models.CharField(blank=True, max_length=50),
),
]
| fernandolobato/balarco | clients/migrations/0003_auto_20170221_0107.py | Python | mit | 804 |
#!/usr/bin/env python
from blob import Blob
from foreground_processor import ForegroundProcessor
import cv2
import operator
import rospy
from blob_detector.msg import Blob as BlobMsg
from blob_detector.msg import Blobs as BlobsMsg
import numpy as np
class BlobDetector(ForegroundProcessor):
def __init__(self, node_name):
super(BlobDetector, self).__init__(node_name)
self.pub = rospy.Publisher('/blobs', BlobsMsg)
def find_blobs(self, rgbd):
mask = rgbd.depth_mask_sm
contours0 = cv2.findContours( mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0[0]]
blobs = [Blob(contour=c, source_rgbd=rgbd) for c in contours]
blobs = [b for b in blobs if b.area > 800] # filter
[b.compute_params() for b in blobs] # cpu intensive initialization
return blobs
def process_depth_mask_image(self, rgbd):
blobs = self.find_blobs(rgbd)
#for blob in blobs:
# blob.set_world_coordinates_from_depth(rgbd.depth_raw)
self.process_blobs(blobs, rgbd)
def publish_blobs(self, blobs):
blobs_msg = BlobsMsg()
for blob in blobs:
blob_msg = blob.to_msg()
blobs_msg.blobs.append(blob_msg)
self.pub.publish(blobs_msg)
def show_blobs(self, blobs, rgbd):
for blob in blobs:
blob.draw(rgbd.depth_color_sm)
self.show_depth_color(rgbd)
def process_blobs(self, blobs, rgbd):
self.publish_blobs(blobs)
        self.show_blobs(blobs, rgbd)
if __name__ == '__main__':
bd = BlobDetector('fg')
bd.run()
| light-swarm/blob_detector | scripts/blob_detector_.py | Python | mit | 1,685 |
import ctypes
import numpy as np
import os
libpath = os.path.dirname(os.path.realpath(__file__))
lib = ctypes.cdll.LoadLibrary(os.path.join(libpath, 'libscatt_bg.so'))
scatt_bg_c = lib.scatt_bg
scatt_bg_c.restype = ctypes.c_void_p # reset return types. default is c_int
scatt_bg_c.argtypes = [ctypes.c_double, ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_int]
subtend_c = lib.subtend
subtend_c.restype = ctypes.c_double # reset return types. default is c_int
subtend_c.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double]
def scatt_bg(kev, Z_max = 98, theta_max = 90):
# kev = np.asarray(kev)
out = np.zeros(Z_max*theta_max)
# Z_max = np.asarray(Z_max)
# theta_max = np.asarray(theta_max)
scatt_bg_c(ctypes.c_double(kev),out.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),ctypes.c_int(Z_max),ctypes.c_int(theta_max))
return out
def subtend(theta0,theta1,beta0,beta1):
    return subtend_c(ctypes.c_double(np.radians(theta0)), ctypes.c_double(np.radians(theta1)), ctypes.c_double(np.radians(beta0)), ctypes.c_double(np.radians(beta1)))
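# Illustrative usage (a sketch; the energy value is arbitrary and the Z-major
# ordering of the flat output is an assumption about the C routine):
#
#   bg = scatt_bg(17.4, Z_max=98, theta_max=90).reshape(98, 90)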
| gromitsun/sim-xrf-py | others/scatt_bg/scatt_bg_c.py | Python | mit | 1,042 |
"""Tests for forms in eCommerce app."""
from django.test import TestCase
from ecommerce.forms import OrderForm
required_fields = {
'phone': '123456789',
'email': '[email protected]',
}
invalid_form_email = {
'email': 'clearly!not_@_email',
'phone': '123456789'
}
no_phone = {'email': '[email protected]'}
class TestForm(TestCase):
"""Test suite for forms in eCommerce app."""
def test_empty_form(self):
"""Empty form shouldn't be valid."""
form = OrderForm()
self.assertFalse(form.is_valid())
def test_filled_form_without_required_field(self):
"""Form is still not valid, if there are some required fields left unfilled."""
form = OrderForm(data=no_phone)
self.assertFalse(form.is_valid())
def test_valid_form(self):
"""Form is valid, if there all required fields are filled."""
form = OrderForm(data=required_fields)
self.assertTrue(form.is_valid())
def test_from_validation_on_email_field(self):
"""Form should validate user's email if it is filled."""
form = OrderForm(data=invalid_form_email)
self.assertFalse(form.is_valid())
| fidals/refarm-site | tests/ecommerce/tests_forms.py | Python | mit | 1,163 |
## Automatically adapted for numpy.oldnumeric Apr 14, 2008 by -c
from builtins import range
def writeMeshMatlabFormat(mesh,meshFileBase):
"""
build array data structures for matlab finite element mesh representation
and write to a file to view and play with in matlatb
in matlab can then print mesh with
pdemesh(p,e,t)
where
p is the vertex or point matrix
e is the edge matrix, and
t is the element matrix
points matrix is [2 x num vertices]
format :
row 1 = x coord,
row 2 = y coord for nodes in mesh
edge matrix is [7 x num edges]
format:
row 1 = start vertex number
row 2 = end vertex number
row 3 = start value in edge parameterization, should be 0
row 4 = end value in edge parameterization, should be 1
row 5 = global edge id, base 1
row 6 = subdomain on left? always 1 for now
row 7 = subdomain on right? always 0 for now
element matrix is [4 x num elements]
row 1 = vertex 1 global number
row 2 = vertex 2 global number
row 3 = vertex 3 global number
row 4 = triangle subdomain number
where 1,2,3 is a local counter clockwise numbering of vertices in
triangle
"""
import numpy as numpy
matlabBase = 1
p = numpy.zeros((2,mesh['nNodes_global']),numpy.float_)
e = numpy.zeros((7,mesh['nElementBoundaries_global']),numpy.float_)
t = numpy.zeros((4,mesh['nElements_global']),numpy.float_)
#load p,e,t and write file
mfile = open(meshFileBase+'.m','w')
mfile.write('p = [ ... \n')
for nN in range(mesh['nNodes_global']):
p[0,nN]=mesh['nodeArray'][nN,0]
p[1,nN]=mesh['nodeArray'][nN,1]
mfile.write('%g %g \n' % tuple(p[:,nN]))
mfile.write(']; \n')
mfile.write("p = p\';\n") #need transpose for matlab
mfile.write('e = [ ... \n')
for ebN in range(mesh['nElementBoundaries_global']):
e[0,ebN]=mesh['elementBoundaryNodesArray'][ebN,0] + matlabBase #global node number of start node base 1
e[1,ebN]=mesh['elementBoundaryNodesArray'][ebN,1] + matlabBase #global node number of end node base 1
e[2,ebN]=0.0 #edge param. is 0 to 1
e[3,ebN]=1.0
e[4,ebN]=ebN + matlabBase #global edge number base 1
e[5,ebN]=0 #subdomain to left
e[6,ebN]=1 #subdomain to right
mfile.write('%g %g %g %g %g %g %g \n' % tuple(e[:,ebN]))
mfile.write(']; \n')
mfile.write("e = e\';\n") #need transpose for matlab
#write triangles last
mfile.write('t = [ ... \n')
for eN in range(mesh['nElements_global']):
t[0,eN]=mesh['elementNodesArray'][eN,0]+matlabBase #global node number for vertex 0
t[1,eN]=mesh['elementNodesArray'][eN,1]+matlabBase #global node number for vertex 0
t[2,eN]=mesh['elementNodesArray'][eN,2]+matlabBase #global node number for vertex 0
t[3,eN]=1 #subdomain id
mfile.write('%g %g %g %g \n' % tuple(t[:,eN]))
mfile.write(']; \n');
mfile.write("t = t\';\n") #need transpose for matlab
mfile.close()
return p,e,t
########################################################################
if __name__ == '__main__':
import os,shelve
import ppmatlab,numpy.oldnumeric as numpy
os.listdir('./results')
filename = './results/re_forsyth2_ss_2d_pre_forsyth2_ss_2d_c0p1_n_mesh_results.dat'
res = shelve.open(filename)
mesh = res['mesh']
mmfile = 'forsyth2MeshMatlab'
p,e,t = ppmatlab.writeMeshMatlabFormat(mesh,mmfile)
| erdc/proteus | scripts/ppmatlab.py | Python | mit | 3,523 |
def test_root(client):
response = client.get('/')
assert response.status_code == 200
assert response.data.decode('utf-8') == 'Hey enlil'
| EliRibble/enlil | tests/api/test_root.py | Python | mit | 150 |
import os.path
from subprocess import call
class InstallerTools(object):
@staticmethod
def update_environment(file_path,environment_path):
update_file = open(file_path, 'r')
original_lines = update_file.readlines()
original_lines[0] = environment_path+'\n'
update_file.close()
update_file = open(file_path, 'w')
for lines in original_lines:
update_file.write(lines)
update_file.close()
@staticmethod
def fix_migrate(base_directory):
print "\nFixing the migrate bug \n"
buggy_path = os.path.join(base_directory,
'env/lib/python2.7/site-packages/migrate/versioning/schema.py')
buggy_file = open(buggy_path,'r')
original_lines = buggy_file.readlines()
original_lines[9] = "from sqlalchemy import exc as sa_exceptions\n"
buggy_file.close()
update_file = open(buggy_path,'w')
for lines in original_lines:
update_file.write(lines)
update_file.close()
@staticmethod
def refresh_environment(framework_config):
InstallerTools.update_environment(framework_config.yard_path,framework_config.environment_path)
InstallerTools.update_environment(framework_config.blow_path,framework_config.environment_path)
InstallerTools.update_environment(framework_config.try_path,framework_config.environment_path)
@staticmethod
def change_permissions(framework_config):
call(['chmod', 'a+x', framework_config.yard_path])
call(['chmod', 'a+x', framework_config.blow_path])
call(['chmod', 'a+x', framework_config.try_path])
@staticmethod
def create_db_directory(base_directory):
if not os.path.exists(os.path.join(base_directory, 'storage/')):
os.makedirs(os.path.join(base_directory, 'storage/'))
@staticmethod
def create_virtual_environment(framework_config):
call(['python', framework_config.v_path, framework_config.environment_name])
InstallerTools.refresh_environment(framework_config)
InstallerTools.change_permissions(framework_config)
| femmerling/backyard | builder/installer_tools.py | Python | mit | 1,906 |
# coding: utf-8
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("0.0.0.0", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
| telminov/raspberry-car | frontend/ui.py | Python | mit | 229 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'toolsforbiology.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| washimimizuku/tools-for-biology | toolsforbiology/urls.py | Python | mit | 306 |
from django.conf import settings
from django.db import models
from .ticket import Ticket
class Attachment(models.Model):
"""Ticket attachment model."""
ticket = models.ForeignKey(
Ticket, blank=False, related_name='attachments', db_index=True,
on_delete=models.DO_NOTHING)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=False, db_index=True,
on_delete=models.DO_NOTHING)
upload = models.FileField(upload_to='attachments/%Y/%m/%d', max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
@classmethod
def filter_by_user(cls, user, queryset=None):
"""Returns any user accessible attachments.
Ones he has access to through the tickets.
"""
if queryset is None:
queryset = cls.objects
return queryset.filter(ticket__in=Ticket.filter_by_user(user))
| occrp/id-backend | api_v3/models/attachment.py | Python | mit | 887 |
import unittest
from juggling.notation import siteswap
class SiteswapUtilsTests(unittest.TestCase):
def test_siteswap_char_to_int(self):
self.assertEqual(siteswap.siteswap_char_to_int('0'), 0)
self.assertEqual(siteswap.siteswap_char_to_int('1'), 1)
self.assertEqual(siteswap.siteswap_char_to_int('a'), 10)
self.assertEqual(siteswap.siteswap_char_to_int('f'), 15)
self.assertEqual(siteswap.siteswap_char_to_int('z'), 35)
def test_invalid_char(self):
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, [3])
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, 10)
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, '#')
self.assertRaises(ValueError, siteswap.siteswap_char_to_int, 'multichar')
def test_siteswap_int_to_char(self):
self.assertEqual(siteswap.siteswap_int_to_char(9), '9')
self.assertEqual(siteswap.siteswap_int_to_char(0), '0')
self.assertEqual(siteswap.siteswap_int_to_char(10), 'a')
self.assertEqual(siteswap.siteswap_int_to_char(15), 'f')
self.assertEqual(siteswap.siteswap_int_to_char(35), 'z')
def test_invalid_int(self):
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, ['3'])
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, 'a')
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, 36)
self.assertRaises(ValueError, siteswap.siteswap_int_to_char, -1)
class SiteSwapSyntaxValidationTests(unittest.TestCase):
def test_valid_syntax(self):
solo_patterns = [
'441',
'(6x,4)(4,6x)',
'(6x,4)*',
'[64]020',
'[33](3,3)123',
'(4,2)(2x,[44x])',
]
for pattern in solo_patterns:
self.assertTrue(siteswap.is_valid_siteswap_syntax(pattern))
passing_patterns = [
('<4p|3><2|3p>', 2),
('<2|3p><2p|3><[3p22]|3p><3|3>', 2),
('<(2p3,4x)|(2xp3,4p1)|(2xp2,4xp2)>', 3)
]
for pattern, num_jugglers in passing_patterns:
self.assertTrue(siteswap.is_valid_siteswap_syntax(pattern, num_jugglers))
def test_return_match(self):
import re
sre_match_object = type(re.match('', ''))
self.assertTrue(siteswap.is_valid_siteswap_syntax('441', return_match=False))
_, match = siteswap.is_valid_siteswap_syntax('441', return_match=True)
self.assertIsInstance(match, sre_match_object)
_, match = siteswap.is_valid_siteswap_syntax('###', return_match=True)
self.assertIsNone(match)
def test_invalid_syntax(self):
solo_patterns = [
'#!j',
'((3232,3)',
'(3232,3))',
'[(3232,3)])',
]
for pattern in solo_patterns:
self.assertFalse(siteswap.is_valid_siteswap_syntax(pattern))
| PacketPerception/pyjuggling | tests/siteswap_tests.py | Python | mit | 2,919 |
#!/usr/bin/env python
"""Run pytest with coverage and generate an html report."""
from sys import argv
from os import system as run
# To run a specific file with debug logging prints:
# py -3 -m pytest test_can.py --log-cli-format="%(asctime)s.%(msecs)d %(levelname)s: %(message)s (%(filename)s:%(lineno)d)" --log-cli-level=debug
def main(): # noqa
run_str = 'python -m coverage run --include={} --omit=./* -m pytest {} {}'
arg = ''
# All source files included in coverage
includes = '../*'
if len(argv) >= 2:
arg = argv[1]
if ':' in argv[1]:
includes = argv[1].split('::')[0]
other_args = ' '.join(argv[2:])
run(run_str.format(includes, arg, other_args))
# Generate the html coverage report and ignore errors
run('python -m coverage html -i')
if __name__ == '__main__':
main()
| cmcerove/pyvxl | pyvxl/tests/run.py | Python | mit | 884 |
from copy import deepcopy
class IModel():
def __init__(self):
self.list = []
def __getitem__(self, index):
'''
Getter for the [] operator
'''
if index >= len(self.list):
raise IndexError("Index out of range.")
return self.list[index]
def __setitem__(self, index, value):
'''
Setter for the [] operator
'''
self.list[index] = value
def extend(self, val):
self.list.extend([0] * val)
def append(self, val):
self.list.append(val)
def __iter__(self):
i = 0
while i < len(self.list):
yield self.list[i]
i += 1
def __len__(self):
return len(self.list)
def __delitem__(self, index):
del self.list[index]
return
def gnomeSort(list):
i = 0
n = len(list)
cpy = deepcopy(list)
while i < n:
if i and cpy[i] < cpy[i-1]:
cpy[i], cpy[i-1] = cpy[i-1], cpy[i]
i -= 1
else:
i += 1
return cpy
def _filter(list, f):
result = []
for e in list:
if f(e):
result.append(e)
return result
if __name__ == "__main__":
a = IModel()
a.extend(4)
print(a.list)
a[0] = 15
a[1] = 10
a[2] = 5
a[3] = 3
for i in a:
print(i)
print("-----------")
for i in gnomeSort(a):
print(i)
print("-----------")
for i in _filter(a, lambda x: x % 5):
        print(i)
| Zephyrrus/ubb | YEAR 1/SEM1/FP/LAB/l6-l9/Domain/IModel.py | Python | mit | 1,500 |
#
# Copyright (C) 2011 - 2015 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
# Some XML modules may be missing and Base.{load,dumps}_impl are not overriden:
# pylint: disable=import-error
"""XML files parser backend, should be available always.
.. versionchanged:: 0.1.0
Added XML dump support.
- Format to support: XML, e.g. http://www.w3.org/TR/xml11/
- Requirements: one of the followings
- lxml2.etree if available
- xml.etree.ElementTree in standard lib if python >= 2.5
- elementtree.ElementTree (otherwise)
- Limitations:
- '<prefix>attrs', '<prefix>text' and '<prefix>children' are used as special
parameter to keep XML structure of original data. You have to cusomize
<prefix> (default: '@') if any config parameters conflict with some of
them.
- Some data or structures of original XML file may be lost if make it backed
to XML file; XML file - (anyconfig.load) -> config - (anyconfig.dump) ->
XML file
- XML specific features (namespace, etc.) may not be processed correctly.
- Special Options: None supported
"""
from __future__ import absolute_import
from io import BytesIO
import sys
import anyconfig.backend.base
import anyconfig.compat
try:
    # First, try lxml which is compatible with elementtree and looks a lot
    # faster. See also: http://getpython3.com/diveintopython3/xml.html
from lxml2 import etree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
_PARAM_PREFIX = "@"
# It seems that ET.ElementTree.write() cannot process a parameter
# 'xml_declaration' in older python < 2.7:
_IS_OLDER_PYTHON = sys.version_info[0] < 3 and sys.version_info[1] < 7
def etree_to_container(root, cls, pprefix=_PARAM_PREFIX):
"""
Convert XML ElementTree to a collection of container objects.
:param root: etree root object or None
:param cls: Container class
:param pprefix: Special parameter name prefix
"""
(attrs, text, children) = [pprefix + x for x in ("attrs", "text",
"children")]
tree = cls()
if root is None:
return tree
tree[root.tag] = cls()
if root.attrib:
tree[root.tag][attrs] = cls(anyconfig.compat.iteritems(root.attrib))
if root.text and root.text.strip():
tree[root.tag][text] = root.text.strip()
if len(root): # It has children.
# Note: Configuration item cannot have both attributes and values
# (list) at the same time in current implementation:
tree[root.tag][children] = [etree_to_container(c, cls, pprefix)
for c in root]
return tree
def container_to_etree(obj, cls, parent=None, pprefix=_PARAM_PREFIX):
"""
Convert a container object to XML ElementTree.
:param obj: Container instance to convert to
:param cls: Container class
:param parent: XML ElementTree parent node object or None
:param pprefix: Special parameter name prefix
"""
if not isinstance(obj, (cls, dict)):
return # All attributes and text should be set already.
(attrs, text, children) = [pprefix + x for x in ("attrs", "text",
"children")]
for key, val in anyconfig.compat.iteritems(obj):
if key == attrs:
for attr, aval in anyconfig.compat.iteritems(val):
parent.set(attr, aval)
elif key == text:
parent.text = val
elif key == children:
for child in val: # child should be a dict-like object.
for ckey, cval in anyconfig.compat.iteritems(child):
celem = ET.Element(ckey)
container_to_etree(cval, cls, celem, pprefix)
parent.append(celem)
else:
elem = ET.Element(key)
container_to_etree(val, cls, elem, pprefix)
return ET.ElementTree(elem)
def etree_write(tree, stream):
"""
Write XML ElementTree `root` content into `stream`.
:param tree: XML ElementTree object
:param stream: File or file-like object can write to
"""
if _IS_OLDER_PYTHON:
tree.write(stream, encoding='UTF-8')
else:
tree.write(stream, encoding='UTF-8', xml_declaration=True)
class Parser(anyconfig.backend.base.D2Parser):
"""
Parser for XML files.
"""
_type = "xml"
_extensions = ["xml"]
_open_flags = ('rb', 'wb')
def load_from_string(self, content, **kwargs):
"""
Load config from XML snippet (a string `content`).
:param content: XML snippet (a string)
:param kwargs: optional keyword parameters passed to
:return: self.container object holding config parameters
"""
root = ET.ElementTree(ET.fromstring(content)).getroot()
return etree_to_container(root, self.container)
def load_from_path(self, filepath, **kwargs):
"""
:param filepath: XML file path
:param kwargs: optional keyword parameters to be sanitized :: dict
:return: self.container object holding config parameters
"""
root = ET.parse(filepath).getroot()
return etree_to_container(root, self.container)
def load_from_stream(self, stream, **kwargs):
"""
:param stream: XML file or file-like object
:param kwargs: optional keyword parameters to be sanitized :: dict
:return: self.container object holding config parameters
"""
return self.load_from_path(stream, **kwargs)
def dump_to_string(self, cnf, **kwargs):
"""
:param cnf: Configuration data to dump :: self.container
:param kwargs: optional keyword parameters
:return: string represents the configuration
"""
tree = container_to_etree(cnf, self.container)
buf = BytesIO()
etree_write(tree, buf)
return buf.getvalue()
def dump_to_stream(self, cnf, stream, **kwargs):
"""
:param cnf: Configuration data to dump :: self.container
:param stream: Config file or file like object write to
:param kwargs: optional keyword parameters
"""
tree = container_to_etree(cnf, self.container)
etree_write(tree, stream)
# vim:sw=4:ts=4:et:
| pmquang/python-anyconfig | anyconfig/backend/xml.py | Python | mit | 6,370 |
"""
Hannah Aizenman
10/13/2013
Generates a random subset of size 10^P for p in [1,MAX_P) from [0, 10^8)
"""
import random
MAX_P = 8
max_value = 10**MAX_P
large_set = range(max_value)
for p in xrange(1,MAX_P):
print "list of size: 10^{0}".format(p)
f = open("p{0}.txt".format(p), 'w')
sample = random.sample(large_set, 10**p)
f.write("\n".join(map(lambda x: str(x), sample)))
f.close()
| story645/hpcc | set_partition/sets/gensets.py | Python | mit | 414 |
#!/usr/bin/env python
#
# $Id: get_code_stats.py 9318 2011-06-10 02:37:10Z nathan_george $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import sys
import os
import re
import optparse
import math
buildscriptDir = os.path.dirname(__file__)
buildscriptDir = os.path.abspath(os.path.join(buildscriptDir, os.path.pardir))
sys.path.append(buildscriptDir)
import sandbox
import codescan
import xmail
import metadata
from ioutil import *
EXT_PAT = metadata.INTERESTING_EXT_PAT
FROM = 'Code Stat Scanner <[email protected]>'
parser = optparse.OptionParser('Usage: %prog [options] [folder]\n\nCompiles stats about a code base; optionally emails report.')
xmail.addMailOptions(parser)
def getRelevantPaths(p):
relevant = []
if not p.endswith('/'):
relevant.append(p)
while p:
i = p.rfind('/')
if i == -1:
relevant.append('')
break
else:
p = p[0:i+1]
relevant.append(p)
p = p[0:-1]
return relevant
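# For example (illustrative): getRelevantPaths('a/b/c.py') returns
# ['a/b/c.py', 'a/b/', 'a/', ''] -- the file itself plus every ancestor folder path.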
def getValuesKeyName(key):
return '[' + key + ']'
def isValuesKeyName(key):
return key[0] == '['
class StatsHolder:
def __init__(self, rootPath):
rootPath = norm_folder(rootPath)
self.rootPath = rootPath
self.statsByPath = {}
self.statsByExtension = {}
def getSandboxName(self):
i = self.rootPath.find('/sandboxes/')
if i != -1:
x = self.rootPath[i + 11:]
i = x.find('/code')
if i > -1:
x = x[0:i]
i = x.rfind('/')
if i > -1:
x = x[0:i]
return x
else:
return self.rootPath
def getRelativePath(self, path):
endsWithSlash = path.endswith('/')
path = os.path.abspath(path).replace('\\', '/')
# abspath() removes trailing slash; undo
if endsWithSlash and path[-1] != '/':
path = path + '/'
return path[len(self.rootPath):]
def addStat(self, path, statName, number):
shouldAggregate = not path.endswith('/')
if shouldAggregate:
k = getValuesKeyName(statName)
dict = self.statsByExtension
ignored, ext = os.path.splitext(path)
#print('ext = %s' % ext)
#sys.exit(0)
if not ext in dict:
dict[ext] = {}
dict = dict[ext]
if not statName in dict:
dict[statName] = number
dict[k] = [number]
else:
dict[statName] = dict[statName] + number
dict[k].append(number)
relativePath = self.getRelativePath(path)
sbp = self.statsByPath
for p in getRelevantPaths(relativePath):
if not p in sbp:
sbp[p] = {}
dict = sbp[p]
if not statName in dict:
dict[statName] = number
if shouldAggregate:
#print('aggregating %s for %s', (k, p))
dict[k] = [number]
else:
dict[statName] = dict[statName] + number
if shouldAggregate:
dict[k].append(number)
_CPP_TESTNAME_PAT = re.compile(r'^\s*(SIMPLE_TEST\s*\(\s*(.*?)\s*\)|class\s+([a-zA-Z_0-9]+)\s*:\s*(public|protected|private)\s+[a-zA-Z_0-9]+Test)', re.MULTILINE | re.DOTALL)
_JAVA_TESTNAME_PAT = re.compile(r'^\s*public\s+void\s+([a-zA-Z_0-9]+)\s*\(', re.MULTILINE | re.DOTALL)
_PY_TESTNAME_PAT = re.compile(r'^\s*def test([a-zA-Z_0-9]+)\s*\(\s*self\s*\)\s*:', re.MULTILINE | re.DOTALL)
_CPP_CLASS_PAT = re.compile(r'^\s*(template\s*<.*?>\s*)?(class|struct|union)\s+([a-zA-Z_0-9]+)', re.MULTILINE | re.DOTALL)
_JAVA_CLASS_PAT = re.compile(r'^\s*((abstract|public|private|protected|static|final)\s+)*(class|interface)\s+([a-zA-Z_0-9]+)', re.MULTILINE | re.DOTALL)
_PY_CLASS_PAT = re.compile(r'^\s*class\s+([a-zA-Z_0-9]+).*?:', re.MULTILINE | re.DOTALL)
_TEST_FILE_PAT = re.compile(r'/test/', re.IGNORECASE)
_CLASS_PATS = [_CPP_CLASS_PAT, _JAVA_CLASS_PAT, _PY_CLASS_PAT]
_TESTNAME_PATS = [_CPP_TESTNAME_PAT, _JAVA_TESTNAME_PAT, _PY_TESTNAME_PAT]
def getFileTypeIndex(path):
path = path.lower()
if path.endswith('.cpp') or path.endswith('.h'):
return 0
elif path.endswith('.java'):
return 1
elif path.endswith('.py'):
return 2
return -1
def getClassPatForPath(path):
i = getFileTypeIndex(path)
if i != -1:
return _CLASS_PATS[i]
def getTestnamePatForPath(path):
i = getFileTypeIndex(path)
if i != -1:
return _TESTNAME_PATS[i]
def analyzeFile(fpath, stats):
fpath = os.path.abspath(fpath)
rel = stats.getRelativePath(fpath)
#print('analyzing %s' % rel)
txt = read_file(fpath)
byteCount = len(txt)
stats.addStat(fpath, 'byte count, impl + test', byteCount)
lineCount = codescan.getLineNumForOffset(txt, byteCount)
stats.addStat(fpath, 'line count, impl + test', lineCount)
isTest = bool(_TEST_FILE_PAT.search(fpath))
codeType = 'impl'
if isTest:
codeType = 'test'
stats.addStat(fpath, 'byte count, ' + codeType, byteCount)
stats.addStat(fpath, 'line count, ' + codeType, lineCount)
# See if we know how to do any further analysis on this file.
pat = getClassPatForPath(fpath)
if pat:
if isTest:
pat = getTestnamePatForPath(fpath)
if pat:
stats.addStat(fpath, 'unit test count', len(pat.findall(txt)))
else:
stats.addStat(fpath, 'class count', len(pat.findall(txt)))
def statPathIsFile(p):
i = p.rfind('.')
if i > -1:
return p[i+1:] in ['cpp','h','java','py']
return False
def statPathIsComponent(p):
return p == '' or (p.endswith('/') and p.find('/') == len(p) - 1)
_FLOAT_TYPE = type(0.1)
def getReportLine(key, number, showKB = False, formatSpecifier='%02f'):
numtxt = number
ntype = type(number)
if ntype == _FLOAT_TYPE:
numtxt = formatSpecifier % number
if numtxt.endswith('00'):
numtxt = numtxt[0:-3]
else:
numtxt = str(number)
line = '%s = %s' % (key, numtxt)
if showKB:
line += ' (%0.0f KB)' % (number / 1024.0)
return line
def getAggregateStats(dict, key):
values = dict.get(getValuesKeyName(key))
avg = mean(values)
stdev = stddev(values)
return avg, stdev
def describeTestRatio(ratio, multiplier = 1.0):
if ratio < 0.085 * multiplier:
lbl = 'POOR COVERAGE'
elif ratio < 0.20 * multiplier:
lbl = 'fair coverage'
elif ratio < 0.5 * multiplier:
lbl = 'good coverage'
else:
lbl = 'excellent coverage'
return '%0.2f (%s)' % (ratio, lbl)
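# For example (illustrative): describeTestRatio(0.3) returns '0.30 (good coverage)'
# with the default multiplier of 1.0.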
def generateReport(stats):
#print(stats.statsByPath)
report = ''
components = [p for p in stats.statsByPath.keys() if statPathIsComponent(p)]
files = [p for p in stats.statsByPath.keys() if statPathIsFile(p)]
components.sort()
files.sort()
uberDict = stats.statsByPath['']
avg, stdev = getAggregateStats(uberDict, 'byte count, impl')
tooBigs = {'': max(avg + 2.5 * stdev, 20000)}
avg, stdev = getAggregateStats(uberDict, 'line count, impl')
tooLongs = {'': max(avg + 2.5 * stdev, 1000)}
for ext in stats.statsByExtension.keys():
dict = stats.statsByExtension[ext]
avg, stdev = getAggregateStats(dict, 'byte count, impl')
tooBigs[ext] = avg + 2.5 * stdev
avg, stdev = getAggregateStats(dict, 'line count, impl')
tooLongs[ext] = max(avg + 2.5 * stdev, 1000)
for path in components:
desc = path
if desc == '':
desc = 'entire folder tree'
report += '\nStats for %s' % desc
dict = stats.statsByPath[path]
keys = [k for k in dict.keys() if not isValuesKeyName(k)]
keys.sort()
for key in keys:
showKB = key.startswith('byte')
report += '\n ' + getReportLine(key, dict[key], showKB)
if showKB or key.startswith('line'):
values = dict[getValuesKeyName(key)]
avg = mean(values)
report += '; ' + getReportLine('mean', avg, showKB, formatSpecifier='%0.0f')
report += '; ' + getReportLine('std dev', stddev(values), False, formatSpecifier='%0.1f')
classCount = dict.get('class count', 0)
unitTestCount = dict.get('unit test count', 0)
if unitTestCount:
implLineCount = dict.get('line count, impl', 0)
testLineCount = dict.get('line count, test', 0)
if implLineCount:
ratio = describeTestRatio(testLineCount / float(implLineCount))
report += '\n ' + getReportLine('test lines per impl line', ratio)
implByteCount = dict.get('byte count, impl', 0)
testByteCount = dict.get('byte count, test', 0)
if implByteCount:
ratio = describeTestRatio(testByteCount / float(implByteCount))
report += '\n ' + getReportLine('test bytes per impl byte', ratio)
if classCount:
ratio = describeTestRatio(float(unitTestCount) / classCount, 2.5)
else:
ratio = '(undefined; no classes)'
else:
ratio = 'NO UNIT TESTS!'
report += '\n ' + getReportLine('tests per class', ratio)
if path:
myFiles = [f for f in files if f.startswith(path)]
#testFiles = [f for f in myFiles if _TEST_FILE_PAT.search(f)]
#implFiles = [f for f in myFiles if not _TEST_FILE_PAT.search(f)]
tooComplex = []
for implF in myFiles:
ignored, ext = os.path.splitext(implF)
size = stats.statsByPath[implF].get('byte count, impl')
length = stats.statsByPath[implF].get('line count, impl')
if size > tooBigs[''] or size > tooBigs[ext] or length > tooLongs[''] or length > tooLongs[ext]:
tooComplex.append((implF, size, length))
if tooComplex:
# Java doesn't support partial classes, so splitting classes into multiple
# files isn't always practical. In C++ and python, however, there are good
# ways to split into smaller files.
if tooComplex[0][0].endswith('.java'):
comment = 'refactor suggested'
else:
comment = 'REFACTOR NEEDED'
report += '\n unusually complex files (%s):' % comment
for tc in tooComplex:
report += '\n %s (%0.0f KB, %d lines)' % (tc[0], tc[1] / 1024.0, tc[2])
report += '\n'
return report
def sum(numbers):
n = 0
for x in numbers:
n += x
return n
def mean(numbers):
return sum(numbers) / float(len(numbers))
def variance(numbers):
avg = mean(numbers)
diffsFromMean = [n - avg for n in numbers]
squaredDfm = [n * n for n in diffsFromMean]
variance = sum(squaredDfm) / len(numbers)
return variance
def stddev(numbers):
# This is a *population* stddev, not a sample stddev.
# The difference is that we assume we have all possible
# values, not just a representative sample.
return math.sqrt(variance(numbers))
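# Worked example (illustrative): stddev([2, 4, 4, 4, 5, 5, 7, 9]) == 2.0, since the
# population variance of that list is 32 / 8 = 4.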
class StatsRecurser:
def __init__(self, stats):
self.stats = stats
def select(self, folder, dirs):
self.stats.addStat(folder, "scanned subdir count", len(dirs))
return dirs
class StatsVisitor:
def __init__(self, stats):
self.stats = stats
def visit(self, folder, item, relativePath):
analyzeFile(folder + item, self.stats)
self.stats.addStat(folder, "scanned file count", 1)
def analyze(path, prebuilt, options):
if not os.path.isdir(path):
sys.stderr.write('%s is not a valid folder.\n' % path)
return 1
path = norm_folder(path)
stats = StatsHolder(path)
print('\nCompiling stats for %s...' % metadata.get_friendly_name_for_path(path))
visitor = StatsVisitor(stats)
recurser = StatsRecurser(stats)
visitedFiles, visitedFolders = metadata.visit(path, visitor, recurser, excludePrograms=True)#, debug=True)
report = generateReport(stats)
print(report)
if xmail.hasDest(options):
xmail.sendmail(report, subject='code stats for %s' % metadata.get_friendly_name_for_path(path),
sender='Code Stat Scanner <[email protected]>', options=options)
if __name__ == '__main__':
options, args = parser.parse_args()
prebuilt = []
if args:
folder = args[0]
else:
folder = sandbox.current.get_code_root()
exitCode = analyze(folder, prebuilt, options)
sys.exit(exitCode)
| perfectsearch/sandman | code/buildscripts/codescan/get_code_stats.py | Python | mit | 12,831 |
#==============================================================================
# Principles of the new `climlab` API design:
#
# * `climlab.Process` object has several iterable dictionaries of named,
# gridded variables:
#
# * `process.state`
#
# * state variables, usually time-dependent
#
# - `process.input`
# - boundary conditions and other gridded quantities independent of the
# `process`
# - often set by a parent `process`
# - `process.param` (which are basically just scalar `input`)
# - `process.tendencies`
# - iterable `dict` of time-tendencies (d/dt) for each state variable
# - `process.diagnostics`
# - any quantity derived from current state
# - The `process` is fully described by contents of `state`, `input` and `param`
# dictionaries. `tendencies` and `diagnostics` are always computable from current
# state.
# - `climlab` will remain (as much as possible) agnostic about the data formats
# - Variables within the dictionaries will behave as `numpy.ndarray` objects
# - Grid information and other domain details accessible as attributes
# of each variable
# - e.g. Tatm.lat
# - Shortcuts like `process.lat` will work where these are unambiguous
# - Many variables will be accessible as process attributes `process.name`
# - this restricts to unique field names in the above dictionaries
# - There may be other dictionaries that do have name conflicts
# - e.g. dictionary of tendencies, with same keys as `process.state`
# - These will *not* be accessible as `process.name`
# - but *will* be accessible as `process.dict_name.name`
# (as well as regular dict interface)
# - There will be a dictionary of named subprocesses `process.subprocess`
# - Each item in subprocess dict will itself be a `climlab.Process` object
# - For convenience with interactive work, each subprocess should be accessible
# as `process.subprocess.name` as well as `process.subprocess['name']`
# - `process.compute()` is a method that computes tendencies (d/dt)
# - returns a dictionary of tendencies for all state variables
# - keys for this dictionary are same as keys of state dictionary
# - tendency dictionary is the total tendency including all subprocesses
# - method only computes d/dt, does not apply changes
# - thus method is relatively independent of numerical scheme
# - may need to make exception for implicit scheme?
# - method *will* update variables in `process.diagnostic`
# - will also *gather all diagnostics* from `subprocesses`
# - `process.step_forward()` updates the state variables
# - calls `process.compute()` to get current tendencies
# - implements a particular time-stepping scheme
# - user interface is agnostic about numerical scheme
# - `process.integrate_years()` etc will automate time-stepping
# - also computation of time-average diagnostics.
# - Every `subprocess` should work independently of its parent `process` given
# appropriate `input`.
# - investigating an individual `process` (possibly with its own
# `subprocesses`) isolated from its parent needs to be as simple as doing:
# - `newproc = climlab.process_like(procname.subprocess['subprocname'])`
#
# - `newproc.compute()`
# - anything in the `input` dictionary of `subprocname` will remain fixed
#==============================================================================
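# A minimal usage sketch of the API described above (an illustrative assumption;
# the EBM model and method names follow the conventions listed in these notes):
#     model = climlab.EBM()           # a Process with state, input and param dicts
#     tendencies = model.compute()    # dict of d/dt for every state variable
#     model.step_forward()            # apply one timestep using those tendencies
#     model.integrate_years(1.0)      # automated time-stepping and time-mean diagnostics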
from __future__ import division, print_function
from builtins import object
import time, copy
import numpy as np
from climlab.domain.field import Field
from climlab.domain.domain import _Domain, zonal_mean_surface
from climlab.utils import walk
from attrdict import AttrDict
from climlab.domain.xarray import state_to_xarray
def _make_dict(arg, argtype):
if arg is None:
return {}
elif isinstance(arg, dict):
return arg
elif isinstance(arg, argtype):
return {'default': arg}
else:
raise ValueError('Problem with input type')
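# For example (illustrative): _make_dict(None, Field) -> {}, a bare Field instance
# becomes {'default': field}, and an existing dict is passed through unchanged.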
class Process(object):
"""A generic parent class for all climlab process objects.
Every process object has a set of state variables on a spatial grid.
For more general information about `Processes` and their role in climlab,
    see the :ref:`process_architecture` section of the climlab documentation.
**Initialization parameters** \n
An instance of ``Process`` is initialized with the following
arguments *(for detailed information see Object attributes below)*:
:param Field state: spatial state variable for the process.
Set to ``None`` if not specified.
:param domains: domain(s) for the process
:type domains: :class:`~climlab.domain.domain._Domain` or dict of
:class:`~climlab.domain.domain._Domain`
:param subprocess: subprocess(es) of the process
:type subprocess: :class:`~climlab.process.process.Process` or dict of
:class:`~climlab.process.process.Process`
:param array lat: latitudinal points (optional)
:param lev: altitudinal points (optional)
:param int num_lat: number of latitudional points (optional)
:param int num_levels:
number of altitudinal points (optional)
:param dict input: collection of input quantities
:param bool verbose: Flag to control text output during instantiation
of the Process [default: True]
**Object attributes** \n
Additional to the parent class :class:`~climlab.process.process.Process`
following object attributes are generated during initialization:
:ivar dict domains: dictionary of process :class:`~climlab.domain.domain._Domain`
:ivar dict state: dictionary of process states
(of type :class:`~climlab.domain.field.Field`)
:ivar dict param: dictionary of model parameters which are given
through ``**kwargs``
:ivar dict diagnostics: a dictionary with all diagnostic variables
:ivar dict _input_vars: collection of input quantities like boundary conditions
and other gridded quantities
:ivar str creation_date:
date and time when process was created
:ivar subprocess: dictionary of suprocesses of the process
:vartype subprocess: dict of :class:`~climlab.process.process.Process`
"""
def __str__(self):
str1 = 'climlab Process of type {0}. \n'.format(type(self))
str1 += 'State variables and domain shapes: \n'
for varname in list(self.state.keys()):
str1 += ' {0}: {1} \n'.format(varname, self.domains[varname].shape)
str1 += 'The subprocess tree: \n'
str1 += walk.process_tree(self, name=self.name)
return str1
def __init__(self, name='Untitled', state=None, domains=None, subprocess=None,
lat=None, lev=None, num_lat=None, num_levels=None,
input=None, verbose=True, **kwargs):
# verbose flag used to control text output at process creation time
self.verbose = verbose
self.name = name
# dictionary of domains. Keys are the domain names
self.domains = _make_dict(domains, _Domain)
# If lat is given, create a simple domains
if lat is not None:
sfc = zonal_mean_surface()
self.domains.update({'default': sfc})
# dictionary of state variables (all of type Field)
self.state = AttrDict()
states = _make_dict(state, Field)
for name, value in states.items():
self.set_state(name, value)
# dictionary of model parameters
self.param = kwargs
# dictionary of diagnostic quantities
#self.diagnostics = AttrDict()
#self._diag_vars = frozenset()
self._diag_vars = []
# dictionary of input quantities
#self.input = _make_dict(input, Field)
if input is None:
#self._input_vars = frozenset()
self._input_vars = []
else:
self.add_input(list(input.keys()))
            for name, var in input.items():
self.__dict__[name] = var
self.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z",
time.localtime())
# subprocess is a dictionary of any sub-processes
self.subprocess = AttrDict()
if subprocess is not None:
self.add_subprocesses(subprocess)
#if subprocess is None:
# #self.subprocess = {}
# # a dictionary whose items can be accessed as attributes
# self.subprocess = AttrDict()
#else:
# self.add_subprocesses(subprocess)
def add_subprocesses(self, procdict):
"""Adds a dictionary of subproceses to this process.
Calls :func:`add_subprocess` for every process given in the
input-dictionary. It can also pass a single process, which will
be given the name *default*.
:param procdict: a dictionary with process names as keys
:type procdict: dict
"""
if isinstance(procdict, Process):
try:
name = procdict.name
except:
name = 'default'
self.add_subprocess(name, procdict)
else:
for name, proc in procdict.items():
self.add_subprocess(name, proc)
def add_subprocess(self, name, proc):
"""Adds a single subprocess to this process.
:param string name: name of the subprocess
:param proc: a Process object
:type proc: :class:`~climlab.process.process.Process`
:raises: :exc:`ValueError`
if ``proc`` is not a process
:Example:
Replacing an albedo subprocess through adding a subprocess with
same name::
>>> from climlab.model.ebm import EBM_seasonal
>>> from climlab.surface.albedo import StepFunctionAlbedo
>>> # creating EBM model
>>> ebm_s = EBM_seasonal()
>>> print ebm_s
.. code-block:: none
:emphasize-lines: 8
climlab Process of type <class 'climlab.model.ebm.EBM_seasonal'>.
State variables and domain shapes:
Ts: (90, 1)
The subprocess tree:
top: <class 'climlab.model.ebm.EBM_seasonal'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT'>
albedo: <class 'climlab.surface.albedo.P2Albedo'>
insolation: <class 'climlab.radiation.insolation.DailyInsolation'>
::
>>> # creating and adding albedo feedback subprocess
>>> step_albedo = StepFunctionAlbedo(state=ebm_s.state, **ebm_s.param)
>>> ebm_s.add_subprocess('albedo', step_albedo)
>>>
>>> print ebm_s
.. code-block:: none
:emphasize-lines: 8
climlab Process of type <class 'climlab.model.ebm.EBM_seasonal'>.
State variables and domain shapes:
Ts: (90, 1)
The subprocess tree:
top: <class 'climlab.model.ebm.EBM_seasonal'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT'>
albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>
iceline: <class 'climlab.surface.albedo.Iceline'>
cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>
warm_albedo: <class 'climlab.surface.albedo.P2Albedo'>
insolation: <class 'climlab.radiation.insolation.DailyInsolation'>
"""
if isinstance(proc, Process):
self.subprocess.update({name: proc})
self.has_process_type_list = False
# Add subprocess diagnostics to parent
# (if there are no name conflicts)
for diagname, value in proc.diagnostics.items():
#if not (diagname in self.diagnostics or hasattr(self, diagname)):
# self.add_diagnostic(diagname, value)
self.add_diagnostic(diagname, value)
else:
raise ValueError('subprocess must be Process object')
def remove_subprocess(self, name, verbose=True):
"""Removes a single subprocess from this process.
:param string name: name of the subprocess
:param bool verbose: information whether warning message
should be printed [default: True]
:Example:
Remove albedo subprocess from energy balance model::
>>> import climlab
>>> model = climlab.EBM()
>>> print model
climlab Process of type <class 'climlab.model.ebm.EBM'>.
State variables and domain shapes:
Ts: (90, 1)
The subprocess tree:
top: <class 'climlab.model.ebm.EBM'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT'>
albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>
iceline: <class 'climlab.surface.albedo.Iceline'>
cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>
warm_albedo: <class 'climlab.surface.albedo.P2Albedo'>
insolation: <class 'climlab.radiation.insolation.P2Insolation'>
>>> model.remove_subprocess('albedo')
>>> print model
climlab Process of type <class 'climlab.model.ebm.EBM'>.
State variables and domain shapes:
Ts: (90, 1)
The subprocess tree:
top: <class 'climlab.model.ebm.EBM'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT'>
insolation: <class 'climlab.radiation.insolation.P2Insolation'>
"""
try:
self.subprocess.pop(name)
except KeyError:
if verbose:
print('WARNING: {} not found in subprocess dictionary.'.format(name))
self.has_process_type_list = False
def set_state(self, name, value):
"""Sets the variable ``name`` to a new state ``value``.
:param string name: name of the state
:param value: state variable
:type value: :class:`~climlab.domain.field.Field` or *array*
:raises: :exc:`ValueError`
if state variable ``value`` is not having a domain.
:raises: :exc:`ValueError`
if shape mismatch between existing domain and
new state variable.
:Example:
Resetting the surface temperature of an EBM to
            :math:`-5 ^{\circ} \\textrm{C}` on all latitudes::
>>> import climlab
>>> from climlab import Field
>>> import numpy as np
>>> # setup model
>>> model = climlab.EBM(num_lat=36)
>>> # create new temperature distribution
            >>> initial = -5 * np.ones(np.size(model.lat))
>>> model.set_state('Ts', Field(initial, domain=model.domains['Ts']))
>>> np.squeeze(model.Ts)
Field([-5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5.,
-5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5.,
-5., -5., -5., -5., -5., -5., -5., -5., -5., -5.])
"""
if isinstance(value, Field):
# populate domains dictionary with domains from state variables
self.domains.update({name: value.domain})
else:
try:
thisdom = self.state[name].domain
domshape = thisdom.shape
except:
raise ValueError('State variable needs a domain.')
value = np.atleast_1d(value)
if value.shape == domshape:
value = Field(value, domain=thisdom)
else:
raise ValueError('Shape mismatch between existing domain and new state variable.')
# set the state dictionary
self.state[name] = value
for name, value in self.state.items():
#convert int dtype to float
if np.issubdtype(self.state[name].dtype, np.dtype('int').type):
value = self.state[name].astype(float)
self.state[name]=value
self.__setattr__(name, value)
def _guess_state_domains(self):
for name, value in self.state.items():
for domname, dom in self.domains.items():
if value.shape == dom.shape:
# same shape, assume it's the right domain
self.state_domain[name] = dom
def _add_field(self, field_type, name, value):
"""Adds a new field to a specified dictionary. The field is also added
as a process attribute. field_type can be 'input', 'diagnostics' """
try:
self.__getattribute__(field_type).update({name: value})
except:
raise ValueError('Problem with field_type %s' %field_type)
        # Note that if process has attribute name, this will trigger the
# setter method for that attribute
self.__setattr__(name, value)
def add_diagnostic(self, name, value=None):
"""Create a new diagnostic variable called ``name`` for this process
and initialize it with the given ``value``.
Quantity is accessible in two ways:
* as a process attribute, i.e. ``proc.name``
* as a member of the diagnostics dictionary,
i.e. ``proc.diagnostics['name']``
Use attribute method to set values, e.g.
```proc.name = value ```
:param str name: name of diagnostic quantity to be initialized
:param array value: initial value for quantity [default: None]
:Example:
Add a diagnostic CO2 variable to an energy balance model::
>>> import climlab
>>> model = climlab.EBM()
>>> # initialize CO2 variable with value 280 ppm
>>> model.add_diagnostic('CO2',280.)
>>> # access variable directly or through diagnostic dictionary
>>> model.CO2
280
>>> model.diagnostics.keys()
['ASR', 'CO2', 'net_radiation', 'icelat', 'OLR', 'albedo']
"""
self._diag_vars.append(name)
self.__setattr__(name, value)
def add_input(self, name, value=None):
'''Create a new input variable called ``name`` for this process
and initialize it with the given ``value``.
Quantity is accessible in two ways:
* as a process attribute, i.e. ``proc.name``
* as a member of the input dictionary,
i.e. ``proc.input['name']``
Use attribute method to set values, e.g.
```proc.name = value ```
:param str name: name of diagnostic quantity to be initialized
:param array value: initial value for quantity [default: None]
'''
self._input_vars.append(name)
self.__setattr__(name, value)
def declare_input(self, inputlist):
'''Add the variable names in ``inputlist`` to the list of necessary inputs.'''
for name in inputlist:
self._input_vars.append(name)
def declare_diagnostics(self, diaglist):
        '''Add the variable names in ``diaglist`` to the list of diagnostics.'''
for name in diaglist:
self._diag_vars.append(name)
def remove_diagnostic(self, name):
""" Removes a diagnostic from the ``process.diagnostic`` dictionary
and also delete the associated process attribute.
:param str name: name of diagnostic quantity to be removed
:Example:
Remove diagnostic variable 'icelat' from energy balance model::
>>> import climlab
>>> model = climlab.EBM()
>>> # display all diagnostic variables
>>> model.diagnostics.keys()
['ASR', 'OLR', 'net_radiation', 'albedo', 'icelat']
>>> model.remove_diagnostic('icelat')
>>> model.diagnostics.keys()
['ASR', 'OLR', 'net_radiation', 'albedo']
>>> # Watch out for subprocesses that may still want
>>> # to access the diagnostic 'icelat' variable !!!
"""
#_ = self.diagnostics.pop(name)
#delattr(type(self), name)
try:
delattr(self, name)
self._diag_vars.remove(name)
except:
print('No diagnostic named {} was found.'.format(name))
def to_xarray(self, diagnostics=False):
""" Convert process variables to ``xarray.Dataset`` format.
With ``diagnostics=True``, both state and diagnostic variables are included.
Otherwise just the state variables are included.
Returns an ``xarray.Dataset`` object with all spatial axes,
including 'bounds' axes indicating cell boundaries in each spatial dimension.
:Example:
Create a single column radiation model and view as ``xarray`` object::
>>> import climlab
>>> state = climlab.column_state(num_lev=20)
>>> model = climlab.radiation.RRTMG(state=state)
>>> # display model state as xarray:
>>> model.to_xarray()
<xarray.Dataset>
Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21)
Coordinates:
* depth (depth) float64 0.5
* depth_bounds (depth_bounds) float64 0.0 1.0
* lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ...
* lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ...
Data variables:
Ts (depth) float64 288.0
Tatm (lev) float64 200.0 204.1 208.2 212.3 216.4 220.5 224.6 ...
>>> # take a single timestep to populate the diagnostic variables
>>> model.step_forward()
>>> # Now look at the full output in xarray format
>>> model.to_xarray(diagnostics=True)
<xarray.Dataset>
Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21)
Coordinates:
* depth (depth) float64 0.5
* depth_bounds (depth_bounds) float64 0.0 1.0
* lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ...
* lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ...
Data variables:
Ts (depth) float64 288.7
Tatm (lev) float64 201.3 204.0 208.0 212.0 216.1 220.2 ...
ASR (depth) float64 240.0
ASRcld (depth) float64 0.0
ASRclr (depth) float64 240.0
LW_flux_down (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ...
LW_flux_down_clr (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ...
LW_flux_net (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ...
LW_flux_net_clr (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ...
LW_flux_up (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ...
LW_flux_up_clr (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ...
LW_sfc (depth) float64 128.9
LW_sfc_clr (depth) float64 128.9
OLR (depth) float64 240.1
OLRcld (depth) float64 0.0
OLRclr (depth) float64 240.1
SW_flux_down (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ...
SW_flux_down_clr (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ...
SW_flux_net (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ...
SW_flux_net_clr (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ...
SW_flux_up (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ...
SW_flux_up_clr (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ...
SW_sfc (depth) float64 163.8
SW_sfc_clr (depth) float64 163.8
TdotLW (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ...
TdotLW_clr (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ...
TdotSW (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ...
TdotSW_clr (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ...
"""
if diagnostics:
dic = self.state.copy()
dic.update(self.diagnostics)
return state_to_xarray(dic)
else:
return state_to_xarray(self.state)
@property
def diagnostics(self):
"""Dictionary access to all diagnostic variables
:type: dict
"""
diag_dict = {}
for key in self._diag_vars:
try:
#diag_dict[key] = getattr(self,key)
# using self.__dict__ doesn't count diagnostics defined as properties
diag_dict[key] = self.__dict__[key]
except:
pass
return diag_dict
@property
def input(self):
"""Dictionary access to all input variables
That can be boundary conditions and other gridded quantities
independent of the `process`
:type: dict
"""
input_dict = {}
for key in self._input_vars:
try:
input_dict[key] = getattr(self,key)
except:
pass
return input_dict
# Some handy shortcuts... only really make sense when there is only
# a single axis of that type in the process.
@property
def lat(self):
"""Latitude of grid centers (degrees North)
        :getter: Returns the points of axis ``'lat'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lat'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislat = dom.axes['lat'].points
except:
pass
return thislat
except:
raise ValueError('Can\'t resolve a lat axis.')
@property
def lat_bounds(self):
"""Latitude of grid interfaces (degrees North)
        :getter: Returns the bounds of axis ``'lat'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lat'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislat = dom.axes['lat'].bounds
except:
pass
return thislat
except:
raise ValueError('Can\'t resolve a lat axis.')
@property
def lon(self):
"""Longitude of grid centers (degrees)
        :getter: Returns the points of axis ``'lon'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lon'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislon = dom.axes['lon'].points
except:
pass
return thislon
except:
raise ValueError('Can\'t resolve a lon axis.')
@property
def lon_bounds(self):
"""Longitude of grid interfaces (degrees)
        :getter: Returns the bounds of axis ``'lon'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lon'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislon = dom.axes['lon'].bounds
except:
pass
return thislon
except:
raise ValueError('Can\'t resolve a lon axis.')
@property
def lev(self):
"""Pressure levels at grid centers (hPa or mb)
        :getter: Returns the points of axis ``'lev'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lev'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislev = dom.axes['lev'].points
except:
pass
return thislev
except:
raise ValueError('Can\'t resolve a lev axis.')
@property
def lev_bounds(self):
"""Pressure levels at grid interfaces (hPa or mb)
        :getter: Returns the bounds of axis ``'lev'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lev'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislev = dom.axes['lev'].bounds
except:
pass
return thislev
except:
raise ValueError('Can\'t resolve a lev axis.')
@property
def depth(self):
"""Depth at grid centers (m)
        :getter: Returns the points of axis ``'depth'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'depth'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thisdepth = dom.axes['depth'].points
except:
pass
return thisdepth
except:
raise ValueError('Can\'t resolve a depth axis.')
@property
def depth_bounds(self):
"""Depth at grid interfaces (m)
        :getter: Returns the bounds of axis ``'depth'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'depth'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thisdepth = dom.axes['depth'].bounds
except:
pass
return thisdepth
except:
raise ValueError('Can\'t resolve a depth axis.')
def process_like(proc):
"""Make an exact clone of a process, including state and all subprocesses.
The creation date is updated.
:param proc: process
:type proc: :class:`~climlab.process.process.Process`
:return: new process identical to the given process
:rtype: :class:`~climlab.process.process.Process`
:Example:
::
>>> import climlab
>>> from climlab.process.process import process_like
>>> model = climlab.EBM()
>>> model.subprocess.keys()
['diffusion', 'LW', 'albedo', 'insolation']
>>> albedo = model.subprocess['albedo']
>>> albedo_copy = process_like(albedo)
>>> albedo.creation_date
'Thu, 24 Mar 2016 01:32:25 +0000'
>>> albedo_copy.creation_date
'Thu, 24 Mar 2016 01:33:29 +0000'
"""
newproc = copy.deepcopy(proc)
newproc.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z",
time.localtime())
return newproc
def get_axes(process_or_domain):
"""Returns a dictionary of all Axis in a domain or dictionary of domains.
:param process_or_domain: a process or a domain object
:type process_or_domain: :class:`~climlab.process.process.Process` or
:class:`~climlab.domain.domain._Domain`
:raises: :exc: `TypeError` if input is not or not having a domain
:returns: dictionary of input's Axis
:rtype: dict
:Example:
::
>>> import climlab
>>> from climlab.process.process import get_axes
>>> model = climlab.EBM()
>>> get_axes(model)
{'lat': <climlab.domain.axis.Axis object at 0x7ff13b9dd2d0>,
'depth': <climlab.domain.axis.Axis object at 0x7ff13b9dd310>}
"""
if isinstance(process_or_domain, Process):
dom = process_or_domain.domains
else:
dom = process_or_domain
if isinstance(dom, _Domain):
return dom.axes
elif isinstance(dom, dict):
axes = {}
for thisdom in list(dom.values()):
assert isinstance(thisdom, _Domain)
axes.update(thisdom.axes)
return axes
else:
raise TypeError('dom must be a domain or dictionary of domains.')
| cjcardinale/climlab | climlab/process/process.py | Python | mit | 34,601 |
from debug_toolbar.panels.templates import TemplatesPanel as BaseTemplatesPanel
class TemplatesPanel(BaseTemplatesPanel):
def generate_stats(self, *args):
template = self.templates[0]['template']
if not hasattr(template, 'engine') and hasattr(template, 'backend'):
template.engine = template.backend
return super().generate_stats(*args)
| grvty-labs/A1-136 | contrib/django_debug_toolbar/panels.py | Python | mit | 379 |
from django.shortcuts import render, get_object_or_404
# import the custom context processor
from vehicles.context_processor import global_context_processor
from vehicles.models import Vehicle, VehicleMake, Category
from settings.models import SliderImage
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from dynamic_preferences.registries import global_preferences_registry
def home_page(request):
    # instantiate a manager for global preferences
global_preferences = global_preferences_registry.manager()
MAX_VEHICLES_TO_SHOW = global_preferences['homepage__number_of_vehicles']
MAX_CATEGORIES_TO_SHOW = 4
# get list of slider objects
sliders = SliderImage.objects.all()
# get categories to show on homepage
top_categories = Category.objects.get_home_page_categories()
if top_categories:
top_categories = top_categories[:MAX_CATEGORIES_TO_SHOW]
# get recently added vehicles
top_vehicles = Vehicle.objects.all().order_by(
'-timestamp').prefetch_related('images')
if top_vehicles:
top_vehicles = top_vehicles[:MAX_VEHICLES_TO_SHOW]
context = global_context_processor(locals())
return render(request, "home_page.html", context)
def exports_page(request):
context = global_context_processor(locals())
return render(request, "exports_page.html", context)
def how_to_buy(request):
context = global_context_processor(locals())
return render(request, "how_to_buy.html", context)
def category_page(request, slug):
# check if make slug parameter is passed into the url
vehicle_make_slug = request.GET.get('make', None)
# get category by slug
category = Category.objects.get_category_by_slug(slug)
# get all the vehicles by the category and make (if provided)
if vehicle_make_slug:
# get make by slug
make = VehicleMake.objects.get_make_by_slug(vehicle_make_slug)
if category:
vehicles_list = Vehicle.objects.get_vehicles_by_category_and_make(
category, make
).prefetch_related('images')
else:
vehicles_list = Vehicle.objects.get_vehicles_by_make(
make
).prefetch_related('images')
else:
# if category is not found then get all of the vehicles
if category:
vehicles_list = Vehicle.objects.get_vehicles_by_category(
category
).prefetch_related('images')
else:
vehicles_list = Vehicle.objects.all().prefetch_related('images')
# paginate vehicle list for 10 items per page
paginator = Paginator(vehicles_list, 16)
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
try:
vehicles = paginator.page(page)
except (InvalidPage, EmptyPage):
vehicles = paginator.page(paginator.num_pages)
makes = get_makes_in_category(category)
context = global_context_processor(locals())
return render(request, "categories_page.html", context)
def vehicle_detail_page(request, category_slug, vehicle_id, vehicle_slug):
# get vehicle details by vehicle_id
vehicle = get_object_or_404(Vehicle, id=vehicle_id)
related_vehicles = Vehicle.objects.get_vehicles_by_category(
vehicle.category)
return render(request, "detail_page.html", global_context_processor(locals()))
def get_makes_in_category(category):
makes_in_category = []
# get all the vehicle objects by category
vehicles_in_category = Vehicle.objects.get_vehicles_by_category(
category=category)
for vehicle in vehicles_in_category:
makes_in_category.append(vehicle.make)
# remove duplicate makes from the list
makes_in_category = list(set(makes_in_category))
makes_in_category = sorted(makes_in_category, key=lambda x: x.v_make)
return makes_in_category
| sitture/trade-motors | src/vehicles/views.py | Python | mit | 3,897 |
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("Ah+LCAAAAAAABACT7+ZgAAEWhre3/LNvG0iwP1i/yPTlUbXVqdvzlJoi+a3Lj8v6RJl1JZacmaK7/Otuf07ZXEnrN/zZZ+cdV4iexrfrz59Tftsevr0tcO7wz0oLK678"
+ "PLvaHVX/Lff8K6otFRbb/W/369X9D7+oMAiXlZWJlbEzGIQaM4yCUTAKRsEoGPzgnzcjw4w9ejJ35HS6A8KTT0zfPp3dVXBWrHr2qoXeofNfZVm8eZ31+0g2a93585ut"
+ "w3JN9984E/ele8axTZZS1/4XxB6I/8bdWrVmWqrMqqVnDpeUFEb23t0kFaTV171P99WmM7e/nr75LancfFrm1OPBq7oXnf9bc4u/fb3/3oIH/XuqLEPeHm7aK7k69NbU"
+ "j1ON+IS38DrntEX0b9Q9bSi3fJNHZfS+7LDknKDAKz+17ksmzxX7nszEf/ni27IX/L83eufKdO3eW73qcUGUSaGGf9fjO+ecNvY8rjv2ff2Hw4HBfJrnv1rKzVuvl26p"
+ "vrMvWfi4740pH/MS7p499OejfabZ97vdb3Nqb4b/3CLxyEjzg4Hnz617Yp9s/1T2f3VU6Pf2nZ5/lcKOCtzecu+YOz+jZzvnrad7/hg+31n1vtguPv/Tkp0Vh4u/824s"
+ "fMX7Q1acAQDKcaipZwcAAA==")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<1000 and y<1515):
return g[y*1000 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<1000 and y<1515):
g[y*1000 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(2,0,1000)
gw(3,0,1500000)
sa(gr(3,0)-1)
sa(gr(3,0))
gw(tm(gr(3,0),gr(2,0)),(td(gr(3,0),gr(2,0)))+3,0)
return 1
def _1():
return (2)if(sp()!=0)else(3)
def _2():
sa(sr());
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 1
def _3():
gw(6,0,0)
gw(8,0,1)
sp();
return 4
def _4():
return (21)if(((gr(8,0)*gr(8,0)*4)+(gr(8,0)*6)+2)>gr(3,0))else(5)
def _5():
sa((gr(8,0)+1)*(gr(8,0)+1)*2)
sa(gr(8,0)+1)
gw(9,0,gr(8,0)+1)
return 6
def _6():
global t0
sa(sp()*gr(8,0)*2)
sa(sp()+sp());
t0=sp()
t0=(1)if(t0>gr(3,0))else(0)
return (20)if((t0)!=0)else(7)
def _7():
global t0
global t1
global t2
t0=(gr(9,0)*gr(9,0))-(gr(8,0)*gr(8,0))
gw(2,1,(gr(9,0)*gr(9,0))-(gr(8,0)*gr(8,0)))
t1=gr(8,0)*gr(9,0)*2
gw(3,1,gr(8,0)*gr(9,0)*2)
t1=t1+(gr(9,0)*gr(9,0))+(gr(8,0)*gr(8,0))
gw(4,1,(gr(9,0)*gr(9,0))+(gr(8,0)*gr(8,0)))
t2=t0+t1
gw(6,1,t2)
return (19)if(gr(2,1)>gr(3,1))else(8)
def _8():
sa(1)
sa((1)if(gr(6,1)>gr(3,0))else(0))
return 9
def _9():
return (18)if(sp()!=0)else(10)
def _10():
gw(8,1,sr()*((((gr(2,1)*7)+gr(3,1))*5)+gr(4,1)))
sa(sr()*gr(6,1))
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
sa(gr(sp(),v0))
sa(sr());
return (13)if(sp()!=0)else(11)
def _11():
sp();
sa(sr()*gr(6,1))
sa(gr(8,1))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(6,0,gr(6,0)+1)
return 12
def _12():
sa(sp()+1)
sa((1)if((sr()*gr(6,1))>gr(3,0))else(0))
return 9
def _13():
return (17)if((sr()-gr(8,1))!=0)else(14)
def _14():
sp();
sa(1)
return 15
def _15():
return (12)if(sp()!=0)else(16)
def _16():
sa(sr()*gr(6,1))
sa(-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(6,0,gr(6,0)-1)
return 12
def _17():
sa((1)if(sp()<0)else(0))
return 15
def _18():
sp();
sa((gr(9,0)+1)*(gr(9,0)+1)*2)
sa(gr(9,0)+1)
gw(9,0,gr(9,0)+1)
return 6
def _19():
global t0
t0=gr(2,1)
gw(2,1,gr(3,1))
gw(3,1,t0)
return 8
def _20():
gw(8,0,gr(8,0)+1)
return 4
def _21():
sys.stdout.write(str(gr(6,0))+" ")
sys.stdout.flush()
return 22
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21]
c=0
while c<22:
c=m[c]()
| Mikescher/Project-Euler_Befunge | compiled/Python2/Euler_Problem-075.py | Python | mit | 4,387 |
# -*- coding: utf-8 -*-
__version__ = '0.8.0'
__author__ = 'Steven Loria'
__license__ = 'MIT'
from webargs.core import Arg, WebargsError, ValidationError, Missing
__all__ = ['Arg', 'WebargsError', 'ValidationError', 'Missing']
| jmcarp/webargs | webargs/__init__.py | Python | mit | 231 |
import numpy
import numpy.linalg
def training(inputs, minvar=0.1):
"""Trains a naive-bayes classifier using inputs
    Returns the per-feature means and variances of the classifier (variances floored at minvar)
"""
return numpy.mean(inputs, axis=0), numpy.maximum(minvar, numpy.var(inputs, axis=0))
def gaussian(input, mu, sigma2):
"""Calculates gaussian value for each input in the array
"""
return (1/ (2*numpy.sqrt(3.14*sigma2))) * \
numpy.exp( - ((input-mu)**2)/(2*sigma2))
def likelihood(inputs, means, variances):
"""Minimum distances between inputs and any reference
Each element should be in a row!
"""
out = numpy.ones(inputs.shape[0])
for j in xrange(inputs.shape[1]):
if variances[j] != 0:
out = out * \
(gaussian (inputs[:,j], means[j], variances[j]))
return out
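# In other words (illustrative note): for each input row x the value returned is the
# product over feature dimensions j of gaussian(x_j, mu_j, sigma2_j), skipping any
# dimension whose variance is zero -- the "naive" independence assumption.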
def naive_bayes(test, train):
"""Implements the whole naive bayes flow.
Returns a likelihood array
"""
m, v = training(train)
return likelihood(test, m, v)
def naive_bayes_multidimensional(test, train):
"""Naive bayes analysis keeping dimensions isolated
"""
m, v = training(train)
out = numpy.ones( (test.shape) )
for i in xrange(test.shape[0]):
for j in xrange(test.shape[1]):
out[i,j] = out[i,j] * \
(gaussian (test[i,j], m[j], v[j]))
return out
# a = numpy.array([[2, 4, 6], [4, 3, 2], [5, -2, -1], [10, 11, 12], [15, 20, 31]])
# b = numpy.array([[2, 4, 2], [4, 3, 1.5]])
# m, v = training(b)
# print m, v
# print likelihood(a, m, v)
# out = naive_bayes_multidimensional(a, b)
# out = (out / numpy.max(out)) * (out > 0.01)
# print out
| pymir3/pymir3 | mir3/lib/naive_bayes.py | Python | mit | 1,643 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home),
url(r'^interviewer/$', views.interviewer),
url(r'^candidate/$', views.candidate),
]
| VRSandeep/icrs | website/urls.py | Python | mit | 191 |
# Created: 16.03.2011, 2018 rewritten for pytest
# Copyright (C) 2011-2019, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.entities.appid import AppID
@pytest.fixture
def appid():
return AppID.new(
"FFFF",
dxfattribs={
"name": "EZDXF",
},
)
def test_name(appid):
assert appid.dxf.name == "EZDXF"
| mozman/ezdxf | tests/test_01_dxf_entities/test_118_appid_table_entry.py | Python | mit | 365 |
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)
# Update role
role = client.chat \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.fetch()
new_permissions = ['sendMediaMessage'] + (role.permissions or [])
role.update(permission=new_permissions)
print(role.friendly_name)
| TwilioDevEd/api-snippets | ip-messaging/rest/roles/update-role/update-role.6.x.py | Python | mit | 644 |
# views are handlers that respond to requests from web browsers or other clients
# Each view function maps to one or more request URLs
from flask import render_template, flash, redirect
from app import app
from .forms import Deck
#./run.py
@app.route('/submit', methods=('GET', 'POST'))
def submit():
form = Deck()
if form.validate_on_submit():
return redirect('/index')
return render_template('submit.html',
title='Create Card',
form=form)
@app.route('/')
@app.route('/index')
def index():
# This is displayed on client's web browser
user = {'nickname': 'Enrique Iglesias'} #fake user
decks = [
{
'title': 'GRE Words',
'cards': [
{
'word': 'combust',
'definition': 'to catch on fire'
},
{
'word': 'phaze',
'definition': 'to be affected'
}
]
},
{
'title': 'Food words',
'cards': [
{
'word': 'amuse bouche',
'definition': 'little serving'
},
{
'word': 'kimchii',
                    'definition': 'fermented cabbage'
}
]
}
]
return render_template('index.html',
title ='Home',
user=user,
posts=decks)
| ecotg/Flash-Card-App | app/views.py | Python | mit | 1,100 |
# -*- coding: utf-8 -*-
"""
rio.blueprints.api_1
~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Blueprint
bp = Blueprint('api_1', __name__)
| soasme/rio | rio/blueprints/api_1.py | Python | mit | 139 |
import math
from ..df import DocumentFrequencyVectorCreator
from . import InverseDocumentFrequencyVector
class InverseDocumentFrequencyVectorCreator(DocumentFrequencyVectorCreator):
"""Creates inverse-document-frequency vectors
Inherits from :class:`recommender.vector.abstractvector.VectorCreator`
    :parameter db_connection_str: connection string for a database built with :class:`recommender.vector.vectortablecreator.VectorTableCreator`
    :type db_connection_str: str
:raises: TypeError
"""
def __init__(self, db_connection_str):
super(InverseDocumentFrequencyVectorCreator, self).__init__(db_connection_str)
self._create_inverse_document_frequency_view()
pass
def _create_vector(self, document_id=None):
vector = InverseDocumentFrequencyVector()
with self._get_db_connection() as conn:
cursor = conn.cursor()
self._create_log_function(conn)
values = self._get_vector_values_from_db(cursor)
for value in [] if values is None else values:
vector.add_to_vector(value)
return vector
def _get_vector_values_from_db(self, c):
c.execute(
'''
SELECT
[term_id]
, [name]
, [value]
FROM
[InverseDocumentFrequency]
;
''')
vector_values = []
for result in c.fetchall():
vector_values.append((result[0], result[1], result[2]))
pass
return None if not vector_values else vector_values
def _create_log_function(self, conn):
conn.create_function('log10', 1, InverseDocumentFrequencyVectorCreator.log_10)
pass
@staticmethod
def log_10(x):
"""simply a method calculating log_10 used by the view in :func:`_create_inverse_document_frequency_view`
"""
base = 10
return math.log(x, base)
def _create_inverse_document_frequency_view(self):
"""Creates a view in the database required for building idf-vectors
"""
with self._get_db_connection() as conn:
self._create_log_function(conn)
c = conn.cursor()
c.execute(
'''
CREATE VIEW IF NOT EXISTS [InverseDocumentFrequency] AS
SELECT
[term_id]
, [name]
, log10
(
CAST ((SELECT [document_count] from [N]) AS REAL) / [df].[value]
)
AS [value]
FROM
[DocumentFrequency] AS [df]
ORDER BY
[term_id]
;
''')
pass
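# Summary of the quantity the view above computes (illustrative, matching the SQL):
#   idf(term) = log10(N / df(term))
# where N is the total document count and df(term) the per-term document frequency.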
| dustywind/bachelor-thesis | impl/recommender/vector/idf/inversedocumentfrequencyvectorcreator.py | Python | mit | 2,882 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scripts.db_api import accident
def usa_query(hour):
return '''
SELECT count(*), (select count(*) from accident
join vehicle on(acc_id = accident.id)
where country = 'USA'
and vehicle.speed > accident.speed_limit
and vehicle.speed > -1
and accident.speed_limit > 0
and date_part('hour', timestamp) = {0}) as exceeded
from accident
where country = 'USA' and date_part('hour', timestamp) = {0};
'''.format(hour)
def get_value(age, dictionary):
if age not in dictionary:
return 0
return dictionary[age]
if __name__ == '__main__':
print('HOUR\tALL\tEXCEEDED')
for i in xrange(0, 24):
usa_count = accident.execute_query(usa_query(i))
print('{0}\t{1}\t{2}'.format(i, usa_count[0][0], usa_count[0][1]))
| lopiola/integracja_wypadki | scripts/statistics/speed_limit_exceeded_by_hour.py | Python | mit | 817 |
# noinspection PyMethodMayBeStatic
class TestDevice:
def __init__(self, cf):
self.type = cf.get('device_test_type', 'test')
self.host = ('test', 80)
self.mac = [1, 2, 3, 4, 5, 6]
def auth(self):
pass
# RM2/RM4
def check_temperature(self):
return 23.5
# RM4
def check_humidity(self):
return 56
def enter_learning(self):
pass
def check_data(self):
payload = bytearray(5)
payload[0] = 0xAA
payload[1] = 0xBB
payload[2] = 0xCC
payload[3] = 0xDD
payload[4] = 0xEE
return payload
def send_data(self, data):
pass
def check_sensors(self):
return {'temperature': 23.5, 'humidity': 36, 'light': 'dim', 'air_quality': 'normal', 'noise': 'noisy'}
def check_sensors_raw(self):
return {'temperature': 23.5, 'humidity': 36, 'light': 1, 'air_quality': 3, 'noise': 2}
def get_percentage(self):
return 33
def open(self):
pass
def get_state(self):
return {'pwr': 1, 'pwr1': 1, 'pwr2': 0, 'maxworktime': 60, 'maxworktime1': 60, 'maxworktime2': 0, 'idcbrightness': 50}
def check_power(self):
return {'s1': True, 's2': False, 's3': True, 's4': False}
| eschava/broadlink-mqtt | test.py | Python | mit | 1,269 |
import os
import stat
import socket
import paramiko
from transfert.statresult import stat_result
from transfert.resources._resource import _Resource
from transfert.exceptions import TransfertFileExistsError, TransfertFileNotFoundError
class SftpResource(_Resource):
    KNOWN_HOSTS_FILE = '~/.ssh/known_hosts'
GSS_AUTH = False
GSS_KEX = False
_DEFAULT_PORT = 22
def __init__(self, url):
_Resource.__init__(self, url)
self.__client = None
self._transport = None
self._fd = None
def exists(self):
try:
return self.isfile() or self.isdir()
except FileNotFoundError:
return False
def _get_hostkey(self):
try:
            host_keys = paramiko.util.load_host_keys(os.path.expanduser(self.KNOWN_HOSTS_FILE))
htype = host_keys[self.url.host].keys()[0]
return host_keys[self.url.host][htype]
except (IOError, KeyError):
return None
def _connect(self):
self._transport = paramiko.Transport((self.url.host, self.url.port or self._DEFAULT_PORT))
self._transport.connect(self._get_hostkey(),
self.url.user,
self.url.password,
gss_host=socket.getfqdn(self.url.host),
gss_auth=self.GSS_AUTH,
gss_kex=self.GSS_KEX)
self.__client = paramiko.SFTPClient.from_transport(self._transport)
self.__client.chdir()
def open(self, flags):
self._fd = self._client.open(self.url.path[1:], flags)
@property
def _client(self):
if self.__client is None:
self._connect()
return self.__client
def isfile(self):
try:
with self('r'):
return stat.S_ISREG(self.stat().st_mode)
except IOError:
return False
def isdir(self):
try:
with self('r'):
return stat.S_ISDIR(self.stat().st_mode)
except IOError:
return False
def listdir(self, path=None):
if self.isfile():
yield self
elif self.isdir():
for entry in self._client.listdir(self.url.path[1:] + '/'):
yield self.join(entry)
else:
raise FileNotFoundError(self)
def close(self):
if self._fd:
self._fd.close()
self._fd = None
if self._transport:
self._transport.close()
self._transport = None
if self.__client is not None:
self.__client.close()
self.__client = None
def stat(self):
stat_res = self._client.stat(self.url.path[1:])
return stat_result(
st_atime=stat_res.st_atime,
st_gid=stat_res.st_gid,
st_mode=stat_res.st_mode,
st_mtime=stat_res.st_mtime,
st_size=stat_res.st_size,
st_uid=stat_res.st_uid,
)
def size(self):
return self.stat().st_size
def delete(self):
if self.isfile():
self._client.remove(self.url.path[1:])
elif self.isdir():
self._client.rmdir(self.url.path[1:])
else:
raise TransfertFileNotFoundError(self)
def chmod(self, mode):
self._client.chmod(self.url.path[1:], mode)
def read(self, size):
return iter(lambda: self._fd.read(size), b'')
def write(self, data):
self._fd.write(data)
def mkdir(self, name=None):
# Can be optimized after connection pool setup
if name is None:
if self.isfile():
raise TransfertFileExistsError(self)
elif not self.isdir():
self._client.mkdir(self.url.path[1:])
return self
else:
dire = self.join(name)
if dire.isfile():
raise TransfertFileExistsError(self)
elif not dire.isdir():
return dire.mkdir()
return dire
def __del__(self):
self.close()
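# Illustrative note (not part of the original module): SftpResource expects a
# transfert URL object exposing .host, .port, .user, .password and .path,
# e.g. something parsed from "sftp://user:pass@host:22/remote/file". A
# hypothetical read loop, mirroring the `with self('r')` pattern used above,
# could look like:
#
#     src = SftpResource(url)
#     with src('r'):
#         for chunk in src.read(32 * 1024):
#             handle(chunk)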
| rbernand/transfert | transfert/resources/sftp.py | Python | mit | 4,113 |
import unittest
import numpy as np
from collections import Counter
from diogenes.utils import remove_cols,cast_list_of_list_to_sa
import utils_for_tests
from numpy.random import rand
import diogenes.read
import diogenes.utils
from diogenes.modify import remove_cols_where
from diogenes.modify import col_val_eq
from diogenes.modify import col_val_eq_any
from diogenes.modify import col_fewer_than_n_nonzero
from diogenes.modify import where_all_are_true
from diogenes.modify import choose_rows_where
from diogenes.modify import remove_rows_where
from diogenes.modify import row_val_eq
from diogenes.modify import row_val_lt
from diogenes.modify import row_val_between
from diogenes.modify import combine_cols
from diogenes.modify import combine_sum
from diogenes.modify import combine_mean
from diogenes.modify import label_encode
from diogenes.modify import generate_bin
from diogenes.modify import normalize
from diogenes.modify import replace_missing_vals
from diogenes.modify import distance_from_point
class TestModify(unittest.TestCase):
def test_col_val_eq(self):
M = cast_list_of_list_to_sa(
[[1,2,3], [1,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_val_eq, 'vals': 1}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_col_val_eq_any(self):
M = cast_list_of_list_to_sa(
[[1,2,3], [1,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_val_eq_any, 'vals': None}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_col_fewer_than_n_nonzero(self):
M = cast_list_of_list_to_sa(
[[0,2,3], [0,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_fewer_than_n_nonzero, 'vals': 2}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_label_encoding(self):
M = np.array(
[('a', 0, 'Martin'),
('b', 1, 'Tim'),
('b', 2, 'Martin'),
('c', 3, 'Martin')],
dtype=[('letter', 'O'), ('idx', int), ('name', 'O')])
ctrl = np.array(
[(0, 0, 0),
(1, 1, 1),
(1, 2, 0),
(2, 3, 0)],
dtype=[('letter', int), ('idx', int), ('name', int)])
ctrl_classes = {'letter': np.array(['a', 'b', 'c']),
'name': np.array(['Martin', 'Tim'])}
new_M, classes = label_encode(M)
self.assertTrue(np.array_equal(ctrl, new_M))
self.assertEqual(ctrl_classes.keys(), classes.keys())
for key in ctrl_classes:
self.assertTrue(np.array_equal(ctrl_classes[key], classes[key]))
def test_replace_missing_vals(self):
M = np.array([('a', 0, 0.0, 0.1),
('b', 1, 1.0, np.nan),
('', -999, np.nan, 0.0),
('d', 1, np.nan, 0.2),
('', -999, 2.0, np.nan)],
dtype=[('str', 'O'), ('int', int), ('float1', float),
('float2', float)])
ctrl = M.copy()
ctrl['float1'] = np.array([0.0, 1.0, -1.0, -1.0, 2.0])
ctrl['float2'] = np.array([0.1, -1.0, 0.0, 0.2, -1.0])
res = replace_missing_vals(M, 'constant', constant=-1.0)
self.assertTrue(np.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['int'] = np.array([100, 1, -999, 1, -999])
ctrl['float1'] = np.array([100, 1.0, np.nan, np.nan, 2.0])
ctrl['float2'] = np.array([0.1, np.nan, 100, 0.2, np.nan])
res = replace_missing_vals(M, 'constant', missing_val=0, constant=100)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['int'] = np.array([0, 1, 1, 1, 1])
res = replace_missing_vals(M, 'most_frequent', missing_val=-999)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['float1'] = np.array([0.0, 1.0, 1.0, 1.0, 2.0])
ctrl['float2'] = np.array([0.1, 0.1, 0.0, 0.2, 0.1])
res = replace_missing_vals(M, 'mean', missing_val=np.nan)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
def test_generate_bin(self):
M = [1, 1, 1, 3, 3, 3, 5, 5, 5, 5, 2, 6]
ctrl = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 0, 3]
self.assertTrue(np.array_equal(ctrl, generate_bin(M, 3)))
M = np.array([0.1, 3.0, 0.0, 1.2, 2.5, 1.7, 2])
ctrl = [0, 3, 0, 1, 2, 1, 2]
self.assertTrue(np.array_equal(ctrl, generate_bin(M, 3)))
def test_where_all_are_true(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
col_names = ['heigh','weight', 'age']
lables= [0,0,1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
arguments = [{'func': row_val_eq, 'col_name': 'heigh', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = where_all_are_true(
M,
arguments)
ctrl = np.array([True, False, False])
self.assertTrue(np.array_equal(res, ctrl))
def test_choose_rows_where(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
col_names = ['heigh','weight', 'age']
lables= [0,0,1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
arguments = [{'func': row_val_eq, 'col_name': 'heigh', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = choose_rows_where(
M,
arguments)
ctrl = cast_list_of_list_to_sa([[1,2,3]],col_names=['heigh','weight', 'age'])
self.assertTrue(np.array_equal(res, ctrl))
def test_remove_rows_where(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
col_names = ['heigh','weight', 'age']
lables= [0,0,1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
arguments = [{'func': row_val_eq, 'col_name': 'heigh', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = remove_rows_where(
M,
arguments)
ctrl = cast_list_of_list_to_sa([[2,3,4],[3,4,5]],col_names=['heigh','weight', 'age'])
self.assertTrue(np.array_equal(res, ctrl))
def test_combine_cols(self):
M = np.array(
[(0, 1, 2), (3, 4, 5), (6, 7, 8)],
dtype=[('f0', float), ('f1', float), ('f2', float)])
ctrl_sum = np.array([1, 7, 13])
ctrl_mean = np.array([1.5, 4.5, 7.5])
res_sum = combine_cols(M, combine_sum, ('f0', 'f1'))
res_mean = combine_cols(M, combine_mean, ('f1', 'f2'))
self.assertTrue(np.array_equal(res_sum, ctrl_sum))
self.assertTrue(np.array_equal(res_mean, ctrl_mean))
def test_normalize(self):
col = np.array([-2, -1, 0, 1, 2])
res, mean, stddev = normalize(col, return_fit=True)
self.assertTrue(np.allclose(np.std(res), 1.0))
self.assertTrue(np.allclose(np.mean(res), 0.0))
col = np.arange(10)
res = normalize(col, mean=mean, stddev=stddev)
self.assertTrue(np.allclose(res, (col - mean) / stddev))
def test_distance_from_point(self):
# Coords according to https://tools.wmflabs.org/geohack/
# Paris
lat_origin = 48.8567
lng_origin = 2.3508
# New York, Beijing, Jerusalem
lat_col = [40.7127, 39.9167, 31.7833]
lng_col = [-74.0059, 116.3833, 35.2167]
# According to http://www.movable-type.co.uk/scripts/latlong.html
# (Rounds to nearest km)
ctrl = np.array([5837, 8215, 3331])
res = distance_from_point(lat_origin, lng_origin, lat_col, lng_col)
# get it right within 1km
self.assertTrue(np.allclose(ctrl, res, atol=1, rtol=0))
if __name__ == '__main__':
unittest.main()
| jamestwhedbee/diogenes | tests/test_modify.py | Python | mit | 8,979 |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class SavedActionApproval(BaseType):
_soap_tag = 'saved_action_approval'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'id': int,
'name': str,
'approved_flag': int},
complex_properties={'metadata': MetadataList},
list_properties={},
)
self.id = None
self.name = None
self.approved_flag = None
self.metadata = None
from metadata_list import MetadataList
| tanium/pytan | lib/taniumpy/object_types/saved_action_approval.py | Python | mit | 654 |
import unittest
import os
from os.path import dirname
import sys
import json
from rtree import index
from . import ROOT
from geotweet.mapreduce.utils.lookup import project, SpatialLookup
testdata = os.path.join(dirname(os.path.abspath(__file__)), 'testdata')
def read(geojson):
return json.loads(open(os.path.join(testdata, geojson), 'r').read())
"""
P53000
______
| |
| |
|______| Polygon 2
P3200
______
| |
| P |
|______| Polygon 2
"""
POLYGON_1 = read('polygon_102500_1.geojson')
POLYGON_2 = read('polygon_102500_2.geojson')
POINT_WITHIN = read('point_within.geojson')
POINT_53000M = read('point_53000m.geojson')
POINT_3200M = read('point_3200m.geojson')
def init_polygon_1_index():
location = SpatialLookup()
idx = index.Rtree()
polygon = location._build_obj(POLYGON_1)
location.data_store[1] = polygon
idx.insert(1, polygon['geometry'].bounds)
location.idx = idx
return location
def init_polygon_2_index():
location = init_polygon_1_index()
polygon = location._build_obj(POLYGON_2)
location.data_store[2] = polygon
location.idx.insert(2, polygon['geometry'].bounds)
return location
class GetObjectBasic(unittest.TestCase):
def setUp(self):
self.location = init_polygon_1_index()
def assert_found(self, point):
found = self.location.get_object(point)
error = "get_object failed to return object"
self.assertIsNotNone(found, error)
def assert_none(self, point):
found = self.location.get_object(point)
error = "get_object should return None: Actual < {0} >".format(found)
self.assertIsNone(found, error)
def test_basic(self):
self.assert_found(project(POINT_WITHIN['geometry']['coordinates']))
self.assert_none(project(POINT_3200M['geometry']['coordinates']))
self.assert_none(project(POINT_53000M['geometry']['coordinates']))
def test_buffer_none(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=0)
self.assertIsNone(found)
def test_buffer_outside_buffer(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=3000)
self.assertIsNone(found)
def test_buffer_within_buffer(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=4000)
self.assertIsNotNone(found)
class GetObjectOrder(unittest.TestCase):
def setUp(self):
self.location = init_polygon_2_index()
def assert_found(self, point):
found = self.location.get_object(point)
error = "get_object failed to return object"
self.assertIsNotNone(found, error)
def assert_none(self, point):
found = self.location.get_object(point)
error = "get_object should return None: Actual < {0} >".format(found)
self.assertIsNone(found, error)
def test_buffer_nearest1(self):
point = project(POINT_WITHIN['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=100000)
self.assertIsNotNone(found, "get_object failed to return object")
error = "get_object failed to return object with id=polygon1: Actual < {0} >"
self.assertEqual('polygon1', found['id'], error.format(found['id']))
def test_buffer_nearest2(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=100000)
self.assertIsNotNone(found, "get_object failed to return object")
error = "get_object failed to return object with id=polygon1: Actual < {0} >"
self.assertEqual('polygon1', found['id'], error.format(found['id']))
if __name__ == "__main__":
unittest.main()
| meyersj/geotweet | geotweet/tests/unit/mapreduce/utils/lookup_tests.py | Python | mit | 3,949 |
#!/usr/local/bin/python3
# This work is licensed under the Creative Commons Attribution 3.0 United
# States License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by/3.0/us/ or send a letter to Creative
# Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA.
# from http://oranlooney.com/make-css-sprites-python-image-library/
# Original Author Oran Looney <[email protected]>
#mods by Josh Gourneau <[email protected]> to make one big horizontal sprite JPG with no spaces between images
import os
from PIL import Image
import glob
start_dir = "images/full_sprites/opaque/kanto/"
end_dir = "images/full_sprites/transparent/kanto/"
#get your images using glob
iconmap = os.listdir(start_dir)
#iconMap = sorted(iconMap)
print(len(iconmap))
for filename in iconmap:
image = Image.open(start_dir+filename)
image_width, image_height = image.size
print( "the image will by %d by %d" % (image_width, image_height))
print( "creating image...")
master = Image.new(
mode='RGBA',
size=(image_width, image_height),
color=(0,0,0,0)) # fully transparent
master.paste(image,(0,0))
data = master.getdata()
newdata = []
for item in data:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
newdata.append((255,255,255,0))
else:
newdata.append(item)
master.putdata(newdata)
print( "saving master.jpg...")
master.save(end_dir+filename)
print( "saved!")
| ohnorobo/pokemon | transparent.py | Python | mit | 1,475 |
# coding: utf-8
"""
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""
from calendar import monthrange
from apscheduler.triggers.cron.expressions import (
AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression)
__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField')
MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59,
'second': 59}
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0,
'second': 0}
class BaseField(object):
REAL = True
COMPILERS = [AllExpression, RangeExpression]
def __init__(self, name, exprs, is_default=False):
self.name = name
self.is_default = is_default
self.compile_expressions(exprs)
def get_min(self, dateval):
return MIN_VALUES[self.name]
def get_max(self, dateval):
return MAX_VALUES[self.name]
def get_value(self, dateval):
return getattr(dateval, self.name)
def get_next_value(self, dateval):
smallest = None
for expr in self.expressions:
value = expr.get_next_value(dateval, self)
if smallest is None or (value is not None and value < smallest):
smallest = value
return smallest
def compile_expressions(self, exprs):
self.expressions = []
# Split a comma-separated expression list, if any
exprs = str(exprs).strip()
if ',' in exprs:
for expr in exprs.split(','):
self.compile_expression(expr)
else:
self.compile_expression(exprs)
def compile_expression(self, expr):
for compiler in self.COMPILERS:
match = compiler.value_re.match(expr)
if match:
compiled_expr = compiler(**match.groupdict())
self.expressions.append(compiled_expr)
return
raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name))
def __str__(self):
expr_strings = (str(e) for e in self.expressions)
return ','.join(expr_strings)
def __repr__(self):
return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self)
class WeekField(BaseField):
REAL = False
def get_value(self, dateval):
return dateval.isocalendar()[1]
class DayOfMonthField(BaseField):
COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression]
def get_max(self, dateval):
return monthrange(dateval.year, dateval.month)[1]
class DayOfWeekField(BaseField):
REAL = False
COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]
def get_value(self, dateval):
return dateval.weekday()
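# Illustrative sketch (not part of the original module): a field compiles a
# comma-separated expression string into expression objects, e.g.
#
#     field = DayOfWeekField('day_of_week', 'mon-fri')
#
# get_next_value(dateval) then asks every compiled expression for its next
# matching value and returns the smallest one; __str__ joins the compiled
# expressions back into a comma-separated string.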
| cychenyin/windmill | apscheduler/triggers/cron/fields.py | Python | mit | 3,059 |
from django.utils.translation import ugettext_lazy as _
from reviewboard.admin.read_only import is_site_read_only_for
from reviewboard.reviews.actions import (BaseReviewRequestAction,
BaseReviewRequestMenuAction)
from reviewboard.reviews.features import general_comments_feature
from reviewboard.reviews.models import ReviewRequest
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.urls import diffviewer_url_names
class CloseMenuAction(BaseReviewRequestMenuAction):
"""A menu action for closing the corresponding review request."""
action_id = 'close-review-request-action'
label = _('Close')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
review_request = context['review_request']
user = context['request'].user
return (review_request.status == ReviewRequest.PENDING_REVIEW and
not is_site_read_only_for(user) and
(context['request'].user.pk == review_request.submitter_id or
(context['perms']['reviews']['can_change_status'] and
review_request.public)))
class SubmitAction(BaseReviewRequestAction):
"""An action for submitting the review request."""
action_id = 'submit-review-request-action'
label = _('Submitted')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
return (context['review_request'].public and
not is_site_read_only_for(context['request'].user))
class DiscardAction(BaseReviewRequestAction):
"""An action for discarding the review request."""
action_id = 'discard-review-request-action'
label = _('Discarded')
class DeleteAction(BaseReviewRequestAction):
"""An action for permanently deleting the review request."""
action_id = 'delete-review-request-action'
label = _('Delete Permanently')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
return (context['perms']['reviews']['delete_reviewrequest'] and
not is_site_read_only_for(context['request'].user))
class UpdateMenuAction(BaseReviewRequestMenuAction):
"""A menu action for updating the corresponding review request."""
action_id = 'update-review-request-action'
label = _('Update')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
review_request = context['review_request']
user = context['request'].user
return (review_request.status == ReviewRequest.PENDING_REVIEW and
not is_site_read_only_for(user) and
(user.pk == review_request.submitter_id or
context['perms']['reviews']['can_edit_reviewrequest']))
class UploadDiffAction(BaseReviewRequestAction):
"""An action for updating/uploading a diff for the review request."""
action_id = 'upload-diff-action'
def get_label(self, context):
"""Return this action's label.
The label will change depending on whether or not the corresponding
review request already has a diff.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The label that displays this action to the user.
"""
review_request = context['review_request']
draft = review_request.get_draft(context['request'].user)
if (draft and draft.diffset) or review_request.get_diffsets():
return _('Update Diff')
return _('Upload Diff')
def should_render(self, context):
"""Return whether or not this action should render.
If the corresponding review request has a repository, then an upload
diff form exists, so we should render this UploadDiffAction.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
return (context['review_request'].repository_id is not None and
not is_site_read_only_for(context['request'].user))
class UploadFileAction(BaseReviewRequestAction):
"""An action for uploading a file for the review request."""
action_id = 'upload-file-action'
label = _('Add File')
class DownloadDiffAction(BaseReviewRequestAction):
"""An action for downloading a diff from the review request."""
action_id = 'download-diff-action'
label = _('Download Diff')
def get_url(self, context):
"""Return this action's URL.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The URL to invoke if this action is clicked.
"""
match = context['request'].resolver_match
# We want to use a relative URL in the diff viewer as we will not be
# re-rendering the page when switching between revisions.
if match.url_name in diffviewer_url_names:
return 'raw/'
return local_site_reverse('raw-diff', context['request'], kwargs={
'review_request_id': context['review_request'].display_id,
})
def get_hidden(self, context):
"""Return whether this action should be initially hidden to the user.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
bool: Whether this action should be initially hidden to the user.
"""
match = context['request'].resolver_match
if match.url_name in diffviewer_url_names:
return match.url_name == 'view-interdiff'
return super(DownloadDiffAction, self).get_hidden(context)
def should_render(self, context):
"""Return whether or not this action should render.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
review_request = context['review_request']
request = context['request']
match = request.resolver_match
# If we're on a diff viewer page, then this DownloadDiffAction should
# initially be rendered, but possibly hidden.
if match.url_name in diffviewer_url_names:
return True
return review_request.repository_id is not None
class EditReviewAction(BaseReviewRequestAction):
"""An action for editing a review intended for the review request."""
action_id = 'review-action'
label = _('Review')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
user = context['request'].user
return (user.is_authenticated and
not is_site_read_only_for(user))
class AddGeneralCommentAction(BaseReviewRequestAction):
"""An action for adding a new general comment to a review."""
action_id = 'general-comment-action'
label = _('Add General Comment')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
request = context['request']
user = request.user
return (user.is_authenticated and
not is_site_read_only_for(user) and
general_comments_feature.is_enabled(request=request))
class ShipItAction(BaseReviewRequestAction):
"""An action for quickly approving the review request without comments."""
action_id = 'ship-it-action'
label = _('Ship It!')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
user = context['request'].user
return (user.is_authenticated and
not is_site_read_only_for(user))
def get_default_actions():
"""Return a copy of all the default actions.
Returns:
list of BaseReviewRequestAction: A copy of all the default actions.
"""
return [
CloseMenuAction([
SubmitAction(),
DiscardAction(),
DeleteAction(),
]),
UpdateMenuAction([
UploadDiffAction(),
UploadFileAction(),
]),
DownloadDiffAction(),
EditReviewAction(),
AddGeneralCommentAction(),
ShipItAction(),
]
| reviewboard/reviewboard | reviewboard/reviews/default_actions.py | Python | mit | 9,922 |
#!/usr/bin/env python
"""
Retrieves menu from Drupal site
"""
from aashestrap.models import Menu
from django.core.management.base import BaseCommand
import urllib2
from django.http import HttpResponse
from BeautifulSoup import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
class Command(BaseCommand):
def handle(self, *args, **options):
get_menu()
def get_menu():
# Try to retrieve the existing menu object
try:
menu = Menu.objects.get(pk=1)
# If there isn't one, instantiate one
except ObjectDoesNotExist:
menu = Menu(pk=1)
# Request aashe home page
request = urllib2.Request('http://www.aashe.org/')
response = urllib2.urlopen(request)
# Soup it
soup = BeautifulSoup(response)
# Search and extract the footer
results = soup.findAll(id="block-menu_block-3")
footer = results[0].__str__('utf8')
# Search and extract the navigation bar
results = soup.findAll(id="navigation")
header = results[0].__str__('utf8')
menu.footer = footer
menu.header = header
menu.save()
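# Given this module's location under management/commands/, the command would
# typically be invoked through Django's management framework, e.g.:
#
#     python manage.py get_menu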
| AASHE/django-aashestrap | aashestrap/management/commands/get_menu.py | Python | mit | 1,101 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class FileuploadConfig(AppConfig):
name = 'fileupload'
| pcecconi/mapground | fileupload/apps.py | Python | mit | 160 |
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
| Dingo5733/djangoblog19 | src/trydjango19/settings.py | Python | mit | 3,453 |
import cPickle
import logging
import numpy
import os
import time
from collections import deque
from copy import deepcopy
from datetime import datetime
from pytz import timezone
from threading import Event, Thread
from coinbase.wallet.client import Client
from jarvis.utils.messaging.client import TwilioMessenger
from jarvis.modules.base import JarvisThreadedModule
def configure_debug_logging():
logging.basicConfig(level=logging.DEBUG)
def load_coinbase_config():
coinbase_key = os.environ.get('COINBASE_KEY')
coinbase_secret = os.environ.get('COINBASE_SECRET')
if not all([coinbase_key, coinbase_secret]):
raise Exception('Coinbase config not configured properly')
return (coinbase_key, coinbase_secret)
def load_from_file(path):
if os.path.exists(path):
with open(path,'r') as f:
return cPickle.loads(f.read())
return None
def store_to_file(path, obj):
with open(path,'w') as f:
f.write(cPickle.dumps(obj))
class CoinbaseClient(object):
def __init__(self):
self.api_key, self.api_secret = load_coinbase_config()
self.client = Client(self.api_key, self.api_secret)
def do(self, func, *args, **kwargs):
return getattr(self.client,func)(*args, **kwargs)
class TickerTimeseries(object):
def __init__(self, max_length, recent_cutoff,
load_path=None, poll_period=30, name=None):
self.timeseries = load_from_file(load_path)
if not self.timeseries:
self.timeseries = deque(maxlen=max_length)
self.large_movement_timeseries = deepcopy(self.timeseries)
self.recent_cutoff = recent_cutoff
self.max_length = max_length
self.poll_period = poll_period
self.name = name
def append(self, val):
self.timeseries.append(val)
self.large_movement_timeseries.append(val)
@property
def head(self):
return self.timeseries[-1]
@property
def tail(self):
return self.timeseries[0]
@property
def mean(self):
return numpy.mean(self.timeseries)
@property
def length(self):
return len(self.timeseries)
@classmethod
def anomaly(cls, series, recent_cutoff):
'''
Naive anomaly detection. Given a series it computes
the standard deviation and returns True if any of the values
        in the last :recent_cutoff points are more than
        3 standard deviations away from the mean
:series array of timeseries data
:recent_cutoff only consider anomalies on the most recent points
'''
std_dev = numpy.std(series)
mean = numpy.mean(series)
for point in series[-recent_cutoff:]:
abs_diff = abs(point - mean)
if abs_diff >= std_dev * 3 and abs_diff >= 3:
return True
return False
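    # Illustrative example (not part of the original class): for a series of
    # nine 10s followed by a single 40, the mean is 13 and the population
    # standard deviation is 9; the last point is 27 away from the mean, which
    # is >= 3 * 9 and >= 3, so anomaly([10] * 9 + [40], recent_cutoff=1)
    # returns True.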
def is_anomalous(self):
# If we don't have enough data, don't do anything
if len(self.timeseries) < self.recent_cutoff:
return False
return self.anomaly(self.timeseries, self.recent_cutoff)
@classmethod
def large_movement(self, series):
if float(abs(series[0] - series[-1])) / series[0] > 0.03:
return True
return False
def is_large_movement(self):
if self.large_movement(self.large_movement_timeseries):
msg = MOVEMENT_NOTIFICATION % \
(self.name,
len(self.large_movement_timeseries) * self.poll_period / 60,
self.large_movement_timeseries[0],
self.large_movement_timeseries[-1])
self.large_movement_timeseries = deque(
[self.large_movement_timeseries[-1]],
maxlen=self.max_length)
return msg
return None
ANOMALY_NOTIFICATION = \
'''Anomalous bitcoin price activity detected. Mean price over the
past %d minutes is %.2f, current price is %.2f'''
MOVEMENT_NOTIFICATION = \
'''Large %s movement detected. Price %d minutes ago was %.2f,
current price is %.2f'''
class CoinbaseWatcher(object):
POLL_PERIOD = 30
RECENT_DATA = 60 * 5
MAX_LENGTH_MULTIPLE = 12 * 24
COOLDOWN_TICKS = 10
    BTCTICKERPATH = "/tmp/bitcointicker"
ETHTICKERPATH = "/tmp/ethticker"
MSGPATH = "/tmp/bitcoinmsgs"
def __init__(self, stop):
recent_points = self.RECENT_DATA / self.POLL_PERIOD
self.twilio_client = TwilioMessenger()
self.coinbase_client = CoinbaseClient()
self.btc_timeseries = TickerTimeseries(
max_length=recent_points*self.MAX_LENGTH_MULTIPLE,
recent_cutoff=recent_points,
load_path=self.BTCTICKERPATH,
poll_period=self.POLL_PERIOD,
name='Bitcoin')
self.eth_timeseries = TickerTimeseries(
max_length=recent_points*self.MAX_LENGTH_MULTIPLE,
recent_cutoff=recent_points,
load_path=self.ETHTICKERPATH,
poll_period=self.POLL_PERIOD,
name='Ethereum')
self.cooldown = 0
self.stop = stop
self.sent_messages = load_from_file(self.MSGPATH)
if not self.sent_messages:
self.sent_messages = deque(maxlen=3)
@property
def raw_btc_timeseries(self):
return self.btc_timeseries.timeseries
@property
def raw_eth_timeseries(self):
return self.eth_timeseries.timeseries
@property
def in_cooldown(self):
self.cooldown = max(0,self.cooldown - 1)
if self.cooldown <= 0:
return False
return True
def initiate_cooldown(self):
self.cooldown = self.COOLDOWN_TICKS
def start(self):
while not self.stop.is_set():
try:
spot_price = self.coinbase_client.do(
'get_spot_price',currency_pair='BTC-USD')
self.btc_timeseries.append(float(spot_price['amount']))
# coinbase client doesn't actually support currency_pair
rates = self.coinbase_client.do('get_exchange_rates')
self.eth_timeseries.append(1 / float(rates['rates']['ETH']))
if not self.in_cooldown:
msg = self.btc_timeseries.is_large_movement()
if msg:
self.twilio_client.send_message(msg)
self.sent_messages.append((msg, time.time()))
self.initiate_cooldown()
msg = self.eth_timeseries.is_large_movement()
if msg:
self.twilio_client.send_message(msg)
self.sent_messages.append((msg, time.time()))
self.initiate_cooldown()
except Exception:
logging.exception("Exception in main loop")
time.sleep(self.POLL_PERIOD)
store_to_file(self.MSGPATH,self.sent_messages)
store_to_file(self.BTCTICKERPATH,self.btc_timeseries.timeseries)
store_to_file(self.ETHTICKERPATH,self.eth_timeseries.timeseries)
class CoinbaseWatcherModule(JarvisThreadedModule):
def init_module(self, event):
self.coinbase_watcher = CoinbaseWatcher(event)
return Thread(target=self.coinbase_watcher.start)
def get_recent_messages(self):
return [
(msg, self.convert_timestamp(timestamp)) \
for (msg,timestamp) in \
reversed(self.coinbase_watcher.sent_messages)
]
@classmethod
def convert_timestamp(cls, timestamp):
pacific = timezone("US/Pacific-New")
utc = timezone("UTC")
return utc.localize(datetime.utcfromtimestamp(
timestamp)).astimezone(pacific).strftime('%Y-%m-%d %H:%M:%S')
def get_bitcoin_ticker_timeseries(self):
seconds_per_point = self.coinbase_watcher.POLL_PERIOD
now = time.time()
return [
{
'date' : self.convert_timestamp(now-seconds_per_point*i),
'value' : val
} for i,val in enumerate(reversed(
self.coinbase_watcher.raw_btc_timeseries))
][::-1]
def get_eth_ticker_timeseries(self):
seconds_per_point = self.coinbase_watcher.POLL_PERIOD
now = time.time()
return [
{
'date' : self.convert_timestamp(now-seconds_per_point*i),
'value' : val
} for i,val in enumerate(reversed(
self.coinbase_watcher.raw_eth_timeseries))
][::-1]
if __name__ == '__main__':
configure_debug_logging()
watcher = CoinbaseWatcher()
watcher.start()
| kahuang/jarvis | jarvis/modules/coinbase/watcher.py | Python | mit | 8,706 |
#!/usr/bin/env python
def NUMBER(value):
return ("NUMBER", value)
def NAME(value):
return ("NAME", value)
def SYMBOL(value):
return ("SYMBOL", value)
def SEMICOLON():
return ("SEMICOLON", )
def OPENPAREN():
return ("OPENPAREN", )
def CLOSEPAREN():
return ("CLOSEPAREN", )
def OPENBRACKET():
return ("OPENBRACKET", )
def CLOSEBRACKET():
return ("CLOSEBRACKET", )
def ASSIGNMENT():
return ("ASSIGNMENT", )
def EOF():
return ("EOF", )
def FUNCTIONDEF():
return ("FUNCTIONDEF", )
def FUNCTIONRETURN():
return ("FUNCTIONRETURN", )
| rumblesan/diddy-vm | puff/tokens.py | Python | mit | 598 |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.update_loyalty_program import UpdateLoyaltyProgram # noqa: E501
from talon_one.rest import ApiException
class TestUpdateLoyaltyProgram(unittest.TestCase):
"""UpdateLoyaltyProgram unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test UpdateLoyaltyProgram
        include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.update_loyalty_program.UpdateLoyaltyProgram() # noqa: E501
if include_optional :
return UpdateLoyaltyProgram(
title = '0',
description = '0',
subscribed_applications = [
56
],
default_validity = '0',
default_pending = '0',
allow_subledger = True
)
else :
return UpdateLoyaltyProgram(
)
def testUpdateLoyaltyProgram(self):
"""Test UpdateLoyaltyProgram"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| talon-one/talon_one.py | test/test_update_loyalty_program.py | Python | mit | 2,231 |
import os
import re
from . import utils
PARTIAL = re.compile('(?P<tag>{{>\s*(?P<name>.+?)\s*}})')
PARTIAL_CUSTOM = re.compile('^(?P<whitespace>\s*)(?P<tag>{{>\s*(?P<name>.+?)\s*}}(?(1)\r?\n?))', re.M)
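# Both patterns match Mustache partial tags such as "{{> header }}"; the
# second one additionally captures each tag's leading whitespace so that
# multi-line partials can be re-indented before substitution in build().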
# def get_template(path, ext='html', partials=None):
# path = os.path.join(TEMPLATES_DIR, '{}.{}'.format(path, ext))
# with open(path, 'r') as fp:
# template = fp.read()
# return build(template, partials)
def build(template, partials=None):
template = '{}\n'.format(template)
for regex in (PARTIAL_CUSTOM, PARTIAL):
for match in regex.finditer(template):
if partials is None:
substitution = get_template(match.group('name'))
else:
substitution = partials.get(match.group('name'), u'')
if substitution:
try:
substitution = '\n'.join('{}{}'.format(match.group('whitespace'), s) if s else s for s in substitution.split('\n'))
except IndexError:
pass
else:
substitution = substitution[len(match.group('whitespace')):]
template = template.replace(match.group('tag'), substitution)
return utils.purify(template)
| mylokin/mustache | mustache/template.py | Python | mit | 1,242 |
import socket as sk
from kivy.logger import Logger
def getWebsite():
return "www.google.com"
def getIpPort():
sock_info=sk.getaddrinfo(getWebsite(),80,proto=sk.IPPROTO_TCP)
return sock_info[0][-1]
def checkInternet():
sock=sk.socket()
sock.settimeout(1)
try:
sock.connect(getIpPort())
        sock.send(b'GET / HTTP/1.0\r\n\r\n')
resp=sock.recv(8)
sock.shutdown(1)
sock.close()
if(resp==b'HTTP/1.0'):
return True
else:
return False
except Exception as e:
Logger.error(e)
return False
def checkSpeed():
import psutil
import time
init=[psutil.net_io_counters().bytes_sent,psutil.net_io_counters().bytes_recv]
time.sleep(1)
final=[psutil.net_io_counters().bytes_sent,psutil.net_io_counters().bytes_recv]
readings=[(final[0]-init[0]),(final[1]-init[1])]
print(readings)
if readings[0] < 200 or readings[1] < 200:
return False
else:
return True
| udaykrishna/unCap | checks.py | Python | mit | 1,028 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import subprocess
import tempfile
from awscli.customizations.emr import constants
from awscli.customizations.emr import emrutils
from awscli.customizations.emr import sshutils
from awscli.customizations.emr.command import Command
KEY_PAIR_FILE_HELP_TEXT = '\nA value for the variable Key Pair File ' \
'can be set in the AWS CLI config file using the ' \
'"aws configure set emr.key_pair_file <value>" command.\n'
class Socks(Command):
NAME = 'socks'
DESCRIPTION = ('Create a socks tunnel on port 8157 from your machine '
'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to ssh into'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
]
def _run_main_command(self, parsed_args, parsed_globals):
try:
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'ServerAliveInterval=10', '-ND', '8157', '-i',
parsed_args.key_pair_file, constants.SSH_USER +
'@' + master_dns]
else:
command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns, '-N', '-D',
'8157']
print(' '.join(command))
rc = subprocess.call(command)
return rc
except KeyboardInterrupt:
print('Disabling Socks Tunnel.')
return 0
class SSH(Command):
NAME = 'ssh'
DESCRIPTION = ('SSH into master node of the cluster.\n%s' %
KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to ssh into'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'command', 'help_text': 'Command to execute on Master Node'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'ServerAliveInterval=10', '-i',
parsed_args.key_pair_file, constants.SSH_USER +
'@' + master_dns, '-t']
if parsed_args.command:
command.append(parsed_args.command)
else:
command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns, '-t']
if parsed_args.command:
f.write(parsed_args.command)
f.write('\nread -n1 -r -p "Command completed. Press any key."')
command.append('-m')
command.append(f.name)
f.close()
print(' '.join(command))
rc = subprocess.call(command)
os.remove(f.name)
return rc
class Put(Command):
NAME = 'put'
DESCRIPTION = ('Put file onto the master node.\n%s' %
KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to put file onto'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'src', 'required': True,
'help_text': 'Source file path on local machine'},
{'name': 'dest', 'help_text': 'Destination file path on remote host'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
if (emrutils.which('scp') or emrutils.which('scp.exe')):
command = ['scp', '-r', '-o StrictHostKeyChecking=no',
'-i', parsed_args.key_pair_file, parsed_args.src,
constants.SSH_USER + '@' + master_dns]
else:
command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
parsed_args.src, constants.SSH_USER + '@' + master_dns]
# if the instance is not terminated
if parsed_args.dest:
command[-1] = command[-1] + ":" + parsed_args.dest
else:
command[-1] = command[-1] + ":" + parsed_args.src.split('/')[-1]
print(' '.join(command))
rc = subprocess.call(command)
return rc
class Get(Command):
NAME = 'get'
DESCRIPTION = ('Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to get file from'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'src', 'required': True,
'help_text': 'Source file path on remote host'},
{'name': 'dest', 'help_text': 'Destination file path on your machine'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
if (emrutils.which('scp') or emrutils.which('scp.exe')):
command = ['scp', '-r', '-o StrictHostKeyChecking=no', '-i',
parsed_args.key_pair_file, constants.SSH_USER + '@' +
master_dns + ':' + parsed_args.src]
else:
command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns + ':' +
parsed_args.src]
if parsed_args.dest:
command.append(parsed_args.dest)
else:
command.append(parsed_args.src.split('/')[-1])
print(' '.join(command))
rc = subprocess.call(command)
return rc
| mnahm5/django-estore | Lib/site-packages/awscli/customizations/emr/ssh.py | Python | mit | 7,731 |
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os.path
import sqlite3
import mock
import pytest
import six
from pre_commit.store import _get_default_directory
from pre_commit.store import Store
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from pre_commit.util import rmtree
from testing.fixtures import git_dir
from testing.util import get_head_sha
def test_our_session_fixture_works():
"""There's a session fixture which makes `Store` invariantly raise to
prevent writing to the home directory.
"""
with pytest.raises(AssertionError):
Store()
def test_get_default_directory_defaults_to_home():
    # Note: we use the module-level one, which is not mocked
ret = _get_default_directory()
assert ret == os.path.join(os.path.expanduser('~/.cache'), 'pre-commit')
def test_adheres_to_xdg_specification():
with mock.patch.dict(
os.environ, {'XDG_CACHE_HOME': '/tmp/fakehome'},
):
ret = _get_default_directory()
assert ret == os.path.join('/tmp/fakehome', 'pre-commit')
def test_uses_environment_variable_when_present():
with mock.patch.dict(
os.environ, {'PRE_COMMIT_HOME': '/tmp/pre_commit_home'},
):
ret = _get_default_directory()
assert ret == '/tmp/pre_commit_home'
def test_store_require_created(store):
assert not os.path.exists(store.directory)
store.require_created()
# Should create the store directory
assert os.path.exists(store.directory)
# Should create a README file indicating what the directory is about
with io.open(os.path.join(store.directory, 'README')) as readme_file:
readme_contents = readme_file.read()
for text_line in (
'This directory is maintained by the pre-commit project.',
'Learn more: https://github.com/pre-commit/pre-commit',
):
assert text_line in readme_contents
def test_store_require_created_does_not_create_twice(store):
assert not os.path.exists(store.directory)
store.require_created()
# We intentionally delete the directory here so we can figure out if it
# calls it again.
rmtree(store.directory)
assert not os.path.exists(store.directory)
# Call require_created, this should not trigger a call to create
store.require_created()
assert not os.path.exists(store.directory)
def test_does_not_recreate_if_directory_already_exists(store):
assert not os.path.exists(store.directory)
# We manually create the directory.
# Note: we're intentionally leaving out the README file. This is so we can
# know that `Store` didn't call create
os.mkdir(store.directory)
open(store.db_path, 'a').close()
# Call require_created, this should not call create
store.require_created()
assert not os.path.exists(os.path.join(store.directory, 'README'))
def test_clone(store, tempdir_factory, log_info_mock):
path = git_dir(tempdir_factory)
with cwd(path):
cmd_output('git', 'commit', '--allow-empty', '-m', 'foo')
sha = get_head_sha(path)
cmd_output('git', 'commit', '--allow-empty', '-m', 'bar')
ret = store.clone(path, sha)
# Should have printed some stuff
assert log_info_mock.call_args_list[0][0][0].startswith(
'Initializing environment for ',
)
# Should return a directory inside of the store
assert os.path.exists(ret)
assert ret.startswith(store.directory)
# Directory should start with `repo`
_, dirname = os.path.split(ret)
assert dirname.startswith('repo')
# Should be checked out to the sha we specified
assert get_head_sha(ret) == sha
# Assert there's an entry in the sqlite db for this
with sqlite3.connect(store.db_path) as db:
path, = db.execute(
'SELECT path from repos WHERE repo = ? and ref = ?',
[path, sha],
).fetchone()
assert path == ret
def test_clone_cleans_up_on_checkout_failure(store):
try:
# This raises an exception because you can't clone something that
# doesn't exist!
store.clone('/i_dont_exist_lol', 'fake_sha')
except Exception as e:
assert '/i_dont_exist_lol' in six.text_type(e)
things_starting_with_repo = [
thing for thing in os.listdir(store.directory)
if thing.startswith('repo')
]
assert things_starting_with_repo == []
def test_clone_when_repo_already_exists(store):
# Create an entry in the sqlite db that makes it look like the repo has
# been cloned.
store.require_created()
with sqlite3.connect(store.db_path) as db:
db.execute(
'INSERT INTO repos (repo, ref, path) '
'VALUES ("fake_repo", "fake_ref", "fake_path")',
)
assert store.clone('fake_repo', 'fake_ref') == 'fake_path'
def test_require_created_when_directory_exists_but_not_db(store):
# In versions <= 0.3.5, there was no sqlite db causing a need for
# backward compatibility
os.makedirs(store.directory)
store.require_created()
assert os.path.exists(store.db_path)
| Lucas-C/pre-commit | tests/store_test.py | Python | mit | 5,138 |
# -*- coding: utf-8 -*-
import sys, logging
import numpy as np
from math import ceil
from gseapy.stats import multiple_testing_correction
from joblib import delayed, Parallel
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
nperm=1000, seed=None, single=False, scale=False):
"""This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.
:param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
:param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: It is the same as GSEA's weighted_score method. Weighting by the correlation
is a very reasonable choice that allows significant gene sets with less than perfect coherence.
options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of
coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
might be appropriate. On the other hand, if one uses sets with large number of genes and only
a small subset of those is expected to be coherent, then one could consider using p > 1.
Our recommendation is to use p = 1 and use other settings only if you are very experienced
with the method and its behavior.
:param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
the gene list. Or rankings, rank_metric.values
:param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
equal to the permutation number.
:param seed: Random state for initializing gene list shuffling. Default: seed=None
:return:
ES: Enrichment score (real number between -1 and +1)
ESNULL: Enrichment score calculated from random permutations.
Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
RES: Numerical vector containing the running enrichment score for all locations in the gene list .
"""
N = len(gene_list)
# Test whether each element of a 1-D array is also present in a second array
    # It's more intuitive here than the original enrichment_score source code.
    # use .astype to convert bool to integer
tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int) # notice that the sign is 0 (no tag) or 1 (tag)
if weighted_score_type == 0 :
correl_vector = np.repeat(1, N)
else:
correl_vector = np.abs(correl_vector)**weighted_score_type
# get indices of tag_indicator
hit_ind = np.flatnonzero(tag_indicator).tolist()
# if used for compute esnull, set esnull equal to permutation number, e.g. 1000
# else just compute enrichment scores
# set axis to 1, because we have 2D array
axis = 1
tag_indicator = np.tile(tag_indicator, (nperm+1,1))
correl_vector = np.tile(correl_vector,(nperm+1,1))
# gene list permutation
rs = np.random.RandomState(seed)
for i in range(nperm): rs.shuffle(tag_indicator[i])
# np.apply_along_axis(rs.shuffle, 1, tag_indicator)
Nhint = tag_indicator.sum(axis=axis, keepdims=True)
sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
# compute ES score, the code below is identical to gsea enrichment_score method.
no_tag_indicator = 1 - tag_indicator
Nmiss = N - Nhint
norm_tag = 1.0/sum_correl_tag
norm_no_tag = 1.0/Nmiss
RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)
if scale: RES = RES / N
if single:
es_vec = RES.sum(axis=axis)
else:
max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
# extract values
es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]
return es, esnull, hit_ind, RES
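# A minimal usage sketch of enrichment_score on toy data. The gene names,
# ranking values and the two-gene set below are illustrative assumptions only,
# not taken from any real dataset; wrapping the call in a helper keeps module
# import free of side effects.
def _example_enrichment_score():
    gene_list = np.array(["TP53", "EGFR", "MYC", "BRCA1", "KRAS"])
    correl_vector = np.array([2.0, 1.5, 1.0, 0.5, 0.1])
    gene_set = ["EGFR", "BRCA1"]
    es, esnull, hit_ind, run_es = enrichment_score(
        gene_list, correl_vector, gene_set,
        weighted_score_type=1, nperm=100, seed=0)
    # es is the observed enrichment score, esnull the 100 permutation scores,
    # hit_ind the positions of EGFR and BRCA1 in gene_list.
    return es, esnull, hit_ind, run_es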
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
seed=None, single=False, scale=False):
"""Next generation algorithm of GSEA and ssGSEA. Works for 3d array
:param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
:param bool scale: If True, normalize the scores by number of genes_mat.
:param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
    :param seed: Random state for initializing gene list shuffling.
Default: seed=None
:return: a tuple contains::
| ES: Enrichment score (real number between -1 and +1), for ssGSEA, set scale eq to True.
| ESNULL: Enrichment score calculated from random permutation.
| Hits_Indices: Indices of genes if genes are included in gene_set.
| RES: The running enrichment score for all locations in the gene list.
"""
rs = np.random.RandomState(seed)
# gene_mat -> 1d: prerank, ssSSEA or 2d: GSEA
keys = sorted(gene_sets.keys())
if weighted_score_type == 0:
        # don't bother doing the calculation, just set to 1
cor_mat = np.ones(cor_mat.shape)
elif weighted_score_type > 0:
pass
else:
logging.error("Using negative values of weighted_score_type, not allowed")
raise ValueError("weighted_score_type should be postive numerics")
cor_mat = np.abs(cor_mat)
if cor_mat.ndim ==1:
# ssGSEA or Prerank
        # genesets->M, genes->N, perm-> axis=2
N, M = len(gene_mat), len(keys)
# generate gene hits matrix
# for 1d ndarray of gene_mat, set assume_unique=True,
# means the input arrays are both assumed to be unique,
# which can speed up the calculation.
tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
# index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
# generate permutated hits matrix
perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
# shuffle matrix, last matrix is not shuffled when nperm > 0
if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
# missing hits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
elif cor_mat.ndim == 2:
# GSEA
# 2d ndarray, gene_mat and cor_mat are shuffled already
# reshape matrix
cor_mat = cor_mat.T
# gene_mat is a tuple contains (gene_name, permuate_gene_name_indices)
genes, genes_ind = gene_mat
        # genesets->M, genes->N, perm-> axis=2
        # don't use assume_unique=True with np.isin() on the 2d array:
        # elements in gene_mat are not unique there and would give unwanted results
tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)
#index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
# nohits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
else:
logging.error("Program die because of unsupported input")
raise ValueError("Correlation vector or matrix (cor_mat) is not supported")
# Nhint = tag_indicator.sum(1)
# Nmiss = N - Nhint
axis=1
P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
# ssGSEA: scale es by gene numbers ?
# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
if scale: REStensor = REStensor / len(gene_mat)
if single:
#ssGSEA
esmatrix = REStensor.sum(axis=axis)
else:
#GSEA
esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)
es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]
return es, esnull, hit_ind, RES
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, seed=None, skip_last=False):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
Works for 3d array.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n).
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times the classes are shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:param seed: random_state seed
:param bool skip_last: (internal use only) whether to skip the permutation of the last rankings.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
| cor_mat: sorted and permutated (exclude last row) ranking matrix.
"""
rs = np.random.RandomState(seed)
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num,1,1))
if skip_last:
# random shuffle on the first dim, the last matrix (expr_mat) is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
else:
for arr in perm_cor_tensor: rs.shuffle(arr)
# metrics
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
n_pos = np.sum(pos)
n_neg = np.sum(neg)
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method in ['signal_to_noise', 's2n']:
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method in ['abs_signal_to_noise', 'abs_s2n']:
cor_mat = np.abs((pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std))
elif method == 't_test':
denom = np.sqrt((pos_cor_std**2)/n_pos + (neg_cor_std**2)/n_neg)
cor_mat = (pos_cor_mean - neg_cor_mean)/ denom
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
raise LookupError("Input method: %s is not supported"%method)
    # return matrix[nperm+1, perm_cors]
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table. works for 2d array.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n)
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param dict classes: column id to group mapping.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
n_pos = np.sum(classes == pos)
n_neg = np.sum(classes == neg)
if method in ['signal_to_noise', 's2n']:
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method in ['abs_signal_to_noise', 'abs_s2n']:
ser = ((df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])).abs()
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/n_pos+df_std[neg]**2/n_neg)
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
raise LookupError("Input method: %s is not supported"%method)
ser = ser.sort_values(ascending=ascending)
return ser
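# A small illustrative check of ranking_metric with the 'signal_to_noise'
# method. The expression values, sample ids and phenotype labels are made-up
# assumptions; pandas is imported locally so the sketch stays self-contained.
def _example_ranking_metric():
    import pandas as pd
    df = pd.DataFrame(
        [[5.0, 6.0, 7.0, 1.0, 2.0, 3.0],
         [2.0, 2.5, 3.0, 4.0, 5.0, 6.0]],
        index=["geneA", "geneB"],
        columns=["s1", "s2", "s3", "s4", "s5", "s6"])
    classes = ["tumor", "tumor", "tumor", "normal", "normal", "normal"]
    # geneA ranks first (higher in 'tumor'), geneB gets a negative score
    return ranking_metric(df, method="signal_to_noise", pos="tumor",
                          neg="normal", classes=classes, ascending=False)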
def gsea_compute_tensor(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
    This function will split large arrays into smaller pieces to avoid memory overflow.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
genes_mat, cor_mat = data.index.values, data.values
base = 5 if data.shape[0] >= 5000 else 10
## phenotype permutation
    np.random.seed(seed) # control the random numbers
if permutation_type == "phenotype":
# shuffling classes and generate random correlation rankings
logging.debug("Start to permutate classes..............................")
        if (n + 1) % base == 0: # n+1: last permute is for original ES calculation
num_bases = [ base ] * ((n + 1) // base)
skip_last = [0] * ( n // base) + [1] # last is not permuted
else:
num_bases = [ base ] * ((n + 1) // base) + [ (n +1) % base]
skip_last = [0] * ((n + 1) // base) + [ (n +1) % base]
random_seeds = np.random.randint(np.iinfo(np.int32).max, size=len(num_bases))
genes_ind = []
cor_mat = []
# split permutation array into smaller blocks to save memory
temp_rnk = Parallel(n_jobs=processes, require='sharedmem')(delayed(ranking_metric_tensor)(
data, method, b, pheno_pos, pheno_neg, classes, ascending, se, skip)
for b, skip, se in zip(num_bases, skip_last, random_seeds))
for k, temp in enumerate(temp_rnk):
gi, cor = temp
genes_ind.append(gi)
cor_mat.append(cor)
genes_ind, cor_mat = np.vstack(genes_ind), np.vstack(cor_mat)
# convert to tuple
genes_mat = (data.index.values, genes_ind)
logging.debug("Start to compute es and esnulls........................")
# Prerank, ssGSEA, GSEA
es = []
RES = []
hit_ind = []
esnull = []
temp_esnu = []
# split gmt dataset, too
block = ceil(len(subsets) / base)
random_seeds = np.random.randint(np.iinfo(np.int32).max, size=block)
# split large array into smaller blocks to avoid memory overflow
i, m = 1, 0
gmt_block = []
while i <= block:
# you have to reseed, or all your processes are sharing the same seed value
rs = random_seeds[i-1]
gmtrim = {k: gmt.get(k) for k in subsets[m:base * i]}
gmt_block.append(gmtrim)
m = base * i
i += 1
## if permutation_type == "phenotype": n = 0
## NOTE for GSEA: cor_mat is 2d array, it won't permute again when call enrichment_score_tensor
temp_esnu = Parallel(n_jobs=processes, require='sharedmem')(delayed(enrichment_score_tensor)(
genes_mat, cor_mat, gmtrim, w, n, rs, single, scale)
for gmtrim, rs in zip(gmt_block, random_seeds))
# esn is a list, don't need to use append method.
for si, temp in enumerate(temp_esnu):
# e, enu, hit, rune = temp.get()
e, enu, hit, rune = temp
esnull.append(enu)
es.append(e)
RES.append(rune)
hit_ind += hit
    # concatenate results
es, esnull, RES = np.hstack(es), np.vstack(esnull), np.vstack(RES)
return gsea_significance(es, esnull), hit_ind, RES, subsets
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
es = []
RES=[]
hit_ind=[]
esnull = [ [] for a in range(len(subsets)) ]
    np.random.seed(seed) # control the random numbers
logging.debug("Start to compute enrichment scores......................")
if permutation_type == "phenotype":
logging.debug("Start to permutate classes..............................")
        # this version won't split large arrays into smaller ones
genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
permutation_num=n+1,
pos=pheno_pos, neg=pheno_neg,
classes=classes,
ascending=ascending, seed=seed,
skip_last=True)
# compute es, esnulls. hits, RES
logging.debug("Start to compute enrichment nulls.......................")
es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
cor_mat=cor_mat,
gene_sets=gmt,
weighted_score_type=w,
nperm=n, seed=seed,
single=False, scale=False,)
else:
# Prerank, ssGSEA, GSEA with gene_set permutation
gl, cor_vec = data.index.values, data.values
logging.debug("Start to compute es and esnulls........................")
        ## this version doesn't split large arrays into smaller ones
# es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,
# cor_mat=cor_vec,
# gene_sets=gmt,
# weighted_score_type=w,
# nperm=n, rs=rs
# single=single, scale=scale)
temp_esnu=[]
# you have to reseed, or all your processes are sharing the same seed value
# np.random.seed(seed)
random_seeds= np.random.randint(np.iinfo(np.int32).max, size=len(subsets))
temp_esnu = Parallel(n_jobs=processes, require='sharedmem')(delayed(enrichment_score)(
gl, cor_vec, gmt.get(subset), w, n,
rs, single, scale)
for subset, rs in zip(subsets, random_seeds))
# esn is a list, don't need to use append method.
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp
esnull[si] = enu
es.append(e)
RES.append(rune)
hit_ind.append(hit)
return gsea_significance(es, esnull), hit_ind, RES, subsets
def normalize(es, esnull):
"""normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
"""
nEnrichmentScores =np.zeros(es.shape)
nEnrichmentNulls=np.zeros(esnull.shape)
# esnullmean = np.zeros(es.shape)
# # calculate nESnulls
# for i in range(esnull.shape[0]):
# # NES
# enrNull = esnull[i]
# if es[i] >= 0:
# mes = enrNull[enrNull >= 0].mean()
# nEnrichmentScores[i] = es[i] / mes
# else:
# mes = enrNull[enrNull < 0 ].mean()
# nEnrichmentScores[i] = - es[i] / mes
# esnullmean[i] = mes
# # NESnull
# for j in range(esnull.shape[1]):
# if esnull[i,j] >= 0:
# nEnrichmentNulls[i,j] = esnull[i,j] / esnullmean[i]
# else:
# nEnrichmentNulls[i,j] = - esnull[i,j] / esnullmean[i]
esnull_pos = np.ma.MaskedArray(esnull, mask=(esnull<0)).mean(axis=1)
esnull_neg = np.ma.MaskedArray(esnull, mask=(esnull>=0)).mean(axis=1)
esnull_pos = np.array(esnull_pos)
esnull_neg = np.array(esnull_neg)
# NES
nEnrichmentScores = np.where(es>=0, es/esnull_pos, -es/esnull_neg)
# NES_NULL
nEnrichmentNulls = np.where(esnull>=0, esnull/esnull_pos[:,np.newaxis],
-esnull/esnull_neg[:,np.newaxis])
return nEnrichmentScores, nEnrichmentNulls
def gsea_pval(es, esnull):
"""Compute nominal p-value.
From article (PNAS):
estimate nominal p-value for S from esnull by using the positive
or negative portion of the distribution corresponding to the sign
of the observed ES(S).
"""
# to speed up, using numpy function to compute pval in parallel.
condlist = [ es < 0, es >=0]
choicelist = [(esnull < es.reshape(len(es),1)).sum(axis=1)/ (esnull < 0).sum(axis=1),
(esnull >= es.reshape(len(es),1)).sum(axis=1)/ (esnull >= 0).sum(axis=1)]
pvals = np.select(condlist, choicelist)
return pvals
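# A tiny worked example of the nominal p-value rule above: for a positive
# observed ES the p-value is the fraction of positive null scores reaching it,
# and symmetrically for a negative ES. The numbers are arbitrary illustrative
# assumptions; here the result is [1/3, 2/3].
def _example_gsea_pval():
    es = np.array([0.6, -0.4])
    esnull = np.array([[0.1, 0.5, 0.7, -0.2, -0.6],
                       [0.2, 0.3, -0.1, -0.5, -0.45]])
    return gsea_pval(es, esnull)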
def gsea_fdr(nEnrichmentScores, nEnrichmentNulls):
"""Create a histogram of all NES(S,pi) over all S and pi.
Use this null distribution to compute an FDR q value.
:param nEnrichmentScores: normalized ES
:param nEnrichmentNulls: normalized ESnulls
:return: FDR
"""
# FDR null distribution histogram
# vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])
# nvals = np.array(sorted(vals))
# or
nvals = np.sort(nEnrichmentNulls.flatten())
nnes = np.sort(nEnrichmentScores)
fdrs = []
# FDR computation
for i in range(len(nEnrichmentScores)):
nes = nEnrichmentScores[i]
# use the same pval method to calculate fdr
if nes >= 0:
allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes, side="left"))
nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes, side="left"))
# allPos = (nvals >= 0).sum()
# allHigherAndPos = (nvals >= nes).sum()
# nesPos = (nnes >=0).sum()
# nesHigherAndPos = (nnes >= nes).sum()
else:
allPos = int(np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
nesPos = int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
# allPos = (nvals < 0).sum()
# allHigherAndPos = (nvals < nes).sum()
# nesPos = (nnes < 0).sum()
# nesHigherAndPos = (nnes < nes).sum()
try:
pi_norm = allHigherAndPos / float(allPos)
pi_obs = nesHigherAndPos / float(nesPos)
fdr = pi_norm / pi_obs
fdrs.append(fdr if fdr < 1 else 1.0)
except:
fdrs.append(1000000000.0)
logging.debug("Statistical testing finished.............................")
return fdrs
def gsea_significance(enrichment_scores, enrichment_nulls):
"""Compute nominal pvals, normalized ES, and FDR q value.
        For a given NES(S) = NES* >= 0, the FDR is the ratio of the percentage of all (S,pi) with
        NES(S,pi) >= 0, whose NES(S,pi) >= NES*, divided by the percentage of
        observed S with NES(S) >= 0, whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.
"""
# For a zero by zero division (undetermined, results in a NaN),
np.seterr(divide='ignore', invalid='ignore')
# import warnings
# warnings.simplefilter("ignore")
es = np.array(enrichment_scores)
esnull = np.array(enrichment_nulls)
logging.debug("Start to compute pvals..................................")
# P-values.
pvals = gsea_pval(es, esnull).tolist()
logging.debug("Start to compute nes and nesnull........................")
# NES
nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)
logging.debug("Start to compute fdrs..................................")
# FDR
fdrs = gsea_fdr(nEnrichmentScores, nEnrichmentNulls)
    # TODO: use multiple testing correction for ssGSEA? ssGSEA2.0 uses BH correction.
# https://github.com/broadinstitute/ssGSEA2.0/blob/master/src/ssGSEA2.0.R
# line 969
# fdrs, _ = multiple_testing_correction(pvals, alpha=0.05)
return zip(enrichment_scores, nEnrichmentScores, pvals, fdrs)
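# Hedged end-to-end sketch of the significance step on invented scores: two
# observed enrichment scores are tested against 100 random null scores each,
# yielding (es, nes, pval, fdr) tuples in the order of the input list.
def _example_gsea_significance():
    rs = np.random.RandomState(0)
    es = [0.55, -0.35]
    esnull = rs.uniform(-1, 1, size=(2, 100)).tolist()
    return list(gsea_significance(es, esnull))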
| BioNinja/gseapy | gseapy/algorithm.py | Python | mit | 31,007 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
from collections import (
defaultdict as dd,
OrderedDict as od
)
from math import log
import datetime
from flask import (
Flask,
render_template,
g,
request,
redirect,
url_for,
send_from_directory,
flash,
jsonify,
make_response,
Markup,
Response
)
from flask_login import (
login_required,
login_user,
logout_user,
current_user
)
from packaging.version import Version
import gwadoc
import networkx as nx
## profiler
#from werkzeug.contrib.profiler import ProfilerMiddleware
from omw.utils.utils import fetch_sorted_meta_by_version
app = Flask(__name__)
# Common configuration settings go here
app.config['REMEMBER_COOKIE_DURATION'] = datetime.timedelta(minutes=30)
# Installation-specific settings go in omw_config.py
app.config.from_object('config')
# Load these only after creating and configuring the app object
from .common_login import *
from .common_sql import *
from .omw_sql import *
from .wn_syntax import *
import omw.cli
## profiler
#app.config['PROFILE'] = True
#app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
#app.run(debug = True)
################################################################################
# LOGIN
################################################################################
login_manager.init_app(app)
@app.route("/login", methods=["GET", "POST"])
def login():
""" This login function checks if the username & password
match the admin.db; if the authentication is successful,
it passes the id of the user into login_user() """
if request.method == "POST" and \
"username" in request.form and \
"password" in request.form:
username = request.form["username"]
password = request.form["password"]
user = User.get(username)
# If we found a user based on username then compare that the submitted
# password matches the password in the database. The password is stored
        # in a salted hash format, so you must hash the password before comparing it.
if user and hash_pass(password) == user.password:
login_user(user, remember=True)
# FIXME! Get this to work properly...
# return redirect(request.args.get("next") or url_for("index"))
return redirect(url_for("index"))
else:
flash(u"Invalid username, please try again.")
return render_template("login.html")
@app.route("/logout")
@login_required(role=0, group='open')
def logout():
logout_user()
return redirect(url_for("index"))
################################################################################
################################################################################
# SET UP CONNECTION WITH DATABASES
################################################################################
@app.before_request
def connect_dbs():
connect_admin()
connect_omw()
@app.teardown_appcontext
def teardown_dbs(exception):
db = g.pop('admin', None)
if db is not None:
db.close()
db = g.pop('omw', None)
if db is not None:
db.close()
################################################################################
# helper functions
################################################################################
def _get_cookie(name, default):
if name in request.cookies:
return request.cookies.get(name)
else:
return default
################################################################################
# AJAX REQUESTS
################################################################################
@app.route('/_thumb_up_id')
def thumb_up_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = 1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="Who voted up: {}">+{}</span><br>
<span style="color:red" title="Who voted down: {}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_thumb_down_id')
def thumb_down_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = -1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="Who voted up: {}">+{}</span><br>
<span style="color:red" title="Who voted down: {}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_comment_id')
def comment_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
comment = request.args.get('comment', None)
comment = str(Markup.escape(comment))
dbinsert = comment_ili_id(ili_id, comment, user)
return jsonify(result=dbinsert)
@app.route('/_detailed_id')
def detailed_id():
ili_id = request.args.get('ili_id', None)
rate_hist = fetch_rate_id([ili_id])
comm_hist = fetch_comment_id([ili_id])
users = fetch_allusers()
r_html = ""
for r, u, t in rate_hist[int(ili_id)]:
r_html += '{} ({} — {}): {} <br>'.format(
users[u]['full_name'], users[u]['userID'], t, r)
c_html = ""
for c, u, t in comm_hist[int(ili_id)]:
c_html += '{} ({} — {}): {} <br>'.format(
users[u]['full_name'], users[u]['userID'], t, c)
html = """
<td colspan="9">
<div style="width: 49%; float:left;">
<h6>Ratings</h6>
{}</div>
<div style="width: 49%; float:right;">
<h6>Comments</h6>
{}</div>
</td>""".format(r_html, c_html)
return jsonify(result=html)
@app.route('/_confirm_wn_upload')
def confirm_wn_upload_id():
"""
Ingest the uploaded wordnet into the database and return a report.
This happens when the user has confirmed they want to add a
validated wordnet.
"""
user = fetch_id_from_userid(current_user.id)
fn = request.args.get('fn', None)
report = ingest_wordnet(fn, user)
updateLabels()
return jsonify(result=report)
@app.route('/_add_new_project')
def add_new_project():
user = fetch_id_from_userid(current_user.id)
proj = request.args.get('proj_code', None)
proj = str(Markup.escape(proj))
if user and proj:
dbinsert = insert_new_project(proj, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route("/_load_lang_selector",methods=["GET"])
def omw_lang_selector():
selected_lang = int(_get_cookie('selected_lang', 1))
    selected_lang2 = int(_get_cookie('selected_lang2', 1))
lang_id, lang_code = fetch_langs()
html = '<select name="lang" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang == lid:
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
html += '<select name="lang2" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang2 == lid:
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
return jsonify(result=html)
@app.route('/_add_new_language')
def add_new_language():
user = fetch_id_from_userid(current_user.id)
bcp = request.args.get('bcp', None)
bcp = str(Markup.escape(bcp))
iso = request.args.get('iso', None)
iso = str(Markup.escape(iso))
name = request.args.get('name', None)
name = str(Markup.escape(name))
if bcp and name:
dbinsert = insert_new_language(bcp, iso, name, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route('/_load_proj_details')
def load_proj_details():
proj_id = request.args.get('proj', 0)
if proj_id:
proj_id = int(proj_id)
else:
proj_id = None
projs = fetch_proj()
srcs = fetch_src()
srcs_meta = fetch_src_meta()
html = str()
if proj_id:
i = 0
for src_id in srcs.keys():
if srcs[src_id][0] == projs[proj_id]:
i += 1
html += "<br><p><b>Source {}: {}-{}</b></p>".format(i,
projs[proj_id],srcs[src_id][1])
for attr, val in srcs_meta[src_id].items():
html += "<p style='margin-left: 40px'>"
html += attr + ": " + val
html += "</p>"
return jsonify(result=html)
@app.route('/_load_min_omw_concept/<ss>')
@app.route('/_load_min_omw_concept_ili/<ili_id>')
def min_omw_concepts(ss=None, ili_id=None):
if ili_id:
ss_ids = f_ss_id_by_ili_id(ili_id)
else:
ss_ids = [ss]
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
ssrels = fetch_ssrel()
selected_lang = int(_get_cookie('selected_lang', 1))
labels = fetch_labels( selected_lang, set(senses.keys()))
return jsonify(result=render_template('min_omw_concept.html',
pos = pos,
langs = langs_id,
senses=senses,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes,
labels=labels))
@app.route('/_load_min_omw_sense/<sID>')
def min_omw_sense(sID=None):
if sID:
s_id=int(sID)
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
sense = fetch_sense(s_id)
forms=fetch_forms(sense[3])
selected_lang = int(_get_cookie('selected_lang', 1))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
sdefs = fetch_defs_by_sense([s_id])
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## a language
if not sdef:
sdef="no definition"
# return jsonify(result=render_template('omw_sense.html',
return jsonify(result=render_template('min_omw_sense.html',
s_id = s_id,
sdef=sdef,
sense = sense,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
src_sid = src_sid,
src_meta = src_meta))
# l=lambda:dd(l)
# vr = l() # wn-lmf validation report
# @app.route('/_report_val1')
# def report_val1():
# filename = request.args.get('fn', None)
# if filename:
# vr1 = val1_DTD(current_user, filename)
# vr.update(vr1)
# if vr1['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
@app.route('/_report_val2', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report_val2():
filename = request.args.get('fn', None)
vr, filename, wn, wn_dtls = validateFile(current_user.id, filename)
return jsonify(result=render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls, filename=filename))
# validateFile()
# filename = request.args.get('fn', None)
# if filename:
# vr = val1_DTD(current_user, filename)
# if vr['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
# return jsonify(result="TEST_VAL2")
################################################################################
################################################################################
# VIEWS
################################################################################
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/ili', methods=['GET', 'POST'])
def ili_welcome(name=None):
return render_template('ili_welcome.html')
@app.route('/omw', methods=['GET', 'POST'])
def omw_welcome(name=None):
projects = request.args.get('projects','current')
lang_id, lang_code = fetch_langs()
src_meta=fetch_src_meta()
### sort by language, project version (Newest first)
src_meta_sorted = fetch_sorted_meta_by_version(projects, src_meta, lang_id, lang_code)
return render_template('omw_welcome.html',
src_meta=src_meta_sorted,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route('/wordnet', methods=['GET', 'POST'])
def wordnet_license(name=None):
return render_template('wordnet_license.html')
@app.route('/omw_wns', methods=['GET', 'POST'])
def omw_wns(name=None):
projects = request.args.get('projects','current')
src_meta=fetch_src_meta()
stats = []
lang_id, lang_code = fetch_langs()
### sort by language name (1), id, version (FIXME -- reverse version)
src_sort=od()
keys=list(src_meta.keys())
keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
keys.sort(key=lambda x: src_meta[x]['id']) #id
keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
for k in keys:
if projects=='current': # only get the latest version
if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
if src_meta[i]['id'] == src_meta[k]['id']),
key=lambda x: Version(x)):
continue
stats.append((src_meta[k], fetch_src_id_stats(k)))
return render_template('omw_wns.html',
stats=stats,
src_meta=src_meta,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route('/omw_stats', methods=['GET', 'POST'])
def omw_stats():
"""
statistics about wordnet as a big graph
"""
### get language
selected_lang = int(_get_cookie('selected_lang', 1))
### get hypernym graph
hypernym_dict=fetch_graph()
G = nx.DiGraph(hypernym_dict, name='OMW')
info = nx.info(G).splitlines()
cycles = list(nx.simple_cycles(G))
### get the synsets we need to label
sss = []
for c in cycles:
for ss in c:
sss.append(ss)
label = fetch_labels(selected_lang, sss)
return render_template('omw_stats.html',
info=info,
cycles=cycles,
label=label,
gwadoc=gwadoc)
@app.route("/useradmin",methods=["GET"])
@login_required(role=99, group='admin')
def useradmin():
users = fetch_allusers()
return render_template("useradmin.html", users=users)
@app.route("/langadmin",methods=["GET"])
@login_required(role=99, group='admin')
def langadmin():
lang_id, lang_code = fetch_langs()
return render_template("langadmin.html", langs=lang_id)
@app.route("/projectadmin",methods=["GET"])
@login_required(role=99, group='admin')
def projectadmin():
projs = fetch_proj()
srcs = fetch_src()
srcs_by_proj = dd(list)
for src_id in srcs: # should be in the right order, as versions must go up
srcs_by_proj[srcs[src_id][0]].append((srcs[src_id][1], src_id))
srcs_meta = fetch_src_meta()
return render_template("projectadmin.html",
projs=projs,
srcs_by_proj=srcs_by_proj,
srcs_meta=srcs_meta)
@app.route('/allconcepts', methods=['GET', 'POST'])
def allconcepts():
ili, ili_defs = fetch_ili()
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/temporary', methods=['GET', 'POST'])
def temporary():
ili = fetch_ili_status(2)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/deprecated', methods=['GET', 'POST'])
def deprecated():
ili = fetch_ili_status(0)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/concepts/<c>', methods=['GET', 'POST'])
def concepts_ili(c=None):
c = c.split(',')
ili, ili_defs = fetch_ili(c)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/search', methods=['GET', 'POST'])
@app.route('/ili/search/<q>', methods=['GET', 'POST'])
def search_ili(q=None):
if q:
query = q
else:
query = request.form['query']
src_id = fetch_src()
kind_id = fetch_kind()
status_id = fetch_status()
ili = dict()
for c in query_omw("""SELECT * FROM ili WHERE def GLOB ?
""", [query]):
ili[c['id']] = (kind_id[c['kind_id']], c['def'],
src_id[c['origin_src_id']], c['src_key'],
status_id[c['status_id']], c['superseded_by_id'],
c['t'])
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/upload', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def upload():
return render_template('upload.html')
@app.route('/join', methods=['GET', 'POST'])
def join():
return render_template('join.html')
@app.route('/omw/uploads/<filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
@app.route('/ili/validation-report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def validationReport():
vr, filename, wn, wn_dtls = validateFile(current_user.id)
return render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls,
filename=filename)
@app.route('/ili/report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report():
inputfile = request.files.get('file')
inputurl = request.form.get('url')
if inputfile:
thing, ftype = inputfile, 'webfile'
elif inputurl:
        thing, ftype = inputurl, 'url'
else:
thing, ftype = None, None
passed, filename = uploadFile(current_user.id, thing, ftype)
return render_template('report.html',
passed=passed,
filename=filename)
# return render_template('report.html')
@app.route('/omw/search', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>/<q>', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>,<lang2>/<q>', methods=['GET', 'POST'])
def search_omw(lang=None, lang2=None, q=None):
lang_dct, lang_code = fetch_langs()
if lang and q:
lang_id = int(lang_code['code'][lang])
if not lang2:
lang2 = lang
lang_id2 = int(lang_code['code'][lang2])
query = q
else:
lang_id = request.form['lang']
lang_id2 = request.form['lang2']
query = request.form['query']
query = query.strip()
sense = od()
lang_sense = dd(lambda: dd(list))
    if query[0].isalpha(): ### search for initial character of both cases
if query[0].upper() != query[0].lower():
query = '['+query[0].upper() + query[0].lower()+']'+query[1:]
# GO FROM FORM TO SENSE, order results by pos
for s in query_omw("""
SELECT s.id as s_id, ss_id, wid, fid, lang_id, pos_id, lemma
FROM (SELECT w_id as wid, form.id as fid, lang_id, pos_id, lemma
FROM (SELECT id, lang_id, pos_id, lemma
FROM f WHERE lemma GLOB ? AND lang_id in (?,?)) as form
JOIN wf_link ON form.id = wf_link.f_id) word
JOIN s ON wid=w_id ORDER BY pos_id
""", (query, lang_id, lang_id2)):
sense[s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['lang_id'], s['pos_id'], s['lemma']]
lang_sense[s['lang_id']][s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['pos_id'], s['lemma']]
pos = fetch_pos()
ss, senses, defs, exes, links = fetch_ss_basic(sense.keys())
ili, ili_defs = fetch_ili([ss[k][0] for k in ss])
labels = fetch_labels(lang_id, set(senses.keys()))
projects = request.args.get('projects', 'current')
lang_idm, lang_codem = fetch_langs()
src_meta = fetch_src_meta()
src_meta_sorted = fetch_sorted_meta_by_version(projects, src_meta, lang_idm, lang_codem)
resp = make_response(render_template('omw_results.html',
query =query,
langsel = int(lang_id),
langsel2 = int(lang_id2),
pos = pos,
lang_dct = lang_dct,
sense=sense,
senses=senses,
ss=ss,
ili=ili,
links=links,
defs=defs,
exes=exes,
labels=labels,
src_meta=src_meta_sorted))
resp.set_cookie('selected_lang', str(lang_id))
resp.set_cookie('selected_lang2', str(lang_id2))
return resp
@app.route('/omw/core', methods=['GET', 'POST'])
def omw_core(): ### FIXME add lang as a parameter?
return render_template('omw_core.html')
@app.route('/omw/concepts/<ssID>', methods=['GET', 'POST'])
@app.route('/omw/concepts/ili/<iliID>', methods=['GET', 'POST'])
def concepts_omw(ssID=None, iliID=None):
if iliID:
ss_ids = f_ss_id_by_ili_id(iliID)
ili, ilidefs = fetch_ili([iliID])
else:
ss_ids = [ssID]
ili, ili_defs = dict(), dict()
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
if (not iliID) and int(ssID) in ss:
iliID = ss[int(ssID)][0]
ili, ilidefs = fetch_ili([iliID])
sss = list(ss.keys())
for s in links:
for l in links[s]:
sss.extend(links[s][l])
selected_lang = int(_get_cookie('selected_lang', 1))
selected_lang2 = int(_get_cookie('selected_lang2', 1))
labels = fetch_labels(selected_lang, set(sss))
ssrels = fetch_ssrel()
ss_srcs=fetch_src_for_ss_id(ss_ids)
src_meta=fetch_src_meta()
core_ss, core_ili = fetch_core()
s_ids = []
for x in senses:
for y in senses[x]:
for (s_id, lemma, freq) in senses[x][y]:
s_ids.append(s_id)
slinks = fetch_sense_links(s_ids)
## get the canonical form for each linked sense
srl = fetch_srel()
return render_template('omw_concept.html',
ssID=ssID,
iliID=iliID,
pos = pos,
langs = langs_id,
senses=senses,
slinks=slinks,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes,
ili=ili,
selected_lang = selected_lang,
selected_lang2 = selected_lang2,
labels=labels,
ss_srcs=ss_srcs,
src_meta=src_meta,
core=core_ss,
gwadoc=gwadoc,
srl=srl)
@app.route('/omw/senses/<sID>', methods=['GET', 'POST'])
def omw_sense(sID=None):
"""display a single sense (and its variants)"""
if sID:
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
s_id=int(sID)
sense = fetch_sense(s_id)
slinks = fetch_sense_links([s_id])
forms=fetch_forms(sense[3])
selected_lang = int(_get_cookie('selected_lang',1))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
srel = fetch_srel()
## get the canonical form for each linked sense
slabel=fetch_sense_labels([x for v in slinks[int(s_id)].values() for x in v])
sdefs = fetch_defs_by_sense([s_id])
sdef = ''
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
            sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## a language
return render_template('omw_sense.html',
s_id = sID,
sdef = sdef,
sense = sense,
slinks = slinks[s_id],
srel = srel,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
slabel = slabel,
src_sid = src_sid,
src_meta = src_meta,
gwadoc=gwadoc)
# URIs FOR ORIGINAL CONCEPT KEYS, BY INDIVIDUAL SOURCES
@app.route('/omw/src/<proj>/<ver>/<originalkey>', methods=['GET', 'POST'])
def src_omw(proj=None, ver=None, originalkey=None):
try:
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
if src_id:
ss = fetch_ss_id_by_src_orginalkey(src_id, originalkey)
else:
ss = None
return concepts_omw(ss)
##
## show wn statistics
##
##
@app.route('/omw/src/<proj>/<ver>', methods=['GET', 'POST'])
def omw_wn(proj=None,ver=None):
"""
Present a page describing a single wordnet
"""
### default to full = false (short version)
full = request.args.get('full') in ['true', 'True']
if proj and ver:
try:
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
if full and src_id: ### give more stats
ssrel_stats=fetch_ssrel_stats(src_id)
srel_stats=fetch_srel_stats(src_id)
else:
ssrel_stats= {}
srel_stats= {}
pos_stats= fetch_src_id_pos_stats(src_id)
# get the pos names
pos = fetch_pos()
# get the examples for the POS
pos_ids= [ pos_stats[p]['id'] for p in pos_stats ]
pos_exe = fetch_pos_id_ss_mf(pos_ids, src_id = src_id)
### get the wordnet lang
langs_id, langs_code = fetch_langs()
wn_lang = src_info['language']
wn_lang_id = langs_code['code'][wn_lang]
# Get the labels for the synsets
sss = set()
for p in pos_exe:
for (ss_id, freq) in pos_exe[p]:
sss.add(ss_id)
label= fetch_labels(wn_lang_id,sss)
return render_template('omw_wn.html',
proj = proj,
ver = ver,
src_id=src_id,
src_info=src_info,
ssrel_stats=ssrel_stats,
srel_stats=srel_stats,
pos=pos,
pos_stats= pos_stats,
pos_exe=pos_exe,
label=label,
src_stats=fetch_src_id_stats(src_id),
licenses=licenses,
gwadoc=gwadoc)
@app.route('/omw/src-latex/<proj>/<ver>', methods=['GET', 'POST'])
def omw_wn_latex(proj=None, ver=None,full=False):
if proj and ver:
try:
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
if full and src_id:
ssrel_stats=fetch_ssrel_stats(src_id)
else:
ssrel_stats= {}
return render_template('omw_wn_latex.html',
proj = proj,
ver = ver,
src_id=src_id,
src_info=src_info,
ssrel_stats=ssrel_stats,
pos_stats= fetch_src_id_pos_stats(src_id),
src_stats=fetch_src_id_stats(src_id))
@app.route('/cili.tsv')
def generate_cili_tsv():
tsv = fetch_cili_tsv()
return Response(tsv, mimetype='text/tab-separated-values')
@app.route('/core.tsv')
def generate_core_tsv():
"""output a list of the core ili concepts
    TODO: sort by frequency"""
tsv="""# ili_id\n"""
core_ss, core_ili = fetch_core()
for ili in core_ili:
tsv += "i{}\n".format(ili)
return Response(tsv, mimetype='text/tab-separated-values')
@app.context_processor
def utility_processor():
def scale_freq(f, maxfreq=1000):
if f > 0:
return 100 + 100 * log(f)/log(maxfreq)
else:
return 100
return dict(scale_freq=scale_freq)
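# Quick numeric illustration of the font scaling exposed above as scale_freq:
# with the assumed maxfreq of 1000, a frequency of 1 maps to 100% and the
# maximum maps to 200%, growing logarithmically in between.
def _example_scale_freq():
    assert abs((100 + 100 * log(1) / log(1000)) - 100.0) < 1e-9
    assert abs((100 + 100 * log(1000) / log(1000)) - 200.0) < 1e-9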
# def style_sense(freq, conf, lang):
# """show confidence as opacity, show freq as size
# opacity is the square of the confidence
# freq is scaled as a % of maxfreq for that language
# TODO: highlight a word if searched for?"""
# style = ''
# if conf and conf < 1.0: ## should not be more than 1.0
# style += 'opacity: {f};'.format(conf*conf) ## degrade quicker
# if freq:
# ### should I be using a log here?
# maxfreq=1000 #(should do per lang)
# style += 'font-size: {f}%;'.format(100*(1+ log(freq)/log(maxfreq)))
# if style:
# style = "style='{}'".format(style)
###
### WN documentation
###
@app.route('/omw/doc/if', methods=['GET', 'POST'])
def omw_doc_if(name=None):
return render_template('doc/interface.html')
@app.route('/omw/doc/search', methods=['GET', 'POST'])
def omw_doc_search(name=None):
return render_template('doc/search.html')
@app.route('/omw/doc/validator', methods=['GET', 'POST'])
def omw_doc_validator(name=None):
return render_template('doc/validator.html')
@app.route('/omw/doc/feedback', methods=['GET', 'POST'])
def omw_doc_feedback(name=None):
return render_template('doc/feedback.html')
@app.route('/omw/doc/glob', methods=['GET', 'POST'])
def omw_doc_glob(name=None):
return render_template('doc/glob.html')
@app.route('/omw/doc/contribute', methods=['GET', 'POST'])
def omw_doc_contribute(name=None):
return render_template('doc/contribute.html')
@app.route('/omw/doc/feedback-doc', methods=['GET', 'POST'])
def omw_doc_feedback_documentation(name=None):
return render_template('doc/feedback_documentation.html')
@app.route('/omw/doc/upload', methods=['GET', 'POST'])
def omw_doc_upload(name=None):
return render_template('doc/upload.html',
title="Upload How-To")
@app.route('/omw/doc/metadata', methods=['GET', 'POST'])
def omw_doc_metadata():
licenses = fetch_licenses()
return render_template('doc/metadata.html',
licenses=licenses)
@app.route('/omw/doc/lmf', methods=['GET', 'POST'])
def omw_doc_lmf():
return render_template('doc/lmf.html')
@app.route('/omw/doc/', methods=['GET', 'POST'])
@app.route('/omw/doc/wn', methods=['GET', 'POST'])
def omw_doc_wn(name=None):
return render_template('doc/wn.html',
gwadoc=gwadoc)
@app.route('/omw/doc/pos', methods=['GET', 'POST'])
def omw_doc_pos(name=None):
"""
Provide dynamic documentation for the POS
ToDo: maybe do per src and or per lang
"""
### get the interface language
selected_lang = int(_get_cookie('selected_lang',1))
# get the pos names
pos = fetch_pos()
# get the examples for the POS
pos_exe = fetch_pos_id_ss_mf(pos['id'].keys(),
num=5)
# Get the labels for the synsets
sss = set()
for p in pos_exe:
for (ss_id, freq) in pos_exe[p]:
sss.add(ss_id)
label= fetch_labels(selected_lang,sss)
pos_freq = fetch_pos_id_freq()
return render_template('doc/pos.html',
pos=pos,
pos_exe=pos_exe,
pos_freq=pos_freq,
label=label)
@app.route('/omw/doc/variants', methods=['GET', 'POST'])
def omw_doc_variants(name=None):
"""
Give some documentation on how variants are represented
"""
fma = fetch_form_meta_attr()
fmv = fetch_form_meta_val()
return render_template('doc/variants.html',
fma=fma,
fmv=fmv)
@app.route('/omw/doc/glossary', methods=['GET', 'POST'])
def omw_doc_glossary(name=None):
return render_template('doc/glossary.html',
gwadoc=gwadoc)
@app.route('/omw/doc/tsv2lmf', methods=['GET', 'POST'])
def omw_doc_tsv2lmf(name=None):
return render_template('doc/tsv2lmf.html',
gwadoc=gwadoc)
@app.route('/omw/doc/add-wn', methods=['GET', 'POST'])
def omw_doc_add_wn(name=None):
return render_template('doc/add-wn.html',
title="Add WN from the Command Line")
@app.route('/omw/doc/doc', methods=['GET', 'POST'])
def omw_doc_doc(name=None):
return render_template('doc/doc.html',
gwadoc=gwadoc)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', threaded=True)
| globalwordnet/OMW | omw/__init__.py | Python | mit | 35,734 |
from datetime import timedelta
import logging
from django.utils.timezone import now
from django.core.management.base import BaseCommand
from TWLight.users.models import Editor
from TWLight.users.helpers.editor_data import (
editor_global_userinfo,
editor_valid,
editor_enough_edits,
editor_not_blocked,
editor_bundle_eligible,
editor_account_old_enough,
)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Updates editor info and Bundle eligibility for currently-eligible Editors."
def add_arguments(self, parser):
"""
Adds command arguments.
"""
parser.add_argument(
"--datetime",
action="store",
help="ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.",
)
parser.add_argument(
"--global_userinfo",
action="store",
help="Specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--timedelta_days",
action="store",
help="Number of days used to define 'recent' edits. Defaults to 30. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--wp_username",
action="store",
help="Specify a single editor to update. Other arguments and filters still apply.",
)
def handle(self, *args, **options):
"""
Updates editor info and Bundle eligibility for currently-eligible Editors.
Parameters
----------
args
options
Returns
-------
None
"""
# Default behavior is to use current datetime for timestamps to check all editors.
now_or_datetime = now()
datetime_override = None
timedelta_days = 0
wp_username = None
editors = Editor.objects.all()
# This may be overridden so that values may be treated as if they were valid for an arbitrary datetime.
# This is also passed to the model method.
if options["datetime"]:
datetime_override = now_or_datetime.fromisoformat(options["datetime"])
now_or_datetime = datetime_override
# These are used to limit the set of editors updated by the command.
# Nothing is passed to the model method.
if options["timedelta_days"]:
timedelta_days = int(options["timedelta_days"])
# Get editors that haven't been updated in the specified time range, with an option to limit on wp_username.
if timedelta_days:
editors = editors.exclude(
editorlogs__timestamp__gt=now_or_datetime
- timedelta(days=timedelta_days),
)
# Optional wp_username filter.
if options["wp_username"]:
editors = editors.filter(wp_username=str(options["wp_username"]))
# Iterator reduces memory footprint for large querysets
for editor in editors.iterator():
# T296853: avoid stale editor data while looping through big sets.
editor.refresh_from_db()
# `global_userinfo` data may be overridden.
if options["global_userinfo"]:
global_userinfo = options["global_userinfo"]
editor.check_sub(global_userinfo["id"])
# Default behavior is to fetch live `global_userinfo`
else:
global_userinfo = editor_global_userinfo(editor.wp_sub)
if global_userinfo:
editor.update_editcount(global_userinfo["editcount"], datetime_override)
# Determine editor validity.
editor.wp_enough_edits = editor_enough_edits(editor.wp_editcount)
editor.wp_not_blocked = editor_not_blocked(global_userinfo["merged"])
# We will only check if the account is old enough if the value is False
# Accounts that are already old enough will never cease to be old enough
if not editor.wp_account_old_enough:
editor.wp_account_old_enough = editor_account_old_enough(
editor.wp_registered
)
editor.wp_valid = editor_valid(
editor.wp_enough_edits,
editor.wp_account_old_enough,
# editor.wp_not_blocked can only be rechecked on login, so we're going with the existing value.
editor.wp_not_blocked,
editor.ignore_wp_blocks,
)
# Determine Bundle eligibility.
editor.wp_bundle_eligible = editor_bundle_eligible(editor)
# Save editor.
editor.save()
# Prune EditorLogs, with daily_prune_range set to only check the previous day to improve performance.
editor.prune_editcount(
current_datetime=datetime_override, daily_prune_range=2
)
# Update bundle authorizations.
editor.update_bundle_authorization()
| WikipediaLibrary/TWLight | TWLight/users/management/commands/user_update_eligibility.py | Python | mit | 5,266 |
# -*- coding: utf-8 -*-
import os
import shutil
from jinja2 import Environment, FileSystemLoader
from webassets import Environment as AssetsEnvironment
from webassets.ext.jinja2 import AssetsExtension
from webassets.loaders import YAMLLoader
class TemplateBuilder(object):
def __init__(self, path, output,
static_path='static', static_url='static',
asset_config='config.yml'):
self.path = path
self.output = output
self.output_path = os.path.join(path, output)
self.env = Environment(loader=FileSystemLoader(path),
extensions=[AssetsExtension])
try:
config_path = os.path.join(self.path, asset_config)
asset_config = YAMLLoader(config_path)
self.assets_env = asset_config.load_environment()
except IOError:
self.assets_env = AssetsEnvironment()
if 'directory' not in self.assets_env.config:
self.assets_env.directory = self.output_path
if 'url' not in self.assets_env.config:
self.assets_env.url = static_url
self.assets_env.load_path = [self.path]
self.env.assets_environment = self.assets_env
def build_template(self, template, context={}):
tmpl = self.env.get_template(template)
dump_path = os.path.join(self.output_path, template)
tmpl.stream().dump(dump_path)
def list_files(self):
templates, other = set(), set()
if getattr(self.assets_env, '_named_bundles', None):
bundles = [fp for name, bundle in self.assets_env._named_bundles.iteritems()
for fp in bundle.contents]
else:
bundles = []
for dirpath, dirnames, filenames in os.walk(self.path):
for filename in filenames:
filepath = os.path.join(dirpath, filename) \
[len(self.path):].strip(os.path.sep).replace(os.path.sep, '/')
if filepath[:2] == './':
filepath = filepath[2:]
if self.output in filepath or filepath in bundles:
continue
elif '.html' in filepath:
templates.add(filepath)
else:
other.add(filepath)
return sorted(templates), sorted(bundles), sorted(other)
class SiteBuilder(object):
def __init__(self, path, output='public', tmpl_builder_class=TemplateBuilder, **kwargs):
self.path = path
self.output_path = os.path.join(path, output)
self.tmpl_builder = tmpl_builder_class(self.path, output, **kwargs)
def build(self):
if not os.path.exists(self.output_path):
os.mkdir(self.output_path)
templates, bundles, others = self.tmpl_builder.list_files()
for template in templates:
# XXX: for now we are not handling contexts
self.tmpl_builder.build_template(template)
for other in others:
dirname = os.path.join(self.output_path, os.path.dirname(other))
if not os.path.exists(dirname):
os.makedirs(dirname)
shutil.copyfile(os.path.join(self.path, other), os.path.join(self.output_path, other))
| regadas/presstatic | presstatic/builder.py | Python | mit | 3,269 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
from time import strftime
def append_new_end(word,user_end):
space_count = word.count('~') # count number of placeholders
if space_count > 0:
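        # drop as many trailing characters as there are placeholders, then append the user's ending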
total_word = word[:-space_count] + user_end # supplied from raw input
else:
total_word = word
return total_word
def create_updated_array(text_complete,text_new,number_sentences):
sentence = 0
while sentence < number_sentences:
word = 0
print
print text_new[sentence]
print
while word < len(text_new[sentence]):
user_end = raw_input(text_new[sentence][word].encode('utf-8') + ' ')
total_word = append_new_end(text_new[sentence][word],user_end)
total_word.encode('utf-8')
text_complete[sentence].append(total_word)
word += 1
sentence += 1
return text_complete
def print_output(text_complete,text_orig,number_sentences):
sentence = 0
while sentence < number_sentences:
contained = [x for x in text_complete[sentence] if x not in text_orig[sentence]]
print
print "Original Sentence: " ,
write_output(strftime("%Y-%m-%d %H:%M:%S"))
write_output('\n')
write_output("Original Sentence: ")
write_output('\n')
for each in text_orig[sentence]:
print each,
write_output(each.encode('utf-8') + ' '),
print
write_output('\n')
print
write_output("User Completed Sentence: ")
write_output('\n')
print "User completed text: " ,
for each in text_complete[sentence]:
print each,
write_output(each.encode('utf-8') + ' '),
print
print
write_output('\n')
write_output("User Mistakes: ")
write_output('\n')
print "User Mistakes: "
for each in contained:
print each
write_output(each.encode('utf-8') + ' '),
print
print
sentence += 1
def write_output(input_text):
with open('output.txt', 'a') as f:
f.write(input_text)
| kekeller/german_endings | analyze_data/user_input.py | Python | mit | 1,854 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Platform.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment()
Platform('cygwin')(env)
print "'%s'" % env['PROGSUFFIX']
assert env['SHELL'] == 'sh'
Platform('os2')(env)
print "'%s'" % env['PROGSUFFIX']
env.Platform('posix')
print "'%s'" % env['PROGSUFFIX']
Platform('win32')(env)
print "'%s'" % env['PROGSUFFIX']
SConscript('SConscript')
""")
test.write('SConscript', """
env = Environment()
Platform('cygwin')(env)
print "'%s'" % env['LIBSUFFIX']
Platform('os2')(env)
print "'%s'" % env['LIBSUFFIX']
env.Platform('posix')
print "'%s'" % env['LIBSUFFIX']
Platform('win32')(env)
print "'%s'" % env['LIBSUFFIX']
""")
expect = test.wrap_stdout(read_str = """'.exe'
'.exe'
''
'.exe'
'.a'
'.lib'
'.a'
'.lib'
""", build_str = "scons: `.' is up to date.\n")
test.run(arguments = ".", stdout = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/Platform.py | Python | mit | 2,185 |
"""Run Monte Carlo simulations."""
from joblib import Parallel, delayed
from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint
from datetime import datetime
from copy import deepcopy
from glob import glob
import frbpoppy.paths
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import uuid
POP_SIZE = 5e7
class SimulationOverview:
"""Given values, return uid
Load from file, or make."""
def __init__(self, load_csv=True):
p = frbpoppy.paths.populations()
self.filename = f'{p}mc/simluation_overview.csv'
if load_csv and os.path.isfile(self.filename):
self.load()
else:
self.df = pd.DataFrame()
def load(self):
self.df = pd.read_csv(self.filename, index_col=0)
self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')]
def save(self):
self.df.to_csv(self.filename)
def append(self, df):
self.df = self.df.append(df, ignore_index=True)
def map_surveys(self, ix, names):
mapping = dict(zip(ix, names))
self.df.replace({"survey": mapping}, inplace=True)
class MonteCarlo:
def __init__(self, pop_size=1e2, load_csv=True):
self.survey_names = ['parkes-htru',
'chime-frb',
'askap-incoh',
'wsrt-apertif']
self.load_csv = load_csv
self.pop_size = pop_size
self.survey_ix = [i for i in range(len(self.survey_names))]
self.surveys = self.set_up_surveys()
self.so = SimulationOverview(load_csv=self.load_csv)
self.set_up_dirs()
def set_up_surveys(self):
"""Set up surveys."""
surveys = []
for name in self.survey_names:
survey = Survey(name=name)
survey.set_beam(model='airy', n_sidelobes=1)
if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
survey.set_beam(model=name)
surveys.append(survey)
return surveys
def set_up_dirs(self, run=np.nan):
"""Create subdirectory for saving populations.
Returns True if directory had to be set up."""
f = f'{frbpoppy.paths.populations()}mc/'
if not os.path.isdir(f):
os.mkdir(f)
return True
if not np.isnan(run):
f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
if not os.path.isdir(f):
os.mkdir(f)
return True
return False
def gen_par_set_1(self,
parallel=True,
lum_min=np.nan,
lum_max=np.nan,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=0):
alphas = np.linspace(-2.5, -1, 11)
sis = np.linspace(-2, 2, 11)
lis = np.linspace(-2, 0, 11)
# Put all options into a dataframe
if 'run' in self.so.df:
self.so.df = self.so.df[self.so.df.run != run]
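        # meshgrid + reshape yields the Cartesian product of all parameter values and surveys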
opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
df['run'] = run
df['par_set'] = 1
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
def iter_alpha(i):
alpha = alphas[i]
pop = CosmicPopulation.complex(self.pop_size)
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
for si in sis:
pop.set_si(model='constant', value=si)
pop.gen_si()
for li in lis:
pop.set_lum(model='powerlaw',
low=1e40,
high=1e45, power=li)
if not np.isnan(lum_min):
pop.set_lum(model='powerlaw', low=lum_min,
high=lum_max, index=li)
pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 1)
mask &= (self.so.df.run == run)
mask &= (self.so.df.alpha == alpha)
mask &= (self.so.df.si == si)
mask &= (self.so.df.li == li)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
if parallel:
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
r = range(len(alphas))
Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
else:
[iter_alpha(i) for i in tqdm(range(len(alphas)))]
def gen_par_set_2(self,
parallel=True,
alpha=-1.5,
si=0,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
lis = np.linspace(-1.5, 0, 11)
lum_mins = 10**np.linspace(38, 46, 11)
lum_maxs = 10**np.linspace(38, 46, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
cols = ('li', 'lum_min', 'lum_max', 'survey')
df = pd.DataFrame(options, columns=cols)
df['par_set'] = 2
df['run'] = run
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
df = df[~(df.lum_max < df.lum_min)]
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
li, lum_min, lum_max = e
if lum_max < lum_min:
return
t_pop = deepcopy(pop)
t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
power=li)
t_pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 2)
mask &= (self.so.df.run == run)
mask &= (self.so.df.li == li)
mask &= (self.so.df.lum_min == lum_min)
mask &= (self.so.df.lum_max == lum_max)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(lis, lum_mins, lum_maxs)
loop = np.array(mg).T.reshape(-1, 3)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_3(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
w_means = 10**np.linspace(-3, 1, 11)
w_stds = np.linspace(0, 3, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(w_means, w_stds, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('w_mean', 'w_std', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 3
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
if not np.isnan(lum_min):
pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, index=li)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
w_mean, w_std = e
t_pop = deepcopy(pop)
t_pop.set_w(model='lognormal', mean=w_mean, std=w_std)
t_pop.gen_w()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 3)
                mask &= (self.so.df.run == run)
mask &= (self.so.df.w_mean == w_mean)
mask &= (self.so.df.w_std == w_std)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(w_means, w_stds)
loop = np.array(mg).T.reshape(-1, 2)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_4(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
w_mean=np.nan,
w_std=np.nan,
run=np.nan):
dm_igm_slopes = np.linspace(800, 1200, 11)
dm_hosts = np.linspace(0, 500, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(dm_igm_slopes, dm_hosts, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('dm_igm_slope', 'dm_host', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 4
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
if not np.isnan(lum_min):
pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, index=li)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
pop.generate()
def adapt_pop(e):
dm_igm_slope, dm_host = e
t_pop = deepcopy(pop)
t_pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
t_pop.gen_dm_igm()
t_pop.set_dm_host(model='constant', value=dm_host)
t_pop.gen_dm_host()
t_pop.frbs.dm = t_pop.frbs.dm_mw + t_pop.frbs.dm_igm
t_pop.frbs.dm += t_pop.frbs.dm_host
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 4)
mask &= (self.so.df.run == run)
mask &= (self.so.df.dm_igm_slope == dm_igm_slope)
mask &= (self.so.df.dm_host == dm_host)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([4, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(dm_igm_slopes, dm_hosts)
loop = np.array(mg).T.reshape(-1, 2)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
| davidgardenier/frbpoppy | tests/monte_carlo/simulations.py | Python | mit | 14,711 |
try: from setuptools import setup
except ImportError: from distutils.core import setup
setup( long_description=open("README.rst").read(),
name="""tinypath""",
license="""MIT""",
author="""Karim Bahgat""",
author_email="""[email protected]""",
py_modules=['tinypath'],
url="""http://github.com/karimbahgat/tinypath""",
version="""0.1.1""",
keywords="""paths files folders organizing""",
classifiers=['License :: OSI Approved', 'Programming Language :: Python', 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Intended Audience :: End Users/Desktop'],
description="""Tinypath is a tiny object-oriented file path module that provides only the most crucial and commonly needed functionality, making it easy to learn and efficient to use.""",
)
| karimbahgat/TinyPath | setup.py | Python | mit | 816 |
from south.db import db
from django.db import models
from askmeanything.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
'askmeanything.poll': {
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'askmeanything.response': {
'answer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['askmeanything.Poll']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askmeanything']
| MidwestCommunications/django-askmeanything | askmeanything/migrations/0004_rmpub.py | Python | mit | 4,171 |
import os
from shutil import copyfile
from photomanip.metadata import ImageExif, SetExifTool
from nose import tools
ORIGINAL_IMAGE_FILENAME = 'photomanip/tests/turd_ferguson.jpeg'
TEST_IMAGE_FILENAME = 'photomanip/tests/image_exif_test.jpg'
ORIGINAL_PHOTO_FILENAME = 'photomanip/tests/test_photo_0.jpg'
TEST_PHOTO_01_FILENAME = 'photomanip/tests/image_exposure_test_01.jpg'
TEST_PHOTO_02_FILENAME = 'photomanip/tests/image_exposure_test_02.jpg'
class TestImageExif:
@classmethod
def setup_class(cls):
cls.image_exif = ImageExif()
copyfile(ORIGINAL_IMAGE_FILENAME, TEST_IMAGE_FILENAME)
copyfile(ORIGINAL_PHOTO_FILENAME, TEST_PHOTO_01_FILENAME)
copyfile(ORIGINAL_PHOTO_FILENAME, TEST_PHOTO_02_FILENAME)
@classmethod
def teardown_class(cls):
os.remove(TEST_IMAGE_FILENAME)
os.remove(TEST_PHOTO_01_FILENAME)
os.remove(TEST_PHOTO_02_FILENAME)
def get_stored_tags(self, tag_list, filename):
with SetExifTool() as et:
stored_tags = et.get_tags(tag_list, filename)
return stored_tags
def test_imageexif_generate_tag_list(self):
get_list = self.image_exif.get_list
# test get list
tag_list = self.image_exif._generate_tag_list(get_list)
tools.eq_(set(tag_list), set([
'EXIF:DateTimeOriginal',
'File:ImageHeight',
'IPTC:Keywords',
'EXIF:ExposureTime',
'File:ImageWidth']))
# test set list
tag_list = self.image_exif._generate_tag_list(get_list, True)
tools.eq_(tag_list, {
'date_created': 'EXIF:DateTimeOriginal={}',
'exposure_time': 'EXIF:ExposureTime={}',
'image_height': 'File:ImageHeight={}',
'image_width': 'File:ImageWidth={}',
'keywords': 'IPTC:Keywords={}'})
def test_set_image_metadata(self):
output_meta = {
"name": "Terd Ferguson",
"keywords": "one, two, three",
"caption": "suck it, trebeck",
}
result = self.image_exif.set_image_metadata(TEST_IMAGE_FILENAME,
output_meta)
tools.eq_(result, '1 image files updated\n')
check_tags = self.image_exif._generate_tag_list(output_meta.keys())
stored_tags = self.get_stored_tags(check_tags, TEST_IMAGE_FILENAME)
# now check if the metadata matches
for key, val in output_meta.items():
mapped_key = self.image_exif.metadata_map[key]
tools.eq_(val, stored_tags[mapped_key])
def test_calculate_exposure_time(self):
tag_list = self.image_exif._generate_tag_list(['exposure_time'])
stored_tags = self.get_stored_tags(tag_list, TEST_PHOTO_01_FILENAME)
tools.eq_(stored_tags['EXIF:ExposureTime'], 0.001333333333)
def test_get_tags_containing(self):
tag_list = self.image_exif._generate_tag_list(['keywords'])
stored_tags = self.get_stored_tags(tag_list, TEST_PHOTO_01_FILENAME)
result = self.image_exif.get_tags_containing(
stored_tags['IPTC:Keywords'], 'faceit365')
tools.eq_(result, 'faceit365:date=20190308')
def test_get_metadata_batch(self):
fname_list = [TEST_PHOTO_01_FILENAME, TEST_PHOTO_02_FILENAME]
meta_list = self.image_exif.get_metadata_batch(fname_list)
meta_list[0].pop('SourceFile')
meta_list[1].pop('SourceFile')
tools.eq_(meta_list[0], meta_list[1])
| whlteXbread/photoManip | photomanip/tests/test_metadata.py | Python | mit | 3,503 |
__author__ = 'jhala'
import types
import os.path, time
import json
import logging
import logging.config
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
import re
appInfo='appinfo.json'
''' Helper Functions '''
''' get the file as an array of arrays ( header + rows and columns) '''
def fileInfo(fil):
fileArr=[]
for i in open(fil):
fileArr.append(i.strip().split(","))
return fileArr
''' Return the header as an array '''
def getHeader(fileArr):
for rowOne in fileArr:
return rowOne
def fileLastTouchedTime(fileName):
mtim= int(os.path.getmtime(fileName))
ctim= int(os.path.getctime(fileName))
tims = [ mtim, ctim]
tims.sort()
return tims[len(tims)-1]
def getImageLocation():
f=open(appInfo,'r')
loc=json.load(f)
return loc['imageLocation']
def getImageDataLocation():
f=open(appInfo,'r')
loc=json.load(f)
return loc['imageData']
def getMatLabFeatureExtractScript():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabFeatureExtractScript']
def getMatLabSemanticElementsScript():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabSemanticElementsScript']
def getMatlabSemanticElementsOutputFile():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabSemanticElementsOutputFile']
def removeMatlabSemanticElementsOutputFile():
f=getMatlabSemanticElementsOutputFile()
if os.path.exists(f) and os.path.isfile(f):
os.remove(f)
def getMatlabFeatureOutputFile():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabFeatureOutputFile']
def getTestImageName():
f=open(appInfo,'r')
loc=json.load(f)
return loc['testImage']
def removeMatlabFeatureOutputFile():
f=getMatlabFeatureOutputFile()
if os.path.exists(f) and os.path.isfile(f):
os.remove(f)
def checkFileNameExists(filName=str):
return os.path.exists(filName) and os.path.isfile(filName)
def getMainImageFileList():
fileList=[]
epoch=time.mktime(time.strptime('1970','%Y'))
for root, dirs, files in os.walk(getImageLocation()):
#print root
#print dirs
for fil in files:
thisFileName=os.path.join(root, fil)
dataFileExists=False
imageFileNewerThanDataFile=False
dataFileRequiresUpdate=False
if isMainImageFile(thisFileName) and checkFileNameExists(thisFileName):
mainImageLastTouched=fileLastTouchedTime(thisFileName)
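                # the data file lives in the image-data folder, named <parent-folder>_<image-file>.json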
expectedDataFileName = os.path.join(getImageDataLocation(), os.path.basename(root)+'_'+fil+'.json')
if checkFileNameExists(expectedDataFileName ):
dataFileExists=True
dataFileLastTouched=fileLastTouchedTime(expectedDataFileName)
else:
dataFileExists=False
dataFileLastTouched=epoch
if dataFileExists and ( mainImageLastTouched > dataFileLastTouched) :
dataFileRequiresUpdate=True
if not dataFileExists:
dataFileRequiresUpdate=True
lcImageExists=False
lcImageName = getLCImageName(thisFileName)
if lcImageName != None:
lcImageExists=True
fileList.append({ 'lcImageExists': lcImageExists , 'lcImageName' : lcImageName, 'dataFileRequiresUpdate' : dataFileRequiresUpdate, 'imageFile' : str(thisFileName), 'dataFile' : expectedDataFileName, 'imageLastTouched': mainImageLastTouched, 'dataLastTouched': dataFileLastTouched, 'dataFileExists' : dataFileExists} )
return fileList
def isMainImageFile(fileName):
if re.search('.jpg$',fileName, flags=re.IGNORECASE) and not re.search('LC.jpg$',fileName, flags=re.IGNORECASE):
return True
else:
return False
def getLCImageName(imageFileName):
r=re.match("(.*)(.jpg)", imageFileName, flags=re.IGNORECASE)
if not r:
logger.error("Invalid image file name given" + imageFileName)
return None
else:
lcImageName = r.group(1) + "LC"+ r.group(2)
if checkFileNameExists(lcImageName):
return lcImageName
else:
logger.error('Image file does not exist: ' +lcImageName)
return None
| dbk138/ImageRegionRecognition-FrontEnd | app/python - Copy/Helpers.py | Python | mit | 4,339 |
from django.contrib.sites.models import Site
from django.utils._os import safe_join
from django.views.generic import TemplateView
from skin.conf import settings
from skin.template.loaders.util import get_site_skin
class TemplateSkinView(TemplateView):
"""
A view that extends Djangos base TemplateView to allow you to set up skins.
"""
skin_name = None
skin_path = None
def get_skin_name(self):
if self.skin_name is None:
return settings.SKIN_NAME
else:
return self.skin_name
def get_skin(self):
return get_site_skin(site=Site.objects.get_current(), name=self.get_skin_name())
def get_skin_path(self):
if self.skin_path is not None:
return self.skin_path
skin = self.get_skin()
if skin is not None:
return skin.path
else:
return None
def get_template_names(self):
template_names = super(TemplateSkinView, self).get_template_names()
skin_path = self.get_skin_path()
skin_template_names = []
if skin_path is not None:
for template_name in template_names:
skin_template_names.append(safe_join(skin_path, template_name))
return skin_template_names + template_names | dwatkinsweb/django-skin | skin/views/views.py | Python | mit | 1,288 |
from direct.directnotify.DirectNotifyGlobal import *
from otp.ai.AIBaseGlobal import *
from toontown.building import DistributedBuildingAI
from toontown.building import GagshopBuildingAI
from toontown.building import HQBuildingAI
from toontown.building import KartShopBuildingAI
from toontown.building import PetshopBuildingAI
from toontown.hood import ZoneUtil
# from toontown.building import DistributedAnimBuildingAI
class DistributedBuildingMgrAI:
notify = directNotify.newCategory('DistributedBuildingMgrAI')
def __init__(self, air, branchId, dnaStore, trophyMgr):
self.air = air
self.branchId = branchId
self.canonicalBranchId = ZoneUtil.getCanonicalZoneId(self.branchId)
self.dnaStore = dnaStore
self.trophyMgr = trophyMgr
self.__buildings = {}
self.findAllLandmarkBuildings()
def cleanup(self):
for building in self.__buildings.values():
building.cleanup()
self.__buildings = {}
def isValidBlockNumber(self, blockNumber):
return blockNumber in self.__buildings
def isSuitBlock(self, blockNumber):
if not self.isValidBlockNumber(blockNumber):
return False
return self.__buildings[blockNumber].isSuitBlock()
def getSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getEstablishedSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isEstablishedSuitBlock():
blocks.append(blockNumber)
return blocks
def getToonBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if isinstance(building, HQBuildingAI.HQBuildingAI):
continue
if isinstance(building, GagshopBuildingAI.GagshopBuildingAI):
continue
if isinstance(building, PetshopBuildingAI.PetshopBuildingAI):
continue
if isinstance(building, KartShopBuildingAI.KartShopBuildingAI):
continue
if not building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getBuildings(self):
return self.__buildings.values()
def getFrontDoorPoint(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].getFrontDoorPoint()
def getBuildingTrack(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].track
def getBuilding(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber]
def setFrontDoorPoint(self, blockNumber, point):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].setFrontDoorPoint(point)
def getDNABlockLists(self):
blocks = []
hqBlocks = []
gagshopBlocks = []
petshopBlocks = []
kartshopBlocks = []
animBldgBlocks = []
for i in xrange(self.dnaStore.getNumBlockNumbers()):
blockNumber = self.dnaStore.getBlockNumberAt(i)
buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
if buildingType == 'hq':
hqBlocks.append(blockNumber)
elif buildingType == 'gagshop':
gagshopBlocks.append(blockNumber)
elif buildingType == 'petshop':
petshopBlocks.append(blockNumber)
elif buildingType == 'kartshop':
kartshopBlocks.append(blockNumber)
elif buildingType == 'animbldg':
animBldgBlocks.append(blockNumber)
else:
blocks.append(blockNumber)
return (blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks)
def findAllLandmarkBuildings(self):
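        # previously saved suit-building state for this street, keyed by block number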
backups = simbase.backups.load('blockinfo', (self.air.districtId, self.branchId), default={})
(blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks) = self.getDNABlockLists()
for blockNumber in blocks:
self.newBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in animBldgBlocks:
self.newAnimBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in hqBlocks:
self.newHQBuilding(blockNumber)
for blockNumber in gagshopBlocks:
self.newGagshopBuilding(blockNumber)
for block in petshopBlocks:
self.newPetshopBuilding(block)
for block in kartshopBlocks:
self.newKartShopBuilding(block)
def newBuilding(self, blockNumber, backup=None):
building = DistributedBuildingAI.DistributedBuildingAI(
self.air, blockNumber, self.branchId, self.trophyMgr)
building.generateWithRequired(self.branchId)
if backup is not None:
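            # Restore the saved suit/cogdo state only if that building type is still enabled for the district.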
state = backup.get('state', 'toon')
if ((state == 'suit') and simbase.air.wantCogbuildings) or (
(state == 'cogdo') and simbase.air.wantCogdominiums):
building.track = backup.get('track', 'c')
building.difficulty = backup.get('difficulty', 1)
building.numFloors = backup.get('numFloors', 1)
building.updateSavedBy(backup.get('savedBy'))
building.becameSuitTime = backup.get('becameSuitTime', time.mktime(time.gmtime()))
if (state == 'suit') and simbase.air.wantCogbuildings:
building.setState('suit')
elif (state == 'cogdo') and simbase.air.wantCogdominiums:
building.setState('cogdo')
else:
building.setState('toon')
else:
building.setState('toon')
else:
building.setState('toon')
self.__buildings[blockNumber] = building
return building
def newAnimBuilding(self, blockNumber, backup=None):
return self.newBuilding(blockNumber, backup=backup)
def newHQBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = HQBuildingAI.HQBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newGagshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = GagshopBuildingAI.GagshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newPetshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = PetshopBuildingAI.PetshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newKartShopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = KartShopBuildingAI.KartShopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def save(self):
buildings = {}
for blockNumber in self.getSuitBlocks():
building = self.getBuilding(blockNumber)
backup = {
'state': building.fsm.getCurrentState().getName(),
'block': building.block,
'track': building.track,
'difficulty': building.difficulty,
'numFloors': building.numFloors,
'savedBy': building.savedBy,
'becameSuitTime': building.becameSuitTime
}
buildings[blockNumber] = backup
simbase.backups.save('blockinfo', (self.air.districtId, self.branchId), buildings)
| ToonTownInfiniteRepo/ToontownInfinite | toontown/building/DistributedBuildingMgrAI.py | Python | mit | 9,120 |
#!/usr/bin/python3
__author__ = 'ivan.shynkarenka'
import argparse
from TTWebClient.TickTraderWebClient import TickTraderWebClient
def main():
parser = argparse.ArgumentParser(description='TickTrader Web API sample')
parser.add_argument('web_api_address', help='TickTrader Web API address')
args = parser.parse_args()
# Create instance of the TickTrader Web API client
client = TickTraderWebClient(args.web_api_address)
# Public currencies
currencies = client.get_public_all_currencies()
for c in currencies:
print('Currency: {0}'.format(c['Name']))
currency = client.get_public_currency(currencies[0]['Name'])
print("{0} currency precision: {1}".format(currency[0]['Name'], currency[0]['Precision']))
if __name__ == '__main__':
main() | SoftFx/TTWebClient-Python | TTWebClientSample/public_currencies.py | Python | mit | 796 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-17 17:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import spectator.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
("spectator_core", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Publication",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
(
"title",
models.CharField(
help_text="e.g. 'Aurora' or 'Vol. 39 No. 4, 16 February 2017'.",
max_length=255,
),
),
(
"title_sort",
spectator.core.fields.NaturalSortField(
"title",
db_index=True,
default="",
editable=False,
help_text="e.g. 'clockwork orange, a' or 'world cities, the'.",
max_length=255,
),
),
(
"kind",
models.CharField(
choices=[("book", "Book"), ("periodical", "Periodical")],
default="book",
max_length=20,
),
),
(
"official_url",
models.URLField(
blank=True,
help_text="Official URL for this book/issue.",
max_length=255,
verbose_name="Official URL",
),
),
(
"isbn_uk",
models.CharField(
blank=True,
help_text="e.g. '0356500489'.",
max_length=20,
verbose_name="UK ISBN",
),
),
(
"isbn_us",
models.CharField(
blank=True,
help_text="e.g. '0316098094'.",
max_length=20,
verbose_name="US ISBN",
),
),
(
"notes_url",
models.URLField(
blank=True,
help_text="URL of your notes/review.",
max_length=255,
verbose_name="Notes URL",
),
),
],
options={"ordering": ("title_sort",)},
),
migrations.CreateModel(
name="PublicationRole",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
(
"role_name",
models.CharField(
blank=True,
help_text="e.g. 'Headliner', 'Support', 'Editor', 'Illustrator', 'Director', etc. Optional.", # noqa: E501
max_length=50,
),
),
(
"role_order",
models.PositiveSmallIntegerField(
default=1,
help_text="The order in which the Creators will be listed.",
),
),
(
"creator",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="publication_roles",
to="spectator_core.Creator",
),
),
(
"publication",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="roles",
to="spectator_reading.Publication",
),
),
],
options={"ordering": ("role_order", "role_name"), "abstract": False},
),
migrations.CreateModel(
name="PublicationSeries",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
(
"title",
models.CharField(
help_text="e.g. 'The London Review of Books'.", max_length=255
),
),
(
"title_sort",
spectator.core.fields.NaturalSortField(
"title",
db_index=True,
default="",
editable=False,
help_text="e.g. 'london review of books, the'.",
max_length=255,
),
),
(
"url",
models.URLField(
blank=True,
help_text="e.g. 'https://www.lrb.co.uk/'.",
max_length=255,
verbose_name="URL",
),
),
],
options={
"verbose_name_plural": "Publication series",
"ordering": ("title_sort",),
},
),
migrations.CreateModel(
name="Reading",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
("start_date", models.DateField(blank=True, null=True)),
(
"start_granularity",
models.PositiveSmallIntegerField(
choices=[(3, "Y-m-d"), (4, "Y-m"), (6, "Y")], default=3
),
),
("end_date", models.DateField(blank=True, null=True)),
(
"end_granularity",
models.PositiveSmallIntegerField(
choices=[(3, "Y-m-d"), (4, "Y-m"), (6, "Y")], default=3
),
),
(
"is_finished",
models.BooleanField(
default=False, help_text="Did you finish the publication?"
),
),
(
"publication",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="spectator_reading.Publication",
),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="publication",
name="creators",
field=models.ManyToManyField(
related_name="publications",
through="spectator_reading.PublicationRole",
to="spectator_core.Creator",
),
),
migrations.AddField(
model_name="publication",
name="series",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="spectator_reading.PublicationSeries",
),
),
]
| philgyford/django-spectator | spectator/reading/migrations/0001_initial.py | Python | mit | 10,589 |
# - Coding UTF8 -
#
# Networked Decision Making
# Site: http://code.google.com/p/global-decision-making-system/
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King ([email protected])
# Russ also blogs occasionally at proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
#
# This file contains settings for the auth policy which are needed before the
# rest of the configuration is set up, so they stay here for now
#
#########################################################################
from gluon.storage import Storage
settings = Storage()
#Settings for user logon - let's just uncomment as needed for now. It is not clear there is much scope to
#allow changes, and python social auth will hopefully be added. I don't think dual login worked with Google,
#but let's set it up again and see
#Plan for this for now is that netdecisionmaking will use web2py and Janrain while
#globaldecisionmaking will use google - for some reason Janrain doesn't seem
#to come up with google as a login and google login does not support dual methods
#reason for which has not been investigated
#settings.logon_methods = 'web2py'
#settings.logon_methods = 'google'
#settings.logon_methods = 'janrain'
settings.logon_methods = 'web2pyandjanrain'
settings.verification = False
settings.approval = False
| NewGlobalStrategy/NetDecisionMaking | models/0.py | Python | mit | 1,624 |
#! /usr/bin/python
import bottle
import settings
from controller import admin as admin_controller
from controller import email as email_controller
app = application = bottle.Bottle()
# Base url for regular users
app.route(settings.BASEPATH, 'GET', admin_controller.index)
app.route(settings.BASEPATH + '/', 'GET', admin_controller.index)
app.route(
settings.BASEPATH + '/tasks/<id>',
'GET',
admin_controller.read_user_tasks
)
app.route(
settings.BASEPATH + '/update/<id>',
'POST',
admin_controller.update_self
)
# Email handler
email = bottle.Bottle()
app.mount(settings.EMAIL_PATH, email)
email.route('/', 'POST', email_controller.receive_email)
email.route('/', 'GET', email_controller.test_form)
email.route('', 'GET', email_controller.test_form)
# Ansible admin
admin = bottle.Bottle()
app.mount(settings.ADMIN_PATH, admin)
admin.route('/tasks', 'GET', admin_controller.read_tasks)
admin.route('/create', 'POST', admin_controller.create_person)
admin.route('/delete', 'POST', admin_controller.delete_people)
admin.route('/<id>', 'GET', admin_controller.read_person)
admin.route('/<id>', 'POST', admin_controller.update_person)
admin.route('/', 'GET', admin_controller.admin)
# Static files
app.route(
settings.STATIC_PATH + '/<type>/<filename>',
'GET',
lambda **kwargs: bottle.static_file(
filename=kwargs['filename'], root='static/' + kwargs['type']
)
)
if __name__ == '__main__':
bottle.run(app=app, reloader=True, **settings.SERVER)
| TeachBoost/ansible | ansible.py | Python | mit | 1,499 |
import vk
import json
from sentiment_classifiers import SentimentClassifier, binary_dict, files
class VkFeatureProvider(object):
def __init__(self):
self._vk_api = vk.API(vk.Session())
self._vk_delay = 0.3
self._clf = SentimentClassifier(files['binary_goods'], binary_dict)
def _vk_grace(self):
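        """Pause briefly between successive VK API calls."""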
import time
time.sleep(self._vk_delay)
def get_news(self, sources, amount=10):
# entry for Alex Anlysis tool
result = []
for source in sources:
try:
data = self._vk_api.wall.get(domain=source, count=amount, extended=1, fields='name')
self._vk_grace()
except:
return {}
news = []
for node in data['wall'][1:]:
try:
if node['post_type'] != 'post':
continue
text = node['text']
#print('{}'.format(text.encode('utf-8')))
rate = self._clf.predict_text(text)[0]
news.append({'text' : '{}'.format(text.encode('utf-8')), 'rate' : rate})
except Exception as e:
print('Exception: {}'.format(e))
result.append({'source': data['groups'][0]['name'], 'news': news})
#return json.dumps(result)
return result
    # NOTE: a completely different feature, personally very useful to me
def friends_intersect(self, uid_list):
result = None
try:
result = set(self._vk_api.friends.get(user_id=uid_list[0]))
self._vk_grace()
except:
pass
for i, uid in enumerate(uid_list[1:]):
try:
tmp = set(self._vk_api.friends.get(user_id=uid))
self._vk_grace()
except:
continue
if result is not None:
result = result.intersection(tmp)
else:
result = tmp
return result
def get_user_info(self, entry_uid, fname=None, lname=None):
try:
friend_list = self._vk_api.friends.get(user_id=entry_uid, fields='personal', name_case='nom')
self._vk_grace()
except:
return []
return [x for x in friend_list
if (not fname or fname in x['first_name']) and (not lname or lname in x['last_name'])]
def get_uid_set_info(self, uid_set):
result = []
for friend_uid in uid_set:
try:
friend = self._vk_api.users.get(user_id=friend_uid, fields='sex,personal', name_case='nom')
self._vk_grace()
except:
continue
result.append(friend)
return result
if __name__ == '__main__':
provider = VkFeatureProvider()
res = provider.get_news(['scientific.american'], 5)
print(res)
| ArtemMIPT/sentiment_analysis | vk_parser.py | Python | mit | 2,902 |
# flake8: noqa
"""Settings to be used for running tests."""
from settings import *
INSTALLED_APPS.append('integration_tests')
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
EMAIL_SUBJECT_PREFIX = '[test] '
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
SOUTH_TESTS_MIGRATE = False
| bitmazk/webfaction-django-boilerplate | website/webapps/django/project/settings/test_settings.py | Python | mit | 371 |
import sublime, sublime_plugin, re
class EmmetCssFromOneLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
line_region = view.line(view.sel()[0])
line_str = view.substr(line_region)
left_padding = re.findall(r'^(\s+)', line_str)[0]
# find commands in line
props_array = re.findall(r'([a-zA-Z0-9:!;().,?/\-+#]+)', line_str)
# Delete long string
view.replace(edit, line_region, '')
def runEmmet():
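            # trigger Emmet's expand-abbreviation on the text just inserted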
view.run_command("expand_abbreviation_by_tab")
# Processing first element
view.insert(edit, view.sel()[0].end(), left_padding + props_array[0])
runEmmet()
i = 1
while i < len(props_array):
view.insert(edit, view.sel()[0].end(), '\n' + left_padding + props_array[i])
runEmmet()
i += 1
| kakRostropovich/EmmetOneLine | emmet_css_from_one_line.py | Python | mit | 908 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Error
from ._models_py3 import Key
from ._models_py3 import KeyListResult
from ._models_py3 import KeyValue
from ._models_py3 import KeyValueListResult
from ._models_py3 import Label
from ._models_py3 import LabelListResult
except (SyntaxError, ImportError):
from ._models import Error # type: ignore
from ._models import Key # type: ignore
from ._models import KeyListResult # type: ignore
from ._models import KeyValue # type: ignore
from ._models import KeyValueListResult # type: ignore
from ._models import Label # type: ignore
from ._models import LabelListResult # type: ignore
from ._azure_app_configuration_enums import (
Enum4,
Enum5,
Get6ItemsItem,
Get7ItemsItem,
Head6ItemsItem,
Head7ItemsItem,
)
__all__ = [
'Error',
'Key',
'KeyListResult',
'KeyValue',
'KeyValueListResult',
'Label',
'LabelListResult',
'Enum4',
'Enum5',
'Get6ItemsItem',
'Get7ItemsItem',
'Head6ItemsItem',
'Head7ItemsItem',
]
| Azure/azure-sdk-for-python | sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/_generated/models/__init__.py | Python | mit | 1,545 |
# -*- coding: utf-8 -*-
"""
https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-16.html#%_thm_2.60
"""
from Chapter2.themes.lisp_list_structured_data import car, cdr, cons, lisp_list, nil, print_lisp_list
from Chapter2.themes.sequences_as_conventional_interfaces import accumulate
def element_of_set(x, set):
"""Tests if x is element of set with a representation of sets that allows duplicates"""
if set is nil:
return False
if x == car(set):
return True
return element_of_set(x, cdr(set))
def adjoin_set(x, set):
"""Adds x to set"""
return cons(x, set)
def union_set(set1, set2):
"""Computes union of set1 and set2"""
return accumulate(adjoin_set, set2, set1)
def intersection_set(set1, set2):
"""Computes intersection of set1 and set2"""
if set1 is nil or set2 is nil:
return nil
if element_of_set(car(set1), set2):
return cons(car(set1), intersection_set(cdr(set1), set2))
return intersection_set(cdr(set1), set2)
def run_the_magic():
s1 = lisp_list(2, 3, 2, 1, 3, 2, 2)
s2 = lisp_list(1, 1, 3)
s3 = lisp_list(1, 2, 3)
print(element_of_set(3, s1))
print_lisp_list(adjoin_set(4, s1))
print_lisp_list(intersection_set(s1, s2))
print_lisp_list(union_set(s1, s2))
from timeit import Timer
t1_element_of = Timer(stmt='element_of_set(3, %(s1)s)' % locals(),
setup='from Chapter2.exercise2_60 import element_of_set')
t2_element_of = Timer(stmt='element_of_set(3, %(s1)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import element_of_set')
t1_adjoin = Timer(stmt='adjoin_set(4, %(s1)s)' % locals(), setup='from Chapter2.exercise2_60 import adjoin_set')
t2_adjoin = Timer(stmt='adjoin_set(4, %(s3)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import adjoin_set')
t1_intersection = Timer(stmt='intersection_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_60 import intersection_set')
t2_intersection = Timer(stmt='intersection_set(%(s1)s, %(s3)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import intersection_set')
t1_union = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_60 import union_set')
t2_union = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_59 import union_set')
header = '-----------Timing for *%s* operation'
def do_timing(timer1, timer2, op_name):
print(header % op_name)
t1 = timer1.timeit()
t2 = timer2.timeit()
print('-> With duplicate: %s' % t1)
print('-> Without duplicate: %s' % t2)
do_timing(t1_element_of, t2_element_of, 'element_of_set')
do_timing(t1_adjoin, t2_adjoin, 'adjoin_set')
do_timing(t2_intersection, t2_intersection, 'intersection_set')
do_timing(t1_union, t2_union, 'union_set')
print('The representation using unordered list with duplicates is better suited for applications where there are '
'many insertions in the data structure')
if __name__ == "__main__":
run_the_magic()
| aoyono/sicpy | Chapter2/exercises/exercise2_60.py | Python | mit | 3,259 |
""" You've recently read "The Gold-Bug" by Edgar Allan Poe, and was so impressed by the cryptogram in it that
decided to try and decipher an encrypted text yourself. You asked your friend to encode a piece of text using
a substitution cipher, and now have an encryptedText that you'd like to decipher.
The encryption process in the story you read involves frequency analysis: it is known that letter 'e' is the
most frequent one in the English language, so it's pretty safe to assume that the most common character in the
encryptedText stands for 'e'. To begin with, implement a function that will find the most frequent character
in the given encryptedText.
Example
For encryptedText = "$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", the output should be
frequencyAnalysis(encryptedText) = 'C'.
Letter 'C' appears in the text more than any other character (4 times), which is why it is the answer.
"""
from collections import Counter # "Counter" is what CodeFights asks for
def frequencyAnalysis(encryptedText):
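    # pick the distinct character with the highest count, i.e. the most frequent one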
return max(Counter(encryptedText), key=Counter(encryptedText).get) # CodeFights asks to change this line only
| ntthuy11/CodeFights | Arcade/04_Python/07_CaravanOfCollections/frequencyAnalysis.py | Python | mit | 1,131 |
# Requires trailing commas in Py2 but syntax error in Py3k
def True(
foo
):
True(
foo
)
def False(
foo
):
False(
foo
)
def None(
foo
):
None(
foo
)
def nonlocal (
foo
):
nonlocal(
foo
)
| zedlander/flake8-commas | test/data/keyword_before_parenth_form/py2_bad.py | Python | mit | 273 |
import tensorflow as tf
from tensorflow.contrib import slim as slim
from avb.ops import *
import math
def encoder(x, config, is_training=True):
df_dim = config['df_dim']
z_dim = config['z_dim']
a_dim = config['iaf_a_dim']
# Center x at 0
x = 2*x - 1
net = flatten_spatial(x)
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_0")
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1")
zmean = slim.fully_connected(net, z_dim, activation_fn=None)
log_zstd = slim.fully_connected(net, z_dim, activation_fn=None)
a = slim.fully_connected(net, a_dim, activation_fn=None)
return zmean, log_zstd, a
| LMescheder/AdversarialVariationalBayes | avb/iaf/models/full0.py | Python | mit | 698 |
# -*- coding: UTF-8 -*-
import sys
sys.path.append("./")
import pandas as pd
import gensim
from utility.mongodb import MongoDBManager
from utility.sentence import segment, sent2vec
class Doc2Vector(object):
"""
    Convert text documents to vectors
"""
def __init__(self):
"""
        :param keep_val: the threshold value to use
"""
self.mongo_db = MongoDBManager()
def doc2vect(self):
"""
        Convert every document to a vector and store it in the database
:return:
"""
model = gensim.models.Doc2Vec.load('./models/doc2vec_v1.model')
df_data = pd.read_excel("./data/new_prd.xlsx", names=["SysNo", "Title", "Content"])
content = []
title = []
for idx, row in df_data.iterrows():
seg_title = segment(row.Title)
seg_content = segment(row.Content)
            # Convert to vectors
content_vect = sent2vec(model, ' '.join(seg_content))
title_vect = sent2vec(model, ' '.join(seg_title))
content_vect = map(str, content_vect.tolist())
title_vect = map(str, title_vect.tolist())
content.append({"_id": int(idx) + 1, "data": list(content_vect)})
title.append({"_id": int(idx) + 1, "data": list(title_vect)})
self.mongo_db.insert("content_vector", content)
self.mongo_db.insert("title_vector", title)
print("finished")
if __name__ == '__main__':
doc2vect = Doc2Vector()
doc2vect.doc2vect()
| jarvisqi/learn_python | flaskweb/core/doc2vector.py | Python | mit | 1,476 |
from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.template.defaultfilters import linebreaksbr, urlize
from mezzanine import template
from mezzanine.conf import settings
from mezzanine.generic.forms import ThreadedCommentForm
from mezzanine.generic.models import ThreadedComment
from mezzanine.utils.importing import import_dotted_path
from mezzanine.pages.models import Page, RichTextPage
register = template.Library()
@register.assignment_tag
def allpages():
page_fields = [ 'content', 'created', 'description', 'expiry_date', 'gen_description', u'id', 'keywords', u'keywords_string', 'publish_date', 'short_url', 'slug', 'status', 'title', 'titles', 'updated']
output = []
# import pdb;pdb.set_trace()
AllPages = RichTextPage.objects.all()
for item in AllPages:
temp = {}
for fld in page_fields:
temp[fld] = getattr(item, fld)
output.append(temp)
return {
'pages': output
}
@register.filter()
def remove_slash(value):
return '#' + value[1:-1]
@register.filter()
def lower(value):
# import pdb;pdb.set_trace()
return value.lower() | aptuz/mezzanine_onepage_theme | one_page/customapp/templatetags/testtag.py | Python | mit | 1,240 |
import _plotly_utils.basevalidators
class ComputedValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="computed", parent_name="layout", **kwargs):
super(ComputedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/_computed.py | Python | mit | 395 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" sysdiag
Pierre Haessig — September 2013
"""
from __future__ import division, print_function
def _create_name(name_list, base):
'''Returns a name (str) built on `base` that doesn't exist in `name_list`.
Useful for automatic creation of subsystems or wires
'''
base = str(base).strip()
if base == '':
# avoid having '' as name (although it would not break the code...)
raise ValueError('base name should not be empty!')
if base not in name_list:
return base
# Else: build another name by counting
i = 0
name = base + str(i)
while name in name_list:
i += 1
name = base + str(i)
return name
class System(object):
'''Diagram description of a system
    a System is either an interconnection of subsystems
or an atomic element (a leaf of the tree)
'''
def __init__(self, name='root', parent=None):
self.name = name
# Parent system, if any (None for top-level):
self.parent = None
# Children systems, if any (None for leaf-level):
self.subsystems = []
self.wires = []
self.ports = []
self.params = {}
# If a parent system is provided, request its addition as a subsystem
if parent is not None:
parent.add_subsystem(self)
#end __init__()
def is_empty(self):
'''True if the System contains no subsystems and no wires'''
return (not self.subsystems) and (not self.wires)
@property
def ports_dict(self):
'''dict of ports, which keys are the names of the ports'''
return {p.name:p for p in self.ports}
@property
def subsystems_dict(self):
'''dict of subsystems, which keys are the names of the systems'''
return {s.name:s for s in self.subsystems}
def add_port(self, port, created_by_system = False):
'''add a Port to the System'''
if port in self.ports:
raise ValueError('port already added!')
# extract the port's name
name = port.name
port_names = [p.name for p in self.ports]
if name in port_names:
raise ValueError("port name '{}' already exists in {:s}!".format(
name, repr(self))
)
# Add parent relationship and add to the ports dict:
port.system = self
port._created_by_system = bool(created_by_system)
self.ports.append(port)
def del_port(self, port):
'''delete a Port of the System (and disconnect any connected wire)
'''
if (port.wire is not None) or (port.internal_wire is not None):
# TODO : implement the wire disconnection
raise NotImplementedError('Cannot yet delete a connected Port')
# Remove the ports list:
self.ports.remove(port)
def add_subsystem(self, subsys):
# 1) Check name uniqueness
name = subsys.name
subsys_names = [s.name for s in self.subsystems]
if name in subsys_names:
raise ValueError("system name '{}' already exists in {:s}!".format(
name, repr(self))
)
# 2) Add parent relationship and add to the system list
subsys.parent = self
self.subsystems.append(subsys)
def add_wire(self, wire):
# 1) Check name uniqueness
name = wire.name
wire_names = [w.name for w in self.wires]
if name in wire_names:
raise ValueError("wire name '{}' already exists in {:s}!".format(
name, repr(self))
)
# Add parent relationship and add to the ports dict:
wire.parent = self
self.wires.append(wire)
def create_name(self, category, base):
'''Returns a name (str) built on `base` that doesn't exist in
within the names of `category`.
'''
if category == 'subsystem':
components = self.subsystems
elif category == 'wire':
components = self.wires
else:
raise ValueError("Unknown category '{}'!".format(str(category)))
name_list = [c.name for c in components]
return _create_name(name_list, base)
def __repr__(self):
cls_name = self.__class__.__name__
s = "{:s}('{.name}')".format(cls_name, self)
return s
def __str__(self):
s = repr(self)
if self.parent:
s += '\n Parent: {:s}'.format(repr(self.parent))
if self.params:
s += '\n Parameters: {:s}'.format(str(self.params))
if self.ports:
s += '\n Ports: {:s}'.format(str(self.ports))
if self.subsystems:
s += '\n Subsytems: {:s}'.format(str(self.subsystems))
return s
def __eq__(self, other):
'''Systems compare equal if their class, `name` and `params` are equal.
and also their lists of ports and wires are *similar*
(see `_is_similar` methods of Port and Wire)
and finally their subsystems recursively compare equal.
parent systems are not compared (would generate infinite recursion).
'''
if not isinstance(other, System):
return NotImplemented
# Basic similarity
basic_sim = self.__class__ == other.__class__ and \
self.name == other.name and \
self.params == other.params
if not basic_sim:
return False
# Port similarity: (sensitive to the order)
ports_sim = all(p1._is_similar(p2) for (p1,p2)
in zip(self.ports, other.ports))
if not ports_sim:
return False
# Wires similarity
wires_sim = all(w1._is_similar(w2) for (w1,w2)
in zip(self.wires, other.wires))
if not wires_sim:
return False
print('equality at level {} is true'.format(self.name))
# Since everything matches, compare subsystems:
return self.subsystems == other.subsystems
# end __eq__()
def __ne__(self,other):
return not (self==other)
def _to_json(self):
'''convert the System instance to a JSON-serializable object
System is serialized with list of ports, subsystems and wires
but without connectivity information (e.g. no parent information)
ports created at the initialization of the system ("default ports")
are not serialized.
'''
# Filter out ports created at the initialization of the system
ports_list = [p for p in self.ports if not p._created_by_system]
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'System',
'__class__': cls_name,
'name':self.name,
'subsystems':self.subsystems,
'wires':self.wires,
'ports':ports_list,
'params':self.params
}
# end _to_json
def json_dump(self, output=None, indent=2, sort_keys=True):
'''dump (e.g. save) the System structure in json format
if `output` is None: return a json string
if `output` is a writable file: write in this file
'''
import json
if output is None:
return json.dumps(self, default=to_json, indent=indent, sort_keys=sort_keys)
else:
json.dump(self, output, default=to_json, indent=indent, sort_keys=sort_keys)
return
# end json_dump
class Port(object):
'''Port enables the connection of a System to a Wire
Each port has a `type` which only allows the connection of a Wire
of the same type.
it also have a `direction` ('none', 'in', 'out') that is set
at the class level
private attribute `_created_by_system` tells whether the port was created
automatically by the system's class at initialization or by a custom code
(if True, the port is not serialized by its system).
'''
direction = 'none'
def __init__(self, name, ptype):
self.name = name
self.type = ptype
self.system = None
self.wire = None
self.internal_wire = None
self._created_by_system = False
def __repr__(self):
cls_name = self.__class__.__name__
s = '{:s}({:s}, {:s})'.format(cls_name, repr(self.name), repr(self.type))
return s
def __str__(self):
s = repr(self) + ' of ' + repr(self.system)
return s
def _is_similar(self, other):
'''Ports are *similar* if their class, `type` and `name` are equal.
(their parent system are not compared)
'''
if not isinstance(other, Port):
return NotImplemented
return self.__class__ == other.__class__ and \
self.type == other.type and \
self.name == other.name
def _to_json(self):
'''convert the Port instance to a JSON-serializable object
Ports are serialized without any connectivity information
'''
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'Port',
'__class__': cls_name,
'name':self.name,
'type':self.type
}
# end _to_json
class InputPort(Port):
'''Input Port'''
direction = 'in'
def __init__(self, name, ptype=''):
super(InputPort, self).__init__(name, ptype)
class OutputPort(Port):
'''Output Port'''
direction = 'out'
def __init__(self, name, ptype=''):
super(OutputPort, self).__init__(name, ptype)
class Wire(object):
'''Wire enables the interconnection of several Systems
through their Ports'''
def __init__(self, name, wtype, parent=None):
self.name = name
self.parent = None
self.type = wtype
self.ports = []
# If a parent system is provided, request its addition as a wire
if parent is not None:
parent.add_wire(self)
def is_connect_allowed(self, port, port_level, raise_error=False):
        '''Check that a connection between Wire `self` and a Port `port` is allowed.
Parameters
----------
`port`: the Port instance to connect to
`port_level`: whether `port` belongs to a 'sibling' (usual case) or a
'parent' system (to enable connections to the upper level)
`raise_error`: if True, raising an error replaces returning False
Returns
-------
allowed: True or False
'''
assert port_level in ['sibling', 'parent']
# Port availability (is there already a wire connected?):
if port_level == 'sibling':
connected_wire = port.wire
elif port_level == 'parent':
connected_wire = port.internal_wire
if connected_wire is not None:
if raise_error:
raise ValueError('port is already connected to '+\
'{:s}!'.format(repr(connected_wire)))
else:
return False
# Check parent relationship:
if port_level == 'sibling':
# Check that the wire and port.system are siblings:
if self.parent is not port.system.parent:
if raise_error:
raise ValueError('wire and port.system should have a common parent!')
else:
return False
elif port_level == 'parent':
# Check that the port.system is the parent of the wire:
if self.parent is not port.system:
if raise_error:
raise ValueError('port.system should be the parent of the wire!')
else:
return False
# Wire-Port Type checking:
if self.type == '':
# untyped wire: connection is always possible
return True
elif port.type == self.type:
return True
else:
# Incompatible types
if raise_error:
raise TypeError("Wire type '{:s}'".format(str(self.type)) + \
" and Port type '{:s}'".format(str(port.type)) + \
" are not compatible!")
else:
return False
def connect_port(self, port, port_level='sibling'):
'''Connect the Wire to a Port `port`'''
if port in self.ports:
            return # Port is already connected
# Type checking:
self.is_connect_allowed(port, port_level, raise_error=True)
# Add parent relationship:
assert port_level in ['sibling', 'parent']
if port_level=='sibling':
port.wire = self
elif port_level == 'parent':
port.internal_wire = self
# Book keeping of ports:
self.ports.append(port)
@property
def ports_by_name(self):
'''triplet representation of port connections
(level, port.system.name, port.name)
(used for serialization)
'''
def port_triplet(p):
'''triplet representation (level, port.system.name, port.name)'''
if p.system is self.parent:
level = 'parent'
elif p.system.parent is self.parent:
level = 'sibling'
else:
raise ValueError('The system of Port {}'.format(repr(p)) +\
'is neither a parent nor a sibling!')
return (level, p.system.name, p.name)
return [port_triplet(p) for p in self.ports]
def connect_by_name(self, s_name, p_name, level='sibling'):
'''Connects the ports named `p_name` of system named `s_name`
to be found at level `level` ('parent' or 'sibling' (default))
'''
# TODO (?) merge the notion of level in the name (make parent a reserved name)
assert level in ['sibling', 'parent']
# 1) find the system:
if level == 'parent':
syst = self.parent
assert self.parent.name == s_name
elif level == 'sibling':
syst = self.parent.subsystems_dict[s_name]
port = syst.ports_dict[p_name]
self.connect_port(port, level)
def __repr__(self):
cls_name = self.__class__.__name__
s = '{:s}({:s}, {:s})'.format(cls_name, repr(self.name), repr(self.type))
return s
def _is_similar(self, other):
'''Wires are *similar* if their class, `type` and `name` are equal
and if their connectivity (`ports_by_name`) is the same
(their parent system are not compared)
'''
if not isinstance(other, Wire):
return NotImplemented
return self.__class__ == other.__class__ and \
self.type == other.type and \
self.name == other.name and \
self.ports_by_name == other.ports_by_name
def _to_json(self):
'''convert the Wire instance to a JSON-serializable object
Wires are serialized with the port connectivity in tuples
(but parent relationship is not serialized)
'''
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'Wire',
'__class__': cls_name,
'name': self.name,
'type': self.type,
'ports': self.ports_by_name
}
# end _to_json
class SignalWire(Wire):
'''Signal Wire for the interconnection of several Systems
through their Input and Output Ports.
Each SignalWire can be connected to a unique Output Port (signal source)
and several Input Ports (signal sinks)
'''
def __init__(self, name, wtype='', parent=None):
super(SignalWire, self).__init__(name, wtype, parent)
def is_connect_allowed(self, port, port_level, raise_error=False):
        '''Check that a connection between SignalWire `self` and a Port `port`
is allowed.
Parameters
----------
`port`: the Port instance to connect to
`port_level`: whether `port` belongs to a 'sibling' (usual case) or a
'parent' system (to enable connections to the upper level)
`raise_error`: if True, raising an error replaces returning False
Returns
-------
allowed: True or False
'''
if port.direction not in ['in', 'out']:
if raise_error:
raise TypeError('Only Input/Output Port can be connected!')
else:
return False
def is_output(port, level):
'''an output port is either:
* a sibling system'port with direction == 'out' or
* a parent system'port with direction == 'in'
'''
if level=='detect':
wire = self
if wire.parent == port.system:
level = 'parent'
elif wire.parent == port.system.parent:
level = 'sibling'
else:
raise ValueError('Port is neither sibling nor parent')
is_out = (level=='sibling' and port.direction == 'out') or \
(level=='parent' and port.direction == 'in')
return is_out
# Now we have an I/O Port for sure:
if is_output(port, port_level):
# check that there is not already a signal source
other_ports = [p for p in self.ports if (is_output(p, 'detect')
and p is not port)]
if other_ports:
if raise_error:
raise ValueError('Only one output port can be connected!')
else:
return False
# Now the I/O aspect is fine. Launch some further checks:
return super(SignalWire, self).is_connect_allowed(port, port_level, raise_error)
def connect_systems(source, dest, s_pname, d_pname, wire_cls=Wire):
'''Connect systems `source` to `dest` using
port names `s_pname` and `d_pname`
with a wire of instance `wire_cls` (defaults to Wire)
The wire is created if necessary
Returns: the wire used for the connection
'''
# 1) find the ports
s_port = source.ports_dict[s_pname]
d_port = dest.ports_dict[d_pname]
    # 2) find a preexisting wire:
w = None
if s_port.wire is not None:
w = s_port.wire
elif d_port.wire is not None:
w = d_port.wire
else:
parent = s_port.system.parent
wname = parent.create_name('wire','W')
wtype = s_port.type
w = wire_cls(wname, wtype, parent)
# 3) Make the connection:
w.connect_port(s_port)
w.connect_port(d_port)
return w
def to_json(py_obj):
'''convert `py_obj` to JSON-serializable objects
`py_obj` should be an instance of `System`, `Wire` or `Port`
'''
if isinstance(py_obj, System):
return py_obj._to_json()
if isinstance(py_obj, Wire):
return py_obj._to_json()
if isinstance(py_obj, Port):
return py_obj._to_json()
raise TypeError(repr(py_obj) + ' is not JSON serializable')
# end to_json
import sys
def _str_to_class(mod_class):
    '''retrieves the class from a "module.class" string'''
mod_name, cls_name = mod_class.split('.')
mod = sys.modules[mod_name]
return getattr(mod, cls_name)
def from_json(json_object):
'''deserializes a sysdiag json object'''
if '__sysdiagclass__' in json_object:
cls = _str_to_class(json_object['__class__'])
if json_object['__sysdiagclass__'] == 'Port':
port = cls(name = json_object['name'], ptype = json_object['type'])
return port
if json_object['__sysdiagclass__'] == 'System':
            # TODO: specialize the instantiation for each class using
# _from_json class methods
syst = cls(name = json_object['name'])
syst.params = json_object['params']
# add ports if any:
for p in json_object['ports']:
syst.add_port(p)
# add subsystems
for s in json_object['subsystems']:
syst.add_subsystem(s)
# add wires
for w_dict in json_object['wires']:
# 1) decode the wire:
w_cls = _str_to_class(w_dict['__class__'])
w = w_cls(name = w_dict['name'], wtype = w_dict['type'])
syst.add_wire(w)
# make the connections:
for level, s_name, p_name in w_dict['ports']:
w.connect_by_name(s_name, p_name, level)
# end for each wire
return syst
return json_object
def json_load(json_dump):
import json
syst = json.loads(json_dump, object_hook=from_json)
return syst
| pierre-haessig/sysdiag | sysdiag.py | Python | mit | 21,428 |
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="sankey", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/sankey/_hoverlabel.py | Python | mit | 2,055 |
import ConfigParser
import os
import sys
import utils
## Create global names and functions ##
# Load file locations and configuration options
site_list_location = os.path.dirname(__file__) + '/sitelist.txt'
parser = ConfigParser.RawConfigParser()
parser.read(os.path.dirname(__file__) + '/config.cfg')
general = dict(parser.items('general'))
gmail_account = dict(parser.items('gmail_account'))
write_error = parser.getboolean('logging', 'log_errors')
write_change = parser.getboolean('logging', 'log_changes')
write_unchanged = parser.getboolean('logging', 'log_unchanged')
receiver = parser.get('mailing', 'mail_receivers')
mail_error = parser.getboolean('mailing', 'send_errors')
mail_change = parser.getboolean('mailing', 'send_changes')
# Name of this tool
tool_name = 'Dynamic DNS Updater'
# Tracks if a logger was created
logger = False
# Tracks if a mailer was created
mailer = False
# Dictionary of error codes and their corresponding messages
error_messages = {
'invalid_login' : 'Your Gmail username or password is incorrect.',
'logger_missing' : 'Problem writing to log file.',
'read_cache' : 'Problem reading from IP cache.',
'read_sitelist' : 'Problem reading the sitelist.',
'empty_url' : 'You have not provided an update URL.',
'check_ip' : 'Problem checking your IP address.',
'update_dns' : 'Problem updating your Dynamic DNS.'
}
# Handles logging and mailing of errors, as enabled by the user
def error_processor(code):
if write_error and logger: logger.log_error(error_messages[code])
if mail_error and mailer:
mailer.send_error(receiver, error_messages[code])
print '%s: Error - %s' % (tool_name, error_messages[code])
sys.exit()
## Create instances of utility classes ##
# Only create logger object if the user has chosen to log an event
if write_error or write_change or write_unchanged:
try: logger = utils.logger.Logger(general['log_file'])
except: logger = False
# Only create mailer object if user has chosen to mail an event
if mail_error or mail_change:
try: mailer = utils.mailer.Mailer(
gmail_account['gmail_user'],
gmail_account['gmail_password'])
except: error_processor('invalid_login')
# Notify user by mail that initializing a logger has failed, if they
# enabled any logging of events
if not logger and mailer:
if write_error or write_change or write_unchanged:
error_processor('logger_missing')
try: cacher = utils.cacher.Cacher(general['ip_cache_file'])
except: error_processor('read_cache')
try: checker = utils.checker.Checker(site_list_location)
except: error_processor('read_sitelist')
try: updater = utils.updater.Updater(general['update_urls'])
except: error_processor('empty_url')
## Main ##
old_ip = cacher.get_ip()
try: current_ip = checker.get_ip()
except: error_processor('check_ip')
# If IP has not changed, exit the program
if old_ip == current_ip:
if write_unchanged:
logger.log_no_change(old_ip)
print '%s: %s remains unchanged.' % (tool_name, old_ip)
sys.exit()
try: updater.update_dns()
except: error_processor('update_dns')
cacher.store_ip(current_ip)
print '%s: %s has been updated to %s' % (tool_name, old_ip, current_ip)
if write_change: logger.log_change(old_ip, current_ip)
if mail_change and mailer:
mailer.send_change(receiver, old_ip, current_ip)
| christianrenier/dynamic-dns-updater | __main__.py | Python | mit | 3,280 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#important: before running this demo, make certain that you import the library
#'paho.mqtt.client' into python (https://pypi.python.org/pypi/paho-mqtt)
#also make certain that ATT_IOT is in the same directory as this script.
import traceback # for logging exceptions
import logging
logging.getLogger().setLevel(logging.INFO) #before doing anything else, set the desired logging level, so all modules log correctly.
from ConfigParser import *
import RPi.GPIO as GPIO #provides pin support
import ATT_IOT as IOT #provide cloud support
from time import sleep #pause the app
import picamera
import cameraStreamer
import sys
import datetime # for generating a unique file name
ConfigName = 'rpicamera.config'
hasLISIPAROI = False
LISIPAROIPin = 4
streamer = None
camera = None
PreviewId = 1 # turn on/off preview on the stream server
RecordId = 2 # turn on/off recording on disk
StreamServerId = 3 # assign the destination to stream the video to.
ToggleLISIPAROIId = 4
PictureId = 5
_isPreview = False
_isRecording = False
def tryLoadConfig():
'load the config from file'
global hasLISIPAROI, LISIPAROIPin
c = ConfigParser()
if c.read(ConfigName):
#set up the ATT internet of things platform
IOT.DeviceId = c.get('cloud', 'deviceId')
IOT.ClientId = c.get('cloud', 'clientId')
IOT.ClientKey = c.get('cloud', 'clientKey')
        hasLISIPAROI = c.getboolean('camera', 'has LISIPAROI')
logging.info("has LISIPAROI:" + str(hasLISIPAROI) )
if hasLISIPAROI:
LISIPAROIPin = int(c.get('camera', 'LISIPAROI pin'))
logging.info("LISIPAROI pin:" + str(LISIPAROIPin) )
return True
else:
return False
def setupCamera():
'create the camera responsible for recording video and streaming object responsible for sending it to the server.'
global streamer, camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
camera.framerate = 30
streamer = cameraStreamer.CameraStreamer(camera)
def setBacklight(value):
'''turn on/off the backlight
value: string ('true' or 'false')
    returns: true when input was successfully processed, otherwise false
'''
if value == "true":
GPIO.output(LISIPAROIPin, GPIO.HIGH)
elif value == "false":
GPIO.output(LISIPAROIPin, GPIO.LOW)
else:
print("unknown value: " + value)
    IOT.send(value, ToggleLISIPAROIId) #provide feedback to the cloud that the operation was successful
def setPreview(value):
    global _isPreview
    if _isRecording:
        print("recording not allowed during preview, shutting down recording.")
        setRecord("false")
    if value == "true":
        _isPreview = True
        streamer.start_preview()
    elif value == "false":
        _isPreview = False
        streamer.stop_preview()
    else:
        print("unknown value: " + value)
    IOT.send(value, PreviewId) #provide feedback to the cloud that the operation was successful
def setRecord(value):
    global _isRecording
    if _isPreview:
        print("preview not allowed during recording, shutting down preview.")
        setPreview("false")
    if value == "true":
        _isRecording = True
        camera.resolution = (1920, 1080) #set to max resolution for record
        camera.start_recording('video' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.h264')
    elif value == "false":
        _isRecording = False
        camera.stop_recording()
        camera.resolution = (640, 480) #reset resolution for preview
    else:
        print("unknown value: " + value)
    IOT.send(value, RecordId) #provide feedback to the cloud that the operation was successful
def takePicture():
    'take a single picture, max resolution'
    prevWasPreview = _isPreview
    prevWasRecording = _isRecording
    if _isRecording:
        print("record not allowed while taking picture.")
        setRecord("false")
    if not _isPreview:
        print("preview required for taking picture.")
        setPreview("true")
        sleep(2) # if preview was not running yet, give it some time to startup
    camera.capture('picture' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.jpg')
    if prevWasPreview:
        print("reactivating preview.")
        setPreview("true")
    elif prevWasRecording:
        print("reactivating record.")
        setRecord("true")
#callback: handles values sent from the cloudapp to the device
def on_message(id, value):
if id.endswith(str(ToggleLISIPAROIId)) == True:
value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
setBacklight(value)
elif id.endswith(str(PreviewId)) == True:
value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
setPreview(value)
elif id.endswith(str(RecordId)) == True:
value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
setRecord(value)
elif id.endswith(str(StreamServerId)) == True:
streamer.streamServerIp = value
        IOT.send(value, StreamServerId) #provide feedback to the cloud that the operation was successful
elif id.endswith(str(PictureId)) == True:
if value.lower() == "true":
takePicture()
else:
print("unknown actuator: " + id)
def setupCloud():
IOT.on_message = on_message
#make certain that the device & it's features are defined in the cloudapp
IOT.connect()
if hasLISIPAROI:
IOT.addAsset(ToggleLISIPAROIId, "LISIPAROI", "Control the light on the camera", False, "boolean")
IOT.addAsset(PreviewId, "Preview", "Show/close a preview on the monitor that is connected to the RPI", True, "boolean")
IOT.addAsset(RecordId, "Record", "Start/stop recording the video stream on sd-card", True, "boolean")
    IOT.addAsset(PictureId, "Picture", "take a picture (max resolution) and store on sd-card", True, "boolean")
IOT.addAsset(StreamServerId, "Stream server", "set the ip address of the server that manages the video", True, "string")
# get any previously defined settings
streamer.streamServerIp = IOT.getAssetState(StreamServerId)
if streamer.streamServerIp:
streamer.streamServerIp = streamer.streamServerIp['state']['value']
logging.info("sending stream to: " + streamer.streamServerIp)
else:
logging.info("no stream endpoint defined")
IOT.subscribe() #starts the bi-directional communication
# set current state of the device
IOT.send("false", ToggleLISIPAROIId)
IOT.send("false", PreviewId)
IOT.send("false", RecordId)
tryLoadConfig()
setupCamera() # needs to be done before setting up the cloud, cause we will get the settings from the cloud and assign them to the camera.
setupCloud()
if hasLISIPAROI:
try:
#setup GPIO using Board numbering
#GPIO.setmode(GPIO.BCM)
GPIO.setmode(GPIO.BOARD)
#set up the pins
GPIO.setup(LISIPAROIPin, GPIO.OUT)
except:
logging.error(traceback.format_exc())
#main loop: run as long as the device is turned on
while True:
#main thread doesn't have to do much, all is handled on the thread calling the message handler (for the actuators)
sleep(5)
| ATT-JBO/RPICameraRemote | RPICamera/RPICamera/RPICameraRemote.py | Python | mit | 7,821 |
from time import sleep
import unittest2 as unittest
from tweepy.api import API
from tweepy.auth import OAuthHandler
from tweepy.models import Status
from tweepy.streaming import Stream, StreamListener
from config import create_auth
from test_utils import mock_tweet
from mock import MagicMock, patch
class MockStreamListener(StreamListener):
def __init__(self, test_case):
super(MockStreamListener, self).__init__()
self.test_case = test_case
self.status_count = 0
self.status_stop_count = 0
self.connect_cb = None
def on_connect(self):
if self.connect_cb:
self.connect_cb()
def on_timeout(self):
self.test_case.fail('timeout')
return False
def on_error(self, code):
print("response: %s" % code)
return True
def on_status(self, status):
self.status_count += 1
self.test_case.assertIsInstance(status, Status)
if self.status_stop_count == self.status_count:
return False
class TweepyStreamTests(unittest.TestCase):
def setUp(self):
self.auth = create_auth()
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener, timeout=3.0)
def tearDown(self):
self.stream.disconnect()
def test_userstream(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream()
self.assertEqual(self.listener.status_count, 1)
def test_userstream_with_params(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream(_with='user', replies='all', stall_warnings=True)
self.assertEqual(self.listener.status_count, 1)
def test_sample(self):
self.listener.status_stop_count = 10
self.stream.sample()
self.assertEquals(self.listener.status_count,
self.listener.status_stop_count)
def test_filter_track(self):
self.listener.status_stop_count = 5
phrases = ['twitter']
self.stream.filter(track=phrases)
self.assertEquals(self.listener.status_count,
self.listener.status_stop_count)
def test_track_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(track=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['track'])
def test_follow_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(follow=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['follow'])
class TweepyStreamBackoffTests(unittest.TestCase):
def setUp(self):
#bad auth causes twitter to return 401 errors
self.auth = OAuthHandler("bad-key", "bad-secret")
self.auth.set_access_token("bad-token", "bad-token-secret")
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener)
def tearDown(self):
self.stream.disconnect()
def test_exp_backoff(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=100.0)
self.stream.sample()
# 1 retry, should be 4x the retry_time
self.assertEqual(self.stream.retry_time, 4.0)
def test_exp_backoff_cap(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=3.0)
self.stream.sample()
# 1 retry, but 4x the retry_time exceeds the cap, so should be capped
self.assertEqual(self.stream.retry_time, 3.0)
mock_resp = MagicMock()
mock_resp.return_value.status = 420
@patch('httplib.HTTPConnection.getresponse', mock_resp)
def test_420(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0, retry_count=0,
retry_time=1.0, retry_420=1.5, retry_time_cap=20.0)
self.stream.sample()
# no retries, but error 420, should be double the retry_420, not double the retry_time
self.assertEqual(self.stream.retry_time, 3.0)
| dnr2/fml-twitter | tweepy-master/tests/test_streaming.py | Python | mit | 4,635 |
import sys, math, os
import matplotlib.pyplot as plt
def main():
# Check that there's at least one argument
if len(sys.argv) < 2:
print("Usage python {} <file1> [<file2> ...]".format(sys.argv[0]))
return 1
# Automatically detect if decayed
if "decayed" in sys.argv[1]:
plotDecayed = True
else:
plotDecayed = False
# Read input file
    fil = "finalGraph.in"
    lstyles = None; labs = None
    if os.path.isfile(fil):
with open(fil, "r") as fread:
lstyles = fread.readline().strip().split()
labs = []
for line in fread:
labs.append(line.strip())
lowZ = 27 # Lowest z value to represent
# Read "species.dat" and store all the values in lists
species = "../../data/species.dat"
atomicNum = []; atomicMass = []; namesZ = {}
with open(species, "r") as fread:
for line in fread:
lnlst = line.split()
# Correct special names
if lnlst[1] == "d" or lnlst[2] == "0":
lnlst[1] = "h"
# Now relate positions with atomic numbers, atomic masses, and names
zNum = int(lnlst[0]) - int(lnlst[2])
atomicNum.append(zNum)
atomicMass.append(int(lnlst[0]))
namesZ[lnlst[1]] = zNum
# Read all initial solar values
solar = "../../data/solarVals.dat"
solarValues = {}
with open(solar, "r") as fread:
for line in fread:
lnlst = line.split()
isotName = lnlst[0] + lnlst[2]
# Add mass fraction value per atomic number
key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
solarValues[key] = solarValues.get(key, 0) + val
# Go file by file
numDens = []
for archivo in sys.argv[1:]:
# Open file for reading
dens = []
fread = open(archivo, "r")
# Each line has mass, temperature, rho, radiat
# and elements in number fraction
newline = None
for line in fread:
if "#" in line:
continue
lnlst = line.split()
if len(lnlst) == 0:
if plotDecayed:
break
else:
continue
if not plotDecayed:
# Surface (newline[0] is the mass)
prevline = newline
newline = [float(x) for x in lnlst]
if newline[0] > 0.85:
break
if plotDecayed:
dens.append(float(lnlst[1]))
# Close file
fread.close()
# Calculate values of interest
if plotDecayed:
numDens.append(dens)
else:
numDens.append([(x + y)*0.5 for (x, y) in
zip(prevline[4:], newline[4:])])
# Calculate now the agb values and print the surface mass fractions per
# each isotope
print("# Surface number fraction values")
agbValues = []
for ii in range(len(numDens)):
dic = {}
dens = numDens[ii]
# Print the model name
print("# {}".format(sys.argv[ii + 1]))
# Add the values for each element
for jj in range(len(atomicNum)):
key = atomicNum[jj]
dic[key] = dic.get(key, 0) + dens[jj]*atomicMass[jj]
# Print the number fraction
print(dens[jj])
agbValues.append(dic)
print("")
# Now identify iron:
ironNumber = namesZ["fe"]
# Now divide every element by iron
for dens in agbValues:
ironDens = dens[ironNumber]
for key in dens:
dens[key] /= ironDens
# Solar as well
ironDens = solarValues[ironNumber]
for key in solarValues:
solarValues[key] /= ironDens
# Now create the final values
finalValues = []
zList = [x for x in solarValues.keys()]
zList.sort()
for dens in agbValues:
thisDens = []
for key in zList:
if key < lowZ:
continue
val = math.log10(dens[key]/solarValues[key])
thisDens.append(val)
finalValues.append(thisDens)
# Create xaxis:
xx = [x for x in zList if x >= lowZ]
# Print final values
print("# [X/Fe] values")
for ii in range(len(sys.argv[1:])):
print("# {}".format(sys.argv[ii + 1]))
print("")
for jj in range(len(xx)):
print(xx[jj], finalValues[ii][jj])
print("")
# From zList create contIndx. This list contains a number of
# tuples with the first and last index of any contiguous sequence
indx = 1; first = 0
prevKey = None; contIndx = []
for key in xx:
if prevKey is None:
prevKey = key
continue
# Check if keys are contiguous
if key - prevKey > 1:
contIndx.append((first, indx))
first = indx
prevKey = key
indx += 1
# Add last tuple
contIndx.append((first, indx + 1))
# Begin plot
figure = plt.figure()
plt.xlabel("Atomic number Z", size = 14)
plt.ylabel("[X/Fe]", size = 14)
# Plot values
if labs is None:
labs = sys.argv[1:]
ii = 0
for dens in finalValues:
# Plot first range
first, last = contIndx[0]
if lstyles is None:
lin, = plt.plot(xx[first:last], dens[first:last],
label = labs[ii], lw = 2)
else:
lin, = plt.plot(xx[first:last], dens[first:last], lstyles[ii],
label = labs[ii], lw = 2)
# Get color and line style
col, lst = lin.get_color(), lin.get_linestyle()
colStyle = col + lst
for elem in contIndx[1:]:
first, last = elem
plt.plot(xx[first:last], dens[first:last], colStyle, lw = 2)
ii += 1
# Set floating text
namAtm = {"Co":27, "Ge":32, "Se":34, "Kr":36, "Sr":38, "Zr":40,
"Mo":42, "Pd":46, "Cd":48, "Sn":50, "Te":52, "Ba":56,
"Ce":58, "Nd":60, "Sm":62, "Gd":64, "Dy":66, "Er":68,
"Yb":70, "Hf":72, "W":74, "Os":76, "Hg":80, "Pb":82,
"Rb":37, "Cs":55}
rNamAtm = ["Rb", "Cs"]
for name in namAtm:
yVal = 0
for ii in range(len(xx)):
if xx[ii] == namAtm[name]:
yVal = finalValues[-1][ii]
break
plt.text(namAtm[name] - 0.5, yVal*1.01, name, size = 14)
if name in rNamAtm:
plt.plot(namAtm[name], yVal, "ro")
else:
plt.plot(namAtm[name], yVal, "ko")
plt.legend(loc=0, ncol = 2)
plt.text(30, 1.1, "3M$_\odot$", fontsize = 16)
plt.show()
if __name__ == "__main__":
main()
| AndresYague/Snuppat | output/figuresAndTables/finalGraph.py | Python | mit | 7,072 |
#http://codeforces.com/problemset/problem/71/A
T = int(raw_input())
while(not T == 0):
word = str(raw_input())
if len(word)>10:
print word[0]+str(len(word[1:len(word)-1]))+word[len(word)-1]
else:
print word
T-=1
| creativcoder/AlgorithmicProblems | codeforces/long_words.py | Python | mit | 227 |
# -*- coding: utf-8 -*-
# Scrapy settings for aCloudGuru project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'aCloudGuru'
SPIDER_MODULES = ['aCloudGuru.spiders']
NEWSPIDER_MODULE = 'aCloudGuru.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'aCloudGuru (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'aCloudGuru.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'aCloudGuru.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'aCloudGuru.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| miztiik/scrapy-Demos | aCloudGuru/aCloudGuru/settings.py | Python | mit | 3,160 |
import asyncio
import functools
import random
import time
from testing import Client
from testing import default_test_setup
from testing import gen_data
from testing import gen_points
from testing import gen_series
from testing import InsertError
from testing import PoolError
from testing import QueryError
from testing import run_test
from testing import Series
from testing import Server
from testing import ServerError
from testing import SiriDB
from testing import TestBase
from testing import UserAuthError
from testing import parse_args
TIME_PRECISION = 'ms'
class TestCompression(TestBase):
title = 'Test compression'
GEN_POINTS = functools.partial(
gen_points, n=100, time_precision=TIME_PRECISION)
async def _test_series(self, client):
result = await client.query('select * from "series float"')
self.assertEqual(result['series float'], self.series_float)
result = await client.query('select * from "series int"')
self.assertEqual(result['series int'], self.series_int)
result = await client.query(
'list series name, length, type, start, end')
result['series'].sort()
self.assertEqual(
result,
{'columns': ['name', 'length', 'type', 'start', 'end'],
'series': [[
'series float',
10000, 'float',
self.series_float[0][0],
self.series_float[-1][0]], [
'series int',
10000, 'integer',
self.series_int[0][0],
self.series_int[-1][0]],
]})
@default_test_setup(
1,
time_precision=TIME_PRECISION,
optimize_interval=500,
compression=True)
async def run(self):
await self.client0.connect()
self.series_float = gen_points(
tp=float, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
random.shuffle(self.series_float)
self.series_int = gen_points(
tp=int, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
random.shuffle(self.series_int)
self.assertEqual(
await self.client0.insert({
'series float': self.series_float,
'series int': self.series_int
}), {'success_msg': 'Successfully inserted 20000 point(s).'})
self.series_float.sort()
self.series_int.sort()
await self._test_series(self.client0)
await self.client0.query('drop series /.*/ set ignore_threshold true')
# Create some random series and start 25 insert task parallel
series = gen_series(n=40)
for i in range(40):
await self.client0.insert_some_series(
series,
n=0.8,
timeout=0,
points=self.GEN_POINTS)
# Check the result
await self.assertSeries(self.client0, series)
for i in range(40):
await self.client0.insert_some_series(
series,
n=0.8,
timeout=0,
points=self.GEN_POINTS)
# Check the result
await self.assertSeries(self.client0, series)
self.client0.close()
result = await self.server0.stop()
self.assertTrue(result)
await self.server0.start(sleep=20)
await self.client0.connect()
# Check the result after rebooting the server
await self.assertSeries(self.client0, series)
if __name__ == '__main__':
random.seed(1)
parse_args()
run_test(TestCompression())
| transceptor-technology/siridb-server | itest/test_compression.py | Python | mit | 3,689 |