max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
tmp/wswp/mongo_queue.py | godontop/python | 0 | 12796651 | <reponame>godontop/python<gh_stars>0
from datetime import datetime, timedelta
from pymongo import MongoClient, errors
class MongoQueue:
# possible states of a download
OUTSTANDING, PROCESSING, COMPLETE = range(3)
def __init__(self, client=None, timeout=300):
self.client = MongoClient() if client is None else client
self.db = self.client.cache
self.timeout = timeout
def __bool__(self):
"""Returns True if there are more jobs to process
"""
record = self.db.crawl_queue.find_one({'status': {'$ne': self.COMPLETE}})
return True if record else False
def push(self, url):
"""Add new URL to queue if does not exist
"""
try:
self.db.crawl_queue.insert({'_id': url, 'status': self.OUTSTANDING})
        except errors.DuplicateKeyError:
            pass  # this URL is already in the queue
def pop(self):
"""Get an outstanding URL from the queue and set its status to
processing. If the queue is empty a KeyError exception is raised.
"""
record = self.db.crawl_queue.find_and_modify(query={'status': self.OUTSTANDING},
update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}})
if record:
return record['_id']
else:
self.repair()
raise KeyError()
def complete(self, url):
self.db.crawl_queue.update({'_id': url}, {'$set': {'status': self.COMPLETE}})
def repair(self):
"""Release stalled jobs
"""
record = self.db.crawl_queue.find_and_modify(
query={'timestamp': {'$lt': datetime.now() - timedelta(seconds=self.timeout)}, 'status': {'$ne': self.COMPLETE}},
update={'$set': {'status': self.OUTSTANDING}})
if record:
print('Released:', record['_id'])
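# A minimal usage sketch (not part of the original module): it assumes a local
# mongod instance and an older pymongo release (<4.x) where insert(),
# find_and_modify() and update() still exist, as used by the class above.
if __name__ == '__main__':
    queue = MongoQueue(timeout=60)
    queue.push('http://example.com/')   # enqueue a URL (duplicates are ignored)
    url = queue.pop()                   # mark it PROCESSING and get it back
    queue.complete(url)                 # mark it COMPLETE
    print('queue has pending jobs:', bool(queue))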
| 2.84375 | 3 |
Scripts/s4cl_tests/utils/common_function_utils_tests.py | ColonolNutty/Sims4CommunityLibrary | 118 | 12796652 | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.testing.common_assertion_utils import CommonAssertionUtils
from sims4communitylib.testing.common_test_service import CommonTestService
from sims4communitylib.utils.common_function_utils import CommonFunctionUtils
# noinspection PyMissingOrEmptyDocstring
@CommonTestService.test_class(ModInfo.get_identity())
class CommonFunctionUtilsTests:
@staticmethod
@CommonTestService.test(True, True, True, True)
@CommonTestService.test(True, False, True, False)
@CommonTestService.test(True, False, False, True)
@CommonTestService.test(False, False, False, False)
def run_predicates_as_one_should_work_properly(func_result_one: bool, func_result_two: bool, all_must_pass: bool, expected_result: bool):
def _function_one(*_, **__) -> Any:
return func_result_one
def _function_two(*_, **__) -> Any:
return func_result_two
result = CommonFunctionUtils.run_predicates_as_one((_function_one, _function_two), all_must_pass=all_must_pass)()
CommonAssertionUtils.are_equal(result, expected_result)
@staticmethod
@CommonTestService.test(True, False)
@CommonTestService.test(False, True)
def run_predicate_with_reversed_result_should_work_properly(func_result: bool, expected_result: bool):
def _function(*_, **__) -> Any:
return func_result
result = CommonFunctionUtils.run_predicate_with_reversed_result(_function)()
CommonAssertionUtils.are_equal(result, expected_result)
@staticmethod
@CommonTestService.test()
def run_with_arguments_should_work_properly() -> None:
_additional_value = 'No'
_additional_key_word_value = 'What'
normal_val = 'one'
normal_key_val = 'two'
def _function(normal_arg: str, value_one: str, normal_key_arg: str=None, key_value: str=None) -> Any:
CommonAssertionUtils.are_equal(value_one, _additional_value)
CommonAssertionUtils.are_equal(key_value, _additional_key_word_value)
CommonAssertionUtils.are_equal(normal_arg, normal_val)
CommonAssertionUtils.are_equal(normal_key_arg, normal_key_val)
if normal_arg == normal_val and normal_key_arg == normal_key_val and value_one == _additional_value and key_value == _additional_key_word_value:
return True
result = CommonFunctionUtils.run_with_arguments(_function, _additional_value, key_value=_additional_key_word_value)(normal_val, normal_key_arg=normal_key_val)
CommonAssertionUtils.is_true(result, message='Failed to send proper arguments: {}'.format(result))
| 1.773438 | 2 |
check_db_connection.py | dondemonz/python_training_mantis | 0 | 12796653 | <filename>check_db_connection.py
import pymysql.cursors
db = pymysql.connect(host="127.0.0.1", database="bugtracker", user="root", password="")
try:
cursor = db.cursor()
cursor.execute("select * from mantis_project_table")
for row in cursor.fetchall():
print(row)
finally:
db.close() | 2.6875 | 3 |
sockfilter/error.py | cardforcoin/sockfilter | 1 | 12796654 | __all__ = ['SockFilterError']
import collections
class SockFilterError(Exception):
Tuple = collections.namedtuple('SockFilterError', ['address'])
def __init__(self, address):
self.address = address
def __repr__(self):
return repr(self._tuple)
def __str__(self):
return str(self._tuple)
def __unicode__(self):
return unicode(self._tuple)
def __eq__(self, other):
if not hasattr(other, '_tuple'):
return False
return self._tuple == other._tuple
def __ne__(self, other):
if not hasattr(other, '_tuple'):
return False
return self._tuple != other._tuple
@property
def _tuple(self):
return self.Tuple(address=self.address)
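# Minimal sketch (not part of the original module): two errors with the same
# address compare equal, and repr() shows the underlying namedtuple form.
if __name__ == '__main__':
    err_a = SockFilterError(('127.0.0.1', 8080))
    err_b = SockFilterError(('127.0.0.1', 8080))
    print(err_a == err_b)  # True
    print(repr(err_a))     # SockFilterError(address=('127.0.0.1', 8080))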
| 2.953125 | 3 |
searching/search_in_sorter_matrix.py | maanavshah/coding-interview | 0 | 12796655 | <filename>searching/search_in_sorter_matrix.py
# O(m + n) time | O(1) space
# m is row and n is col
def searchInSortedMatrix(matrix, target):
row = 0
col = len(matrix[0]) - 1
while row < len(matrix) and col > -1:
if target == matrix[row][col]:
return [row, col]
if target > matrix[row][col]:
row += 1
else:
col -= 1
return [-1, -1]
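# Minimal sketch (not part of the original solution): search a small matrix
# whose rows and columns are both sorted in ascending order.
if __name__ == "__main__":
    grid = [
        [1, 4, 7],
        [2, 5, 8],
        [3, 6, 9],
    ]
    print(searchInSortedMatrix(grid, 6))   # [2, 1]
    print(searchInSortedMatrix(grid, 10))  # [-1, -1]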
| 3.59375 | 4 |
MC_Event_Generator_with_Vegas/event_output.py | GuojinTseng/MY_MC_Generator | 0 | 12796656 | #==========================================================#
# Process: e+e- -> Z/gamma -> mu+mu-
# Author: <NAME>
# Date: 2018.7.16
# Version: 1.0
#==========================================================#
class Event_Output(object):
def output(self, i, p1, p2, p3, p4):
with open("event.txt","a") as events:
events.write("===================="+"event "+str(i)+"====================")
events.write("\n")
events.write("pem: "+str(p1))
events.write("\n")
events.write("pep: "+str(p2))
events.write("\n")
events.write("pmm: "+str(p3))
events.write("\n")
events.write("pmp: "+str(p4))
events.write("\n")
events.write("\n")
| 2.375 | 2 |
setup.py | anna-money/aio-background | 7 | 12796657 | import re
from pathlib import Path
from setuptools import setup
install_requires = ["croniter>=1.0.1"]
def read(*parts):
return Path(__file__).resolve().parent.joinpath(*parts).read_text().strip()
def read_version():
regexp = re.compile(r"^__version__\W*=\W*\"([\d.abrc]+)\"")
for line in read("aio_background", "__init__.py").splitlines():
match = regexp.match(line)
if match is not None:
return match.group(1)
else:
raise RuntimeError("Cannot find version in aio_background/__init__.py")
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="aio-background",
version=read_version(),
description="A thing to run tasks in the background",
long_description=long_description,
long_description_content_type="text/markdown",
platforms=["macOS", "POSIX", "Windows"],
author="<NAME>",
python_requires=">=3.9",
project_urls={},
author_email="<EMAIL>",
license="MIT",
packages=["aio_background"],
package_dir={"aio_background": "./aio_background"},
package_data={"aio_background": ["py.typed"]},
install_requires=install_requires,
include_package_data=True,
)
| 2.046875 | 2 |
ex43.py | Marcelo1080p/cev | 0 | 12796658 | peso = float(input('Qual é o seu Peso? Kg'))
altura = float(input('Qual é a sua Altura? m'))
imc = peso / (altura ** 2)
print('O seu índice de massa corporal é {:.1f}'.format(imc))
if imc <= 18.5:
print('Você esta abaixo do Peso!')
elif imc <= 24.9:
print('Peso ideal. Parabéns!')
elif imc <= 29.9:
print('Levemente acima do Peso!')
elif imc <= 34.9:
print('Obesidade grau 1')
elif imc <= 39.9:
print('Obesidade grau 2 (Severa)')
else:
    print('Obesidade grau 3 (mórbida)') | 3.875 | 4 |
mocks/usocket.py | stefanhoelzl/alarm-clock | 1 | 12796659 | from socket import * | 1.125 | 1 |
backend/swagger_server/service/ocr.py | LeBoucEtMistere/ICHack20 | 5 | 12796660 | from google.cloud import vision
import io
import re
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "swagger_server/firebase_key.json"
client = vision.ImageAnnotatorClient()
def process_image(image_file):
total = -1
with io.open(image_file, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
# response = client.document_text_detection(image=image)
response = client.text_detection(image=image)
document = response.text_annotations[1:]
items = []
lines = {}
tally = {}
tally2 = {}
first_line = document[0].description
for text in document:
top_x_axis = text.bounding_poly.vertices[0].x
top_y_axis = text.bounding_poly.vertices[0].y
bottom_y_axis = text.bounding_poly.vertices[3].y
if top_y_axis not in lines:
lines[top_y_axis] = [(top_y_axis, bottom_y_axis), []]
for s_top_y_axis, s_item in lines.items():
if top_y_axis < s_item[0][1]:
lines[s_top_y_axis][1].append((top_x_axis, text.description))
break
for _, item in lines.items():
if item[1]:
words = sorted(item[1], key=lambda t: t[0])
items.append(
(item[0], ' '.join([word for _, word in words]), words))
for i in range(len(items)):
items[i] = items[i][1]
orders = []
pattern = re.compile(
"(([0-9]?[x]?[ ]?)([0-9a-zA-Z.']+[ ])+[$£€]?[0-9]+\.[0-9][0-9])")
total_regex = re.compile(
"(([0-9]+/[0-9]+)?[ ]?([0-9]+[:][0-9]+)?)?[ ]?((BALANCE DUE)?(Amount)?((Total)?(total)?(TOTAL)?[ ]?(Due)?(TO PAY)?))[ ]?[:]?[ ]?(([£$€]?)([0-9]+[.][0-9][0-9]))")
for i in range(len(items)):
if pattern.match(items[i]) and not total_regex.match(items[i]) and not re.match("Total Tax", items[i]) and not re.match("Tax", items[i]) and not re.match("Sales Tax", items[i]) and not re.match("Visa", items[i]) and not re.match("Subtotal", items[i]):
orders.append(items[i])
price = "[0-9]+\.[0-9]+"
for i in orders:
p = re.findall(price, i)[0]
tally[i.split(p)[0]] = float(p)
tally2["store"] = first_line
for i in range(len(items)):
if "$" in items[i]:
currency = "USD"
elif "€" in items[i]:
currency = "EUR"
elif "£" in items[i]:
currency = "GBP"
else:
currency = "UKN"
if total_regex.match(items[i]) and not re.match("[$]?[0-9]+\.[0-9][0-9]", items[i]):
tot = items[i]
p = re.findall(price, tot)[0]
tally2["total"] = float(p)
break
else:
tot = -1
tally2["currency"] = currency
return tally, tally2
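# Minimal usage sketch (hypothetical image path, not part of the original
# service): requires valid Google Cloud Vision credentials at
# swagger_server/firebase_key.json.
# items, summary = process_image('receipt.jpg')
# print(summary.get('store'), summary.get('total'), summary.get('currency'))
# for name, price in items.items():
#     print(name, price)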
| 2.453125 | 2 |
sceptre/__init__.py | bfurtwa/SCeptre | 5 | 12796661 | from .sceptre import *
__version__ = '1.1' | 1.078125 | 1 |
stripe_payment/models.py | aykutgk/GoNaturalistic | 0 | 12796662 | <reponame>aykutgk/GoNaturalistic
from django.db import models
from django.contrib.auth.models import User
#Stripe###########################
class Stripe_Error(models.Model):
user = models.ForeignKey(User, verbose_name="User", blank=True, null=True,on_delete=models.SET_NULL)
date = models.DateTimeField("Error Date and Time", auto_now_add=True)
json_data = models.TextField("Json Data",blank=True)
status = models.CharField("Http Status",max_length=255,blank=True)
type = models.CharField("Error type",max_length=255,blank=True)
code = models.CharField("Error code",max_length=255,blank=True)
param = models.CharField("Error param",max_length=255,blank=True)
message = models.CharField("Error message",max_length=255,blank=True)
class Meta:
ordering = ('-date',)
verbose_name = "Stripe Error"
def __unicode__(self):
return str(self.date)
| 2.171875 | 2 |
tql/algo_ml/models/classifier/baseline_xgb.py | Jie-Yuan/1_DataMining | 14 | 12796663 | <filename>tql/algo_ml/models/classifier/baseline_xgb.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'xgb'
__author__ = 'JieYuan'
__mtime__ = '19-1-2'
"""
import xgboost as xgb
class BaselineXGB(object):
"""
    To be added: https://xgboost.readthedocs.io/en/release_0.81/tutorials/feature_interaction_constraint.html
    Newer versions of xgboost support feature interaction constraints (interaction_constraints)
tree_method='exact'
b_xgb = BaselineXGB(X, y, learning_rate=0.01)
b_xgb.run()
"""
def __init__(self, X, y, learning_rate=0.01, missing=None, metrics='auc', feval=None, objective='binary:logistic',
                 scale_pos_weight=1, n_jobs=8, seed=0):  # seed must not be None
"""
https://blog.csdn.net/fuqiuai/article/details/79495910
https://blog.csdn.net/fantacy10000/article/details/84504394
:param objective:
'binary:logistic', 'multi:softmax', 'reg:linear'
:param metrics: string, list of strings or None, optional (default=None)
binary: 'auc', 'binary_error', 'binary_logloss'
multiclass: 'multi_error', 'multi_logloss'
https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters
:param feval:
def feval(y_pred, y_true):
y_true = y_true.get_label()
return '1 / (1 + rmse)', 1 /(rmse(y_true, y_pred) + 1), True
:param scale_pos_weight:
"""
self.data = xgb.DMatrix(X, y, missing=missing)
self.objective = objective
self.metrics = metrics
self.feval = feval
self.best_iter = None
# sklearn params
self.params_sk = dict(
booster='gbtree',
objective=objective,
max_depth=7,
learning_rate=learning_rate,
            gamma=0.0,  # minimum loss reduction (gain) required to make a split; controls which splits are useful
            min_child_weight=1,  # minimum sum of instance weight needed in a child node; helps avoid overfitting
            subsample=0.8,
            colsample_bytree=0.8,  # fraction of columns sampled per tree
            colsample_bylevel=0.8,  # fraction of columns sampled per tree level
reg_alpha=0.0,
reg_lambda=0.0,
scale_pos_weight=scale_pos_weight,
random_state=seed,
n_jobs=n_jobs,
silent=True
)
self.params = self.params_sk.copy()
self.params['eta'] = self.params.pop('learning_rate')
self.params['alpha'] = self.params.pop('reg_alpha')
self.params['lambda'] = self.params.pop('reg_lambda')
if self.objective == 'multi:softmax':
self.num_class = len(set(y))
self.params['objective'] = self.objective
self.params['num_class'] = self.num_class
def run(self, return_model=False, nfold=5, early_stopping_rounds=100, verbose_eval=50):
print("XGB CV ...\n")
try:
cv_rst = xgb.cv(
self.params,
self.data,
metrics=self.metrics,
feval=self.feval,
nfold=nfold,
num_boost_round=2500,
stratified=False if 'reg' in self.objective else True,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
as_pandas=False
)
except TypeError:
print("Please: self.xgb_data = xgb.DMatrix(data, label=None, missing=None, feature_types=None)")
if isinstance(self.metrics, str):
_ = cv_rst['test-%s-mean' % self.metrics]
self.best_iter = len(_)
print('\nBest Iter: %s' % self.best_iter)
print('Best Score: %s ' % _[-1])
else:
_ = cv_rst['test-%s-mean' % self.metrics]
self.best_iter = len(_)
print('\nBest Iter: %s' % self.best_iter)
print('Best Score: %s ' % _[-1])
self.params_sk['n_estimators'] = self.best_iter
if return_model:
print("\nReturning Model ...\n")
return xgb.train(self.params, self.data, self.best_iter)
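# Minimal usage sketch (illustrative names only, not part of the original
# module): a custom feval in the form xgboost's cv() accepts, receiving the raw
# predictions and the DMatrix that holds the labels, and returning an
# (eval_name, value) pair. Assumes scikit-learn is available.
# from sklearn.metrics import mean_squared_error
#
# def rmse_feval(y_pred, dtrain):
#     y_true = dtrain.get_label()
#     rmse = mean_squared_error(y_true, y_pred) ** 0.5
#     return '1/(1+rmse)', 1 / (1 + rmse)
#
# b_xgb = BaselineXGB(X, y, learning_rate=0.01, metrics='rmse', feval=rmse_feval)
# model = b_xgb.run(return_model=True)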
| 2.34375 | 2 |
neb/util.py | cstein/neb | 20 | 12796664 | import numpy
""" Utility variables and functions
"""
aa2au = 1.8897261249935897 # bohr / AA
# converts nuclear charge to atom label
Z2LABEL = {
1: 'H', 2: 'He',
3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
    11: 'Na', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar'
}
# converts an atomic label to a nuclear charge
LABEL2Z = {}
for key in Z2LABEL:
LABEL2Z[Z2LABEL[key]] = key
# masses from IUPAC: http://www.chem.qmul.ac.uk/iupac/AtWt/
MASSES = {0: 0.00,
1: 1.00784, 2: 4.002602,
3: 6.938, 4: 9.01218, 5: 10.806, 6: 12.0096, 7: 14.00643, 8: 15.99903, 9: 18.998403, 10: 20.1797,
11: 22.9898, 12: 24.304, 13: 26.9815, 14: 28.084, 15: 30.973, 16: 32.059, 17: 35.446, 18: 39.948
}
# Van der Waals radii from Alvarez (2013), DOI: 10.1039/c3dt50599e
# all values in Angstrom
VDWRADII = {0: 0.00,
1: 1.20, 2: 1.43,
3: 2.12, 4: 1.98, 5: 1.91, 6: 1.77, 7: 1.66, 8: 1.50, 9: 1.46, 10: 1.58,
11: 2.50, 12: 2.51, 13: 2.25, 14: 2.19, 15: 1.90, 16: 1.89, 17: 1.82, 18: 1.83
}
# Covalent radii from Pyykkö and Atsumi (2009), DOI: 10.1002/chem.200800987
# all values in Angstrom
COVALENTRADII = {0: 0.00,
1: 0.32, 2: 0.46,
3: 1.33, 4: 1.02, 5: 0.85, 6: 0.75, 7: 0.71, 8: 0.63, 9: 0.64, 10: 0.67,
11: 1.55, 12: 1.39, 13: 1.26, 14: 1.16, 15: 1.11, 16: 1.03, 17: 0.99, 18: 0.96
}
# Coordination numbers from Pyykkö and Atsumi (2009), DOI: 10.1002/chem.200800987
COORDINATION = {0: 0,
1: 1, 2: 1,
3: 1, 4: 2, 5: 3, 6: 4, 7: 3, 8: 2, 9: 1, 10: 1,
11: 1, 12: 2, 13: 3, 14: 4, 15: 3, 16: 2, 17: 1, 18: 1
}
def idamax(a):
""" Returns the index of maximum absolute value (positive or negative)
in the input array a.
Note: Loosely based of a subroutine in GAMESS with the same name
Arguments:
a -- a numpy array where we are to find the maximum
value in (either positive or negative)
Returns:
the index in the array where the maximum value is.
"""
idx = -1
v = 0.0
for i, value in enumerate(numpy.abs(a)):
if value > v:
idx = i
v = value
return idx
def idamin(a):
""" Returns the index of minimum absolute value (positive or negative)
in the input array a.
Arguments:
a -- a numpy array where we are to find the minimum
value in (either positive or negative)
Returns:
the index in the array where the maximum value is.
"""
idx = -1
v = 1.0e30
for i, value in enumerate(numpy.abs(a)):
if value < v:
idx = i
v = value
return idx
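# Minimal sketch (not part of the original module): demonstrate both index
# helpers on a small array.
if __name__ == '__main__':
    values = numpy.array([0.5, -3.2, 1.7])
    print(idamax(values))  # 1  (largest magnitude is -3.2)
    print(idamin(values))  # 0  (smallest magnitude is 0.5)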
| 2.71875 | 3 |
tests/cid_service_test.py | futoin/citool | 13 | 12796665 | <reponame>futoin/citool<gh_stars>10-100
#
# Copyright 2015-2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import signal
import stat
import pwd
import grp
import glob
import requests
from .cid_utbase import cid_UTBase
class cid_service_Test( cid_UTBase ) :
__test__ = True
TEST_DIR = os.path.join(cid_UTBase.TEST_RUN_DIR, 'servicecmd')
_create_test_dir = True
_rms_dir = os.path.join(TEST_DIR, 'rms')
def test01_prepare(self):
os.makedirs(os.path.join(self._rms_dir, 'Releases'))
os.makedirs('src')
os.chdir('src')
os.mkdir('data')
os.mkdir('cache')
if self.IS_MACOS:
self._writeFile('app.sh', """#!/bin/bash
pwd >&2
function lock() {
while :; do
if mkdir data/lock; then
break;
fi
sleep 0.1
done
}
function unlock() {
rmdir data/lock
}
function on_reload() {
lock
echo -n 1 >>data/reload.txt
unlock
}
lock
touch data/start.txt
touch data/reload.txt
echo -n "1" >>data/start.txt
echo -n "1" >>cache/start.txt
unlock
trap "on_reload" SIGHUP
trap "unlock; exit 0" SIGTERM
# high load
while true; do
dd if=/dev/urandom bs=64k count=1024 2>/dev/null | shasum -a 256 >/dev/null;
done
""")
else:
self._writeFile('app.sh', """#!/bin/bash
flock data/lock -c 'echo -n "1" >>data/start.txt; echo -n "1" >>cache/start.txt'
trap "flock data/lock -c 'echo -n 1 >>data/reload.txt'" SIGHUP
trap "exit 0" SIGTERM
# high load
while true; do
dd if=/dev/urandom bs=64K count=1024 status=none | sha256sum >/dev/null ;
done
""")
os.chmod('app.sh', stat.S_IRWXU)
self._writeJSON('futoin.json', {
'name' : 'service-test',
'rms' : 'scp',
'rmsRepo' : self._rms_dir,
'rmsPool' : 'Releases',
'persistent' : [
'data',
],
'writable' : [
'cache',
],
'entryPoints' : {
'app' : {
'tool' : 'exe',
'path' : 'app.sh',
'tune' : {
'scalable': True,
'reloadable': True,
'maxInstances' : 4,
},
}
},
})
self._call_cid(['package'])
self._call_cid(['promote', 'Releases'] + glob.glob('*.txz'))
def test02_deploy(self):
self._call_cid(['deploy', 'rms', 'Releases', '--deployDir=dst',
'--rmsRepo=scp:'+self._rms_dir, '--limit-cpus=4'])
deploy_conf = self._readJSON(os.path.join('dst', 'futoin.json'))
self.assertEqual(4, len(deploy_conf['deploy']['autoServices']['app']))
def test03_exec(self):
start_file = 'dst/persistent/data/start.txt'
reload_file = 'dst/persistent/data/reload.txt'
cache_file = 'dst/current/cache/start.txt'
try: os.unlink(start_file)
except: pass
try: os.unlink(reload_file)
except: pass
pid1 = os.fork()
if not pid1:
self._redirectAsyncStdIO()
os.execv(self.CIDTEST_BIN, [
self.CIDTEST_BIN, 'service', 'exec', 'app', '0',
'--deployDir=dst',
])
pid2 = os.fork()
if not pid2:
self._redirectAsyncStdIO()
os.execv(self.CIDTEST_BIN, [
self.CIDTEST_BIN, 'service', 'exec', 'app', '3',
'--deployDir=dst',
])
self._call_cid(['service', 'exec', 'app', '4', '--deployDir=dst'],
returncode=1)
for i in range(10):
time.sleep(1)
if not os.path.exists(start_file):
continue
if len(self._readFile(start_file)) == 2:
break
else:
self.assertTrue(False)
for i in range(10):
time.sleep(1)
if not os.path.exists(cache_file):
continue
if len(self._readFile(cache_file)) == 2:
break
else:
self.assertTrue(False)
self._call_cid(['service', 'reload', 'app', '4', str(pid2), '--deployDir=dst'],
returncode=1)
self._call_cid(['service', 'reload', 'app', '3', str(pid2), '--deployDir=dst'])
for i in range(10):
time.sleep(1)
if not os.path.exists(reload_file):
continue
if len(self._readFile(reload_file)) == 1:
break
else:
self.assertTrue(False)
self._call_cid(['service', 'stop', 'app', '4', str(pid2), '--deployDir=dst'],
returncode=1)
self._call_cid(['service', 'stop', 'app', '3', str(pid2), '--deployDir=dst'])
os.waitpid(pid2, 0)
self._call_cid(['service', 'stop', 'app', '0', str(pid1), '--deployDir=dst'])
os.waitpid(pid1, 0)
def test04_redeploy(self):
keep_file = 'dst/persistent/data/keep.txt'
self._call_cid(['deploy', 'rms', 'Releases', '--deployDir=dst',
'--redeploy'])
self._writeFile(keep_file, 'KEEP')
self._call_cid(['deploy', 'rms', 'Releases', '--deployDir=dst',
'--redeploy'])
self.assertTrue(os.path.exists(keep_file))
def test05_master(self):
start_file = 'dst/persistent/data/start.txt'
reload_file = 'dst/persistent/data/reload.txt'
cache_file = 'dst/current/cache/start.txt'
try: os.unlink(start_file)
except: pass
try: os.unlink(reload_file)
except: pass
self.assertFalse(os.path.exists(cache_file))
pid = os.fork()
if not pid:
self._redirectAsyncStdIO()
os.execv(self.CIDTEST_BIN, [
self.CIDTEST_BIN, 'service', 'master',
'--deployDir=dst',
])
for i in range(10):
time.sleep(1)
if not os.path.exists(start_file):
continue
if len(self._readFile(start_file)) == 4:
break
else:
self.assertTrue(False)
for i in range(10):
time.sleep(1)
if not os.path.exists(cache_file):
continue
if len(self._readFile(cache_file)) == 4:
break
else:
self.assertTrue(False)
os.kill(pid, signal.SIGUSR1)
for i in range(30):
time.sleep(1)
if not os.path.exists(reload_file):
continue
if len(self._readFile(reload_file)) == 4:
break
else:
self.assertTrue(False)
os.kill(pid, signal.SIGTERM)
os.waitpid(pid, 0)
| 1.742188 | 2 |
Tugas 0/Invoice/main.py | hafidh561/Pemrograman-Berorientasi-Objek | 0 | 12796666 | from Invoice import Invoice
def main():
items = [Invoice("RTX 2080", "VGA", 5, 10000000), Invoice("Intel i9 10900K", "Processor", 10, 8000000)]
for item in items:
print(item.part_num)
print(item.part_desc)
print(item.quantity)
print(item.price)
print("Total tagihanmu adalah", item.get_invoice_amount(), end="\n\n")
if __name__ == "__main__":
main()
| 3.1875 | 3 |
src/command_modules/azure-cli-network/azure/cli/command_modules/network/zone_file/record_processors.py | enterstudio/azure-cli | 2 | 12796667 | <gh_stars>1-10
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#The MIT License (MIT)
#Copyright (c) 2016 Blockstack
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#pylint: skip-file
import copy
def process_origin(data, template):
"""
Replace {$origin} in template with a serialized $ORIGIN record
"""
record = ""
if data is not None:
record += "$ORIGIN %s" % data
return template.replace("{$origin}", record)
def process_ttl(data, template):
"""
Replace {$ttl} in template with a serialized $TTL record
"""
record = ""
if data is not None:
record += "$TTL %s" % data
return template.replace("{$ttl}", record)
def process_soa(data, template):
"""
Replace {SOA} in template with a set of serialized SOA records
"""
record = template[:]
if data is not None:
assert len(data) == 1, "Only support one SOA RR at this time"
data = data[0]
soadat = []
domain_fields = ['mname', 'rname']
param_fields = ['serial', 'refresh', 'retry', 'expire', 'minimum']
for f in domain_fields + param_fields:
assert f in data.keys(), "Missing '%s' (%s)" % (f, data)
data_name = str(data.get('name', '@'))
soadat.append(data_name)
if data.get('ttl') is not None:
soadat.append( str(data['ttl']) )
soadat.append("IN")
soadat.append("SOA")
for key in domain_fields:
value = str(data[key])
soadat.append(value)
soadat.append("(")
for key in param_fields:
value = str(data[key])
soadat.append(value)
soadat.append(")")
soa_txt = " ".join(soadat)
record = record.replace("{soa}", soa_txt)
else:
# clear all SOA fields
record = record.replace("{soa}", "")
return record
def quote_field(data, field):
"""
Quote a field in a list of DNS records.
Return the new data records.
"""
if data is None:
return None
data_dup = copy.deepcopy(data)
for i in range(0, len(data_dup)):
data_dup[i][field] = '"%s"' % data_dup[i][field]
data_dup[i][field] = data_dup[i][field].replace(";", "\;")
return data_dup
def process_rr(data, record_type, record_keys, field, template):
"""
Meta method:
Replace $field in template with the serialized $record_type records,
using @record_key from each datum.
"""
if data is None:
return template.replace(field, "")
if type(record_keys) == list:
pass
elif type(record_keys) == str:
record_keys = [record_keys]
else:
raise ValueError("Invalid record keys")
assert type(data) == list, "Data must be a list"
record = ""
for i in range(0, len(data)):
for record_key in record_keys:
assert record_key in data[i].keys(), "Missing '%s'" % record_key
record_data = []
record_data.append( str(data[i].get('name', '@')) )
if data[i].get('ttl') is not None:
record_data.append( str(data[i]['ttl']) )
record_data.append(record_type)
record_data += [str(data[i][record_key]) for record_key in record_keys]
record += " ".join(record_data) + "\n"
return template.replace(field, record)
def process_ns(data, template):
"""
Replace {ns} in template with the serialized NS records
"""
return process_rr(data, "NS", "host", "{ns}", template)
def process_a(data, template):
"""
Replace {a} in template with the serialized A records
"""
return process_rr(data, "A", "ip", "{a}", template)
def process_aaaa(data, template):
"""
Replace {aaaa} in template with the serialized A records
"""
return process_rr(data, "AAAA", "ip", "{aaaa}", template)
def process_cname(data, template):
"""
Replace {cname} in template with the serialized CNAME records
"""
return process_rr(data, "CNAME", "alias", "{cname}", template)
def process_mx(data, template):
"""
Replace {mx} in template with the serialized MX records
"""
return process_rr(data, "MX", ["preference", "host"], "{mx}", template)
def process_ptr(data, template):
"""
Replace {ptr} in template with the serialized PTR records
"""
return process_rr(data, "PTR", "host", "{ptr}", template)
def process_txt(data, template):
"""
Replace {txt} in template with the serialized TXT records
"""
# quote txt
data_dup = quote_field(data, "txt")
return process_rr(data_dup, "TXT", "txt", "{txt}", template)
def process_srv(data, template):
"""
Replace {srv} in template with the serialized SRV records
"""
return process_rr(data, "SRV", ["priority", "weight", "port", "target"], "{srv}", template)
def process_spf(data, template):
"""
Replace {spf} in template with the serialized SPF records
"""
return process_rr(data, "SPF", "data", "{spf}", template)
def process_uri(data, template):
"""
Replace {uri} in templtae with the serialized URI records
"""
# quote target
data_dup = quote_field(data, "target")
return process_rr(data_dup, "URI", ["priority", "weight", "target"], "{uri}", template)
| 1.4375 | 1 |
python/PlacingMarbles.py | teinen/atcoder-beginners-selection-answers | 0 | 12796668 | <filename>python/PlacingMarbles.py
l = list(input())
c = 0
if l[0] == '1':
c+=1
if l[1] == '1':
c+=1
if l[2] == '1':
c+=1
print(c)
| 3.390625 | 3 |
csat/acquisition/admin.py | GaretJax/csat | 0 | 12796669 | from django.contrib import admin
from polymorphic.admin import PolymorphicParentModelAdmin
from polymorphic.admin import PolymorphicChildModelAdmin
from csat.acquisition import get_collectors, models
class AcquisitionSessionConfigAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'started', 'completed', 'temporary',
'status')
admin.site.register(models.AcquisitionSessionConfig,
AcquisitionSessionConfigAdmin)
class DataCollectorConfigAdmin(PolymorphicChildModelAdmin):
base_model = models.DataCollectorConfig
class GenericDataCollectorConfigAdmin(PolymorphicParentModelAdmin):
list_display = ('id', 'name', 'session_config')
base_model = models.DataCollectorConfig
def get_child_models(self):
        def iter_children():
            for collector in get_collectors():
                yield (collector.get_model(), DataCollectorConfigAdmin)
        return tuple(iter_children())
admin.site.register(models.DataCollectorConfig,
GenericDataCollectorConfigAdmin)
| 1.953125 | 2 |
eval_pf_willow.py | OliviaWang123456/ncnet | 0 | 12796670 | from __future__ import print_function, division
import os
from os.path import exists
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict
from lib.model import ImMatchNet
from lib.pf_willow_dataset import PFDataset
from lib.normalization import NormalizeImageDict
from lib.torch_util import BatchTensorToVars, str_to_bool
from lib.point_tnf import corr_to_matches
from lib.eval_util import pck_metric
from lib.dataloader import default_collate
from lib.torch_util import collate_custom
import argparse
print('NCNet evaluation script - PF Willow dataset')
use_cuda = torch.cuda.is_available()
# Argument parsing
parser = argparse.ArgumentParser(description='Compute PF Willow matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--eval_dataset_path', type=str, default='datasets/', help='path to PF Willow dataset')
args = parser.parse_args()
# Create model
print('Creating CNN model...')
model = ImMatchNet(use_cuda=use_cuda,
checkpoint=args.checkpoint)
# Dataset and dataloader
Dataset = PFDataset
collate_fn = default_collate
csv_file = 'PF-dataset/test_pairs_pf.csv'
cnn_image_size = (args.image_size, args.image_size)
dataset = Dataset(csv_file=os.path.join(args.eval_dataset_path, csv_file),
dataset_path=args.eval_dataset_path,
transform=NormalizeImageDict(['source_image', 'target_image']),
output_size=cnn_image_size)
dataset.pck_procedure = 'scnet'
# Only batch_size=1 is supported for evaluation
batch_size = 1
dataloader = DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=0,
collate_fn=collate_fn)
batch_tnf = BatchTensorToVars(use_cuda=use_cuda)
model.eval()
# initialize vector for storing results
stats = {}
stats['point_tnf'] = {}
stats['point_tnf']['pck'] = np.zeros((len(dataset), 1))
# Compute
for i, batch in enumerate(dataloader):
batch = batch_tnf(batch)
batch_start_idx = batch_size * i
corr4d = model(batch)
# get matches
xA, yA, xB, yB, sB = corr_to_matches(corr4d, do_softmax=True)
matches = (xA, yA, xB, yB)
stats = pck_metric(batch, batch_start_idx, matches, stats, args, use_cuda)
print('Batch: [{}/{} ({:.0f}%)]'.format(i, len(dataloader), 100. * i / len(dataloader)))
# Print results
results = stats['point_tnf']['pck']
good_idx = np.flatnonzero((results != -1) * ~np.isnan(results))
print('Total: ' + str(results.size))
print('Valid: ' + str(good_idx.size))
filtered_results = results[good_idx]
print('PCK:', '{:.2%}'.format(np.mean(filtered_results))) | 2.15625 | 2 |
Stock/Data/Engine/Common/DyStockDataCommonEngine.py | Leonardo-YXH/DevilYuan | 135 | 12796671 | <filename>Stock/Data/Engine/Common/DyStockDataCommonEngine.py
from .DyStockDataCodeTable import *
from .DyStockDataTradeDayTable import *
from .DyStockDataSectorCodeTable import *
class DyStockDataCommonEngine(object):
""" 代码表和交易日数据引擎 """
def __init__(self, mongoDbEngine, gateway, info):
self._mongoDbEngine = mongoDbEngine
self._gateway = gateway
self._info = info
self._codeTable = DyStockDataCodeTable(self._mongoDbEngine, self._gateway, self._info)
self._tradeDayTable = DyStockDataTradeDayTable(self._mongoDbEngine, self._gateway, self._info)
self._sectorCodeTable = DyStockDataSectorCodeTable(self._mongoDbEngine, self._gateway, self._info)
def updateCodes(self):
return self._codeTable.update()
def updateTradeDays(self, startDate, endDate):
return self._tradeDayTable.update(startDate, endDate)
def updateSectorCodes(self, sectorCode, startDate, endDate):
return self._sectorCodeTable.update(sectorCode, startDate, endDate)
def updateAllSectorCodes(self, startDate, endDate):
return self._sectorCodeTable.updateAll(startDate, endDate)
def getTradeDays(self, startDate, endDate):
return self._tradeDayTable.get(startDate, endDate)
def getLatestDateInDb(self):
return self._tradeDayTable.getLatestDateInDb()
def getLatestTradeDayInDb(self):
return self._tradeDayTable.getLatestTradeDayInDb()
def getIndex(self, code):
return self._codeTable.getIndex(code)
def getCode(self, name):
return self._codeTable.getCode(name)
def getIndexStockCodes(self, index=None):
return self._codeTable.getIndexStockCodes(index)
def getIndexSectorStockCodes(self, index=None):
if index in DyStockCommon.sectors:
return self._sectorCodeTable.getSectorStockCodes(index)
return self._codeTable.getIndexStockCodes(index)
@property
def shIndex(self):
return self._codeTable.shIndex
@property
def szIndex(self):
return self._codeTable.szIndex
@property
def cybIndex(self):
return self._codeTable.cybIndex
@property
def zxbIndex(self):
return self._codeTable.zxbIndex
@property
def etf50(self):
return self._codeTable.etf50
@property
def etf300(self):
return self._codeTable.etf300
@property
def etf500(self):
return self._codeTable.etf500
@property
def stockFunds(self):
return self._codeTable.stockFunds
@property
def stockSectors(self):
return self._codeTable.stockSectors
@property
def stockCodesFunds(self):
return self._codeTable.stockCodesFunds
@property
def stockAllCodesFunds(self):
return self._codeTable.stockAllCodesFunds
@property
def stockAllCodesFundsSectors(self):
return self._codeTable.stockAllCodesFundsSectors
@property
def stockAllCodes(self):
return self._codeTable.stockAllCodes
@property
def stockCodes(self):
return self._codeTable.stockCodes
@property
def stockIndexes(self):
return self._codeTable.stockIndexes
@property
def stockIndexesSectors(self):
return self._codeTable.stockIndexesSectors
def tDaysOffset(self, base, n):
return self._tradeDayTable.tDaysOffset(base, n)
def tDaysOffsetInDb(self, base, n=0):
return self._tradeDayTable.tDaysOffsetInDb(base, n)
def tDays(self, start, end):
return self._tradeDayTable.get(start, end)
def tDaysCountInDb(self, start, end):
return self._tradeDayTable.tDaysCountInDb(start, end)
def tLatestDay(self):
return self._tradeDayTable.tLatestDay()
def tOldestDay(self):
return self._tradeDayTable.tOldestDay()
def isInTradeDayTable(self, startDate, endDate):
return self._tradeDayTable.isIn(startDate, endDate)
def load(self, dates, codes=None):
if not self._codeTable.load(codes):
return False
return self._tradeDayTable.load(dates)
def loadCodeTable(self, codes=None):
return self._codeTable.load(codes)
def loadTradeDays(self, dates):
return self._tradeDayTable.load(dates)
def loadSectorCodeTable(self, sectorCode, date, codes=None):
return self._sectorCodeTable.load(sectorCode, date, codes)
def getSectorCodes(self, sectorCode):
return self._sectorCodeTable.getSectorStockCodes(sectorCode) | 2.0625 | 2 |
LC3-Longest Substring Without Repeating Characters.py | karthyvenky/LeetCode-Challenges | 0 | 12796672 | <filename>LC3-Longest Substring Without Repeating Characters.py
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
#Leetcode 3 - Longest substring without repeating characters
st = en = poi = 0
substr = temp = ''
maxlen = 0
for i in range(len(s)):
if s[i] not in temp:
temp += s[i]
else:
if maxlen < len(temp):
maxlen = len(temp)
substr = temp
st = poi
en = i - 1
while s[i] in temp:
temp = temp[1:]
poi = poi + i
temp += s[i]
if maxlen < len(temp):
maxlen = len(temp)
substr = temp
#print(f"Longest substring is {substr} and length is {maxlen} and from {st} to {en}")
return(maxlen) | 3.59375 | 4 |
lib_collection/graph/dfs_order.py | caser789/libcollection | 0 | 12796673 | <reponame>caser789/libcollection
class DFSOrder(object):
def __init__(self, graph):
self.marked = [False for _ in range(graph.v)]
self.pre = [0 for _ in range(graph.v)]
self.post = [0 for _ in range(graph.v)]
self.pre_order = []
self.post_order = []
self.pre_counter = 0
self.post_counter = 0
for v in range(graph.v):
if not self.marked[v]:
self.dfs(graph, v)
def dfs(self, graph, v):
self.marked[v] = True
self.pre[v] = self.pre_counter
self.pre_counter += 1
self.pre_order.append(v)
for w in graph.get_siblings(v):
if not self.marked[w]:
self.dfs(graph, w)
self.post[v] = self.post_counter
self.post_counter += 1
self.post_order.append(v)
if __name__ == '__main__':
from digraph import Digraph
d = Digraph(13)
d.add_edge(0, 6)
d.add_edge(0, 1)
d.add_edge(0, 5)
d.add_edge(2, 0)
d.add_edge(2, 3)
d.add_edge(3, 5)
d.add_edge(5, 4)
d.add_edge(6, 4)
d.add_edge(6, 9)
d.add_edge(7, 6)
d.add_edge(8, 7)
d.add_edge(9, 10)
d.add_edge(9, 11)
d.add_edge(9, 12)
d.add_edge(11, 12)
d.pprint()
order = DFSOrder(d)
print(order.pre)
print(order.post)
print(order.pre_order)
print(order.post_order)
| 2.578125 | 3 |
pyvalidator/utils/to_float.py | theteladras/py.validator | 15 | 12796674 | from typing import Union
from ..is_float import is_float
def to_float(input: str) -> Union[float, None]:
if not is_float(input):
return None
else:
return float(input)
| 3.546875 | 4 |
src/sw_adder.py | DavidRivasPhD/mrseadd | 0 | 12796675 | <filename>src/sw_adder.py
from spacy.lang.en.stop_words import STOP_WORDS
def add_sw(new_sw):
STOP_WORDS.add(new_sw)
return
| 1.9375 | 2 |
tests/test_dhcp.py | abaruchi/WhoIsConnected | 0 | 12796676 | import unittest
from ipaddress import IPv4Address, IPv6Address
from unittest.mock import patch
from utils.dhcp import parse_dhcp_lease_file
from utils.config_reader import ConfigData
def config_file_with_black_list(self):
return {
'dhcp': {
'lease_file': './dhcp_leases_test',
'ignore_hosts': ['orion']
}
}
class TestDHCP(unittest.TestCase):
def setUp(self):
dhcp_lease_file = './dhcp_leases_test'
self.dhcp_data = parse_dhcp_lease_file(dhcp_lease_file)
def test_data_parsing(self):
self.assertEqual(3, len(self.dhcp_data.keys()))
def test_ipv4_parsing(self):
ipv4_count = 0
for v in self.dhcp_data.values():
if v.get('ipv4'):
if isinstance(v['ipv4'], IPv4Address):
ipv4_count += 1
self.assertEqual(3, ipv4_count)
def test_ipv6_parsing(self):
ipv6_count = 0
for v in self.dhcp_data.values():
if v.get('ipv6'):
if isinstance(v['ipv6'], IPv6Address):
ipv6_count += 1
self.assertEqual(1, ipv6_count)
@patch.object(ConfigData, 'get_dhcp_info', config_file_with_black_list)
def test_ignore_host(self):
dhcp_data = parse_dhcp_lease_file('./dhcp_leases_test')
self.assertEqual(
'58:40:4e:b9:08:f0' in dhcp_data.keys(),
False
)
| 2.828125 | 3 |
pincer/middleware/activity_join_request.py | ashu96902/Pincer | 0 | 12796677 | <reponame>ashu96902/Pincer
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
"""sent when the user receives a Rich Presence Ask to Join request"""
# TODO: Implement event
| 0.902344 | 1 |
ip/ip_solver.py | bridgelessqiu/NMIN-FPE | 0 | 12796678 | <filename>ip/ip_solver.py
import algo
import numpy as np
import scipy as sp
import networkx as nx
import sys
if __name__ == "__main__":
# --------------------------- #
# Command line inputs #
# --------------------------- #
network_name = str(sys.argv[1])
exp_type = str(sys.argv[2]) # random, uniform
if exp_type == "uniform":
k = int(sys.argv[3]) # k-uniform threshold
# ---------------------- #
# Path to data #
# ---------------------- #
# The path to the network file
edgelist_path = "../networks/real/" + network_name + "/" + network_name + ".edges"
print("Network: {}".format(network_name))
if exp_type == "random":
threshold_path = "../networks/real/" + network_name + "/" + network_name + "_random_thresh.txt"
result_file = "results/" + exp_type + "_threshold/" + network_name + ".txt"
elif exp_type == "uniform":
threshold_path = "../networks/real/" + network_name + "/" + network_name + "_" + str(k) + "_uniform_thresh.txt"
result_file = "results/" + exp_type + "_threshold/" + network_name + "_uniform_" + str(k) + ".txt"
else:
print("unknown type")
# ------------------------------- #
    # Network preprocessing #
# ------------------------------- #
# Read in the network
G = nx.read_edgelist(edgelist_path)
n = G.number_of_nodes()
    # the ordering of the nodes in the matrix (THIS IS VERY VERY IMPORTANT)
node_order = [str(i) for i in range(n)]
# The adjacency matrix
A = nx.to_scipy_sparse_matrix(G, nodelist = node_order)
A.setdiag(1) # Set all diagonal entries to 1, very important since we consider the closed neighborhood
# double check the ordering
for u in range(n):
if(G.degree(str(u)) + 1 != A[u].count_nonzero()):
print("wrong")
print(G.degree(str(u)), A[u].count_nonzero())
# -------------------------- #
# Extract thresholds #
# -------------------------- #
threshold_file_obj = open(threshold_path, 'r')
tau = np.array([0] * n)
for line in threshold_file_obj:
u = int(line.split(' ')[0])
t = int(line.split(' ')[1])
tau[u] = t
threshold_file_obj.close()
# ------------------------ #
# Run the solver #
# ------------------------ #
x = algo.ip_nmin_fpe(A, tau)
nmin_fpe = [u for u, s in enumerate(x) if s != 0]
f = open(result_file, 'w')
f.write(str(len(nmin_fpe)) + '\n')
f.close()
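# Example invocations (hypothetical network names; the edge-list and threshold
# files described above must already exist under ../networks/real/):
#   python ip_solver.py karate random
#   python ip_solver.py karate uniform 3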
| 2.703125 | 3 |
tests/test_shape_filter.py | fabiommendes/easymunk | 1 | 12796679 | import pickle
import easymunk as p
class TestShapeFilter:
def test_init(self) -> None:
f = p.ShapeFilter()
assert f.group == 0
assert f.categories == 0xFFFFFFFF
assert f.mask == 0xFFFFFFFF
f = p.ShapeFilter(1, 2, 3)
assert f.group == 1
assert f.categories == 2
assert f.mask == 3
def test_constants(self) -> None:
assert p.ShapeFilter.ALL_MASKS() == 0xFFFFFFFF
assert p.ShapeFilter.ALL_CATEGORIES() == 0xFFFFFFFF
def test_eq(self) -> None:
f1 = p.ShapeFilter(1, 2, 3)
f2 = p.ShapeFilter(1, 2, 3)
f3 = p.ShapeFilter(2, 3, 4)
assert f1 == f2
assert f1 != f3
def test_pickle(self) -> None:
x = p.ShapeFilter(1, 2, 3)
s = pickle.dumps(x, 2)
actual = pickle.loads(s)
assert x == actual
class TestContactPoint:
pass
class TestContactPointSet:
pass
| 2.46875 | 2 |
makeIGVToolsSortScript.py | imk1/MethylationQTLCode | 0 | 12796680 | <reponame>imk1/MethylationQTLCode
def makeIGVToolsSortScript(bismarkFileNameListFileName, suffix, scriptFileName, codePath):
    # Make a script that will use igvtools to sort each Bismark output file
bismarkFileNameListFile = open(bismarkFileNameListFileName)
scriptFile = open(scriptFileName, 'w+')
for line in bismarkFileNameListFile:
# Iterate through the chromosomes and write a line in the script for each for each population
bismarkFileName = line.strip()
bismarkFileNameElements = bismarkFileName.split(".")
fileTypeLength = len(bismarkFileNameElements[-1])
outputFileName = bismarkFileName[0:len(bismarkFileName) - fileTypeLength] + suffix
scriptFile.write("java -Xmx4g -jar " + codePath + "/" + "igvtools.jar sort " + bismarkFileName + " " + outputFileName + "\n")
bismarkFileNameListFile.close()
scriptFile.close()
if __name__=="__main__":
import sys
bismarkFileNameListFileName = sys.argv[1]
suffix = sys.argv[2]
scriptFileName = sys.argv[3]
codePath = sys.argv[4] # Should not contain a / at the end
makeIGVToolsSortScript(bismarkFileNameListFileName, suffix, scriptFileName, codePath)
| 2.6875 | 3 |
ex2_02.py | FMarnix/Py4Ev | 0 | 12796681 | <reponame>FMarnix/Py4Ev
name = input("Enter your name: ")
print("Hi", name) | 1.984375 | 2 |
dlme_airflow/drivers/oai_xml.py | sul-dlss/dlme-airflow | 0 | 12796682 | <reponame>sul-dlss/dlme-airflow
import intake
import logging
import pandas as pd
from sickle import Sickle
from lxml import etree
class OAIXmlSource(intake.source.base.DataSource):
container = "dataframe"
name = "oai_xml"
version = "0.0.1"
partition_access = True
def __init__(self, collection_url, set, dtype=None, metadata=None):
super(OAIXmlSource, self).__init__(metadata=metadata)
        self.collection_url = collection_url
        self.set = set
        self.dtype = dtype
        self._collection = Sickle(self.collection_url)
self._path_expressions = self._get_path_expressions()
self._records = []
def _open_set(self):
oai_records = self._collection.ListRecords(metadataPrefix='oai_dc', set=self.set, ignore_deleted=True)
for oai_record in oai_records:
xtree = etree.fromstring(oai_record.raw)
record = self._construct_fields(xtree)
record.update(self._from_metadata(xtree))
self._records.append(record)
def _construct_fields(self, manifest: etree) -> dict:
output = {}
for field in self._path_expressions:
path = self._path_expressions[field]['path']
namespace = self._path_expressions[field]['namespace']
optional = self._path_expressions[field]['optional']
result = manifest.xpath(path, namespaces=namespace)
if len(result) < 1:
if optional is True:
# Skip and continue
continue
else:
logging.warn(f"Manifest missing {field}")
else:
output[field] = result[0].text.strip() # Use first value
return output
def uri2label(self, value: str, nsmap: dict):
for key in list(nsmap.keys()):
if nsmap[key] in value:
return value.replace(nsmap[key], "").replace("{}", "")
    # TODO: Discuss if this output should be an array (line 63) or a string
def _from_metadata(self, manifest: etree) -> dict:
output = {}
NS = {'oai_dc': "http://www.openarchives.org/OAI/2.0/oai_dc/"}
oai_block = manifest.xpath("//oai_dc:dc", namespaces=NS)[0] # we want the first result
for metadata in oai_block.getchildren():
tag = self.uri2label(metadata.tag, metadata.nsmap)
output[tag] = metadata.text.strip()
return output
def _get_partition(self, i) -> pd.DataFrame:
return pd.DataFrame(self._records)
def _get_path_expressions(self):
paths = {}
for name, info in self.metadata.get("fields", {}).items():
paths[name] = info
return paths
    # TODO: Ask/Investigate (with jnelson) what the purpose of dtype=self.dtype is
def _get_schema(self):
self._open_set()
return intake.source.base.Schema(
datashape=None,
dtype=self.dtype,
shape=None,
npartitions=len(self._records),
extra_metadata={},
)
def read(self):
self._load_metadata()
return pd.concat([self.read_partition(i) for i in range(self.npartitions)])
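# Minimal usage sketch (hypothetical endpoint, set name and field mapping, not
# part of the original driver). Each entry under metadata['fields'] needs the
# 'path', 'namespace' and 'optional' keys that _construct_fields() reads.
# src = OAIXmlSource(
#     'https://example.org/oai', 'some_set',
#     metadata={'fields': {
#         'identifier': {
#             'path': '//oai:header/oai:identifier',
#             'namespace': {'oai': 'http://www.openarchives.org/OAI/2.0/'},
#             'optional': False,
#         },
#     }},
# )
# df = src.read()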
| 2.234375 | 2 |
app/routers/authentication.py | maktoobgar/fastapi | 1 | 12796683 | <reponame>maktoobgar/fastapi
from fastapi import APIRouter, Depends, status, HTTPException
from fastapi.security import OAuth2PasswordRequestForm
from app import schemas, database, models, token
from app.hashing import Hash
from sqlalchemy.orm import Session
from app.repository import user
router = APIRouter(prefix="/auth", tags=['Authentication'])
get_db = database.get_db
@router.post('/login')
def login(request: schemas.Login, db: Session = Depends(database.get_db)):
user = db.query(models.User).filter(
models.User.username == request.username).first()
if not user:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Invalid Credentials")
if not Hash.verify(user.password, request.password):
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Incorrect password")
access_token = token.create_access_token(data={"sub": user.email})
return {"access_token": access_token, "token_type": "bearer"}
@router.post('/signup', response_model=schemas.ShowUser)
def create_user(request: schemas.User, db: Session = Depends(get_db)):
return user.create(request, db)
| 2.5 | 2 |
pycalclib/type/Integer.py | Hedroed/PyCalculator | 1 | 12796684 | #!/usr/bin/env python3
# coding: utf-8
from .BaseType import BaseType
from ..Manager import Register
import re
class Integer(BaseType):
'''Integer type
An Integer is a number that can be written without a fractional component.
An Integer is only compose of digits from 0 to 9.
'''
name = 'int'
def format(self, value):
'''Format string to Integer'''
return int(value)
def detect(self, value):
'''Is value an Integer ?
Test if value is only compose of digits from 0 to 9.
'''
return re.match(r'^-?[0-9]+$', value) is not None
def fromBytes(self, _bytes):
'''Convert bytes to Integer using big endian'''
return int.from_bytes(_bytes, 'big')
def toBytes(self, value):
'''Convert Integer to bytes using big endian'''
return value.to_bytes(max(1, (value.bit_length() + 7) // 8), 'big')
def toString(self, value):
'''Return value as string'''
return str(value)
# Register the type
Register.registerType(Integer())
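# Minimal sketch (not part of the original module): exercise the registered
# Integer type helpers directly.
if __name__ == '__main__':
    integer = Integer()
    print(integer.detect('-42'))        # True
    print(integer.format('-42'))        # -42
    print(integer.toBytes(300))         # b'\x01,'
    print(integer.fromBytes(b'\x01,'))  # 300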
| 3.953125 | 4 |
src/backend/database_repository.py | Irsutoro/Where-is-my-money | 0 | 12796685 | <reponame>Irsutoro/Where-is-my-money
from database_management import Database, ResultSet
import configparser
DBINFO = configparser.ConfigParser()
DBINFO.read('database.config')
WMM_MAIN_DB = Database(DBINFO['Database']['Name'], DBINFO['User']['Name'], DBINFO['Database']['Host'], DBINFO['Database']['Port']) | 2.3125 | 2 |
server/app/models.py | byeonggukgong/wintercoding-todolist | 0 | 12796686 | <filename>server/app/models.py
# -*- coding: utf-8 -*-
from app.extensions import db, ma
class Todo(db.Model):
__tablename__ = 'todo'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String, nullable=False)
contents = db.Column(db.String, nullable=False)
priority = db.Column(db.Integer, default=1, nullable=False)
deadline = db.Column(db.DateTime, nullable=True)
is_done = db.Column(db.Boolean, default=False, nullable=True)
def __init__(self, title: str, contents: str) -> None:
self.title = title
self.contents = contents
def __repr__(self) -> str:
return f'<Todo {self.title}>'
class TodoSchema(ma.ModelSchema):
class Meta:
model = Todo
| 2.40625 | 2 |
plugins/plugin_manager_plugin/__init__.py | StarryPy/StarryPy-Historic | 38 | 12796687 | <filename>plugins/plugin_manager_plugin/__init__.py
from plugin_manager_plugin import PluginManagerPlugin
| 1.25 | 1 |
hackerrank/data-structures/2d-array.py | Ashindustry007/competitive-programming | 506 | 12796688 | #!/usr/bin/env python3
# https://www.hackerrank.com/challenges/2d-array
a=[0]*6
for i in range(6): a[i]=[int(x) for x in input().split()]
c=-9*9
for i in range(1,5):
for j in range(1,5):
c=max(c,a[i-1][j-1]+a[i-1][j]+a[i-1][j+1]+a[i][j]+a[i+1][j-1]+a[i+1][j]+a[i+1][j+1])
print(c)
| 3.15625 | 3 |
evohome_rf/systems.py | NotBobTheBuilder/evohome_rf | 0 | 12796689 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""Evohome RF - The evohome-compatible system."""
import logging
from asyncio import Task
from datetime import timedelta as td
from threading import Lock
from typing import List, Optional
from .command import Command, FaultLog, Priority
from .const import (
ATTR_DEVICES,
DEVICE_HAS_ZONE_SENSOR,
DISCOVER_ALL,
DISCOVER_PARAMS,
DISCOVER_SCHEMA,
DISCOVER_STATUS,
SystemMode,
SystemType,
__dev_mode__,
)
from .devices import Device, Entity
from .exceptions import CorruptStateError, ExpiredCallbackError
from .schema import (
ATTR_CONTROLLER,
ATTR_DHW_SYSTEM,
ATTR_HTG_CONTROL,
ATTR_HTG_SYSTEM,
ATTR_ORPHANS,
ATTR_UFH_SYSTEM,
ATTR_ZONES,
DISABLE_DISCOVERY,
MAX_ZONES,
)
from .zones import DhwZone, Zone
DEV_MODE = __dev_mode__
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
_LOGGER.setLevel(logging.DEBUG)
class SysFaultLog: # 0418
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._fault_log = FaultLog(self._ctl)
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_STATUS:
self._gwy._tasks.append(self._loop.create_task(self.get_fault_log()))
async def get_fault_log(self, force_refresh=None) -> Optional[dict]: # 0418
try:
return await self._fault_log.get_fault_log(force_refresh=force_refresh)
except ExpiredCallbackError:
return
@property
def status(self) -> dict:
status = super().status
assert "fault_log" not in status # TODO: removeme
status["fault_log"] = self._fault_log.fault_log
status["last_fault"] = self._msgz[" I"].get("0418")
return status
class SysDatetime: # 313F
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._datetime = None
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_STATUS:
self._gwy.send_cmd(Command.get_system_time(self.id))
# self._send_cmd("313F")
def _handle_msg(self, msg, prev_msg=None):
super()._handle_msg(msg)
if msg.code == "313F" and msg.verb in (" I", "RP"): # TODO: W
self._datetime = msg
@property
def datetime(self) -> Optional[str]:
return self._msg_payload(self._datetime, "datetime") # TODO: make a dt object
# def wait_for(self, cmd, callback):
# self._api_lock.acquire()
# self._send_cmd("313F", verb="RQ", callback=callback)
# time_start = dt.now()
# while not self._schedule_done:
# await asyncio.sleep(TIMER_SHORT_SLEEP)
# if dt.now() > time_start + TIMER_LONG_TIMEOUT:
# self._api_lock.release()
# raise ExpiredCallbackError("failed to set schedule")
# self._api_lock.release()
# async def get_datetime(self) -> str: # wait for the RP/313F
# await self.wait_for(Command("313F", verb="RQ"))
# return self.datetime
# async def set_datetime(self, dtm: dt) -> str: # wait for the I/313F
# await self.wait_for(Command("313F", verb=" W", payload=f"00{dtm_to_hex(dtm)}"))
# return self.datetime
@property
def status(self) -> dict:
status = super().status
assert ATTR_HTG_SYSTEM in status # TODO: removeme
assert "datetime" not in status[ATTR_HTG_SYSTEM] # TODO: removeme
status[ATTR_HTG_SYSTEM]["datetime"] = self.datetime
return status
class SysLanguage: # 0100
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._language = None
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_PARAMS:
self._send_cmd("0100") # language
def _handle_msg(self, msg, prev_msg=None):
super()._handle_msg(msg)
if msg.code == "0100" and msg.verb in (" I", "RP"):
self._language = msg
@property
def language(self) -> Optional[str]: # 0100
return self._msg_payload(self._language, "language")
@property
def params(self) -> dict:
params = super().params
assert ATTR_HTG_SYSTEM in params # TODO: removeme
assert "language" not in params[ATTR_HTG_SYSTEM] # TODO: removeme
params[ATTR_HTG_SYSTEM]["language"] = self.language
return params
class SysMode: # 2E04
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._system_mode = None
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_STATUS:
# self._send_cmd("2E04", payload="FF") # system mode
self._gwy.send_cmd(Command.get_system_mode(self.id))
def _handle_msg(self, msg, prev_msg=None):
super()._handle_msg(msg)
if msg.code == "2E04" and msg.verb in (" I", "RP"): # this is a special case
self._system_mode = msg
@property
def system_mode(self) -> Optional[dict]: # 2E04
return self._msg_payload(self._system_mode)
def set_mode(self, system_mode=None, until=None) -> Task:
"""Set a system mode for a specified duration, or indefinitely."""
cmd = Command.set_system_mode(self.id, system_mode=system_mode, until=until)
return self._gwy.send_cmd(cmd)
def set_auto(self) -> Task:
"""Revert system to Auto, set non-PermanentOverride zones to FollowSchedule."""
return self.set_mode(SystemMode.AUTO)
def reset_mode(self) -> Task:
"""Revert system to Auto, force *all* zones to FollowSchedule."""
return self.set_mode(SystemMode.RESET)
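    # Usage sketch (the controller id is illustrative, not from this module):
    #   system = gwy.system_by_id["01:123456"]
    #   task = system.set_mode(SystemMode.AUTO, until=None)  # returns a Task
    #   system.set_auto()    # shorthand for SystemMode.AUTO
    #   system.reset_mode()  # SystemMode.RESET, forces all zones back to FollowSchedule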
@property
def params(self) -> dict:
params = super().params
assert ATTR_HTG_SYSTEM in params # TODO: removeme
assert "system_mode" not in params[ATTR_HTG_SYSTEM] # TODO: removeme
params[ATTR_HTG_SYSTEM]["system_mode"] = self.system_mode
return params
class StoredHw:
MIN_SETPOINT = 30.0 # NOTE: these may be removed
MAX_SETPOINT = 85.0
DEFAULT_SETPOINT = 50.0
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._dhw = None
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_STATUS:
pass
def _handle_msg(self, msg, prev_msg=None):
"""Eavesdrop packets, or pairs of packets, to maintain the system state."""
def OUT_find_dhw_sensor(this):
"""Discover the stored HW this system (if any).
There is only 2 ways to to find a controller's DHW sensor:
1. The 10A0 RQ/RP *from/to a 07:* (1x/4h) - reliable
2. Use sensor temp matching - non-deterministic
Data from the CTL is considered more authorative. The RQ is initiated by the
DHW, so is not authorative. The I/1260 is not to/from a controller, so is
not useful.
"""
# 10A0: RQ/07/01, RP/01/07: can get both parent controller & DHW sensor
# 047 RQ --- 07:030741 01:102458 --:------ 10A0 006 00181F0003E4
# 062 RP --- 01:102458 07:030741 --:------ 10A0 006 0018380003E8
# 1260: I/07: can't get which parent controller - need to match temps
# 045 I --- 07:045960 --:------ 07:045960 1260 003 000911
# 1F41: I/01: get parent controller, but not DHW sensor
# 045 I --- 01:145038 --:------ 01:145038 1F41 012 000004FFFFFF1E060E0507E4
# 045 I --- 01:145038 --:------ 01:145038 1F41 006 000002FFFFFF
sensor = None
if this.code == "10A0" and this.verb == "RP":
if this.src is self and this.dst.type == "07":
sensor = this.dst
if sensor is not None:
if self.dhw is None:
self._get_zone("FA")
self.dhw._set_sensor(sensor)
super()._handle_msg(msg)
if msg.code in ("10A0", "1260"): # self.dhw.sensor is None and
# if self.dhw.sensor is None:
# find_dhw_sensor(msg)
pass
elif msg.code in ("1F41",): # dhw_mode
pass
def _get_zone(self, zone_idx, sensor=None, **kwargs) -> DhwZone:
"""Return a DHW zone (will create it if required).
        Can also set a DHW zone's sensor & valves.
"""
def create_dhw(zone_idx) -> DhwZone:
if self.dhw:
raise LookupError(f"Duplicate stored HW: {zone_idx}")
dhw = self._dhw = DhwZone(self)
if not self._gwy.config[DISABLE_DISCOVERY]:
dhw._discover() # discover_flag=DISCOVER_ALL)
return dhw
if zone_idx != "HW":
return
zone = self.dhw # TODO: self.zone_by_idx.get("HW") too?
if zone is None:
zone = create_dhw(zone_idx)
if kwargs.get("dhw_valve"):
zone._set_dhw_valve(kwargs["dhw_valve"])
if kwargs.get("htg_valve"):
zone._set_dhw_valve(kwargs["htg_valve"])
if sensor is not None:
zone._set_dhw_sensor(sensor)
return zone
@property
def dhw(self) -> DhwZone:
return self._dhw
def _set_dhw(self, dhw: DhwZone) -> None: # self._dhw
"""Set the DHW zone system."""
if not isinstance(dhw, DhwZone):
raise TypeError(f"stored_hw can't be: {dhw}")
if self._dhw is not None:
if self._dhw is dhw:
return
raise CorruptStateError("DHW shouldn't change: {self._dhw} to {dhw}")
if self._dhw is None:
# self._gwy._get_device(xxx)
# self.add_device(dhw.sensor)
# self.add_device(dhw.relay)
self._dhw = dhw
@property
def dhw_sensor(self) -> Device:
return self._dhw._dhw_sensor if self._dhw else None
@property
def hotwater_valve(self) -> Device:
return self._dhw._dhw_valve if self._dhw else None
@property
def heating_valve(self) -> Device:
return self._dhw._htg_valve if self._dhw else None
@property
def schema(self) -> dict:
assert ATTR_DHW_SYSTEM not in super().schema # TODO: removeme
return {**super().schema, ATTR_DHW_SYSTEM: self.dhw.schema if self.dhw else {}}
@property
def params(self) -> dict:
assert ATTR_DHW_SYSTEM not in super().params # TODO: removeme
return {**super().params, ATTR_DHW_SYSTEM: self.dhw.params if self.dhw else {}}
@property
def status(self) -> dict:
assert ATTR_DHW_SYSTEM not in super().status # TODO: removeme
return {**super().status, ATTR_DHW_SYSTEM: self.dhw.status if self.dhw else {}}
class MultiZone: # 0005 (+/- 000C?)
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.zones = []
self.zone_by_idx = {}
# self.zone_by_name = {}
self.zone_lock = Lock()
self.zone_lock_idx = None
# self._prev_30c9 = None # OUT: used to discover zone sensors
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_SCHEMA:
[ # 0005: find any zones + their type (RAD, UFH, VAL, MIX, ELE)
self._send_cmd("0005", payload=f"00{zone_type}")
for zone_type in ("08", "09", "0A", "0B", "11") # CODE_0005_ZONE_TYPE
]
[ # 0005: find any others - as per an RFG100
self._send_cmd("0005", payload=f"00{zone_type}")
for zone_type in ("00", "04", "0C", "0F", "10")
]
if discover_flag & DISCOVER_STATUS:
self._send_cmd("0006")
def _handle_msg(self, msg, prev_msg=None):
def OUT_find_zone_sensors() -> None:
"""Determine each zone's sensor by matching zone/sensor temperatures.
The temperature of each zone is reliably known (30C9 array), but the sensor
for each zone is not. In particular, the controller may be a sensor for a
zone, but unfortunately it does not announce its sensor temperatures.
In addition, there may be 'orphan' (e.g. from a neighbour) sensors
announcing temperatures with the same value.
This leaves only a process of exclusion as a means to determine which zone
uses the controller as a sensor.
"""
prev_msg, self._prev_30c9 = self._prev_30c9, msg
if prev_msg is None:
return
if len([z for z in self.zones if z.sensor is None]) == 0:
return # (currently) no zone without a sensor
# if self._gwy.serial_port: # only if in monitor mode...
secs = self._get_msg_value("1F09", "remaining_seconds")
if secs is None or msg.dtm > prev_msg.dtm + td(seconds=secs):
return # only compare against 30C9 (array) pkt from the last cycle
_LOGGER.debug("System state (before): %s", self)
changed_zones = {
z["zone_idx"]: z["temperature"]
for z in msg.payload
if z not in prev_msg.payload
} # zones with changed temps
_LOGGER.debug("Changed zones (from 30C9): %s", changed_zones)
if not changed_zones:
return # ctl's 30C9 says no zones have changed temps during this cycle
testable_zones = {
z: t
for z, t in changed_zones.items()
if self.zone_by_idx[z].sensor is None
and t not in [v for k, v in changed_zones.items() if k != z] + [None]
} # ...with unique (non-null) temps, and no sensor
_LOGGER.debug(
" - with unique/non-null temps (from 30C9), no sensor (from state): %s",
testable_zones,
)
if not testable_zones:
return # no testable zones
testable_sensors = [
d
for d in self._gwy.devices # not: self.devices
if d._ctl in (self, None)
and d.addr.type in DEVICE_HAS_ZONE_SENSOR
and d.temperature is not None
and d._msgs["30C9"].dtm > prev_msg.dtm # changed temp during last cycle
]
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug(
"Testable zones: %s (unique/non-null temps & sensorless)",
testable_zones,
)
_LOGGER.debug(
"Testable sensors: %s (non-null temps & orphans or zoneless)",
{d.id: d.temperature for d in testable_sensors},
)
if testable_sensors: # the main matching algorithm...
for zone_idx, temp in testable_zones.items():
                    # TODO: when sensors announce temp, ?also include its parent zone
matching_sensors = [
s
for s in testable_sensors
if s.temperature == temp and s._zone in (zone_idx, None)
]
_LOGGER.debug("Testing zone %s, temp: %s", zone_idx, temp)
_LOGGER.debug(
" - matching sensor(s): %s (same temp & not from another zone)",
[s.id for s in matching_sensors],
)
if len(matching_sensors) == 1:
_LOGGER.debug(" - matched sensor: %s", matching_sensors[0].id)
zone = self.zone_by_idx[zone_idx]
zone._set_sensor(matching_sensors[0])
zone.sensor._set_ctl(self)
elif len(matching_sensors) == 0:
_LOGGER.debug(" - no matching sensor (uses CTL?)")
else:
_LOGGER.debug(" - multiple sensors: %s", matching_sensors)
_LOGGER.debug("System state (after): %s", self)
# now see if we can allocate the controller as a sensor...
if self._zone is not None:
return # the controller has already been allocated
if len([z for z in self.zones if z.sensor is None]) != 1:
return # no single zone without a sensor
testable_zones = {
z: t
for z, t in changed_zones.items()
if self.zone_by_idx[z].sensor is None
} # this will be true if ctl is sensor
if not testable_zones:
return # no testable zones
zone_idx, temp = list(testable_zones.items())[0]
_LOGGER.debug("Testing (sole remaining) zone %s, temp: %s", zone_idx, temp)
# want to avoid complexity of z._temp
# zone = self.zone_by_idx[zone_idx]
# if zone._temp is None:
# return # TODO: should have a (not-None) temperature
matching_sensors = [
s
for s in testable_sensors
if s.temperature == temp and s._zone in (zone_idx, None)
]
_LOGGER.debug(
" - matching sensor(s): %s (excl. controller)",
[s.id for s in matching_sensors],
)
# can safely(?) assume this zone is using the CTL as a sensor...
if len(matching_sensors) == 0:
_LOGGER.debug(" - matched sensor: %s (by exclusion)", self._ctl.id)
zone = self.zone_by_idx[zone_idx]
zone._set_sensor(self)
zone.sensor._set_ctl(self)
_LOGGER.debug("System state (finally): %s", self)
super()._handle_msg(msg)
if msg.code in ("000A",) and isinstance(msg.payload, list):
for zone_idx in self.zone_by_idx:
cmd = Command.get_zone_mode(self.id, zone_idx, priority=Priority.LOW)
self._gwy.send_cmd(cmd)
# for zone in self.zones:
# zone._discover(discover_flags=DISCOVER_PARAMS)
if msg.code in ("000A", "2309", "30C9"):
pass
# if isinstance(msg.payload, list):
# elif msg.code == "000C":
# self._msgs[f"{msg.code}"] = msg
# elif msg.code == "0005" and prev_msg is not None:
# zone_added = bool(prev_msg.code == "0004") # else zone_deleted
# elif msg.code == "30C9" and isinstance(msg.payload, list): # msg.is_array:
# find_zone_sensors()
def _get_zone(self, zone_idx, sensor=None, **kwargs) -> Zone:
"""Return a zone (will create it if required).
        Can also set a zone's sensor, zone_type, and actuators.
"""
def create_zone(zone_idx) -> Zone:
if int(zone_idx, 16) >= self._gwy.config[MAX_ZONES]:
raise ValueError(f"Invalid zone idx: {zone_idx} (exceeds max_zones)")
if zone_idx in self.zone_by_idx:
raise LookupError(f"Duplicated zone: {zone_idx} for {self}")
zone = Zone(self, zone_idx)
if not self._gwy.config[DISABLE_DISCOVERY]: # TODO: needs tidyup (ref #67)
zone._discover() # discover_flag=DISCOVER_ALL)
return zone
if zone_idx == "HW":
return super()._get_zone(zone_idx, sensor=sensor, **kwargs)
if int(zone_idx, 16) >= self._gwy.config[MAX_ZONES]:
raise ValueError(f"Unknown zone_idx/domain_id: {zone_idx}")
zone = self.zone_by_idx.get(zone_idx)
if zone is None:
zone = create_zone(zone_idx)
if kwargs.get("zone_type"):
zone._set_zone_type(kwargs["zone_type"])
if kwargs.get("actuators"): # TODO: check not an address before implmenting
for device in [d for d in kwargs["actuators"] if d not in zone.devices]:
zone.devices.append(device)
zone.device_by_id[device.id] = device
if sensor is not None:
zone._set_sensor(sensor)
return zone
@property
def _zones(self) -> dict:
return sorted(self.zones, key=lambda x: x.idx)
@property
def schema(self) -> dict:
assert ATTR_ZONES not in super().schema # TODO: removeme
return {**super().schema, ATTR_ZONES: {z.idx: z.schema for z in self._zones}}
@property
def params(self) -> dict:
assert ATTR_ZONES not in super().params # TODO: removeme
return {**super().params, ATTR_ZONES: {z.idx: z.params for z in self._zones}}
@property
def status(self) -> dict:
assert ATTR_ZONES not in super().status # TODO: removeme
return {**super().status, ATTR_ZONES: {z.idx: z.status for z in self._zones}}
class UfhSystem:
@property
def schema(self) -> dict:
assert ATTR_UFH_SYSTEM not in super().schema # TODO: removeme
return {
**super().schema,
ATTR_UFH_SYSTEM: {
d.id: d.schema for d in sorted(self._ctl.devices) if d.type == "02"
},
}
@property
def params(self) -> dict:
assert ATTR_UFH_SYSTEM not in super().params # TODO: removeme
return {
**super().params,
ATTR_UFH_SYSTEM: {
d.id: d.params for d in sorted(self._ctl.devices) if d.type == "02"
},
}
@property
def status(self) -> dict:
assert ATTR_UFH_SYSTEM not in super().status # TODO: removeme
return {
**super().status,
ATTR_UFH_SYSTEM: {
d.id: d.status for d in sorted(self._ctl.devices) if d.type == "02"
},
}
class SystemBase(Entity): # 3B00 (multi-relay)
"""The most basic controllers - a generic controller (e.g. ST9420C)."""
# 0008|0009|1030|1100|2309|3B00
def __init__(self, gwy, ctl, **kwargs) -> None:
# _LOGGER.debug("Creating a System: %s (%s)", dev_addr.id, self.__class__)
super().__init__(gwy, **kwargs)
self.id = ctl.id
gwy.systems.append(self)
gwy.system_by_id[self.id] = self
self._ctl = ctl
self._domain_id = "FF"
self._evo = None
self._heat_demand = None
self._htg_control = None
def __repr__(self) -> str:
return f"{self._ctl.id} (sys_base)"
# def __str__(self) -> str: # TODO: WIP
# return json.dumps({self._ctl.id: self.schema})
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
# super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_SCHEMA:
[ # 000C: find the HTG relay and DHW sensor, if any (DHW relays in DHW)
self._send_cmd("000C", payload=dev_type)
for dev_type in ("000D", "000F") # CODE_000C_DEVICE_TYPE
# for dev_type, description in CODE_000C_DEVICE_TYPE.items() fix payload
# if description is not None
]
if discover_flag & DISCOVER_PARAMS:
self._send_cmd("1100", payload="FC") # TPI params
# # for code in ("3B00",): # 3EF0, 3EF1
# # for payload in ("0000", "00", "F8", "F9", "FA", "FB", "FC", "FF"):
# # self._send_cmd(code, payload=payload)
# # TODO: opentherm: 1FD4, 22D9, 3220
# if discover_flag & DISCOVER_PARAMS:
# for domain_id in range(0xF8, 0x100):
# self._send_cmd("0009", payload=f"{domain_id:02X}00")
if discover_flag & DISCOVER_STATUS:
# for domain_id in range(0xF8, 0x100):
# self._send_cmd("0008", payload=f"{domain_id:02X}00")
pass
def _handle_msg(self, msg) -> bool:
def OUT_is_exchange(this, prev): # TODO:use is?
return this.src is prev.dst and this.dst is prev.src.addr
def OUT_find_htg_relay(this, prev=None):
"""Discover the heat relay (10: or 13:) for this system.
            There are 3 ways to find a controller's heat relay (in order of reliability):
1. The 3220 RQ/RP *to/from a 10:* (1x/5min)
2a. The 3EF0 RQ/RP *to/from a 10:* (1x/1min)
2b. The 3EF0 RQ (no RP) *to a 13:* (3x/60min)
3. The 3B00 I/I exchange between a CTL & a 13: (TPI cycle rate, usu. 6x/hr)
            Data from the CTL is considered 'authoritative'. The 1FC9 RQ/RP exchange
to/from a CTL is too rare to be useful.
"""
# 18:14:14.025 066 RQ --- 01:078710 10:067219 --:------ 3220 005 0000050000
# 18:14:14.446 065 RP --- 10:067219 01:078710 --:------ 3220 005 00C00500FF
# 14:41:46.599 064 RQ --- 01:078710 10:067219 --:------ 3EF0 001 00
# 14:41:46.631 063 RP --- 10:067219 01:078710 --:------ 3EF0 006 0000100000FF # noqa
# 06:49:03.465 045 RQ --- 01:145038 13:237335 --:------ 3EF0 001 00
# 06:49:05.467 045 RQ --- 01:145038 13:237335 --:------ 3EF0 001 00
# 06:49:07.468 045 RQ --- 01:145038 13:237335 --:------ 3EF0 001 00
# 09:03:59.693 051 I --- 13:237335 --:------ 13:237335 3B00 002 00C8
# 09:04:02.667 045 I --- 01:145038 --:------ 01:145038 3B00 002 FCC8
# note the order: most to least reliable
heater = None
if this.code == "3220" and this.verb == "RQ":
if this.src is self and this.dst.type == "10":
heater = this.dst
elif this.code == "3EF0" and this.verb == "RQ":
if this.src is self and this.dst.type in ("10", "13"):
heater = this.dst
elif this.code == "3B00" and this.verb == " I" and prev is not None:
if prev.code == this.code and prev.verb == this.verb:
if this.src is self and prev.src.type == "13":
heater = prev.src
if heater is not None:
self._set_htg_control(heater)
if msg.code in ("000A", "2309", "30C9") and not isinstance(msg.payload, list):
pass
else:
super()._handle_msg(msg)
if msg.code == "0008" and msg.verb in (" I", "RP"):
if "domain_id" in msg.payload:
self._relay_demands[msg.payload["domain_id"]] = msg
if msg.payload["domain_id"] == "F9":
device = self.dhw.heating_valve if self.dhw else None
elif msg.payload["domain_id"] == "FA":
device = self.dhw.hotwater_valve if self.dhw else None
elif msg.payload["domain_id"] == "FC":
device = self.heating_control
else:
device = None
if False and device is not None: # TODO: FIXME
qos = {"priority": Priority.LOW, "retries": 2}
for code in ("0008", "3EF1"):
device._send_cmd(code, qos)
if msg.code == "3150" and msg.verb in (" I", "RP"):
if "domain_id" in msg.payload and msg.payload["domain_id"] == "FC":
self._heat_demand = msg.payload
# if msg.code in ("3220", "3B00", "3EF0"): # self.heating_control is None and
# find_htg_relay(msg, prev=prev_msg)
def _send_cmd(self, code, **kwargs) -> None:
dest = kwargs.pop("dest_addr", self._ctl.id)
payload = kwargs.pop("payload", "00")
super()._send_cmd(code, dest, payload, **kwargs)
@property
def devices(self) -> List[Device]:
return self._ctl.devices + [self._ctl] # TODO: to sort out
@property
def heating_control(self) -> Device:
if self._htg_control:
return self._htg_control
htg_control = [d for d in self._ctl.devices if d._domain_id == "FC"]
return htg_control[0] if len(htg_control) == 1 else None # HACK for 10:
def _set_htg_control(self, device: Device) -> None: # self._htg_control
"""Set the heating control relay for this system (10: or 13:)."""
if not isinstance(device, Device) or device.type not in ("10", "13"):
raise TypeError(f"{ATTR_HTG_CONTROL} can't be: {device}")
if self._htg_control is not None:
if self._htg_control is device:
return
raise CorruptStateError(
f"{ATTR_HTG_CONTROL} shouldn't change: {self._htg_control} to {device}"
)
# if device.evo is not None and device.evo is not self:
# raise LookupError
if self._htg_control is None:
self._htg_control = device
device._set_parent(self, domain="FC")
@property
def tpi_params(self) -> Optional[float]: # 1100
return self._get_msg_value("1100")
@property
def heat_demand(self) -> Optional[float]: # 3150/FC
if self._heat_demand:
return self._heat_demand["heat_demand"]
@property
def is_calling_for_heat(self) -> Optional[bool]:
"""Return True is the system is currently calling for heat."""
if not self._htg_control:
return
if self._htg_control.actuator_state:
return True
@property
def schema(self) -> dict:
"""Return the system's schema."""
schema = {ATTR_CONTROLLER: self._ctl.id, ATTR_HTG_SYSTEM: {}}
assert ATTR_HTG_SYSTEM in schema # TODO: removeme
assert ATTR_HTG_CONTROL not in schema[ATTR_HTG_SYSTEM] # TODO: removeme
schema[ATTR_HTG_SYSTEM][ATTR_HTG_CONTROL] = (
self.heating_control.id if self.heating_control else None
)
assert ATTR_ORPHANS not in schema[ATTR_HTG_SYSTEM] # TODO: removeme
schema[ATTR_ORPHANS] = sorted(
[d.id for d in self._ctl.devices if not d._domain_id and d.type != "02"]
        )  # devices without a parent zone; NB: CTL can be a sensor for a zone
# TODO: where to put this?
# assert "devices" not in schema # TODO: removeme
# schema["devices"] = {d.id: d.device_info for d in sorted(self._ctl.devices)}
return schema
@property
def params(self) -> dict:
"""Return the system's configuration."""
params = {ATTR_HTG_SYSTEM: {}}
assert ATTR_HTG_SYSTEM in params # TODO: removeme
# devices don't have params
# assert ATTR_HTG_CONTROL not in params[ATTR_HTG_SYSTEM] # TODO: removeme
# params[ATTR_HTG_SYSTEM][ATTR_HTG_CONTROL] = (
# self.heating_control.params if self.heating_control else None
# )
assert "tpi_params" not in params[ATTR_HTG_SYSTEM] # TODO: removeme
params[ATTR_HTG_SYSTEM]["tpi_params"] = (
self.heating_control._get_msg_value("1100")
if self.heating_control
else None
)
return params
@property
def status(self) -> dict:
"""Return the system's current state."""
status = {ATTR_HTG_SYSTEM: {}}
assert ATTR_HTG_SYSTEM in status # TODO: removeme
# assert ATTR_HTG_CONTROL not in status[ATTR_HTG_SYSTEM] # TODO: removeme
# status[ATTR_HTG_SYSTEM][ATTR_HTG_CONTROL] = (
# self.heating_control.status if self.heating_control else None
# )
status[ATTR_HTG_SYSTEM]["heat_demand"] = self.heat_demand
status[ATTR_DEVICES] = {d.id: d.status for d in sorted(self._ctl.devices)}
return status
class System(StoredHw, SysDatetime, SystemBase): # , SysFaultLog
"""The Controller class."""
def __init__(self, gwy, ctl, **kwargs) -> None:
super().__init__(gwy, ctl, **kwargs)
self._heat_demands = {}
self._relay_demands = {}
self._relay_failsafes = {}
def __repr__(self) -> str:
return f"{self._ctl.id} (system)"
def _handle_msg(self, msg) -> bool:
super()._handle_msg(msg)
if "domain_id" in msg.payload:
idx = msg.payload["domain_id"]
if msg.code == "0008":
self._relay_demands[idx] = msg
elif msg.code == "0009":
self._relay_failsafes[idx] = msg
elif msg.code == "3150":
self._heat_demands[idx] = msg
elif msg.code not in ("0001", "000C", "0418", "1100", "3B00"):
assert False, msg.code
@property
def heat_demands(self) -> Optional[dict]: # 3150
if self._heat_demands:
return {k: v.payload["heat_demand"] for k, v in self._heat_demands.items()}
@property
def relay_demands(self) -> Optional[dict]: # 0008
if self._relay_demands:
return {
k: v.payload["relay_demand"] for k, v in self._relay_demands.items()
}
@property
def relay_failsafes(self) -> Optional[dict]: # 0009
if self._relay_failsafes:
return {} # failsafe_enabled
@property
def status(self) -> dict:
"""Return the system's current state."""
status = super().status
assert ATTR_HTG_SYSTEM in status # TODO: removeme
status[ATTR_HTG_SYSTEM]["heat_demands"] = self.heat_demands
status[ATTR_HTG_SYSTEM]["relay_demands"] = self.relay_demands
status[ATTR_HTG_SYSTEM]["relay_failsafes"] = self.relay_failsafes
return status
class Evohome(SysLanguage, SysMode, MultiZone, UfhSystem, System): # evohome
# class Evohome(System): # evohome
"""The Evohome system - some controllers are evohome-compatible."""
def __init__(self, gwy, ctl, **kwargs) -> None:
super().__init__(gwy, ctl, **kwargs)
def __repr__(self) -> str:
return f"{self._ctl.id} (evohome)"
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
super()._discover(discover_flag=discover_flag)
if discover_flag & DISCOVER_STATUS:
self._send_cmd("1F09")
def _handle_msg(self, msg) -> bool:
super()._handle_msg(msg)
# def xxx(zone_dict):
# zone = self.zone_by_idx[zone_dict.pop("zone_idx")]
# if msg.code == "000A":
# zone._zone_config = zone_dict
# elif msg.code == "2309":
# zone._temp = zone_dict
# elif msg.code == "30C9":
# zone._temp = zone_dict
# if msg.code in ("000A", "2309", "30C9"):
# if isinstance(msg.payload, list):
# super()._handle_msg(msg)
# [xxx(z) for z in msg.payload]
# else:
# xxx(msg.payload)
if msg.code in ("000A", "2309", "30C9") and isinstance(msg.payload, list):
pass
class Chronotherm(Evohome):
def __repr__(self) -> str:
return f"{self._ctl.id} (chronotherm)"
class Hometronics(System):
RQ_SUPPORTED = ("0004", "000C", "2E04", "313F") # TODO: WIP
RQ_UNSUPPORTED = ("xxxx",) # 10E0?
def __repr__(self) -> str:
return f"{self._ctl.id} (hometronics)"
def _discover(self, discover_flag=DISCOVER_ALL) -> None:
# super()._discover(discover_flag=discover_flag)
# will RP to: 0005/configured_zones_alt, but not: configured_zones
# will RP to: 0004
if discover_flag & DISCOVER_STATUS:
self._send_cmd("1F09")
class Programmer(Evohome):
def __repr__(self) -> str:
return f"{self._ctl.id} (programmer)"
class Sundial(Evohome):
def __repr__(self) -> str:
return f"{self._ctl.id} (sundial)"
SYSTEM_CLASSES = {
SystemType.CHRONOTHERM: Chronotherm,
SystemType.EVOHOME: Evohome,
SystemType.HOMETRONICS: Hometronics,
SystemType.PROGRAMMER: Programmer,
SystemType.SUNDIAL: Sundial,
SystemType.GENERIC: System,
}
| 2.03125 | 2 |
promise2012/Vnet2d/layer.py | kant/VNet | 64 | 12796690 | <gh_stars>10-100
'''
Convolution layer, pooling layer, initialization, etc.
'''
import tensorflow as tf
import numpy as np
# Weight initialization (Xavier's init)
def weight_xavier_init(shape, n_inputs, n_outputs, activefuncation='sigmoid', uniform=True, variable_name=None):
if activefuncation == 'sigmoid':
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
initial = tf.random_uniform(shape, -init_range, init_range)
return tf.Variable(initial, name=variable_name)
else:
stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
return tf.Variable(initial, name=variable_name)
elif activefuncation == 'relu':
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
initial = tf.random_uniform(shape, -init_range, init_range)
return tf.Variable(initial, name=variable_name)
else:
stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
return tf.Variable(initial, name=variable_name)
elif activefuncation == 'tan':
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
initial = tf.random_uniform(shape, -init_range, init_range)
return tf.Variable(initial, name=variable_name)
else:
stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
return tf.Variable(initial, name=variable_name)
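# Usage sketch (shapes are illustrative): a 3x3 conv kernel mapping 16 -> 32 channels.
# W = weight_xavier_init(shape=[3, 3, 16, 32], n_inputs=3 * 3 * 16,
#                        n_outputs=3 * 3 * 32, activefuncation='relu',
#                        variable_name='conv1_W')
# B = bias_variable([32], variable_name='conv1_B')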
# Bias initialization
def bias_variable(shape, variable_name=None):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=variable_name)
# 2D convolution
def conv2d(x, W, strides=1):
conv_2d = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
return conv_2d
def normalizationlayer(x, is_train, height=None, width=None, norm_type='None', G=16, esp=1e-5, scope=None):
with tf.name_scope(scope + norm_type):
if norm_type == 'None':
output = x
elif norm_type == 'batch':
output = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_train)
elif norm_type == 'group':
            # transpose: [bs, h, w, c] to [bs, c, h, w], following the paper
x = tf.transpose(x, [0, 3, 1, 2])
N, C, H, W = x.get_shape().as_list()
G = min(G, C)
            if H is None and W is None:
                H, W = height, width
x = tf.reshape(x, [-1, G, C // G, H, W])
mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
x = (x - mean) / tf.sqrt(var + esp)
            # per-channel gamma and beta
gama = tf.get_variable(scope + norm_type + 'group_gama', [C], initializer=tf.constant_initializer(1.0))
beta = tf.get_variable(scope + norm_type + 'group_beta', [C], initializer=tf.constant_initializer(0.0))
gama = tf.reshape(gama, [1, C, 1, 1])
beta = tf.reshape(beta, [1, C, 1, 1])
output = tf.reshape(x, [-1, C, H, W]) * gama + beta
            ## transpose: [bs, c, h, w] back to [bs, h, w, c], following the paper
output = tf.transpose(output, [0, 2, 3, 1])
return output
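# Usage sketch (arguments are illustrative): group-normalise a feature map within a named scope.
# x_norm = normalizationlayer(x, is_train=True, height=96, width=96,
#                             norm_type='group', G=16, scope='conv1_')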
# 2D deconvolution
def deconv2d(x, W, stride=2):
x_shape = tf.shape(x)
output_shape = tf.stack([x_shape[0], x_shape[1] * stride, x_shape[2] * stride, x_shape[3] // stride])
return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding='SAME')
# Unet crop and concat
def crop_and_concat(x1, x2):
x1_shape = tf.shape(x1)
x2_shape = tf.shape(x2)
# offsets for the top left corner of the crop
offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
size = [-1, x2_shape[1], x2_shape[2], -1]
x1_crop = tf.slice(x1, offsets, size)
return tf.concat([x1_crop, x2], 3)
# Resnet add
def resnet_Add(x1, x2):
"""
x1 shape[-1] is small x2 shape[-1]
"""
if x1.get_shape().as_list()[3] != x2.get_shape().as_list()[3]:
# Option A:zero-padding
residual_connection = x2 + tf.pad(x1, [[0, 0], [0, 0], [0, 0],
[0, x2.get_shape().as_list()[3] - x1.get_shape().as_list()[3]]])
else:
residual_connection = x2 + x1
# residual_connection=tf.add(x1,x2)
return residual_connection
| 2.5625 | 3 |
encoder.py | Devanshu-singh-VR/Chat_Bot_Attention | 0 | 12796691 | <filename>encoder.py<gh_stars>0
import tensorflow as tf
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding, enc_units, batch_size):
super(Encoder, self).__init__()
self.batch_size = batch_size
self.enc_units = enc_units # Size of the hidden units present in GRU.
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding)
self.GRU = tf.keras.layers.GRU(self.enc_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform', kernel_regularizer=tf.keras.regularizers.L2(0.001))
# here GRU (Gated recurrent unit) is used
"""
inputs: initial input from training data
hidden: initial hidden state for GRU
output: all intermediate hidden states of GRU
state: final hidden state of GRU
"""
def call(self, inputs, hidden):
inputs = self.embedding(inputs)
output, state = self.GRU(inputs, initial_state = hidden)
return output, state
if __name__ == '__main__':
print('oot sssd enc') | 2.953125 | 3 |
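    # Minimal smoke test; the hyper-parameters below are illustrative, not from the project.
    encoder = Encoder(vocab_size=1000, embedding=64, enc_units=128, batch_size=8)
    sample_input = tf.random.uniform((8, 10), maxval=1000, dtype=tf.int32)
    sample_hidden = tf.zeros((8, 128))
    sample_output, sample_state = encoder(sample_input, sample_hidden)
    print(sample_output.shape)  # expected: (8, 10, 128)
    print(sample_state.shape)   # expected: (8, 128)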
ex6_sd3.py | jlaw8504/lp3thw | 0 | 12796692 | <reponame>jlaw8504/lp3thw
# the other instances of formatting strings used variables that were not
# strings. To prove it, I'll use the function type to print out the
# type of each variable used in a formatted string
# recreate the variables from ex6
types_of_people = 10
x = f"There are {types_of_people} type of people."
binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."
hilarious = False
# print type of each variable
print("types_of_people:", type(types_of_people))
print("x:", type(x))
print("binary:", type(binary))
print("do_not:", type(do_not))
print("y:", type(y))
print("hilarious:", type(hilarious))
| 4.1875 | 4 |
algorithms_datastructures/graphs/implementations/structures.py | vaishnavprachi98/technical-interviews | 65 | 12796693 | <reponame>vaishnavprachi98/technical-interviews<filename>algorithms_datastructures/graphs/implementations/structures.py
"""
@author: <NAME>
@since: 23/08/2016
@modified:
Based Vertex and Edge clases
- a bit too generic too be honest
- should be speific for use
"""
class Vertex:
def __init__(self, x=None, point=None, rep=None):
self.name = x # integer 'name' or index....why did I call this name?
self.pointer = point # points to a linked list in adjacency list
self.rep = rep # string representation
self.distance = None
self.index = x # made this because I didn't want to refactor to change .name to .index everywhere.
class Edge:
def __init__(self, origin, destination, x=None, weight=None, capacity=None, flow=None, residual_capacity=None, residual_flow=None):
self.origin = origin
self.destination = destination
self.weight = weight
self.capacity = capacity
self.name = x
self.flow = flow
self.residual_capacity = residual_capacity
self.residual_flow = residual_flow
def get_endpoints(self):
return self.origin, self.destination
def get_opposite(self, vertex):
"""Return the vertex that is opposite v on this edge"""
return self.destination if vertex is self.origin else self.origin
def to_string(self, ends_vertex_obs = False):
if ends_vertex_obs:
return "og: " + str(self.origin.name) + " to: " + str(self.destination.name)
return "og: " + str(self.origin) + " to: " + str(self.destination)
| 3.734375 | 4 |
data/aml-pipelines-scripts/train.py | ajakupov/DataHawkPipelines | 0 | 12796694 | <gh_stars>0
import argparse
import os
import pandas as pd
import numpy as np
import math
import pickle
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn_pandas import DataFrameMapper
from sklearn.metrics import mean_squared_error
print("In train.py")
print("As a data scientist, this is where I write my training code.")
parser = argparse.ArgumentParser("train")
parser.add_argument("--input", type=str, help="input directory", dest="input", required=True)
parser.add_argument("--output", type=str, help="output directory", dest="output", required=True)
args = parser.parse_args()
print("Argument 1: %s" % args.input)
print("Argument 2: %s" % args.output)
# Load your processed features and outputs
df = pd.read_csv(os.path.join(args.input, 'nyc-taxi-processed-data.csv'))
x_df = df.drop(['totalAmount'], axis=1)
y_df = df['totalAmount']
X_train, X_test, y_train, y_test = train_test_split(x_df, y_df, test_size=0.2, random_state=0)
# we will not transform / scale the four engineered features:
# hour_sine, hour_cosine, day_of_week_sine, day_of_week_cosine
categorical = ['normalizeHolidayName', 'isPaidTimeOff']
numerical = ['vendorID', 'passengerCount', 'tripDistance', 'day_of_month', 'month_num',
'snowDepth', 'precipTime', 'precipDepth', 'temperature']
numeric_transformations = [([f], Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])) for f in numerical]
categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]
transformations = numeric_transformations + categorical_transformations
# df_out will return a data frame, and default = None will pass the engineered features unchanged
mapper = DataFrameMapper(transformations, input_df=True, df_out=True, default=None, sparse=False)
clf = Pipeline(steps=[('preprocessor', mapper),
('regressor', GradientBoostingRegressor())])
clf.fit(X_train, y_train)
y_predict = clf.predict(X_test)
y_actual = y_test.values.flatten().tolist()
rmse = math.sqrt(mean_squared_error(y_actual, y_predict))
print('The RMSE score on test data for GradientBoostingRegressor: ', rmse)
# Save the model
if not (args.output is None):
os.makedirs(args.output, exist_ok=True)
output_filename = os.path.join(args.output, 'nyc-taxi-fare.pkl')
pickle.dump(clf, open(output_filename, 'wb'))
print('Model file nyc-taxi-fare.pkl saved!')
| 2.6875 | 3 |
tests/run.py | kapilgarg1996/gmc | 2 | 12796695 | import argparse
import unittest
import os
import importlib
import sys
from gmc.conf import settings, ENVIRONMENT_VARIABLE
from gmc.core import handler
def build_suite(test_labels=None):
suite = unittest.TestSuite()
test_loader = unittest.defaultTestLoader
test_labels = test_labels or ['.']
discover_kwargs = {}
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path):
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
test_loader._top_level_dir = None
suite.addTests(tests)
return suite
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = importlib.import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'modules', nargs='*',
help='Optional path(s) to test modules; e.g. "test_settings" or '
'"test_settings.tests.TestSettings.test_settings_loader".',
)
parser.add_argument('--settings', help='Test gmc with different settings file')
args = parser.parse_args()
if args.settings:
handler.execute_from_command_line(['', args.settings], quiet=True)
os.environ['DUMMY'] = "FALSE"
else:
os.environ[ENVIRONMENT_VARIABLE] = 'setting'
os.environ['DUMMY'] = "TRUE"
args.modules = [os.path.normpath(labels) for labels in args.modules]
suite = build_suite(args.modules)
runner = unittest.TextTestRunner()
runner.run(suite) | 2.46875 | 2 |
config.py | bert386/rpi-flask-bluez-controller | 0 | 12796696 | # -*- coding: utf-8 -*-
""" Singleton class to manage configuration
Description:
Todo:
"""
import json
import os
import sys
import logging
import constant
class Config(object):
# Here will be the instance stored.
__instance = None
@classmethod
def getInstance(cls):
""" Static access method. """
if Config.__instance == None:
raise Exception("Any configuration is not initialized yet!")
return Config.__instance
def __init__(self, url):
""" Virtually private constructor. """
if Config.__instance != None:
raise Exception("This class is a singleton!")
else:
self.config = dict()
self.load(url)
self._url = url
Config.__instance = self
def load(self, url):
try:
self.config = json.load(open(url))
self.config["version"] = constant.APPVERSION
logging.info(self.config)
except Exception as error:
logging.error(error, exc_info=True)
return self.config
def store(self):
try:
with open(self._url, "w") as outfile:
json.dump(self.config, outfile, indent=4)
except Exception as error:
logging.error(error, exc_info=True)
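if __name__ == "__main__":
    # Usage sketch: initialise the singleton once with a JSON file path (illustrative),
    # then fetch it anywhere via getInstance().
    Config("config.json")
    print(Config.getInstance().config.get("version"))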
| 2.96875 | 3 |
eunice012716/Week1/ch3/3.2/exercise1.py | coookie89/Intern-Training | 1 | 12796697 | <reponame>coookie89/Intern-Training
if __name__ == "__main__":
print(
"Zero Initialization:",
"Since the derivatives will remain same for every w in W[l],",
"this method serves almost no purpose as it causes neurons to perform the same calculation in each iterations and produces same outputs.",
)
| 2.953125 | 3 |
05-matplotlib/q02/question.py | EdwinJUGomez/CDA_2021_EdwinJUGomez | 0 | 12796698 | ##
## Plotting with Matplotlib
## ===========================================================================
##
## Build a chart similar to the one shown in the file `original.png`
## using the file `data.csv`. The generated chart must be saved to the
## file `generada.png`.
##
## Save the figure to disk with:
##
##     plt.savefig('generada.png')
##
## >>> Write your code below this point <<<
##
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
df = pd.read_csv('data.csv', sep=',')
data = df.groupby('Region').sum()[['Poblacion 0-14', 'Poblacion 15-64', 'Poblacion 65+']]
fig, axs = plt.subplots(1, 6, sharex='col', sharey='row', figsize=(13,6), dpi=72);
plt.subplots_adjust(wspace = 0.1, hspace=0.1)
plt.setp(axs[0], ylabel='Poblacion')
for index, region in enumerate(data.index):
axs[index].bar(range(3), data.iloc[index,:], color=['tab:orange', 'tab:blue', 'tab:green'])
for n, ax in enumerate(axs):
ax.set_xticks(range(3));
ax.set_xticklabels(data.columns, rotation=90);
ax.set_title(data.index[n]);
plt.tight_layout()
plt.savefig('generada.png'); | 3.15625 | 3 |
bin/write_options.py | davidcorne/markdown-editor | 0 | 12796699 | #!/usr/bin/env python
# Written by: DGC
#
#D This is purely for developer use, it will not be included in the program it
#D is just for adding/changing options in the standard Options.pickle file.
#
# python imports
from __future__ import unicode_literals
import os
import csv
import pickle
import sys
import re
# local imports
OPTIONS = {
"code_css": "Standard",
"code_css_class": "highlight",
"show_html": False,
"processor": "markdown_all",
"markdown_css": "Markdown",
"display_line_numbers": False,
"font": "Arial,12,-1,5,50,0,0,0,0,0",
}
LOCALISATION_OPTIONS = {
"language": "en_GB",
"available_languages": ["en_GB", "de_DE", "en_AU", "en_US", "fr_FR"],
}
TEST_OPTIONS = {
"code_css": "",
"code_css_class": "highlight",
"show_html": False,
"processor": "markdown_all",
"markdown_css": "",
"display_line_numbers": False,
"font": "Arial,12,-1,5,50,0,0,0,0,0",
# "language": "en_GB",
}
#==============================================================================
def write_config_file(object, file_name, directory="Resources"):
"""
Pickles the object to file_name where file_name is a relative path under
Resources
"""
options_path = os.path.join(
os.path.dirname(sys.argv[0]),
"../" + directory
)
file_path = os.path.join(options_path, file_name)
with open(file_path, "wb") as options_file:
pickle.dump(object, options_file)
#==============================================================================
def write_options_files():
write_config_file(OPTIONS, "Options.pickle")
write_config_file(
LOCALISATION_OPTIONS,
"Languages.pickle",
directory="Resources/Languages"
)
write_config_file(TEST_OPTIONS, "Options.pickle", directory="Integration")
#==============================================================================
def verify_keys(file_name, keys, verifier):
keys = set(keys)
verifier = set(verifier)
difference = [k for k in keys if k not in verifier]
if (difference):
raise Exception(
"Bad key found in %s: %s" %(file_name, str(difference))
)
#==============================================================================
def write_user_strings(file_name, verifier):
with open("data/" + file_name + ".csv", "rb") as csvfile:
table = csv.reader(csvfile)
for i, row in enumerate(table):
if (i == 0):
keys = row[1:]
verify_keys(file_name, keys, verifier)
continue
language = row[0]
user_text = dict(zip(keys, row[1:]))
write_config_file(
user_text,
file_name + ".pickle",
directory="Resources/Languages/" + language)
#==============================================================================
def generate_keys(pattern):
keys = list()
for path in os.listdir("."):
if (path[-3:] == ".py"):
with open(path, "r") as py_file:
lines = py_file.readlines()
for line in lines:
if (pattern.lower() in line.lower()):
match = re.search(".*" + pattern + "\[\"(.*)\"\]", line)
if match:
keys.append(match.group(1))
return keys
#==============================================================================
def generate_user_text_keys():
return generate_keys("USER_TEXT")
#==============================================================================
def generate_tool_tips_keys():
return generate_keys("TOOL_TIP")
#==============================================================================
def write_user_text():
write_user_strings("UserText", generate_user_text_keys())
#==============================================================================
def write_tool_tips():
write_user_strings("ToolTips", generate_tool_tips_keys())
#==============================================================================
if (__name__ == "__main__"):
write_options_files()
write_user_text()
write_tool_tips()
| 2.59375 | 3 |
registration/migrations/0003_student_registration.py | NUKSI911/School-Mng | 0 | 12796700 | <reponame>NUKSI911/School-Mng<gh_stars>0
# Generated by Django 3.0.8 on 2020-07-18 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0002_auto_20200717_1825'),
]
operations = [
migrations.CreateModel(
name='Student_Registration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('Date_of_Birth', models.DateField()),
('Blood_Group', models.CharField(max_length=2)),
('Nationality', models.CharField(max_length=15)),
('Email', models.EmailField(max_length=254)),
('Religion', models.CharField(max_length=25)),
('Phone_No', models.CharField(max_length=12)),
],
),
]
| 1.757813 | 2 |
main.py | JonathaTrue/Python_par | 0 | 12796701 | import os
clear = lambda: os.system('clear')
clear()
opcao = 1
while(opcao != 0):
print("Atividade de PARADIGMAS")
print(" 1 - QUESTÃO 01")
print(" 2 - QUESTÃO 02")
print(" 3 - QUESTÃO 03")
print(" 4 - QUESTÃO 04")
print(" 5 - QUESTÃO 05")
print(" 0 - PARA SAIR")
print("")
opcao = int(input("Digite o desejado: "))
clear()
if(opcao == 1):
print("PESSOA MAIS VELHA")
print("Digite o nome da primeira pessoa: ")
p1 = input("Nome: ")
i1 = int(input("Idade: "))
print("Digite o nome da Segunda pessoa: ")
p2 = input("Nome: ")
i2 = int(input("Idade: "))
if(i1 > i2):
print("pessoa mais velha: {}".format(p1))
else:
print("pessoa mais velha: {}".format(p2))
print("")
elif(opcao == 2):
print("MEDIA DE SALÁRIO")
print("Digite o nome do primeiro funcionairo: ")
p1 = input("Nome: ")
s1 = float(input("Salário: "))
print("Digite o nome do Segundo funcionairo: ")
p2 = input("Nome: ")
s2 = float(input("Salário: "))
media =(s1 + s2 ) / 2
print("Salário médio = {}".format(media))
print("")
elif(opcao == 3):
from retangulo import Retangulo
print("LARGURA E ALTURA DE UM RETÂNGULO")
print("Entre com a largura e altura do retângulo: ")
L1 = float(input("Largura: "))
A1 = float(input("Altura: "))
retangulo = Retangulo(A1,L1)
area = retangulo.Area(A1,L1)
perimetro = retangulo.Perimetro(A1, L1)
diagonal = retangulo.Diagonal(A1, L1)
print("Area: {}".format(area))
print("Perímetro: {}".format(perimetro))
print("Diagonal: {}".format(diagonal))
print("")
elif(opcao == 4):
from funcionario import Funcionario
print("ATUALIZAÇÃO DE DADOS")
print("Digite um funcionario")
nome = input("Nome: ")
salario = float(input("Salario: "))
imposto = float(input("Imposto: "))
funcionario1 = Funcionario(nome, salario, imposto)
liquido = funcionario1.SalarioLiquido(salario, imposto)
print(" ")
print("Funcionairo : {} , R$ {}".format(nome, liquido))
comis = float(input("Digite a porcentagem para aumentar o salário: "))
aument = funcionario1.AumentoSalario(salario, liquido, comis)
print("Dados Atualizados : {} R$ {}".format(nome, aument))
print("")
elif(opcao == 5):
print("APROVADO OU REPROVADO")
nome = input("Nome do Aluno: ")
nota1 = float(input("Primeira nota : "))
nota2 = float(input("Segunda nota : "))
nota3 = float(input("Terceira nota : "))
notas = (nota1 + nota2 + nota3)
print("")
vb = 60.0
vr = vb - notas
if(notas >= vb):
print("Nota final = {:.2f}".format(notas))
print("Aprovado")
else:
print("Nota final = {:.2f}".format(notas))
print("Reprovado")
print("Faltaram {:.2f} pontos".format(vr))
print("")
elif(opcao == 0):
break
else:
print("Valor informado inválido! ") | 3.921875 | 4 |
lib/pushtree/pushtree_nfa.py | zepheira/amara | 6 | 12796702 | <reponame>zepheira/amara<gh_stars>1-10
import itertools
from amara.xpath import parser as xpath_parser
from amara.xpath import locationpaths
from amara.xpath.locationpaths import axisspecifiers
from amara.xpath.locationpaths import nodetests
from amara.xpath.functions import nodesets
from amara.xpath.expressions import booleans
import amara.xpath.expressions.nodesets # another nodesets!
from amara.xpath.expressions import basics
counter = itertools.count(1)
# [@x]
class AttributeExistsPred(object):
def __init__(self, name):
self.name = name
# [@x="a"]
class AttributeBinOpPred(object):
def __init__(self, name, op, value):
self.name = name
self.op = op
self.value = value
class AttributeFunctionCallPred(object):
def __init__(self, func):
self.func = func
#####
# This would yield nodes, attribute, PIs, and comments
#class AnyTest(object):
# pass
class BaseNodeTest(object):
match_type = "node"
# What about *[@spam] ?
class AnyNodeTest(BaseNodeTest):
def __str__(self):
return "AnyNode (*)"
class NodeTest(BaseNodeTest):
def __init__(self, name, predicates):
# (ns, name)
self.name = name
self.predicates = predicates
def __str__(self):
return "Node ns=%r localname=%r predicates=%r" % (self.name[0], self.name[1],
self.predicates)
# predicates make no sense here because we only support downward axes
# and these have no downward axes. (XXX I think.)
class AttributeTest(object):
match_type = "attr"
def __init__(self, name, predicates):
self.name = name
assert not predicates
self.predicates = predicates
def __str__(self):
return "Attr name=%r" % (self.name,)
class ProcessingInstructionTest(object):
match_type = "processing-instruction"
def __init__(self, target):
self.target = target
def __str__(self):
return "processing-instruction(%r)" % (self.target,)
class CommentTest(object):
match_type = "comment"
class NFA(object):
def __init__(self):
self.start_edges = []
self.edges = {} # from_node_id -> [(to_node_id, test), ...]
self.terminal_nodes = set()
# The start node has no type
self.match_types = {} # node_id -> match_type
self.labeled_handlers = {} # node_id -> (label, PushtreeHandler)
def copy(self):
nfa = NFA()
nfa.start_edges[:] = self.start_edges
nfa.edges.update(self.edges)
nfa.terminal_nodes.update(self.terminal_nodes)
nfa.match_types.update(self.match_types)
nfa.labeled_handlers.update(self.labeled_handlers)
return nfa
def get_edges(self, node_id):
if node_id is None:
return self.start_edges
return self.edges[node_id]
def add_handler(self, labeled_handler):
for node_id in self.terminal_nodes:
self.labeled_handlers[node_id].append(labeled_handler)
def new_node(self, from_node_id, test):
edges = self.get_edges(from_node_id)
to_node_id = next(counter)
self.edges[to_node_id] = []
self.match_types[to_node_id] = test.match_type
self.labeled_handlers[to_node_id] = []
edges.append( (to_node_id, test) )
return to_node_id
def connect(self, from_node_id, to_node_id, test):
self.get_edges(from_node_id).append( (to_node_id, test) )
def extend(self, other):
assert not set(self.edges) & set(other.edges), "non-empty intersection"
if not self.start_edges:
self.start_edges[:] = other.start_edges
self.edges.update(other.edges)
self.match_types.update(other.match_types)
for node_id in self.terminal_nodes:
self.edges[node_id].extend(other.start_edges)
self.terminal_nodes.clear()
self.terminal_nodes.update(other.terminal_nodes)
self.labeled_handlers.update(other.labeled_handlers)
def union(self, other):
assert not set(self.edges) & set(other.edges), "non-empty intersection"
self.start_edges.extend(other.start_edges)
self.edges.update(other.edges)
self.match_types.update(other.match_types)
self.terminal_nodes.update(other.terminal_nodes)
self.labeled_handlers.update(other.labeled_handlers)
def dump(self):
for node_id, edges in [(None, self.start_edges)] + sorted(self.edges.items()):
if node_id is None:
node_name = "(start)"
labels = ""
else:
node_name = str(node_id)
action = str(self.match_types[node_id])
labels += " " + str([x[0] for x in self.labeled_handlers[node_id]])
is_terminal = "(terminal)" if (node_id in self.terminal_nodes) else ""
print node_name, is_terminal, labels
self._dump_edges(edges)
print "======"
def _dump_edges(self, edges):
for (to_node_id, test) in edges:
print "", test, "->", to_node_id
def _add_initial_loop(nfa):
start_edges = nfa.start_edges[:]
any_node = nfa.new_node(None, AnyNodeTest())
for (to_node_id, test) in start_edges:
nfa.connect(any_node, to_node_id, test)
nfa.connect(any_node, any_node, AnyNodeTest()) # loop
def to_nfa(expr, namespaces):
#print "Eval", expr.__class__
if (expr.__class__ is locationpaths.relative_location_path):
# This is a set of path specifiers like
# "a" "a/b", "a/b/c[0]/d[@x]" (relative location path)
# "@a", "a/@b", and even "@a/@b", which gives nothing
nfa = NFA()
for step in expr._steps:
nfa.extend(to_nfa(step, namespaces))
_add_initial_loop(nfa)
return nfa
if (expr.__class__ is locationpaths.absolute_location_path):
# This is an absolute path like
# "/a", "/a[0]/b[@x]"
nfa = NFA()
for step in expr._steps:
axis = step.axis
axis_name = axis.name
assert axis_name in ("child", "descendant"), axis_name
subnfa = to_nfa(step, namespaces)
if axis_name == "descendant":
_add_initial_loop(subnfa)
nfa.extend(subnfa)
return nfa
if (expr.__class__ is locationpaths.abbreviated_absolute_location_path):
# This is an abbreviated_absolute_location_path
# "//a", "a//b"
nfa = NFA()
for step in expr._steps:
nfa.extend(to_nfa(step, namespaces))
_add_initial_loop(nfa)
return nfa
if expr.__class__ is locationpaths.location_step:
# This is a step along some axis, such as:
# "a" - step along the child axis
# "a[@x][@y='1']" - step along the child axis, with two predicates
# "@a" - step along the attribute axis
axis = expr.axis
axis_name = axis.name
assert axis_name in ("child", "descendant", "attribute"), axis_name
if axis_name == "attribute":
klass = AttributeTest
else:
klass = NodeTest
nfa = NFA()
node_test = expr.node_test
if node_test.__class__ is nodetests.local_name_test:
# Something without a namespace, like "a"
node_id = nfa.new_node(None,
klass(node_test.name_key, expr.predicates))
elif node_test.__class__ is nodetests.namespace_test:
# Namespace but no name, like "a:*"
namespace = namespaces[node_test._prefix]
node_id = nfa.new_node(None,
klass((namespace, None), expr.predicates))
elif node_test.__class__ is nodetests.qualified_name_test:
prefix, localname = node_test.name_key
namespace = namespaces[prefix]
node_id = nfa.new_node(None,
klass((namespace, localname), expr.predicates))
elif node_test.__class__ is nodetests.processing_instruction_test:
node_id = nfa.new_node(None,
ProcessingInstructionTest(node_test._target))
elif node_test.__class__ is locationpaths.nodetests.principal_type_test:
node_id = nfa.new_node(None,
klass((None, None), None))
else:
die(node_test)
nfa.terminal_nodes.add(node_id)
#if axis_name == "descendant":
# _add_initial_loop(nfa)
#print "QWERQWER"
#nfa.dump()
return nfa
if expr.__class__ is amara.xpath.expressions.nodesets.union_expr:
# "a|b"
nfa = to_nfa(expr._paths[0], namespaces)
for path in expr._paths[1:]:
nfa.union(to_nfa(path, namespaces))
return nfa
die(expr)
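# Usage sketch (assumes amara's XPath parser API; illustrative only):
#   ast = xpath_parser.parse("a//b")   # xpath_parser is imported at the top of this module
#   nfa = to_nfa(ast, namespaces={})
#   nfa.dump()
#   tables = nfa_to_dfa(nfa)           # nfa_to_dfa is defined further below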
def node_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, BaseNodeTest), parent_test
assert getattr(parent_test, "predicates", None) is None
assert isinstance(child_test, BaseNodeTest), child_test
assert getattr(child_test, "predicates", None) is None
if parent_test is None:
if isinstance(child_test, AnyNodeTest):
return True, False
if isinstance(child_test, NodeTest):
return child_test, False
if isinstance(parent_test, AnyNodeTest):
if isinstance(child_test, AnyNodeTest):
return True, False
if isinstance(child_test, NodeTest):
return child_test, False
elif isinstance(parent_test, NodeTest):
if isinstance(child_test, AnyNodeTest):
return True, True
if isinstance(child_test, NodeTest):
# XXX This is wrong. Resolved namespaces can be the same even
# if the namespace fields are different.
# XXX check for predicates!
if parent_test.name == child_test.name:
return True, False
return False, child_test
def attr_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, AttributeTest), parent_test
assert isinstance(child_test, AttributeTest), child_test
if parent_test is None:
return child_test, False
if parent_test.name == child_test.name:
return True, False
return False, child_test
def pi_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, ProcessingInstructionTest), parent_test
assert isinstance(child_test, ProcessingInstructionTest), child_test
if parent_test is None:
return child_test, False
# Is there any way to match *any* PI?
# Looks like Amara support XPath 1.0, where this is a string
if parent_test.target == child_test.target:
return True, False
return False, child_test
def comment_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, CommentTest), parent_test
assert isinstance(child_test, CommentTest), child_test
return True, False
# Used to make a decision tree. Either the test passes or it fails.
# TODO: something more sophisticated? For example, if there are a
# large number of element tag tests then sort the tags and start
# in the middle. Should give O(log(number of tags)) performance
# instead of O(n). However, for now, n is no more than 10 or so.
class Branch(object):
def __init__(self, test, if_true, if_false):
self.test = test
self.if_true = if_true
self.if_false = if_false
class StateTable(object):
def __init__(self, match_type, nfa, nfa_node_ids):
self.match_type = match_type # 'None' for the start node
self.nfa = nfa
self.nfa_node_ids = nfa_node_ids
self.node_tree = Branch(None, set(), set())
self.attr_tree = Branch(None, set(), set())
self.pi_tree = Branch(None, set(), set())
self.comment_tree = Branch(None, set(), set())
def add(self, test, to_node_id):
if isinstance(test, BaseNodeTest):
self._add(self.node_tree, test, to_node_id, node_intersect)
elif isinstance(test, AttributeTest):
self._add(self.attr_tree, test, to_node_id, attr_intersect)
elif isinstance(test, ProcessingInstructionTest):
self._add(self.pi_tree, test, to_node_id, pi_intersect)
elif isinstance(test, CommentTest):
self._add(self.comment_tree, test, to_node_id, comment_intersect)
else:
raise AssertionError(test)
def _add(self, tree, test, to_node_id, intersect):
new_true_test, new_false_test = intersect(tree.test, test)
if new_true_test == True:
self._add_to_leaves(tree.if_true, to_node_id)
elif new_true_test:
if isinstance(tree.if_true, set):
new_branch = Branch(new_true_test,
tree.if_true | set([to_node_id]),
tree.if_true)
tree.if_true = new_branch
else:
self._add(tree.if_true, new_true_test, to_node_id, intersect)
if new_false_test == True:
self._add_to_leaves(tree.if_false, to_node_id)
elif new_false_test:
if isinstance(tree.if_false, set):
new_branch = Branch(new_false_test,
tree.if_false | set([to_node_id]),
tree.if_false)
tree.if_false = new_branch
else:
self._add(tree.if_false, new_false_test, to_node_id, intersect)
def _add_to_leaves(self, tree, to_node_id):
if isinstance(tree, set):
tree.add(to_node_id)
else:
self._add_to_leaves(tree.if_true, to_node_id)
self._add_to_leaves(tree.if_false, to_node_id)
def get_final_nodes(self):
result = {}
for match_type, tree in ( (BaseNodeTest.match_type, self.node_tree),
(AttributeTest.match_type, self.attr_tree),
(ProcessingInstructionTest.match_type, self.pi_tree),
(CommentTest.match_type, self.comment_tree) ):
visit = [tree]
while visit:
node = visit.pop()
if isinstance(node, set):
if node:
result[frozenset(node)] = match_type
elif node is not None:
visit.append(node.if_true)
visit.append(node.if_false)
return result.items()
def dump(self, numbering):
# Do I report anything for having reached here?
if list(self.nfa_node_ids) != [None]:
for nfa_node in self.nfa_node_ids:
labels = [x[0] for x in self.nfa.labeled_handlers[nfa_node]]
if labels:
print "Report", self.nfa.match_types[nfa_node], labels
for (name, tree) in ( ("NODE", self.node_tree),
("ATTR", self.attr_tree),
("PROCESSING-INSTRUCTION", self.pi_tree),
("COMMENT", self.comment_tree) ):
if tree is None:
print " No", name, "tree"
else:
print name, "tree:"
# The first branch is always true
self._dump(tree.if_true, 0, numbering)
def _dump(self, tree, depth, numbering):
s = "-"*depth
if isinstance(tree, set):
if tree:
k = sorted(tree)
print s, "<>", numbering[frozenset(tree)], k
else:
print s, "<> (empty)"
else:
print s, tree.test, "?"
self._dump(tree.if_true, depth+1, numbering)
self._dump(tree.if_false, depth+1, numbering)
def all_transitions(nfa, current_dfa):
transitions = []
for node_id in current_dfa:
if node_id is None:
new_transitions = nfa.start_edges
else:
# XXX I can't transition from something
# which wasn't a node or a record
match_type = nfa.match_types[node_id]
if match_type not in ("node", "record"):
continue
new_transitions = nfa.edges[node_id]
transitions.extend(new_transitions)
return transitions
def transition(nfa_state, event):
    for edge in nfa_state.edges:
if edge[0] == event:
yield edge[1]
# Raymond's code
def nfa_to_dfa(nfa):
numbering = {} # from frozenset -> 0, 1, 2, ...
dfa_start = frozenset([None]) # nfa start node
result = {} # []
seen = set([dfa_start])
todo = [(dfa_start, None)]
while todo:
current_dfa, match_type = todo.pop()
#print "All transitions from", current_dfa
transitions = all_transitions(nfa, current_dfa)
if not transitions:
# Make sure there's always a target.
# This also stores any handler events
result[current_dfa] = StateTable(match_type, nfa, current_dfa)
numbering[current_dfa] = len(numbering)
continue
# This adds element, attribute, comment, etc. transitions
state_table = StateTable(match_type, nfa, current_dfa)
for to_node_id, test in transitions:
state_table.add(test, to_node_id)
for nfa_nodes, match_type in state_table.get_final_nodes():
some_dfa = frozenset(nfa_nodes)
if some_dfa not in seen:
seen.add(some_dfa)
todo.append( (some_dfa, match_type) )
result[current_dfa] = state_table
numbering[current_dfa] = len(numbering)
# for k, table in sorted(result.items(), key=lambda x:sorted(x[0])):
# print "State", sorted(k)
# table.dump()
return result, numbering
def die(expr):
import inspect
print " == FAILURE =="
print type(expr)
print dir(expr)
for k, v in inspect.getmembers(expr):
if k.startswith("__") and k.endswith("__"):
continue
print repr(k), repr(v)
raise AssertionError(expr)
def build_states(nfa, dfa, numbering):
# unique node numbers
states = []
for dfa_id, node_ids in sorted( (dfa_id, node_ids)
for (node_ids, dfa_id) in numbering.items() ):
assert dfa_id == len(states)
if dfa_id == 0:
assert node_ids == set([None])
# handlers (which are in (id, class) pairs)
table = dfa[node_ids]
if dfa_id == 0:
handlers = ()
else:
handler_map = {}
for node_id in node_ids:
for (label, handler) in nfa.labeled_handlers[node_id]:
handler_map[label] = handler
            # These are PushtreeHandler instances. I could find the
            # actual instances I need, except that startElement and
            # endElement use different methods.
handlers = []
for (label, handler) in sorted(handler_map.items()):
handlers.append(handler)
# node tree
tree = table.node_tree.if_true
if isinstance(tree, set):
# Special case when there are no decisions to make
if not tree:
node_ops = [] # ... because there are no states
else:
                node_ops = [(None, None, None, -numbering[frozenset(tree)], 0)]
else:
node_ops = [tree]
todo = [0]
while todo:
i = todo.pop()
#print "Access", i, len(node_ops)
tree = node_ops[i]
if isinstance(tree.if_true, set):
if tree.if_true:
if_true = -numbering[frozenset(tree.if_true)]
else:
if_true = 0
else:
if_true = len(node_ops)
node_ops.append(tree.if_true)
if isinstance(tree.if_false, set):
if tree.if_false:
if_false = -numbering[frozenset(tree.if_false)]
else:
if_false = 0
else:
if_false = len(node_ops)
node_ops.append(tree.if_false)
namespace, localname = tree.test.name
node_ops[i] = (namespace, localname, None, if_true, if_false)
#print "Added", node_ops[i]
if if_false > 0:
todo.append(if_false)
if if_true > 0:
todo.append(if_true)
node_ops = tuple(node_ops)
# attr tree
attr_ops = []
tree = table.attr_tree.if_true
while not isinstance(tree, set):
namespace, localname = tree.test.name
attr_ops.append( (namespace, localname, numbering[frozenset(tree.if_true)]) )
tree = tree.if_false
if tree:
# Match any attribute
attr_ops.append( (None, None, numbering[frozenset(tree)]) )
attr_ops = tuple(attr_ops)
# processing instruction tree
pi_ops = []
tree = table.pi_tree.if_true
while not isinstance(tree, set):
target = tree.test.target
pi_ops.append( (target, numbering[frozenset(tree.if_true)]) )
tree = tree.if_false
if tree:
pi_ops.append( (None, numbering[frozenset(tree)]) )
pi_ops = tuple(pi_ops)
# comment tree
tree = table.comment_tree.if_true
assert isinstance(tree, set)
if tree:
comment_state = numbering[frozenset(tree)]
else:
comment_state = 0
states.append( (handlers, node_ops, attr_ops, pi_ops, comment_state) )
return tuple(states)
class Expression(object):
def __init__(self, id, xpath, nfa):
self.id = id
self.xpath = xpath
self._nfa = nfa
def nfas_to_machine_states(nfas):
union_nfa = nfas[0].copy() # XXX start with empty and union everything?
for nfa in nfas[1:]:
union_nfa.union(nfa)
dfa, numbering = nfa_to_dfa(union_nfa)
return build_states(union_nfa, dfa, numbering)
class PushtreeManager(object):
def __init__(self, subtree_xpath, subtree_handler = None, namespaces = None):
if namespaces is None:
namespaces = {}
self.namespaces = namespaces
self.expressions = []
self._add(subtree_xpath, subtree_handler)
def _add(self, xpath, xpath_handler):
nfa = to_nfa(xpath_parser.parse(xpath), self.namespaces)
i = len(self.expressions)
nfa.add_handler((i, xpath_handler))
exp = Expression(i, xpath, nfa)
self.expressions.append(exp)
return exp
def add(self, xpath, xpath_handler=None):
return self._add(xpath, xpath_handler)
def _build_machine_states(self):
return nfas_to_machine_states([x._nfa for x in self.expressions])
def build_pushtree_handler(self):
return RuleMachineHandler(self._build_machine_states())
# Special handler object to bridge with pushbind support in the builder
# Implemented by beazley. Note: This is not a proper SAX handler
class RuleMachineHandler(object):
def __init__(self, machine_states):
self.machine_states = machine_states
def startDocument(self,node):
self.stack = [0]
#dump_machine_states(self.machine_states)
def startElementNS(self, node, name, qname, attrs):
state = self.stack[-1]
#print "startElementNS", name, qname, attrs, "state", state
if state == -1:
#print "goto -1"
self.stack.append(-1)
return
element_ops = self.machine_states[state][1]
if not element_ops:
# This was a valid target, but there's nothing leading off from it
#print "GOTO -1"
self.stack.append(-1)
return
namespace, localname = name
i = 0
while 1:
ns, ln, test_function, if_true, if_false = element_ops[i]
assert test_function is None
if ((ns is None or ns == namespace) and
(ln is None or ln == localname)):
i = if_true
else:
i = if_false
if i == 0:
# dead-end; no longer part of the DFA and the
# 0 node is defined to have no attributes
self.stack.append(-1)
return
if i < 0:
next_state = -i
break
# otherwise, loop
#print "GoTo", next_state
self.stack.append(next_state)
handlers = self.machine_states[next_state][0]
for handler in handlers:
handler.startElementMatch(node)
# Also handle any attributes
attr_ops = self.machine_states[next_state][2]
if not attr_ops:
return
for namespace, localname in attrs.keys():
for (ns, ln, attr_state_id) in attr_ops:
#print "attr test:", (ns, ln), (namespace, localname)
if ((ns is None or namespace == ns) and
(ln is None or localname == ln)):
# Match!
handlers = self.machine_states[attr_state_id][0]
for handler in handlers:
#print "Notify attribute match:", event_ids, (namespace, localname)
# This is a hack until I can figure out how to get
# the attribute node
handler.attributeMatch( (node, (namespace, localname) ) )
def endElementNS(self, node, name, qname):
#print "endElementNS", node, name, qname
last_state = self.stack.pop()
if last_state == -1:
return
handlers = self.machine_states[last_state][0]
for handler in reversed(handlers):
handler.endElementMatch(node)
def processingInstruction(self, node, target, data):
state = self.stack[-1]
if state == -1:
return
pi_ops = self.machine_states[state][3]
for (pi_target, pi_state) in pi_ops:
if pi_target == target:
handlers = self.machine_states[pi_state][0]
for handler in handlers:
handler.processingInstruction(node)
# For Dave
class RulePatternHandler(RuleMachineHandler):
def __init__(self, pattern, end_node_handler, attr_handler, namespaces=None):
        self.xpm = xpm = ExpressionManager(namespaces=namespaces)
xpm.add(pattern)
nfa, dfa, numbering = xpm.build_dfa_tables()
machine_states = build_instructions(nfa,dfa,numbering)
RuleMachineHandler.__init__(self, machine_states,
end_node_handler = end_node_handler,
attr_handler = attr_handler)
def dump_machine_states(machine_states):
for i, x in enumerate(machine_states):
print "== INFO FOR", i, "=="
handlers, node_ops, attr_ops, pi_ops, comment_state = x
print " HANDLERS", handlers
print " NODE OPS"
for node_op in node_ops:
print node_op
print " ATTR OPS"
for attr_op in attr_ops:
print attr_op
print " PI OPS"
for pi_op in pi_ops:
print pi_op
print " COMMENT STATE =", comment_state
class PushtreeHandler(object):
def startSubtree(self, element):
pass
def endSubtree(self, element):
pass
def startElementMatch(self, node):
pass
def endElementMatch(self, node):
pass
def attributeMatch(self, node):
pass
def commentMatch(self, node):
pass
def processingInstructionMatch(self, node):
pass
class VerbosePushtreeHandler(PushtreeHandler):
def __init__(self, prefix=None):
if prefix is None:
prefix = ""
else:
prefix = "(%s) " % (prefix,)
self.prefix = prefix
def startSubtree(self, element):
print self.prefix+"startSubtree", element
def endSubtree(self, element):
print self.prefix+"endSubtree", element
def startElementMatch(self, node):
print self.prefix+"startElementMatch", node
def endElementMatch(self, node):
print self.prefix+"endElementMatch", node
def attributeMatch(self, node):
print self.prefix+"attributeMatch", node
def commentMatch(self, node):
print self.prefix+"commentMatch", node
def processingInstructionMatch(self, node):
print self.prefix+"processingInstructionMatch", node
if __name__ == '__main__':
testxml = """\
<body>
<li>Ignore me<b/></li>
<ul>
<li x='1'>This <i>is</i> test</li>
<li x='2'><a href='spam'>that</a> was nothing</li>
</ul>
</body>
"""
manager = PushtreeManager("body/ul/li", VerbosePushtreeHandler("main"))
manager.expressions[0]._nfa.dump()
manager.add("pre/post", VerbosePushtreeHandler("pre/post"))
manager.expressions[1]._nfa.dump()
manager.add("//a", VerbosePushtreeHandler("//a"))
manager.expressions[2]._nfa.dump()
manager.add("@x", VerbosePushtreeHandler("@x"))
manager.expressions[3]._nfa.dump()
manager.add("a", VerbosePushtreeHandler("a"))
manager.expressions[4]._nfa.dump()
#manager.add(".//*")
machine_states = manager._build_machine_states()
dump_machine_states(machine_states)
hand = RuleMachineHandler(machine_states)
import os
doc = amara.parse(testxml,rule_handler=hand)
os._exit(0)
| 2.5625 | 3 |
test/unit/agent/pipelines/syslog.py | empiricompany/nginx-amplify-agent | 1 | 12796703 | # -*- coding: utf-8 -*-
import time
import logging
from logging.handlers import SysLogHandler
from hamcrest import *
from amplify.agent.pipelines.syslog import SyslogTail, SYSLOG_ADDRESSES, AmplifyAddresssAlreadyInUse
from test.base import BaseTestCase, disabled_test
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class SyslogTailTestCase(BaseTestCase):
def setup_method(self, method):
super(SyslogTailTestCase, self).setup_method(method)
self.tail = SyslogTail(address=('localhost', 514), interval=0.1)
# Set up python logger
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
self.handler = SysLogHandler(address=('localhost', 514))
self.handler.setFormatter(logging.Formatter(' amplify: %(message)s'))
self.logger.addHandler(self.handler)
def teardown_method(self, method):
# Revert logger stuff
self.handler.close()
self.handler = None
self.logger = None
# Kill the SyslogTail
self.tail.stop()
self.tail = None
def test_overall(self):
time.sleep(0.1) # Release GIL so async listener can "hear" the DGRAMs
count = 1
while count <= 5:
self.logger.debug('This is message #%s' % count)
count += 1
time.sleep(0.1) # Release GIL so async listener can handle DGRAMs
# Check to see that SyslogListener read 5 messages
assert_that(self.tail.cache, has_length(count-1))
# Check the cache directly to make sure messages were decoded.
for i in range(5):
assert_that(self.tail.cache[i], equal_to(u'This is message #%s\x00' % (i+1)))
# Go through and check the messages via iteration
count = 1
for line in self.tail:
assert_that(line, equal_to(u'This is message #%s\x00' % count))
count += 1
# Check that cache was cleared after iteration
assert_that(self.tail.cache, has_length(0))
# TODO: test_overall doesn't work if there are other tests run with it...why?
# The tests below pass, but will cause test_overall to fail if run...so skipped for now.
@disabled_test
def test_addresses(self):
assert_that(('localhost', 514), is_in(SYSLOG_ADDRESSES))
@disabled_test
def test_socket_conflict(self):
assert_that(
calling(SyslogTail).with_args(address=('localhost', 514)),
raises(AmplifyAddresssAlreadyInUse)
)
| 1.960938 | 2 |
tests/test_sdf_gradient_field_wrt_twist.py | Algomorph/LevelSetFusion-Python | 8 | 12796704 | <gh_stars>1-10
# import unittest
from unittest import TestCase
import numpy as np
from rigid_opt.sdf_gradient_field import calculate_gradient_wrt_twist
from math_utils.transformation import twist_vector_to_matrix2d
def sdf_gradient_wrt_to_twist(live_field, y_field, x_field, twist_vector, offset, voxel_size):
sdf_gradient_wrt_to_voxel = np.zeros((1, 2))
if y_field - 1 < 0:
post_sdf = live_field[y_field + 1, x_field]
if post_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 1] = 0
else:
sdf_gradient_wrt_to_voxel[0, 1] = post_sdf - live_field[y_field, x_field]
elif y_field + 1 > live_field.shape[0] - 1:
pre_sdf = live_field[y_field - 1, x_field]
if pre_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 1] = 0
else:
sdf_gradient_wrt_to_voxel[0, 1] = live_field[y_field, x_field] - pre_sdf
else:
pre_sdf = live_field[y_field - 1, x_field]
post_sdf = live_field[y_field + 1, x_field]
if (post_sdf < -1) or (pre_sdf < -1):
sdf_gradient_wrt_to_voxel[0, 1] = 0
else:
sdf_gradient_wrt_to_voxel[0, 1] = (post_sdf - pre_sdf) / 2
if x_field - 1 < 0:
post_sdf = live_field[y_field, x_field + 1]
if post_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 0] = 0
else:
sdf_gradient_wrt_to_voxel[0, 0] = post_sdf - live_field[y_field, x_field]
elif x_field + 1 > live_field.shape[1] - 1:
pre_sdf = live_field[y_field, x_field - 1]
if pre_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 0] = 0
else:
sdf_gradient_wrt_to_voxel[0, 0] = live_field[y_field, x_field] - pre_sdf
else:
pre_sdf = live_field[y_field, x_field - 1]
post_sdf = live_field[y_field, x_field + 1]
if (post_sdf < -1) or (pre_sdf < -1):
sdf_gradient_wrt_to_voxel[0, 0] = 0
else:
sdf_gradient_wrt_to_voxel[0, 0] = (post_sdf - pre_sdf) / 2
x_voxel = (x_field + offset[0])*voxel_size
z_voxel = (y_field + offset[2])*voxel_size
point = np.array([[x_voxel, z_voxel, 1.]], dtype=np.float32).T
twist_matrix_homo_inv = twist_vector_to_matrix2d(-twist_vector)
trans = np.dot(twist_matrix_homo_inv, point)
voxel_gradient_wrt_to_twist = np.array([[1, 0, trans[1]],
[0, 1, -trans[0]]])
return np.dot(sdf_gradient_wrt_to_voxel/voxel_size, voxel_gradient_wrt_to_twist).reshape((1, -1))
class MyTestCase(TestCase):
def test_sdf_gradient_wrt_twist01(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[0.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist02(self):
live_field = np.array([[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]])
twist_vector = np.array([[0.],
[0.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist03(self):
live_field = np.array([[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]])
twist_vector = np.array([[0.],
[0.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 2
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist04(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[1.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist05(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[-1.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist06(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[0.],
[.5]])
offset = np.array([-1, -1, 1])
voxel_size = 0.5
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
| 2.171875 | 2 |
surveytoolbox/PointStore.py | JayArghArgh/survey-toolbox | 0 | 12796705 | <gh_stars>0
class NewPointStore:
# TODO implement local database storage options.
def __init__(self):
self.point_store = {}
self.number_points = 0
def set_new_point(self, point):
point_name = point.get_point_name()
self.point_store[point_name] = point
return True
def get_point_store(self):
return self.point_store
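
# Example usage (illustrative sketch; `point` stands for any object exposing a
# get_point_name() method, which is not defined in this module):
#
#     store = NewPointStore()
#     store.set_new_point(point)
#     all_points = store.get_point_store()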
| 2.6875 | 3 |
Ex_52.py | soldierloko/Curso-em-Video | 0 | 12796706 | # Write a program that reads an integer and says whether or not it is a prime number.
num = int(input('Enter a number: '))
tot = 0
for i in range(1, num + 1):
    if num % i == 0:
        print('\033[33m', end='')
        tot += 1
    else:
        print('\033[31m', end='')
    print('{} '.format(i), end='')
print('\n\033[mThe number {} was divisible {} times'.format(num, tot))
if tot == 2:
    print('And that is why it IS PRIME!')
else:
    print('And that is why it is NOT PRIME!') | 3.96875 | 4 |
shoutout/models.py | samarv/travo | 0 | 12796707 | <reponame>samarv/travo
from django.db import models
class organization(models.Model):
def __str__(self):
return self.name
name = models.CharField(max_length=50, unique=True)
slack_org_id = models.CharField(max_length=50, unique=True)
channel_name = models.CharField(max_length=50)
channel_id = models.CharField(max_length=50)
access_token = models.CharField(max_length=200)
installation_date = models.DateTimeField(
'installation date', auto_now_add=True)
bot_access_token = models.CharField(max_length=200)
class user(models.Model):
def __str__(self):
return self.name
org_id = models.ForeignKey(organization, on_delete=models.CASCADE)
slack_mem_id = models.CharField(max_length=50, unique=True)
email = models.CharField(max_length=50, unique=True)
name = models.CharField(max_length=50)
avatar = models.CharField(max_length=500)
class shoutout(models.Model):
def __str__(self):
return self.message
giver_id = models.ForeignKey(user, on_delete=models.CASCADE)
receiver_id = models.ForeignKey(
user, on_delete=models.CASCADE, related_name="r_id")
message = models.CharField(max_length=5000)
timestamps = models.DateTimeField('timestamp', auto_now_add=True)
message_ts = models.CharField(max_length=500)
| 2.125 | 2 |
utils.py | emilv/magic-carpet | 0 | 12796708 | import datetime
from enum import Enum
import hitherdither
from PIL import Image
from inky.inky_uc8159 import Inky
WIDTH, HEIGHT = 600, 448
SATURATION = 1.0
start_log = datetime.datetime.now()
last_log = start_log
def log(msg: str) -> None:
global last_log
now = datetime.datetime.now()
diff = (now - last_log).total_seconds()
from_start = (now - start_log).total_seconds()
last_log = now
print(f"[{from_start:5.2f} +{diff:.2f} ]\t{msg}")
class DitheringModes(Enum):
DEFAULT = "default"
SMALL_DOTS = "small_dots"
LARGE_DOTS = "large_dots"
def dithered(
inky: Inky, image: Image, mode: DitheringModes = DitheringModes.DEFAULT
) -> Image:
log("Dithering")
palette = hitherdither.palette.Palette(
inky._palette_blend(SATURATION, dtype="uint24")
)
thresholds = [64, 64, 64] # Threshold for snapping colours, I guess?
if mode == DitheringModes.SMALL_DOTS:
image_dithered = hitherdither.ordered.cluster.cluster_dot_dithering(
image, palette, thresholds, order=4
)
elif mode == DitheringModes.LARGE_DOTS:
image_dithered = hitherdither.ordered.cluster.cluster_dot_dithering(
image, palette, thresholds, order=8
)
else:
image_dithered = hitherdither.ordered.bayer.bayer_dithering(
image, palette, thresholds, order=8
)
log("Done dithering")
return image_dithered
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
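
# Illustrative usage (sketch):
#
#     list(chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]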
| 2.75 | 3 |
artifactory_cleanup/context_managers.py | martinm82/artifactory-cleanup | 0 | 12796709 | <gh_stars>0
from contextlib import contextmanager
from teamcity import is_running_under_teamcity
from teamcity.messages import TeamcityServiceMessages
@contextmanager
def block(name):
"""
As TC.block use "name" as a parameter, contextlib.nullcontext() can not be
used directly
"""
yield
@contextmanager
def test(testName):
"""
As TC.test use "testName" as a parameter, contextlib.suppress() can not be
used directly
"""
yield
def get_context_managers():
if is_running_under_teamcity():
TC = TeamcityServiceMessages()
ctx_mgr_block = TC.block
ctx_mgr_test = TC.test
else:
ctx_mgr_block = block
ctx_mgr_test = test
return ctx_mgr_block, ctx_mgr_test
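
# Illustrative usage (sketch; block/test names are made up):
#
#     ctx_mgr_block, ctx_mgr_test = get_context_managers()
#     with ctx_mgr_block("cleanup"):
#         with ctx_mgr_test("remove-old-artifacts"):
#             pass  # work here is reported as TeamCity blocks/tests under TC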
| 2.1875 | 2 |
tools/doc2md.py | wiltonlazary/Nidium | 1,223 | 12796710 | #!/usr/bin/env python2.7
import json
from pprint import pprint
import os
import sys
import re
import dokumentor
import subprocess
def parseParam(arg, indent=0, isReturn=False):
out = ""
if isReturn:
out += "Returns (%s): %s\n" % (parseParamsType(arg["typed"]), arg["description"])
else:
out += "%s* `%s` (%s): %s\n" % (' ' * indent, arg["name"], parseParamsType(arg["typed"]), arg["description"])
if "params" in arg:
# Callback function
for subArg in arg["params"]:
out += parseParam(subArg, indent + 4)
elif type(arg["typed"][0]) is dict:
# Object
for subArg in arg["typed"][0]["details"]:
out += parseParam(subArg, 0 if isReturn else indent + 4)
elif type(arg["typed"][0]) is list:
# Array of Object
for subArg in arg["typed"][0][0]["details"]:
out += parseParam(subArg, 0 if isReturn else indent + 4)
return out
def parseParamsType(types):
out = ""
comma = ""
for t in types:
out += comma
if type(t) is list:
out += "Object[]"
elif type(t) is dict:
out += "Object"
else:
if t[0] == "[":
out += t[1:-1].capitalize() + "[]"
else:
if t == "null":
out += t
else:
out += t if t[0].isupper() else t.capitalize()
comma = " | "
return out
def parseMethod(method, isEvent=False):
out = ""
if isEvent:
out += "\n## Event: %s\n" % (re.sub("[A-Za-z_0-9]+\.", "", method["name"]))
else:
fnArgs = ""
if len(method["params"]) > 0:
comma = ""
for arg in method["params"]:
name = comma + arg["name"]
if arg["default"] != "None":
name += "=%s" % arg["default"]
if arg["is_optional"]:
name = "[%s]" % name
fnArgs += name
comma = ", "
out += "\n## %s%s%s(%s)\n" % ("`static `" if method["is_static"] else "",
"new " if method["is_constructor"] else "",
method["name"],
fnArgs)
if method["is_slow"]:
out += "<!-- YAML\n- Slow method\n-->\n"
out += method["description"] + "\n"
if len(method["params"]) > 0:
out += "\nParams:\n"
for arg in method["params"]:
out += parseParam(arg)
if method["returns"] and not method["is_constructor"]:
if method["returns"]["nullable"]:
method["returns"]["typed"].append("null")
tmp = parseParam(method["returns"], isReturn=True)
if tmp:
out += "\n" + tmp
out += parseSeeAlso(method["sees"])
return out
def parseProperty(prop):
out = ""
out += "\n## %s%s%s%s (%s)\n" % ("`static` " if prop["is_static"] else "",
"`readonly` " if prop["is_readonly"] else "",
prop["name"],
"=" + prop["default"] if prop["default"] != "None" else "",
parseParamsType(prop["typed"]))
out += prop["description"] + "\n"
out += parseExample(prop["examples"])
out += parseSeeAlso(prop["sees"])
return out
def parseSeeAlso(seeAlso):
return ""
"""
out = ""
if len(seeAlso) > 0:
out += "\nSee also:\n"
for see in seeAlso:
out += "* `%s`\n" % (see["data"])
return out
"""
def parseExample(examples):
out = ""
if len(examples) > 0:
out += "\n"
for ex in examples:
out += "\n```%s\n%s\n```\n" % (ex["language"], ex["data"])
return out
def parse(klass, data):
out = ""
out += "# Class: %s" % (klass) + "\n"
item = data["base"][klass]
out += item["description"]
out += parseExample(item["examples"])
out += parseSeeAlso(item["sees"])
if data["constructors"]:
out += parseMethod(data["constructors"][klass])
if data["methods"]:
for name, method in data["methods"].iteritems():
out += parseMethod(method)
if data["static_methods"]:
for name, method in data["static_methods"].iteritems():
out += parseMethod(method)
if data["properties"]:
for name, prop in data["properties"].iteritems():
out += parseProperty(prop)
if data["events"]:
for evName, ev in data["events"].iteritems():
out += parseMethod(ev, isEvent=True)
return out
print("Running dokumentor")
class captureDokumentor:
def __init__(self):
self.data = ""
def write(self, msg):
self.data += msg
def flush(self=None):
pass
sys.stdout = captureDokumentor()
dokumentor.process("../docs/")
docs = sys.modules['DOCC'].DOC
dokumentor.report("json", docs)
data = json.loads(sys.stdout.data)
sys.stdout = sys.__stdout__
hierarchy = {}
for section, items in data["_sections"].iteritems():
if section not in data:
data[section] = {"base": { section: {"description": "", "sees":[], "examples": {}}}, "constructors": {}, "methods": [], "properties": [], "events":[], "static_methods": []}
hierarchy[section] = {"data": parse(section, data[section])}
hierarchy[section]["children"] = {}
for klass in items:
hierarchy[section]["children"][klass] = parse(klass, data[klass])
path = "../docs/en/api/"
try:
os.mkdir(path)
except:
pass
for directory in hierarchy:
if len(hierarchy[directory]["children"]) > 1:
subPath = path + directory + "/"
try:
os.mkdir(subPath)
except:
pass
print("Writing %s" % subPath + directory + ".md")
with open(subPath + directory + ".md", "w") as f:
f.write(hierarchy[directory]["data"])
for child in hierarchy[directory]["children"]:
print(" - Writing %s" % subPath + child + ".md")
with open(subPath + child + ".md", "w") as f:
f.write(hierarchy[directory]["children"][child])
else:
print("Writing %s" % path + directory + ".md")
with open(path + directory + ".md", "w") as f:
f.write(hierarchy[directory]["data"])
| 2.859375 | 3 |
gokart_pipeliner/pipeliner.py | vaaaaanquish/gokart-pipeliner | 8 | 12796711 | <filename>gokart_pipeliner/pipeliner.py<gh_stars>1-10
from typing import List
import logging
import sys
import luigi
import gokart
from gokart_pipeliner.instantiation_task import InstantiationTask
from gokart_pipeliner.enum import TYPING
from gokart_pipeliner.config_manager import ConfigManager
class GokartPipeliner:
def __init__(self,
params: TYPING.PARAMS = dict(),
config_path_list: TYPING.STR_LIST = list()):
self.config = ConfigManager(params, config_path_list)
def run(self,
tasks: List[luigi.task_register.Register],
params: TYPING.PARAMS = dict(),
return_value: bool = False,
verbose: bool = True) -> TYPING.RETURN_VALURE:
if verbose:
logging.disable(0)
else:
logging.disable(sys.maxsize)
luigi.auto_namespace(scope=__name__)
luigi.task_register.Register.disable_instance_cache()
params = self.config.make_running_params(params)
task = InstantiationTask.run(tasks, params=params)
luigi.build([task], local_scheduler=True)
luigi.task_register.Register.clear_instance_cache()
if return_value:
output = task.output()
if type(output) == list:
return [x.load() for x in output]
return output.load()
return None
def print_dependence_tree(self,
tasks: List[luigi.task_register.Register],
params: TYPING.PARAMS = dict()):
params = self.config.make_running_params(params)
task = InstantiationTask.run(tasks, params=params)
print('//-----[dependence_tree]------')
print(gokart.info.make_tree_info(task))
print('//----------------------------')
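
# Illustrative usage (sketch; `TaskA` is a hypothetical gokart.TaskOnKart
# subclass, not part of this package):
#
#     pipeliner = GokartPipeliner(params={'TaskA': {'param1': 1}})
#     pipeliner.print_dependence_tree([TaskA])
#     output = pipeliner.run([TaskA], return_value=True)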
| 2.234375 | 2 |
apps/combineCSVElectrochem.py | ryanpdwyer/pchem | 0 | 12796712 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
import io
import base64
from util import process_file
def limit_x_values(data, x_column, settings):
st.markdown("### Limit x Range")
x_min = st.number_input("Choose minimum x:", value=min([min(df[x_column].values) for df in data]))
x_max = st.number_input("Choose maximum x:", value=max([max(df[x_column].values) for df in data]))
settings['x_min'] = x_min
settings['x_max'] = x_max
data_out = []
for df in data:
mask = (df[x_column].values > x_min) * (df[x_column].values < x_max)
data_out.append(df[mask])
return data_out, settings
scales = {'A': 1, 'mA': 1e3, 'µA': 1e6}
def scale_current(data, y_column, settings):
st.markdown("### Scale Current")
scale = st.selectbox("Scale:", list(scales.keys()), index=1)
settings['y_scale'] = scale
data_out = []
for df in data:
df2 = df.copy()
df2[y_column] = df2[y_column] * scales[scale]
data_out.append(df2)
return data_out, settings
# def process_data(data, y_column, settings):
# st.markdown("### Rescale y-axis")
# st.selectbox("Choose y-axis scale:", value=[0, 3, 6, 9], format_func=
def run():
df = None
cols = None
x_column = y_column = None
combined_data = None
processing="None"
if 'ever_submitted' not in st.session_state:
st.session_state.ever_submitted = False
settings = {"processing": "None"}
st.markdown("""## Combine CSV Electrochemistry files
This helper will combine multiple CSV files (or Excel spreadsheets)
for easy plotting.
""")
files = st.file_uploader("Upload CSV or Excel Files",
accept_multiple_files=True)
if files:
st.write(files)
filenames = [(i, f.name) for i, f in enumerate(files)]
data = [process_file(f) for f in files]
ind_fname = st.selectbox("Choose data to display: ", filenames,
format_func=lambda x: x[1], index=0)
st.write("""## Labels
Use the boxes below to change the labels for each line that will go on the graph.
""")
labels = [st.text_input(f"{filename[0]}. {filename[1]}", value=filename[1]) for filename in filenames]
if ind_fname:
df = data[ind_fname[0]]
cols = list(df.columns)
st.write("## Choose columns")
with st.form("column_chooser_and_run"):
x_column = st.selectbox("Choose the x column: ", cols)
y_column = st.selectbox("Choose y column: ", cols, index=len(cols)-1)
submitted = st.form_submit_button()
st.session_state.ever_submitted = submitted | st.session_state.ever_submitted
use_plotly = st.checkbox("Use plotly?", value=False)
if data is not None:
data, settings = limit_x_values(data, x_column, settings)
data, settings = scale_current(data, y_column, settings)
# data, settings = normalize_data(data, x_column, settings)
# x_data = combined_data[x_column].values
# Plotting
if use_plotly:
fig = go.Figure()
else:
fig, ax = plt.subplots()
for df, fname, label in zip(data, filenames, labels):
if use_plotly:
fig.add_trace(go.Line(x=df[x_column], y=df[y_column], name=str(fname[0])+"-"+label))
else:
ax.plot(df[x_column].values, df[y_column].values, label=str(fname[0])+"-"+label)
y_label_default = f"{y_column} ({settings['y_scale']})"
st.markdown("### Plotting options")
x_label = st.text_input("x-axis label: ", value=x_column)
y_label = st.text_input('y-axis label: ', value=y_label_default)
grid = st.checkbox("Grid?", value=False)
if grid and not use_plotly:
ax.grid()
if use_plotly:
fig.update_layout(xaxis_title=x_label, yaxis_title=y_label)
st.plotly_chart(fig)
else:
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend()
st.pyplot(fig)
# # Saving
# st.markdown("### Output options")
# st.write(combined_data)
# filename = st.text_input("Filename:", value="data")
# write_excel(combined_data, filename)
if __name__ == "__main__":
run()
| 2.90625 | 3 |
4_Recursion/recursive_palindrome.py | AnthonyRChao/Problem-Solving-With-Algorithms-And-Data-Structures | 6 | 12796713 | """
Write a function that takes a string as a parameter and returns True if the
string is a palindrome, False otherwise.
"""
from string import ascii_lowercase
def palindrome(mystr):
mystr = mystr.lower()
mystr_char_list = []
for char in mystr:
if char in ascii_lowercase:
mystr_char_list.append(char)
mystr_chars = ''.join(mystr_char_list)
if len(mystr_chars) == 0 or len(mystr_chars) == 1:
return True
else:
if mystr_chars[0] == mystr_chars[-1]:
return palindrome(mystr_chars[1:len(mystr_chars) - 1])
else:
return False
def main():
print(palindrome("racecar"))
print(palindrome("kayak"))
print(palindrome("Live not on evil"))
print(palindrome("aibohphobia"))
print(palindrome("Reviled did I live, said I, as evil I did deliver"))
print(palindrome("Go hang a salami; I'm a lasagna hog."))
print(palindrome("Able was I ere I saw Elba"))
print(palindrome("Kanakanak"))
print(palindrome("Wassamassaw"))
if __name__ == "__main__":
main()
| 3.96875 | 4 |
consensus/entities/member.py | flaudanum/consensus | 0 | 12796714 | <gh_stars>0
from typing import Sequence, Optional
from consensus.entities.alternative import Alternative
from consensus.entities.ranking import Ranking
class Member:
@property
def name(self):
return self._name
@property
def ranking(self) -> Ranking:
return self._ranking
def __init__(self, name: str):
self._name = name
self._ranking: Optional[Ranking] = None
def make_ranking(self, alternatives: Sequence[Alternative], ranking: Sequence[Sequence[str]], intensities=()):
self._ranking = Ranking(alternatives, ranking, intensities)
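
# Illustrative usage (sketch; the Alternative objects and the nested-list
# ranking format are assumed from the type hints above):
#
#     member = Member("Alice")
#     member.make_ranking(alternatives, [["A"], ["B", "C"]])
#     print(member.ranking)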
| 2.75 | 3 |
pipescaler/processors/threshold_processor.py | KarlTDebiec/PipeScaler | 1 | 12796715 | <reponame>KarlTDebiec/PipeScaler<gh_stars>1-10
#!/usr/bin/env python
# pipescaler/processors/threshold_processor.py
#
# Copyright (C) 2020-2021 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
from __future__ import annotations
from argparse import ArgumentParser
from inspect import cleandoc
from typing import Any, no_type_check
import numba as nb
import numpy as np
from PIL import Image
from pipescaler.core import Processor, validate_image
class ThresholdProcessor(Processor):
"""Converts image to black and white using threshold, optionally denoising."""
def __init__(
self, threshold: int = 128, denoise: bool = False, **kwargs: Any
) -> None:
super().__init__(**kwargs)
# Store configuration
self.threshold = threshold
self.denoise = denoise
def process_file(self, infile: str, outfile: str) -> None:
# Read image
input_image = validate_image(infile, "L")
# Process image
output_image = input_image.point(lambda p: p > self.threshold and 255)
if self.denoise:
output_data = np.array(output_image)
self.denoise_data(output_data)
output_image = Image.fromarray(output_data)
output_image = output_image.convert("L")
# Write image
output_image.save(outfile)
@classmethod
def construct_argparser(cls, **kwargs: Any) -> ArgumentParser:
"""
Constructs argument parser.
Args:
kwargs (Any): Additional keyword arguments
Returns:
parser (ArgumentParser): Argument parser
"""
description = kwargs.pop("description", cleandoc(cls.__doc__))
parser = super().construct_argparser(description=description, **kwargs)
parser.add_argument(
"--threshold",
default=128,
type=int,
help="threshold differentiating black and white (0-255, default: "
"%(default)s)",
)
parser.add_argument(
"--denoise",
default=False,
type=bool,
help="Flip color of pixels bordered by less than 5 pixels of "
"the same color",
)
return parser
@no_type_check
@staticmethod
@nb.jit(nopython=True, nogil=True, cache=True, fastmath=True)
def denoise_data(data: np.ndarray) -> None:
for x in range(1, data.shape[1] - 1):
for y in range(1, data.shape[0] - 1):
slc = data[y - 1 : y + 2, x - 1 : x + 2]
if data[y, x] == 0:
if (slc == 0).sum() < 4:
data[y, x] = 255
else:
if (slc == 255).sum() < 4:
data[y, x] = 0
if __name__ == "__main__":
ThresholdProcessor.main()
| 2.78125 | 3 |
example_problems/tutorial/graph_connectivity/bots/certificate_of_connectivity_bot.py | romeorizzi/TAlight | 3 | 12796716 | #!/usr/bin/env python3
from sys import stderr, exit
import sys
import graph_connectivity_lib as gcl
def startAlgo():
numNodes = None
spoon = input().strip()
# Getting graph
while spoon[:len("graph:")] != "graph:":
# Getting number of nodes
if spoon[:len("# number of nodes:")] == "# number of nodes:":
numNodes = spoon.split(':')[1]
numNodes = int("".join(numNodes.split()))
# Getting number archs
if spoon[:len("# number of arcs: ")] == "# number of arcs: ":
m = spoon.split(':')[1]
m = int("".join(m.split()))
spoon = input().strip()
# Creating graph
grafo = gcl.Graph(numNodes)
# Getting arcs
for _ in range(m):
spoon = input().strip()
v, u = spoon.split(' ')
v, u = int(v), int(u)
grafo.add_edge(v, u)
    # Receive instructions from the service
while spoon[:len("# Tell me")] != "# Tell me":
spoon = input().strip()
# Checking spanning tree
input_spTree, not_visited = grafo.spanning_tree()
# Telling sp tree length
print(len(input_spTree))
#printing sp tree
for i in range(len(input_spTree)):
u, v = input_spTree[i]
print(f"{u} {v}")
# Getting response
spoon = input().strip()
while spoon != "#end".strip():
print(spoon)
sys.stderr.write(str(spoon)+ "\n")
spoon = input().strip()
# Main
spoon = input().strip()
while spoon[:len("#start")] != "#start":
spoon = input().strip()
# Reading the graph
startAlgo() | 3.296875 | 3 |
examples/text_classificaiton/functional.py | Zzoay/YaoNLP | 0 | 12796717 | <filename>examples/text_classificaiton/functional.py
from torch import max
from torch.nn.functional import cross_entropy
def compute_acc(logit, y_gt):
predicts = max(logit, 1)[1]
corrects = (predicts.view(y_gt.size()).data == y_gt.data).float().sum()
accuracy = 100.0 * float(corrects/len(y_gt))
return accuracy | 2.671875 | 3 |
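
# Illustrative usage (sketch): `logit` has shape (batch, num_classes) and
# `y_gt` holds integer class labels of shape (batch,):
#
#     acc = compute_acc(model(x), y_gt)  # accuracy in percent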
ml/notebook_examples/functions/main.py | bhjeong-goldenplanet/automl | 146 | 12796718 | <filename>ml/notebook_examples/functions/main.py
import logging
import datetime
import logging
import time
import kfp
import kfp.compiler as compiler
import kfp.dsl as dsl
import requests
# TODO: replace yours
# HOST = 'https://<yours>.pipelines.googleusercontent.com'
HOST = 'https://7c7f7f3e3d11e1d4-dot-us-central2.pipelines.googleusercontent.com'
@dsl.pipeline(
name='Sequential',
description='A pipeline with two sequential steps.'
)
def sequential_pipeline(filename='gs://ml-pipeline-playground/shakespeare1.txt'):
"""A pipeline with two sequential steps."""
op1 = dsl.ContainerOp(
name='filechange',
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "%s" > /tmp/results.txt' % filename],
file_outputs={'newfile': '/tmp/results.txt'})
op2 = dsl.ContainerOp(
name='echo',
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "%s"' % op1.outputs['newfile']]
)
def get_access_token():
url = 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'
r = requests.get(url, headers={'Metadata-Flavor': 'Google'})
r.raise_for_status()
access_token = r.json()['access_token']
return access_token
def hosted_kfp_test(data, context):
logging.info('Event ID: {}'.format(context.event_id))
logging.info('Event type: {}'.format(context.event_type))
logging.info('Data: {}'.format(data))
logging.info('Bucket: {}'.format(data['bucket']))
logging.info('File: {}'.format(data['name']))
file_uri = 'gs://%s/%s' % (data['bucket'], data['name'])
logging.info('Using file uri: %s', file_uri)
logging.info('Metageneration: {}'.format(data['metageneration']))
logging.info('Created: {}'.format(data['timeCreated']))
logging.info('Updated: {}'.format(data['updated']))
token = get_access_token()
logging.info('attempting to launch pipeline run.')
ts = int(datetime.datetime.utcnow().timestamp() * 100000)
client = kfp.Client(host=HOST, existing_token=token)
compiler.Compiler().compile(sequential_pipeline, '/tmp/sequential.tar.gz')
exp = client.create_experiment(name='gcstriggered') # this is a 'get or create' op
res = client.run_pipeline(exp.id, 'sequential_' + str(ts), '/tmp/sequential.tar.gz',
params={'filename': file_uri})
logging.info(res)
| 2.34375 | 2 |
tha2/nn/batch_module/batch_input_module.py | luuil/talking-head-anime-2-demo | 626 | 12796719 | from abc import ABC, abstractmethod
from typing import List
from torch import Tensor
from torch.nn import Module
from tha2.nn.base.module_factory import ModuleFactory
class BatchInputModule(Module, ABC):
def __init__(self):
super().__init__()
@abstractmethod
def forward_from_batch(self, batch: List[Tensor]):
pass
class BatchInputModuleFactory(ModuleFactory):
def __init__(self):
super().__init__()
@abstractmethod
def create(self) -> BatchInputModule:
pass
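
# Minimal illustrative subclass (sketch only; the real modules elsewhere in
# tha2 wrap actual networks rather than this pass-through):
#
#     class PassThrough(BatchInputModule):
#         def forward_from_batch(self, batch):
#             return batch[0]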
| 2.5625 | 3 |
Externals/micromegas_4.3.5/Packages/smodels-v1.1.0patch1/smodels/tools/externalNllFast.py | yuanfangtardis/vscode_project | 0 | 12796720 | <gh_stars>0
#!/usr/bin/env python
"""
.. module:: externalNllFast
:synopsis: Wrapper for all nllfast versions.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import print_function
try:
import commands as executor
except ImportError:
import subprocess as executor
import os
from smodels.tools.externalTool import ExternalTool
from smodels.tools.smodelsLogging import logger
class ExternalNllFast(ExternalTool):
"""
An instance of this class represents the installation of nllfast.
"""
def __init__(self, sqrts, nllfastVersion, testParams, testCondition):
"""
:param sqrts: sqrt of s, in TeV, as an integer,
:param nllfastVersion: version of the nllfast tool
:param testParams: what are the test params we need to run things with?
:param testCondition: the line that should be the last output line when
running executable
:srcPath: the path of the source code, for compilation
"""
ExternalTool.__init__(self)
self.sqrts = int(sqrts)
self.name = "nllfast%d" % sqrts
self.nllfastVersion = nllfastVersion
path = "<install>/lib/nllfast/nllfast-"
location = path + self.nllfastVersion + "/"
self.cdPath = self.absPath(location)
self.executablePath = self.cdPath + "/nllfast_%dTeV" % self.sqrts
self.testParams = testParams
self.testCondition = testCondition
self.srcPath = self.cdPath
self.executable = ""
def compile(self):
"""
Try to compile nllfast.
"""
logger.info("Trying to compile %s", self.name)
cmd = "cd %s; make" % self.srcPath
out = executor.getoutput(cmd)
# out = subprocess.check_output ( cmd, shell=True, universal_newlines=True )
logger.info(out)
return True
def fetch(self):
"""
Fetch and unpack tarball.
"""
import urllib, tarfile
tempfile = "/tmp/nllfast7.tar.gz"
f = open(tempfile, "w")
url = "http://smodels.hephy.at/externaltools/nllfast%d.tar.gz" \
% self.sqrts
logger.info("fetching tarball from " + url)
R = urllib.urlopen(url)
l = R.readlines()
for line in l:
f.write(line)
R.close()
f.close()
tar = tarfile.open(tempfile)
for item in tar:
tar.extract(item, self.srcPath + "/")
def unlink(self, inputFile):
"""
Remove inputFile.out
"""
return
# fname = "%s/%s.out" % (self.cdPath, inputFile)
# if os.path.exists(fname):
# os.unlink(fname)
def run_(self, params):
"""
Execute nllfast7.
:params params: parameters used (e.g. gg cteq5 .... )
:returns: stdout and stderr, or error message
"""
cmd = "cd %s; %s %s" % (self.cdPath, self.executablePath, params)
out = executor.getoutput(cmd)
# out = subprocess.check_output ( cmd, shell=True, universal_newlines=True )
out = out.split("\n")
return out
def run(self, process, pdf, squarkmass, gluinomass):
"""
Execute nllfast.
:params process: which process: st, sb, gg, gdcpl, sdcpl, ss, sg, tot
:params pdf: cteq=cteq6, mstw2008
:params squarkmass: squarkmass, None if squark decoupled
:params gluinomass: gluinomass, None if gluino decoupled
:returns: stdout and stderr, or error message
"""
processes = ["st", "sb", "gg", "gdcpl", "sdcpl", "ss", "sg", "tot"]
if not process in processes:
return None
if not pdf in ["cteq", "cteq6", "mstw", "mstw2008"]:
return None
if not squarkmass:
            return self.run_("%s %s %s" % (process, pdf, gluinomass))
        if not gluinomass:
            return self.run_("%s %s %s" % (process, pdf, squarkmass))
        return self.run_("%s %s %s %s" %
                         (process, pdf, squarkmass, gluinomass))
def checkInstallation(self):
"""
Checks if installation of tool is valid by looking for executable and
executing it.
"""
if not os.path.exists(self.executablePath):
logger.error("Executable '%s' not found. Maybe you didn't compile " \
"the external tools in smodels/lib?", self.executablePath)
return False
if not os.access(self.executablePath, os.X_OK):
logger.error("%s is not executable", self.executable)
return False
out = self.run_(self.testParams)
if out[-1].find(self.testCondition) == -1:
logger.error("Setup invalid: " + str(out))
return False
self.unlink("gg")
return True
class ExternalNllFast7(ExternalNllFast):
"""
An instance of this class represents the installation of nllfast 7.
"""
def __init__(self):
ExternalNllFast.__init__(self, 7, "1.2",
testParams="gg cteq 500 600",
testCondition="500. 600. 0.193E+00 "
"0.450E+00 0.497E+00")
class ExternalNllFast8(ExternalNllFast):
"""
An instance of this class represents the installation of nllfast 8.
"""
def __init__(self):
ExternalNllFast.__init__(self, 8, "2.1",
testParams="gg cteq 500 600",
testCondition="500. 600. 0.406E+00 "
"0.873E+00 0.953E+00")
class ExternalNllFast13(ExternalNllFast):
"""
An instance of this class represents the installation of nllfast 8.
"""
def __init__(self):
ExternalNllFast.__init__(self, 13, "3.1",
testParams="gg cteq 500 600",
testCondition="600. 0.394E+01 0.690E+01 "
"0.731E+01 0.394E+00" )
nllFastTools = { 7 : ExternalNllFast7(),
8 : ExternalNllFast8(),
13 : ExternalNllFast13() }
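
# Example usage (sketch): NLO+NLL cross-section for gluino-pair production at
# 8 TeV with the CTEQ PDF, m_squark = 500 GeV, m_gluino = 600 GeV:
#
#     output_lines = nllFastTools[8].run("gg", "cteq", 500, 600)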
if __name__ == "__main__":
for (sqrts, tool) in nllFastTools.items():
print("%s: installed in %s" % (tool.name, tool.installDirectory()))
| 2.03125 | 2 |
musicmod/__init__.py | alfonsof/music-list | 1 | 12796721 | # __init__.py
__all__ = ['createlist', 'viewlist'] | 1.09375 | 1 |
pysnmp/TRENDMICRO-NVW-MIB.py | agustinhenze/mibs.snmplabs.com | 11 | 12796722 | <reponame>agustinhenze/mibs.snmplabs.com
#
# PySNMP MIB module TRENDMICRO-NVW-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TRENDMICRO-NVW-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:20:11 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Counter64, Gauge32, ObjectIdentity, Counter32, NotificationType, IpAddress, iso, ModuleIdentity, Unsigned32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter64", "Gauge32", "ObjectIdentity", "Counter32", "NotificationType", "IpAddress", "iso", "ModuleIdentity", "Unsigned32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tmNVW, = mibBuilder.importSymbols("TRENDMICRO-SMI", "tmNVW")
nvwScanCurrConn = MibScalar((1, 3, 6, 1, 4, 1, 6101, 2500, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvwScanCurrConn.setStatus('mandatory')
nvwScanCurrMem = MibScalar((1, 3, 6, 1, 4, 1, 6101, 2500, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvwScanCurrMem.setStatus('mandatory')
nvwPolicyCurrConn = MibScalar((1, 3, 6, 1, 4, 1, 6101, 2500, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvwPolicyCurrConn.setStatus('mandatory')
nvwTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 6101, 2500, 251))
oppOn = NotificationType((1, 3, 6, 1, 4, 1, 6101, 2500, 251, 1))
if mibBuilder.loadTexts: oppOn.setStatus('current')
oppOff = NotificationType((1, 3, 6, 1, 4, 1, 6101, 2500, 251, 2))
if mibBuilder.loadTexts: oppOff.setStatus('current')
bootFactory = NotificationType((1, 3, 6, 1, 4, 1, 6101, 2500, 251, 3))
if mibBuilder.loadTexts: bootFactory.setStatus('current')
bootPrevious = NotificationType((1, 3, 6, 1, 4, 1, 6101, 2500, 251, 4))
if mibBuilder.loadTexts: bootPrevious.setStatus('current')
haFailover = NotificationType((1, 3, 6, 1, 4, 1, 6101, 2500, 251, 5))
if mibBuilder.loadTexts: haFailover.setStatus('current')
mibBuilder.exportSymbols("TRENDMICRO-NVW-MIB", nvwPolicyCurrConn=nvwPolicyCurrConn, nvwScanCurrMem=nvwScanCurrMem, oppOn=oppOn, bootFactory=bootFactory, nvwTraps=nvwTraps, oppOff=oppOff, bootPrevious=bootPrevious, nvwScanCurrConn=nvwScanCurrConn, haFailover=haFailover)
| 1.429688 | 1 |
tools/sample.py | VanessaDo/cloudml-samples | 1,552 | 12796723 | <gh_stars>1000+
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiline docstrings should
work but could be problematic.
"""
# This is safer.
"""Sample of what to_ipynb.py does"""
# Consecutive Comments are grouped into the same markdown cell.
# The leading '#' symbol is removed so the markdown cells look better.
# *It is okay to use [markdown](https://www.google.com/search?q=markdown).*
import argparse
import os
# Consecutive imports are grouped into a cell.
# Comments cause a new cell to be created, but blank lines between imports are ignored.
# This next import should say `from helpers import ...` even if its source says `from module.helpers import ...`
# Code manipulation is registered in `samples.yaml`.
from module.helpers import (
some_function)
import yyy
import zzz
# Top level classes, function definitions, and expressions are in their own cells.
class A(object): # Inline comments are left as is.
# Inner comments are left as is.
def __init__(self):
pass
class B(object):
pass
def func(arg):
"""Docstrings are left as is"""
def inner_func():
print(arg)
return inner_func
a = A()
print(a)
# This is a markdown cell.
def main(args):
help(func)
# The last thing of the .py file must be the `if __name__ == '__main__':` block.
if __name__ == '__main__':
# Its content is grouped into the last code cell.
# All args should have a default value if the notebook is expected to be runnable without code change.
parser = argparse.ArgumentParser()
parser.add_argument(
'--job-dir',
type=str,
help='Job dir',
default='/tmp/sample'
)
# Use parse_known_args to ignore args passed in when running as a notebook.
args, _ = parser.parse_known_args()
main(args)
| 2.5625 | 3 |
FreeCodeCamp.org/Inheritance/ChineseChef.py | MizaN13/PythonAbc | 0 | 12796724 | from Chef import Chef
# inheriting Chef class from ChineseChef class
class ChineseChef(Chef):
def make_special_dish(self):
print("The chef makes orange chicken")
def make_fried_rice(self):
print("The Chef makes fried rice.") | 3.390625 | 3 |
00_Code/01_LeetCode/525_ContiguousArray.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 1 | 12796725 | """
Given a binary array, find the maximum length of a contiguous subarray with equal number of 0 and 1.
Example 1:
Input: [0,1]
Output: 2
Explanation: [0, 1] is the longest contiguous subarray with equal number of 0 and 1.
Example 2:
Input: [0,1,0]
Output: 2
Explanation: [0, 1] (or [1, 0]) is a longest contiguous subarray with equal number of 0 and 1.
Note: The length of the given binary array will not exceed 50,000.
"""
class Solution(object):
def findMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
"""
Method 1: Hash Table
* Initialize
- counter
- max length variable
- hash table with {0: 0}
* Enumerate
When we come across the number 0, we subtract 1 from count
When we come across the number 1, we add 1 to count
At each stage, we check if count exists in the hash table.
If it does then we update the length as the maximum between
max_length and index-table[count]
Reference:https://discuss.leetcode.com/topic/80056/python-o-n-solution-with-visual-explanation/2
Your runtime beats 80.81 % of python submissions
"""
count = 0
max_length = 0
table = {0: 0}
for index, num in enumerate(nums):
if num == 0:
count -= 1
else:
count += 1
if count in table:
max_length = max(max_length, index + 1 - table[count])
else:
table[count] = index + 1
return max_length
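# Quick self-check added for illustration (not part of the original solution
# file); it exercises the two examples from the problem statement above.
if __name__ == "__main__":
    assert Solution().findMaxLength([0, 1]) == 2
    assert Solution().findMaxLength([0, 1, 0]) == 2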
| 4.0625 | 4 |
test/drafts/2017-09-29_1852 Overall Testing/002.py | friedrichromstedt/upy | 3 | 12796726 | import numpy
import upy2
from upy2 import u, U
from upy2.typesetting import ScientificTypesetter
# This test probably doesn't demonstrate the intended behaviour
# anymore since :func:`upy2.numpy_operators.install_numpy_operators`
# is now called unconditionally upon importing ``upy2``.
U(2).default()
ScientificTypesetter(stddevs=2, precision=2).default()
wavelength = 420 +- u(10)
print(wavelength)
print(wavelength ** 0.5)
print("-----")
compound = numpy.asarray([10, 11]) +- u([1, 2])
print(compound)
print("-----")
#upy2.install_numpy_operators()
compound2 = numpy.asarray([[100], [42]]) +- u([[1], [1.5]])
print(compound2)
| 2.28125 | 2 |
tests/test_piece_builder.py | victor-paltz/embedding-reader | 6 | 12796727 | <filename>tests/test_piece_builder.py
from embedding_reader.piece_builder import build_pieces
import random
import pytest
import pandas as pd
def build_random_filecounts(min_count=100, max_count=10000):
count_before = 0
results = []
for i in range(1000):
r = random.randint(min_count, max_count)
results.append([r, count_before, str(i) + ".npy", "someval" + str(i)])
count_before += r
return pd.DataFrame(results, columns=["count", "count_before", "filename", "custommeta"])
@pytest.mark.parametrize(["start", "end"], [(0, 100000), (100, 100000), (10000, 300000)])
def test_piece_builder(start, end):
# generate random file counts
# call piece builder
# check sum is correct
# check each piece has a reasonable size
batch_size = 1000
max_piece_size = 100
file_counts = build_random_filecounts()
pieces = build_pieces(
file_counts, batch_size, start, end, max_piece_size=max_piece_size, metadata_columns=["custommeta"]
)
assert pieces["piece_length"].sum() == end - start
filename_to_count = {filename: count for count, filename in zip(file_counts["count"], file_counts["filename"])}
for piece_start, piece_end, piece_length, batch_start, batch_end, batch_length, filename in zip(
pieces["piece_start"],
pieces["piece_end"],
pieces["piece_length"],
pieces["batch_start"],
pieces["batch_end"],
pieces["batch_length"],
pieces["filename"],
):
assert 0 < piece_length <= max_piece_size
assert piece_start >= 0
assert piece_start < piece_end
assert batch_start < batch_end
assert batch_length <= batch_size
assert batch_end <= end
assert batch_end - batch_start <= batch_size
assert piece_end <= filename_to_count[filename]
# check each piece has a reasonable size
assert pieces["piece_length"].max() <= max_piece_size
@pytest.mark.parametrize(["start", "end"], [(9, 15)])
def test_piece_builder_with_empty_file(start, end):
# generate random file counts
# call piece builder
# check piece length is not empty
batch_size = 1000
max_piece_size = 100
file_counts = build_random_filecounts(min_count=0, max_count=1)
pieces = build_pieces(
file_counts, batch_size, start, end, max_piece_size=max_piece_size, metadata_columns=["custommeta"]
)
for piece_start, piece_end, piece_length, batch_start, batch_end, batch_length, filename in zip(
pieces["piece_start"],
pieces["piece_end"],
pieces["piece_length"],
pieces["batch_start"],
pieces["batch_end"],
pieces["batch_length"],
pieces["filename"],
):
assert piece_length != 0
assert piece_start < piece_end
| 2.34375 | 2 |
iota/commands/extended/broadcast_and_store.py | EasonC13/iota.py | 347 | 12796728 | <reponame>EasonC13/iota.py<gh_stars>100-1000
from iota.commands import FilterCommand
from iota.commands.core.broadcast_transactions import \
BroadcastTransactionsCommand
from iota.commands.core.store_transactions import StoreTransactionsCommand
import asyncio
__all__ = [
'BroadcastAndStoreCommand',
]
class BroadcastAndStoreCommand(FilterCommand):
"""
Executes ``broadcastAndStore`` extended API command.
See :py:meth:`iota.api.Iota.broadcast_and_store` for more info.
"""
command = 'broadcastAndStore'
def get_request_filter(self):
pass
def get_response_filter(self):
pass
async def _execute(self, request: dict) -> dict:
# Submit the two coroutines to the already running event loop
await asyncio.gather(
BroadcastTransactionsCommand(self.adapter)(**request),
StoreTransactionsCommand(self.adapter)(**request),
)
return {
'trytes': request['trytes'],
}
| 2.46875 | 2 |
lol.py | BDOGS2000/website_spamer-reloader | 1 | 12796729 | <filename>lol.py
import time
from selenium import webdriver
import requests
driver = webdriver.Chrome("driver.exe")
def main():
url = input("URL: ")
driver.get(url)
count = 0
input()
tap_closer()
while True:
count += 1
r = requests.get(url)
print(str(count) + " : " + str(r.status_code))
driver.switch_to.window(driver.window_handles[0])
driver.refresh()
# Change the "0.5" to how many seconds you want in between reloads
# time.sleep(0.5)
tap_closer()
def tap_closer():
check = True
while check:
if len(driver.window_handles) > 1:
driver.switch_to.window(driver.window_handles[1])
driver.close()
else:
check = False
if __name__ == "__main__":
main()
| 2.984375 | 3 |
ImitationLearning/VisualAttention/Decoder.py | Suryavf/SelfDrivingCar | 11 | 12796730 | <reponame>Suryavf/SelfDrivingCar<gh_stars>10-100
import torch
import torch.nn as nn
from IPython.core.debugger import set_trace
import ImitationLearning.VisualAttention.network.Gate as G
""" Basic Decoder Module
--------------------
Ref: <NAME>., & <NAME>. (2017). "Interpretable learning for self-driving
cars by visualizing causal attention". In Proceedings of the IEEE
international conference on computer vision (pp. 2942-2950).
* Input: feature [batch,L,D]
hidden [1,batch,H]
* Output: alpha [batch,L,1]
"""
class BasicDecoder(nn.Module):
""" Constructor """
def __init__(self,AttentionNet,ControlNet,cube_size,n_hidden):
super(BasicDecoder, self).__init__()
# Parameters
self.D = cube_size[2] # 64
self.L = cube_size[0]*cube_size[1] # 90
self.R = self.L*self.D # 5760
self.H = n_hidden # 512
self.M = n_hidden # 512
self.sequence_len = 20
self.batch_size = 120
self.n_out = 3
# Output container
self.pred = torch.zeros([self.batch_size,self.n_out])
self. map = torch.zeros([self.batch_size,self. L ])
# Declare layers
self.attn = AttentionNet
self.ctrl = ControlNet
self.lstm = nn.LSTM( input_size = self.R,
hidden_size = self.H,
num_layers = 1)
self.init_Wh = nn.Linear(self.D,self.H,bias=True )
self.init_Wc = nn.Linear(self.D,self.H,bias=True )
self.init_tanh = nn.Tanh()
# Initialization
torch.nn.init.xavier_uniform_(self.init_Wh.weight)
torch.nn.init.xavier_uniform_(self.init_Wc.weight)
self.lstm.reset_parameters()
""" Initialize LSTM: hidden state and cell state
Ref: Xu, Kelvin, et al. "Show, attend and tell: Neural
image caption generation with visual attention."
International conference on machine learning. 2015.
* Input: feature [batch,L,D]
* Output: hidden [1,batch,H]
cell [1,batch,H]
"""
def initializeLSTM(self,feature):
with torch.no_grad():
# Mean features
feature = torch.mean(feature,1) # [batch,L,D] -> [batch,D]
hidden = self.init_Wh(feature) # [batch,D]*[D,H] -> [batch,H]
hidden = self.init_tanh(hidden) # [batch,H]
hidden = hidden.unsqueeze(0) # [1,batch,H]
cell = self.init_Wc(feature) # [batch,D]*[D,H] -> [batch,H]
cell = self.init_tanh(cell) # [batch,H]
cell = cell.unsqueeze(0) # [1,batch,H]
# (h,c) ~ [num_layers, batch, hidden_size]
return hidden.contiguous(),cell.contiguous()
""" Forward """
def forward(self,feature):
# Parameters
sequence_len = self.sequence_len
n_out = self.n_out
if self.training: batch_size = int(feature.shape[0]/sequence_len)
else : batch_size = feature.shape[0]
# Data
if self.training: sequence = feature.view(batch_size,sequence_len,self.L,self.D).transpose_(0,1) # [sequence,batch,L,D]
else : sequence = feature # [batch,L,D]
# Input to inicialize LSTM
if self.training: xt = sequence[0]
else : xt = sequence[0].unsqueeze(0)
# Inicialize hidden state and cell state
# * hidden ~ [1,batch,H]
# * cell ~ [1,batch,H]
hidden,cell = self.initializeLSTM(xt)
# Prediction container
if self.training:
self.pred = self.pred.view(sequence_len,batch_size, n_out)
self. map = self. map.view(sequence_len,batch_size,self.L)
# Sequence loop
n_range = self.sequence_len if self.training else batch_size
n_visual = batch_size if self.training else 1
for k in range(n_range):
# One time
if self.training: xt = sequence[k] # [batch,L,D]
else : xt = sequence[k].unsqueeze(0) # [ 1 ,L,D]
# Visual Attention
alpha = self.attn(xt,hidden)# [batch,L,1]
visual = xt * alpha # [batch,L,D]x[batch,L,1] = [batch,L,D]
visual = visual.reshape(n_visual,self.R) # [batch,R]
visual = visual.unsqueeze(0) # [1,batch,R]
# LSTM
# * yt ~ [sequence,batch,H]
# * hidden ~ [ layers ,batch,H]
# * cell ~ [ layers ,batch,H]
_,(hidden,cell) = self.lstm(visual,(hidden,cell))
# Control
self.pred[k] = self.ctrl(visual, hidden, training=self.training)
self. map[k] = alpha.squeeze()
if self.training:
self.pred = self.pred.transpose(0,1).contiguous().view(batch_size*sequence_len, n_out)
self. map = self. map.transpose(0,1).contiguous().view(batch_size*sequence_len,self.L)
return self.pred, self.map
""" Dual Decoder Module
-------------------
* Input: feature [batch,L,D]
hidden [1,batch,H]
* Output: alpha [batch,L,1]
"""
class DualDecoder(nn.Module):
""" Constructor """
def __init__(self,AttentionNet,ControlNet,cube_size,n_hidden):
super(DualDecoder, self).__init__()
# Parameters
self.D = cube_size[2] # 64 # cube_size[0]
self.L = cube_size[0]*cube_size[1] # 90 # cube_size[1]*cube_size[2]
self.R = self.L*self.D # 5760 # self.L*self.D
self.H = n_hidden # 1024 # hidden_size
self.M = n_hidden # 1024 # hidden_size
self.sequence_len = 20
self.batch_size = 120
self.n_out = 3
self.study = False
# Declare layers
self.attn = AttentionNet
self.ctrl = ControlNet
self.lstm = nn.LSTM( input_size=self.R, hidden_size=self.H, num_layers=2 )
self.init_Wh1 = nn.Linear(self.D,self.H,bias=True )
self.init_Wh2 = nn.Linear(self.H,self.H,bias=True )
self.init_Wc1 = nn.Linear(self.D,self.H,bias=True )
self.init_Wc2 = nn.Linear(self.H,self.H,bias=True )
self.init_tanh = nn.Tanh()
# Initialization
torch.nn.init.xavier_uniform_(self.init_Wh1.weight)
torch.nn.init.xavier_uniform_(self.init_Wh2.weight)
torch.nn.init.xavier_uniform_(self.init_Wc1.weight)
torch.nn.init.xavier_uniform_(self.init_Wc2.weight)
self.lstm.reset_parameters()
""" Initialize LSTM: hidden state and cell state
Ref: <NAME>, et al. "Show, attend and tell: Neural
image caption generation with visual attention."
International conference on machine learning. 2015.
* Input: feature [batch,L,D]
* Output: hidden [1,batch,H]
cell [1,batch,H]
"""
def initializeLSTM(self,feature):
with torch.no_grad():
# Mean features
feature = torch.mean(feature,1) # [batch,L,D] -> [batch,D]
hidden1 = self.init_Wh1 (feature) # [batch,D]*[D,H] -> [batch,H]
hidden1 = self.init_tanh(hidden1) # [batch,H]
hidden2 = self.init_Wh2 (hidden1) # [batch,H]*[H,H] -> [batch,H]
hidden2 = self.init_tanh(hidden2) # [batch,H]
cell1 = self.init_Wc1 (feature) # [batch,D]*[D,H] -> [batch,H]
cell1 = self.init_tanh( cell1 ) # [batch,H]
cell2 = self.init_Wc2 ( cell1 ) # [batch,H]*[H,H] -> [batch,H]
cell2 = self.init_tanh( cell2 ) # [batch,H]
hidden = torch.stack([hidden1,hidden2], dim=0).contiguous() # [2,batch,H]
cell = torch.stack([ cell1, cell2], dim=0).contiguous() # [2,batch,H]
# (h,c) ~ [num_layers, batch, hidden_size]
return hidden,cell
""" Forward """
def forward(self,feature):
# Parameters
sequence_len = self.sequence_len
n_out = self.n_out
if self.training: batch_size = int(feature.shape[0]/sequence_len)
else : batch_size = feature.shape[0]
# Data
if self.training: sequence = feature.view(batch_size,sequence_len,self.L,self.D).transpose_(0,1) # [sequence,batch,L,D]
else : sequence = feature # [batch,L,D]
# Input to inicialize LSTM
if self.training: xt = sequence[0]
else : xt = sequence[0].unsqueeze(0)
# Inicialize hidden state and cell state
# * hidden ~ [1,batch,H]
# * cell ~ [1,batch,H]
hidden,cell = self.initializeLSTM(xt)
# Prediction container
if self.training:
pred = torch.zeros([sequence_len,batch_size, n_out]).to( torch.device('cuda:0') )
map_ = torch.zeros([sequence_len,batch_size,self.L]).to( torch.device('cuda:0') )
else:
pred = torch.zeros([batch_size, n_out]).to( torch.device('cuda:0') )
map_ = torch.zeros([batch_size,self.L]).to( torch.device('cuda:0') )
        # Study runtime
if self.study:
action = torch.zeros([self.batch_size,self.H]).to( torch.device('cuda:0') )
atten = torch.zeros([self.batch_size,self.H]).to( torch.device('cuda:0') )
else:
action,atten = (None,None)
# Sequence loop
n_range = self.sequence_len if self.training else batch_size
n_visual = batch_size if self.training else 1
for k in range(n_range):
# One time
if self.training: xt = sequence[k] # [batch,L,D]
else : xt = sequence[k].unsqueeze(0) # [ 1 ,L,D]
# Visual Attention
alpha = self.attn(xt,hidden[1].unsqueeze(0))# [batch,L,1]
visual = xt * alpha # [batch,L,D]x[batch,L,1] = [batch,L,D]
visual = visual.reshape(n_visual,self.R) # [batch,R]
visual = visual.unsqueeze(0) # [1,batch,R]
# LSTM
# * yt ~ [sequence,batch,H]
# * hidden ~ [ layers ,batch,H]
# * cell ~ [ layers ,batch,H]
_,(hidden,cell) = self.lstm(visual,(hidden,cell))
# Control
pred[k] = self.ctrl(visual, hidden[0].unsqueeze(0))
map_[k] = alpha.squeeze()
if self.study:
action[k] = hidden[0].squeeze()
atten [k] = hidden[1].squeeze()
if self.training:
pred = pred.transpose(0,1).contiguous().view(batch_size*sequence_len, n_out)
map_ = map_.transpose(0,1).contiguous().view(batch_size*sequence_len,self.L)
# Return
return pred, map_, {'action': action, 'attention': atten}
""" TVA Decoder Module
------------------
Ref: <NAME>., & <NAME>. (2017). "Interpretable learning for self-driving
cars by visualizing causal attention". In Proceedings of the IEEE
international conference on computer vision (pp. 2942-2950).
* Input: feature [batch,L,D]
hidden [1,batch,H]
* Output: alpha [batch,L,1]
"""
class TVADecoder(nn.Module):
""" Constructor """
def __init__(self,AttentionNet,cube_size,n_hidden):
super(TVADecoder, self).__init__()
# Parameters
self.D = cube_size[2] # 64
self.L = cube_size[0]*cube_size[1] # 90
self.R = self.L*self.D # 5760
self.H = n_hidden # 512
self.M = n_hidden # 512
self.sequence_len = 20
self.batch_size = 120
self.n_out = 3
self.study = False
# Declare layers
self.attn = AttentionNet
self.lstm = nn.LSTM( input_size = self.R,
hidden_size = self.H,
num_layers = 1)
self.init_Wh = nn.Linear(self.D,self.H,bias=True )
self.init_Wc = nn.Linear(self.D,self.H,bias=True )
self.init_tanh = nn.Tanh()
# Initialization
torch.nn.init.xavier_uniform_(self.init_Wh.weight)
torch.nn.init.xavier_uniform_(self.init_Wc.weight)
self.lstm.reset_parameters()
""" Initialize LSTM: hidden state and cell state
Ref: <NAME>, et al. "Show, attend and tell: Neural
image caption generation with visual attention."
International conference on machine learning. 2015.
* Input: feature [batch,L,D]
* Output: hidden [1,batch,H]
cell [1,batch,H]
"""
def initializeLSTM(self,feature):
with torch.no_grad():
# Mean features
feature = torch.mean(feature,1) # [batch,L,D] -> [batch,D]
hidden = self.init_Wh(feature) # [batch,D]*[D,H] -> [batch,H]
hidden = self.init_tanh(hidden) # [batch,H]
hidden = hidden.unsqueeze(0) # [1,batch,H]
cell = self.init_Wc(feature) # [batch,D]*[D,H] -> [batch,H]
cell = self.init_tanh(cell) # [batch,H]
cell = cell.unsqueeze(0) # [1,batch,H]
# (h,c) ~ [num_layers, batch, hidden_size]
return hidden.contiguous(),cell.contiguous()
""" Forward """
def forward(self,feature):
# Parameters
sequence_len = self.sequence_len
if self.training: batch_size = int(feature.shape[0]/sequence_len)
else : batch_size = feature.shape[0]
# Data
if self.training: sequence = feature.view(batch_size,sequence_len,self.L,self.D).transpose_(0,1) # [sequence,batch,L,D]
else : sequence = feature # [batch,L,D]
# Input to inicialize LSTM
if self.training: xt = sequence[0]
else : xt = sequence[0].unsqueeze(0)
# Inicialize hidden state and cell state
# * hidden ~ [1,batch,H]
# * cell ~ [1,batch,H]
hidden,cell = self.initializeLSTM(xt)
# Prediction container
if self.training:
vis_ = torch.zeros([sequence_len,batch_size,self.R]).to( torch.device('cuda:0') )
alp_ = torch.zeros([sequence_len,batch_size,self.L]).to( torch.device('cuda:0') )
bet_ = torch.zeros([sequence_len,batch_size,self.D]).to( torch.device('cuda:0') )
hdd_ = torch.zeros([sequence_len,batch_size,self.H]).to( torch.device('cuda:0') )
else:
vis_ = torch.zeros([batch_size,self.R]).to( torch.device('cuda:0') )
alp_ = torch.zeros([batch_size,self.L]).to( torch.device('cuda:0') )
bet_ = torch.zeros([batch_size,self.D]).to( torch.device('cuda:0') )
hdd_ = torch.zeros([batch_size,self.H]).to( torch.device('cuda:0') )
        # Study runtime
if self.study:
hc = torch.zeros([self.batch_size,self.H]).to( torch.device('cuda:0') )
else:
hc = None # hc,ha,hb = (None,None,None)
# Sequence loop
n_range = self.sequence_len if self.training else batch_size
n_visual = batch_size if self.training else 1
for k in range(n_range):
# One time
if self.training: xt = sequence[k] # [batch,L,D]
else : xt = sequence[k].unsqueeze(0) # [ 1 ,L,D]
# Visual Attention
alpha,beta = self.attn(xt,hidden) # [batch,L,1]
# Spatial
spatial = xt * alpha # [batch,L,D]x[batch,L,1] = [batch,L,D]
visual = spatial + xt
# Categorical
visual = visual * beta # [batch,L,D]x[batch,1,D] = [batch,L,D]
visual = visual.reshape(n_visual,self.R) # [batch,R]
visual = visual.unsqueeze(0) # [1,batch,R]
# LSTM
# * yt ~ [sequence,batch,H]
# * hidden ~ [ layers ,batch,H]
# * cell ~ [ layers ,batch,H]
_,(hidden,cell) = self.lstm(visual,(hidden,cell))
# Output
vis_[k] = visual # [1,batch,R]
alp_[k] = alpha.squeeze() # [1,batch,L]
bet_[k] = beta.squeeze() # [1,batch,D]
hdd_[k] = hidden[0].unsqueeze(0) # [1,batch,H]
if self.study:
hc[k] = hidden[0].squeeze()
if self.training:
vis_ = vis_.transpose(0,1).contiguous().view(batch_size*sequence_len,self.R)
alp_ = alp_.transpose(0,1).contiguous().view(batch_size*sequence_len,self.L)
bet_ = bet_.transpose(0,1).contiguous().view(batch_size*sequence_len,self.D)
hdd_ = hdd_.transpose(0,1).contiguous().view(batch_size*sequence_len,self.H)
return vis_, hdd_, {'alpha': alp_, 'beta': bet_}, {'control': hc}
# ------------------------------------------------------------------------------------------------
#
#
# ------------------------------------------------------------------------------------------------
class CatDecoder(nn.Module):
""" Constructor """
def __init__(self, HighEncoderNet, SpatialNet, FeatureNet, CommandNet,
LowLevelDim=128, HighLevelDim=512,
n_hidden=1024, n_state=64,n_task=3,
study=False):
super(CatDecoder, self).__init__()
self.study = study
# Parameters
self.H = n_hidden # output LSTM 1024 2048
self.R = int(n_hidden/4) # input LSTM 256 512
self.S = n_state
self.n_task = n_task
self.sequence_len = 20
# Attention
self.HighEncoder = HighEncoderNet
self.SpatialAttn = SpatialNet
self.FeatureAttn = FeatureNet
self. CmdDecoder = CommandNet
self.Gate = G.GRUGate(LowLevelDim)
# Output
self.dimReduction = nn.Conv2d(HighLevelDim,self.R, kernel_size=1, bias=False)
self.lstm = nn.LSTM( input_size = self.R,
hidden_size = self.H,
num_layers = 1)
self.init_Wh = nn.Linear(LowLevelDim,self.H,bias=True )
self.init_Wc = nn.Linear(LowLevelDim,self.H,bias=True )
self.init_tanh = nn.Tanh()
self.avgpool1 = nn.AdaptiveAvgPool2d((1, 1))
self.avgpool2 = nn.AdaptiveAvgPool2d((1, 1))
self.normSpa = nn.BatchNorm2d(LowLevelDim)
self.ReLU = nn.ReLU()
# Initialization
torch.nn.init.xavier_uniform_(self.dimReduction.weight)
torch.nn.init.xavier_uniform_(self. init_Wh.weight)
torch.nn.init.xavier_uniform_(self. init_Wc.weight)
self.lstm.reset_parameters()
""" Initialize LSTM: hidden state and cell state
Ref: Xu, Kelvin, et al. "Show, attend and tell: Neural
image caption generation with visual attention."
International conference on machine learning. 2015.
* Input: feature [batch,D,h,w]
* Output: hidden [1,batch,H]
cell [1,batch,H]
"""
def initializeLSTM(self,feature):
with torch.no_grad():
# Mean features
feature = torch.mean(feature,(2,3)) # [batch,D,h,w] -> [batch,D]
hidden = self.init_Wh(feature) # [batch,D]*[D,H] -> [batch,H]
hidden = self.init_tanh(hidden) # [batch,H]
hidden = hidden.unsqueeze(0) # [1,batch,H]
cell = self.init_Wc(feature) # [batch,D]*[D,H] -> [batch,H]
cell = self.init_tanh(cell) # [batch,H]
cell = cell.unsqueeze(0) # [1,batch,H]
# (h,c) ~ [num_layers, batch, hidden_size]
return hidden.contiguous(),cell.contiguous()
""" Forward
- eta [batch,channel,high,width]
"""
def forward(self,feature,command):
# Parameters
sequence_len = self.sequence_len
if self.training: batch_size = int(feature.shape[0]/sequence_len)
else : batch_size = feature.shape[0]
_,C,H,W = feature.shape # Batch of Tensor Images is a tensor of (B, C, H, W) shape
# Data
if self.training: sequence = feature.view(batch_size,sequence_len,C,H,W).transpose(0,1) # [sequence,batch, ...]
else : sequence = feature # [batch, ...]
# Inicialize hidden state and cell state
# * hidden ~ [1,batch,H]
# * cell ~ [1,batch,H]
if self.training: xt = sequence[0] # Input to inicialize LSTM
else : xt = sequence[0].unsqueeze(0)
ht,ct = self.initializeLSTM(xt)
# Command decoder
cmd = self.CmdDecoder(command)
if self.training: cmd = cmd.view(batch_size,sequence_len,-1).transpose(0,1) # [sequence,batch,4]
# Prediction container
st_,ht_ = list(),list()
# State initialization
if self.training: st = torch.cuda.FloatTensor(batch_size,self.n_task,self.S).uniform_()
else : st = torch.cuda.FloatTensor( 1,self.n_task,self.S).uniform_()
# Study
if self.study: α,β,F = list(),list(),list()
else : α,β,F = None, None, None
# Sequence loop
n_range = self.sequence_len if self.training else batch_size
for k in range(n_range):
# One time
if self.training: ηt = sequence[k] # [batch,L,D]
else : ηt = sequence[k].unsqueeze(0) # [ 1 ,L,D]
if self.training: cm = cmd[k] # [batch, 4 ]
else : cm = cmd[k].unsqueeze(0) # [ 1 , 4 ]
# Spatial Attention
xt, αt = self.SpatialAttn(ηt,st)
xt = self.Gate(ηt,xt)
# High-level encoder
zt = self.HighEncoder(xt)
# Feature-based attention
# s[t] = f(z[t],h[t-1])
_zt = self.avgpool1( zt)
_zt = torch.flatten(_zt, 1)
st, βt, Ft = self.FeatureAttn(_zt,ht[0],cm) # [batch,S]
# Dimension reduction to LSTM
rt = self.dimReduction(zt)
rt = self. avgpool2(rt)
rt = torch.flatten(rt , 1)
rt = rt.unsqueeze(0)
# LSTM
# * yt ~ [sequence,batch,H]
# * hidden ~ [ layers ,batch,H]
# * cell ~ [ layers ,batch,H]
_,(ht,ct)= self.lstm(rt,(ht,ct))
# Output
st_.append(st.squeeze()) # [batch,n_task,S]
ht_.append(ht.squeeze()) # [batch, H]
# Study
if self.study:
α.append(αt)
β.append(βt)
F.append(Ft)
# Concatenate
st_ = torch.stack(st_,dim=0)
ht_ = torch.stack(ht_,dim=0)
if self.training:
st_ = st_.transpose(0,1).reshape(batch_size*sequence_len,self.n_task,self.S)
ht_ = ht_.transpose(0,1).reshape(batch_size*sequence_len, self.H)
# Compile study
if self.study:
α = torch.stack(α, dim=0)
β = torch.stack(β, dim=0)
F = torch.stack(F, dim=0)
return st_, {'hidden': ht_, 'feature': F}, {'alpha': α, 'beta': β}
| 2.859375 | 3 |
d3.py | Otavioarp/Desafio | 0 | 12796731 | '''
<NAME>
Email: <EMAIL>
'''
def retorna_pessoas_preferem_um_unico_palco(quantidade_pessoas_evento):
    return int(25 / 100 * quantidade_pessoas_evento) | 1.632813 | 2
tests/regressiontests/forms/localflavor/jp.py | huicheese/Django-test3 | 23 | 12796732 | <filename>tests/regressiontests/forms/localflavor/jp.py
# -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ JP form fields.
tests = r"""
# JPPostalCodeField ###############################################################
A form field that validates its input is a Japanese postcode.
Accepts 7 digits(with/out hyphen).
>>> from django.contrib.localflavor.jp.forms import JPPostalCodeField
>>> f = JPPostalCodeField()
>>> f.clean('251-0032')
u'2510032'
>>> f.clean('2510032')
u'2510032'
>>> f.clean('2510-032')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXXXX or XXX-XXXX.']
>>> f.clean('251a0032')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXXXX or XXX-XXXX.']
>>> f.clean('a51-0032')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXXXX or XXX-XXXX.']
>>> f.clean('25100321')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXXXX or XXX-XXXX.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f = JPPostalCodeField(required=False)
>>> f.clean('251-0032')
u'2510032'
>>> f.clean('2510032')
u'2510032'
>>> f.clean('2510-032')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXXXX or XXX-XXXX.']
>>> f.clean('')
u''
>>> f.clean(None)
u''
# JPPrefectureSelect ###############################################################
A Select widget that uses a list of Japanese prefectures as its choices.
>>> from django.contrib.localflavor.jp.forms import JPPrefectureSelect
>>> w = JPPrefectureSelect()
>>> print w.render('prefecture', 'kanagawa')
<select name="prefecture">
<option value="hokkaido">Hokkaido</option>
<option value="aomori">Aomori</option>
<option value="iwate">Iwate</option>
<option value="miyagi">Miyagi</option>
<option value="akita">Akita</option>
<option value="yamagata">Yamagata</option>
<option value="fukushima">Fukushima</option>
<option value="ibaraki">Ibaraki</option>
<option value="tochigi">Tochigi</option>
<option value="gunma">Gunma</option>
<option value="saitama">Saitama</option>
<option value="chiba">Chiba</option>
<option value="tokyo">Tokyo</option>
<option value="kanagawa" selected="selected">Kanagawa</option>
<option value="yamanashi">Yamanashi</option>
<option value="nagano">Nagano</option>
<option value="niigata">Niigata</option>
<option value="toyama">Toyama</option>
<option value="ishikawa">Ishikawa</option>
<option value="fukui">Fukui</option>
<option value="gifu">Gifu</option>
<option value="shizuoka">Shizuoka</option>
<option value="aichi">Aichi</option>
<option value="mie">Mie</option>
<option value="shiga">Shiga</option>
<option value="kyoto">Kyoto</option>
<option value="osaka">Osaka</option>
<option value="hyogo">Hyogo</option>
<option value="nara">Nara</option>
<option value="wakayama">Wakayama</option>
<option value="tottori">Tottori</option>
<option value="shimane">Shimane</option>
<option value="okayama">Okayama</option>
<option value="hiroshima">Hiroshima</option>
<option value="yamaguchi">Yamaguchi</option>
<option value="tokushima">Tokushima</option>
<option value="kagawa">Kagawa</option>
<option value="ehime">Ehime</option>
<option value="kochi">Kochi</option>
<option value="fukuoka">Fukuoka</option>
<option value="saga">Saga</option>
<option value="nagasaki">Nagasaki</option>
<option value="kumamoto">Kumamoto</option>
<option value="oita">Oita</option>
<option value="miyazaki">Miyazaki</option>
<option value="kagoshima">Kagoshima</option>
<option value="okinawa">Okinawa</option>
</select>
"""
| 2.25 | 2 |
__init__.py | codefresh-customer-success/yeti | 0 | 12796733 | #!/usr/local/bin/python3
### IMPORTS ###
### FUNCTIONS ###
### CLASSES ###
| 1.554688 | 2 |
generate_encodings.py | AsimMessi/FaceRecognition | 0 | 12796734 | <gh_stars>0
from tqdm import tqdm
import face_recognition
import glob
import csv
import numpy as np
def generate_training_data(folder):
r=0
print("Generating encodings for db images..")
image_encodings=[]
with tqdm(total=len(glob.glob(folder+"/*.jpg"))) as pbar:
for img in glob.glob(folder+"/*.jpg"):
enc=[]
img_name=img[3:]
known_img=face_recognition.load_image_file(img)
try:
en=face_recognition.face_encodings(known_img)[0]
except:
print("can't generate encodings for "+img_name+", give another image")
pass
#en=np.array(en)
for e in en:
enc.append(e)
image_encodings.append([img_name,enc])
pbar.update(1)
return image_encodings
encodings=generate_training_data("db")
#print(encodings[0])
csvfile = "Encodings/encodings.csv"
i=0
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
if(i==0):
writer.writerow(["Img_Name","Encoding"])
i+=1
writer.writerows(encodings)
print("Encodings updated for all images")
| 2.859375 | 3 |
tests/lib/util/test_utils.py | CMSgov/qpp-claims-to-quality-public | 13 | 12796735 | <filename>tests/lib/util/test_utils.py
"""Test util files."""
import json
from claims_to_quality.lib.util import new_relic_insights, slack
import mock
def test_new_relic_insights_payload():
"""Test NR insights payload creation."""
event = {
'environment': 'TEST',
'success': 'True'
}
payload = new_relic_insights.build_new_relic_insights_payload(
event_type='analyzerStartup', event_body=event
)
event['eventType'] = 'analyzerStartup'
assert json.loads(payload) == event
@mock.patch('subprocess.call')
def test_new_relic_insights_post(call):
"""Test NR insights post."""
payload = json.dumps({
'environment': 'TEST',
'success': 'True',
'eventType': 'analyzerStartup'
})
new_relic_insights.post_to_new_relic(payload=payload)
subprocess_calls = call.call_args_list
# Check that subprocess.call was triggered only once.
assert len(subprocess_calls) == 1
_, kwargs = subprocess_calls[0]
checks = [
'{"environment": "TEST", "success": "True", "eventType": "analyzerStartup"}',
]
for check in checks:
assert check in kwargs['args'][0]
@mock.patch('subprocess.call')
def test_slack_post(call):
"""Test Slack."""
slack.post_to_slack(message='hello')
subprocess_calls = call.call_args_list
# Check that subprocess.call was triggered only once.
assert len(subprocess_calls) == 1
_, kwargs = subprocess_calls[0]
checks = [
"curl -X POST -H 'Content-type: application/json'",
"""--data \'{"text":"hello"}\'"""
]
for check in checks:
assert check in kwargs['args'][0]
@mock.patch('subprocess.call')
def test_slack_post_here(call):
"""Test Slack."""
slack.post_to_slack_tagging_here(message='hello')
subprocess_calls = call.call_args_list
# Check that subprocess.call was triggered only once.
assert len(subprocess_calls) == 1
_, kwargs = subprocess_calls[0]
checks = [
"curl -X POST -H 'Content-type: application/json'",
"""--data \'{"text":"<!here> hello"}\'"""
]
for check in checks:
assert check in kwargs['args'][0]
| 2.234375 | 2 |
sung.pw/test_shellcode32.py | rmagur1203/exploit-codes | 0 | 12796736 | from pwn import *
context.log_level = 'debug'
e = ELF('./test_shellcode32')
r = remote("sunrin.site", 9017)#process('./test_shellcode32')
context(arch='i386', os='linux')
sc = shellcraft.pushstr('/home/pwn/flag')
sc += shellcraft.open("esp", 0, 0) # fd = open("./flag", 0, 0);
sc += shellcraft.read("eax", "esp", 100) # read(fd, esp, 100);
sc += shellcraft.write(1, "esp", 100) # write(1, esp, 100);
r.sendline(asm(sc))
r.interactive() | 1.609375 | 2 |
roomba_600_driver/launch/teleop.launch.py | 3ccd/create_autonomy | 0 | 12796737 | <reponame>3ccd/create_autonomy
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.actions import DeclareLaunchArgument, SetEnvironmentVariable
from launch.substitutions import LaunchConfiguration, ThisLaunchFileDir
config = LaunchConfiguration(
'params', default=[ThisLaunchFileDir(), '/teleop.yaml'])
def generate_launch_description():
return LaunchDescription([
Node(
package='joy',
node_executable='joy_node'),
Node(
package='roomba_600_driver',
node_executable='teleop_node',
output="screen",
parameters=[config]),
])
| 2.140625 | 2 |
check_colors.py | IgaLewandowska/Check_colors | 1 | 12796738 | #import Pillow
#from colors.py import generate_colors
import colorthief
from colorthief import ColorThief
import glob
from pathlib import Path
def get_colors(image):
dominant_color = ColorThief(image).get_palette(color_count=3, quality=3)
return dominant_color
#print (get_colors('blockchains/polygon/info/logo.png'))
def get_files():
files = list(Path('blockchains').glob('**/*.png'))
return files
#print (get_files())
def main():
colors_list = []
for i in get_files():
color = get_colors(i)
name = str(i).split('/')[-2]
if name == 'info':
name = str(i).split('/')[-3]
tmp = {name: color}
print(tmp)
colors_list.append(tmp)
print(colors_list)
main()
| 3.09375 | 3 |
english.py | procedure2012/MyDict | 0 | 12796739 | import pandas as pd
from crawler import MyDict
class MyEnglishDict(MyDict):
def __init__(self, url):
super(MyEnglishDict, self).__init__(url)
def lookup(self, word):
output = {}
raw_text = self.get_web_result(self.url, word)
phonetic_symbols = raw_text.find(name='ul', class_='Mean_symbols__5dQX7')
if phonetic_symbols is None:
return None
phonetic_symbols = phonetic_symbols.find_all('li')
if len(phonetic_symbols) < 2:
return None
phonetic_symbols_text = [x for x in phonetic_symbols[1].strings]
output['phonetic_symbol'] = phonetic_symbols_text[1]
print(output['phonetic_symbol'])
meanings = raw_text.find(name='ul', class_='Mean_part__1RA2V').find_all('li')
if meanings is None:
return None
definitions = []
for m in meanings:
lexical_category = m.find('i').string
raw_definitions = m.find_all('span')
sub_definitions = [lexical_category]
for d in raw_definitions:
sub_definitions.append(d.text)
definitions.append(' '.join(sub_definitions))
output['definitions'] = '\n'.join(definitions)
print(output['definitions'])
return output
if __name__ == '__main__':
df = pd.DataFrame(columns=["Word", "Audio", "Meaning", "Example", "Source"])
dictionary = MyEnglishDict("http://www.iciba.com/word?w=")
while True:
print("//--------------------------------------")
unknown_word = input("请输入查询的单词/输入‘886’离开:")
if unknown_word == '886':
empty = input("将要替代上次记录的结果,按回车继续...")
df.to_csv('./save_recode_english.csv', index=False, header=False, encoding='utf-8_sig')
break
result = dictionary.lookup(unknown_word)
if result is None:
print("找不到单词TAT")
continue
sentence = input("输入例句/输入'N'不保存:")
if sentence != 'N':
source = input("输入例句来源(可选):")
df = df.append([{'Word': unknown_word, "Audio": result['phonetic_symbol'], "Meaning": result['definitions'], "Example": sentence, "Source": source}], ignore_index=True)
| 3.203125 | 3 |
weatherstation/misc/utils.py | CombatMage/weatherstation_py | 0 | 12796740 | <filename>weatherstation/misc/utils.py
"""utils for setting locale and getting well formated time"""
import os
import platform
import locale
import time
def read_api_key(path):
"""read api key from given path"""
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError("no key found at given path: " + path)
with open(path) as f:
return f.readline().strip()
def set_locale_de():
"""set the current locale to German or de_DE.utf8 (depending on the os)"""
try:
if platform.system() == "Windows":
locale.setlocale(locale.LC_ALL, "German")
else:
locale.setlocale(locale.LC_ALL, "de_DE.utf8")
except locale.Error:
pass
def get_time_human_readable():
"""
    returns a well-formatted time string.
for example: Donnerstag, 21:00
"""
return time.strftime("%A, %H:%M")
| 3.09375 | 3 |
cyllene/g_graphclass.py | liuzhengqi1996/math452 | 3 | 12796741 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.axislines import SubplotZero
import numpy as np
import cyllene.f_functionclass as f_funct
import sympy as sp
'''
A lot of problems need to be resolved:
1) Can we keep a record of the graphs graphed? This can be done by just keeping the numpy arrays.
2) We need to be able to deal with poles of functions (for example 1/x at x = 0, 1/(x^2-1) at x = -1, 1, etc.)
'''
class graph():
def __init__(self):
self.fig = plt.figure(1)
self.ax = SubplotZero(self.fig,111)
self.fig.add_subplot(self.ax)
for direction in ["xzero","yzero"]:
self.ax.axis[direction].set_axisline_style("-|>")
self.ax.axis[direction].set_visible(True)
for direction in ["left","right","bottom","top"]:
self.ax.axis[direction].set_visible(False)
def make_graph(self, f):
I = f.behaviour("largest interval")
ps = float(I.args[0])
pe = float(I.args[1])
t = np.arange(ps, pe, 0.01)
self.ax.plot(t, f.eval_np(t))
def make_graphs(self, *functions,Interval=None):
if(Interval == None):
f = functions[0]
I = f.behaviour("largest interval")
l,r = float(I.args[0]), float(I.args[1])
for f in functions:
I = f.behaviour("largest interval")
l,r = min(l,float(I.args[0])), max(r,float(I.args[1]))
else:
l,r = float(Interval.args[0]), float(Interval.args[1])
self.Interval = sp.Interval(l,r)
t = np.arange(l,r,.01)
for f in functions:
self.ax.plot(t,f.eval_np(t))
def make_secent(self,f,x1,x2):
I = f.behaviour("largest interval")
ps = float(I.args[0])
pe = float(I.args[1])
t = np.arange(ps, pe, 0.01)
sec = f.secent_line(x1,x2)
self.ax.plot(t, sec.eval_np(t))
self.plot_point(x1, sec.eval_np(x1))
self.plot_point(x2,sec.eval_np(x2))
def make_tangent(self,f,x):
I = f.behaviour("largest interval")
ps = float(I.args[0])
pe = float(I.args[1])
t = np.arange(ps, pe, 0.01)
tan = f.tangent_line(x)
self.ax.plot(t, tan.eval_np(t))
self.plot_point(x, tan.eval_np(x))
def plot_point(self, x, y):
self.ax.plot(np.array([x]), np.array([y]), 'ro')
    def zoom(self, f, I):
        # zoom both axes so that f is shown over the interval I
        self.zoom_x(I)
        self.zoom_y(f.range(I))
def zoom_x(self,I):
ps = float(I.args[0])
pe = float(I.args[1])
self.ax.set_xlim(ps,pe)
def zoom_y(self,I):
ps = float(I.args[0])
pe = float(I.args[1])
self.ax.set_ylim(ps,pe)
def show(self):
return self.fig | 3.046875 | 3 |
ex055.py | PedroHPAlmeida/exercicios-Python-CEV | 0 | 12796742 | <reponame>PedroHPAlmeida/exercicios-Python-CEV
# input & processing
for i in range(0, 5):
    peso = float(input(f'Peso da {i+1}ª pessoa: '))
if i == 0:
maior = menor = peso
else:
if peso > maior:
maior = peso
if peso < menor:
menor = peso
# output
print(f'O maior peso lido foi de {maior:.2f}Kg')
print(f'O menor peso lido foi de {menor:.2f}Kg')
| 3.78125 | 4 |
src/denzel/app/logic/pipeline.py | eliorc/denzel | 17 | 12796743 | <filename>src/denzel/app/logic/pipeline.py
# -------- Handled by api container --------
def verify_input(json_data):
"""
Verifies the validity of an API request content
:param json_data: Parsed JSON accepted from API call
:type json_data: dict
    :return: Data for the process function
"""
    # callback_uri is needed to send the responses to
if 'callback_uri' not in json_data:
raise ValueError('callback_uri not supplied')
return json_data
# -------- Handled by denzel container --------
def load_model():
"""
Load model and its assets to memory
:return: Model, will be used by the predict and process functions
"""
return # return the loaded model object
def process(model, json_data):
"""
Process the json_data passed from verify_input to model ready data
:param model: Loaded object from load_model function
:param json_data: Data from the verify_input function
:return: Model ready data
"""
# return model ready data
return
def predict(model, data):
"""
Predicts and prepares the answer for the API-caller
:param model: Loaded object from load_model function
:param data: Data from process function
:return: Response to API-caller
:rtype: dict
"""
# return a dictionary that will be parsed to JSON and sent back to API-caller
return {}
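# Illustrative end-to-end flow (a sketch with hypothetical values, assuming the
# hooks above have been filled in for a concrete model):
#   model = load_model()
#   json_data = verify_input({'callback_uri': 'http://example.com/callback'})
#   data = process(model, json_data)
#   response = predict(model, data)  # dict sent back to the API caller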
| 2.703125 | 3 |
main.py | CarlFredriksson/sentiment_classification | 0 | 12796744 | import sc_utils
import model_factory
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
INPUT_LENGTH = 100
# Prepare data
X_train, Y_train, X_test, Y_test = sc_utils.load_data()
X_train, Y_train, X_val, Y_val, X_test, Y_test, tokenizer = sc_utils.preprocess_data(X_train, Y_train, X_test, Y_test, INPUT_LENGTH)
embedding_matrix = sc_utils.create_embedding_matrix(tokenizer)
print("X_train.shape: " + str(X_train.shape))
print("Y_train.shape: " + str(Y_train.shape))
print("X_val.shape: " + str(X_val.shape))
print("Y_val.shape: " + str(Y_val.shape))
print("X_test.shape: " + str(X_test.shape))
print("Y_test.shape: " + str(Y_test.shape))
print("embedding_matrix.shape: " + str(embedding_matrix.shape))
# Create model
#model = model_factory.create_baseline_model(embedding_matrix, INPUT_LENGTH)
model = model_factory.create_rnn_model(embedding_matrix, INPUT_LENGTH)
#model = model_factory.create_bidir_rnn_model(embedding_matrix, INPUT_LENGTH)
#model = model_factory.create_train_emb_rnn_model(embedding_matrix, INPUT_LENGTH)
model.summary()
# Train model
model.fit(X_train, Y_train, batch_size=200, epochs=30)
# Evaluate model on validation set
val_loss, val_accuracy = model.evaluate(X_val, Y_val, verbose=0)
print("Accuracy on validation set: " + str(val_accuracy * 100) + "%")
# Evaluate model on test set
test_loss, test_accuracy = model.evaluate(X_test, Y_test, verbose=0)
print("Accuracy on test set: " + str(test_accuracy * 100) + "%")
# Test model on my own texts
reviews = [
"This movie is bad. I don't like it it all. It's terrible.",
"I love this movie. I've seen it many times and it's still awesome.",
"I don't think this movie is as bad as most people say. It's actually pretty good."
]
print("Testing model on my own texts:")
print(reviews)
reviews = tokenizer.texts_to_sequences(reviews)
reviews = pad_sequences(reviews, maxlen=INPUT_LENGTH, padding="post")
reviews = np.array(reviews)
pred = model.predict(reviews)
print(pred)
print("The model predicts:")
sentiment_str = "Negative" if pred[0][0] < 0.5 else "Positive"
print(sentiment_str + " on the first text")
sentiment_str = "Negative" if pred[1][0] < 0.5 else "Positive"
print(sentiment_str + " on the second text")
sentiment_str = "Negative" if pred[2][0] < 0.5 else "Positive"
print(sentiment_str + " on the third text")
| 2.8125 | 3 |
snakefmt/__main__.py | jeremiahlewis-vw/snakefmt | 83 | 12796745 | <filename>snakefmt/__main__.py
import sys
from snakefmt import snakefmt
if __name__ == "__main__":
sys.exit(snakefmt.main())
| 1.65625 | 2 |
reddit2telegram/new_channel.py | soulofrubber/reddit2telegram | 187 | 12796746 | #encoding:utf-8
import os
import utils.channels_stuff
def run_script(channel):
os.system('python supplier.py --sub ' + channel.lower())
def med_fashioned_way():
subreddit_name = input('Subreddit name: ')
channel_name = input('Channel name: ')
tags = input('#Tags #in #that #way: ')
print('Submodule is created.')
utils.channels_stuff.set_new_channel(channel_name, subreddit=subreddit_name, tags=tags.lower())
print(channel_name.lower())
print('Run the bot for the first time.')
run_script(channel_name)
print('Done.')
if __name__ == '__main__':
med_fashioned_way()
| 2.515625 | 3 |
paper_code/distributed_evolution/policies/core.py | adam-katona/QualityEvolvabilityES | 1 | 12796747 | #Copyright (c) 2019 Uber Technologies, Inc.
#
#Licensed under the Uber Non-Commercial License (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at the root directory of this project.
#
#See the License for the specific language governing permissions and
#limitations under the License.
import pickle
class Policy:
needs_stats = False
def seed(self, seed):
pass
def act(self, states):
raise NotImplementedError()
def reset(self):
pass
def supp_fitness(self): # pylint: disable=no-self-use
return 0.0
def set_theta(self, theta):
raise NotImplementedError()
def _serialize(self, *args, **kwargs): # pylint: disable=no-self-use
frame = pickle.dumps((args, kwargs))
return [frame]
@classmethod
def deserialize(cls, frames):
args, kwargs = pickle.loads(frames[0])
return cls(*args, **kwargs)
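# Illustrative sketch (not from the original repo): a minimal subclass showing
# how the Policy interface above is typically satisfied. The class name, the
# single-scalar parameterisation and the serialize helper are assumptions made
# purely for demonstration.
class _ConstantPolicy(Policy):
    def __init__(self, theta=0.0):
        self.theta = theta
    def act(self, states):
        # one (dummy) constant action per state
        return [self.theta for _ in states]
    def set_theta(self, theta):
        self.theta = theta
    def serialize(self):
        # reuses the pickle helper; Policy.deserialize(frames) rebuilds the
        # object as cls(theta)
        return self._serialize(self.theta)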
| 2.078125 | 2 |
client/scripts/assets/generator_strings.py | thefstock/FirstockPy | 1 | 12796748 | DATASOURCE = """
\"\"\"
Datasource for handling {module} operations
\"\"\"
from ...utils.datasources import NorenRestDataSource
from . import endpoints
from .models import *
class {classname}DataSource(NorenRestDataSource):
\"\"\"
Datasource for handling {module} operations
\"\"\"
pass
"""
ENDPOINTS = """
\"\"\"
API endpoint paths stored as constants
\"\"\"
# add your endpoints here
"""
INIT = """
\"\"\"
The {module} module
\"\"\"
from .datasource import *
from .models import *
"""
MODELS_INIT = """
\"\"\"
The request and response models for {module} module
\"\"\"
# import your models here
"""
MODEL = """
\"\"\"
Request and response models for {name} request
\"\"\"
from typing import Optional
from pydantic import BaseModel
from datetime import datetime
from ....common.enums import ResponseStatus
from ....utils.decoders import build_loader, datetime_decoder
__all__ = ['{classname}RequestModel', '{classname}ResponseModel']
class {classname}RequestModel(BaseModel):
\"\"\"
The request model for {name} endpoint
\"\"\"
uid: str
\"\"\"The user id of the login user\"\"\"
class {classname}ResponseModel(BaseModel):
\"\"\"
The response model for {name} endpoint
\"\"\"
stat: ResponseStatus
\"\"\"The {name} success or failure status\"\"\"
request_time: Optional[datetime]
\"\"\"It will be present only on successful response.\"\"\"
emsg: Optional[str]
\"\"\"Error message if the request failed\"\"\"
class Config:
\"\"\"model configuration\"\"\"
json_loads = build_loader({{
\"request_time\": datetime_decoder()
}})
"""
METHOD = """
def {name}(self, model: {model}RequestModel, key: str = None) -> {model}ResponseModel:
\"\"\"
{description}
Args:
model ({model}RequestModel): The data to be send as {model}RequestModel.
key (str, optional): The key obtained on login success. Uses the token in the state if not passed explicitly.
Returns:
{model}ResponseModel: The response as {model}ResponseModel.
\"\"\"
response_json = self._run_request(model, endpoints.{endpoint}, key)
# convert the request to response model
return {model}ResponseModel.parse_raw(response_json)
"""
TEST_SUITE = """
import os
import json
import unittest
from unittest.mock import MagicMock
from py_client import Client, ResponseStatus
from py_client.models import LoginResponseModel
from py_client.modules.{module}.models import *
from py_client.modules.{module} import endpoints
from .common import create_login_model
from .mock import mock_post
class {classname}(unittest.TestCase):
\"\"\"
Test {name} module
\"\"\"
def setUp(self) -> None:
self.client = Client(os.getenv('API_URL'), os.getenv('SOCKET_URL'))
# mock methods
self.post_mock = MagicMock(wraps=mock_post)
self.client.{module}.post = self.post_mock
self.client.users.login = MagicMock(return_value=LoginResponseModel(susertoken='<PASSWORD>'))
# login
self.credentials = create_login_model()
self.token = self.client.login(self.credentials).susertoken
assert self.token is not None
"""
TEST_CASE = """
def test_{name}(self):
model = {model}RequestModel(...)
response = self.client.{module}.{name}(model)
with self.subTest('request should be called with proper data'):
expected_data = {{ ... }}
expected_body = f'jData={{json.dumps(expected_data)}}&jKey={{self.token}}'
self.post_mock.assert_called_with(endpoints.{endpoint}, expected_body)
with self.subTest('response should be parsed properly'):
assert response is not None
assert response.stat is not None
if response.stat == ResponseStatus.OK:
# check
pass
else:
assert response.emsg is not None
assert type(response.emsg) == str
""" | 2.421875 | 2 |
saltcontainers/models.py | dincamihai/pytest-containers | 7 | 12796749 | <filename>saltcontainers/models.py
import re
import json
import yaml
import tarfile
import logging
import six
import subprocess
from .utils import retry, load_json
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ContainerModel(dict):
def _get_container_pid(self, pid):
container_pid = None
if pid:
with open('/proc/{0}/status'.format(pid), 'rb') as _file:
contents = _file.read()
try:
container_pid = re.search("NSpid.+{0}.+".format(pid), contents).group().split('\t')[-1]
except:
logger.warning("Unable to obtain container pid from {0}".format(pid))
return container_pid
def kill(self, cmd_exec_id):
pid = self['config']['client'].exec_inspect(cmd_exec_id).get('Pid', None)
container_pid = self._get_container_pid(pid)
self.run('kill -9 {0}'.format(container_pid))
@retry()
def run(self, command, stream=False):
return self['config']['client'].run(
self['config']['name'], command, stream=stream)
@retry()
def check_run(self, command, stream=False):
cmd_exec = self['config']['client'].exec_create(self['config']['name'], cmd=command, stderr=False)
return cmd_exec['Id'], self['config']['client'].exec_start(cmd_exec['Id'], stream=stream)
def get_suse_release(self):
info = dict()
content = self.run('cat /etc/SuSE-release')
for line in content.split('\n'):
match = re.match('([a-zA-Z]+)\s*=\s*(\d+)', line)
if match:
info.update([[match.group(1), int(match.group(2))]])
return info
def get_os_release(self):
content = self.run('cat /etc/os-release')
return dict(
filter(
lambda it: len(it) == 2,
[it.replace('"', '').strip().split('=') for it in content.split('\n')]
)
)
def connect(self):
for item in self['config']['networking_config']['EndpointsConfig'].keys():
self['config']['client'].connect_container_to_network(self['config']['name'], item)
def disconnect(self):
for item in self['config']['networking_config']['EndpointsConfig'].keys():
self['config']['client'].disconnect_container_from_network(self['config']['name'], item)
def remove(self):
self['config']['client'].stop(self['config']['name'])
self['config']['client'].remove_container(
self['config']['name'], v=True)
class BaseModel(dict):
def salt_call(self, salt_command, *args):
command = "salt-call {0} {1} --output=json -l quiet".format(
salt_command, ' '.join(args)
)
raw = self['container'].run(command)
try:
out = json.loads(raw or '{}')
except ValueError:
raise Exception(raw)
return out.get('local')
def start(self):
self['container'].run(self['cmd'])
class MasterModel(BaseModel):
def salt_key_raw(self, *args):
command = ['salt-key']
command.extend(args)
command.append('--output=json')
return self['container'].run(' '.join(command))
def salt_key(self, *args):
return json.loads(self.salt_key_raw(*args))
def salt_key_accept(self, minion_id):
return self.salt_key_raw('-a', minion_id, '-y')
def salt(self, minion_id, salt_command, *args):
command = "salt {0} {1} --output=json -l quiet".format(
minion_id, salt_command, ' '.join(args))
data = self['container'].run(command)
return load_json(data)
def salt_run(self, command, *args):
docker_command = "salt-run {0} {1} --output=json -l quiet".format(
command, ' '.join(args))
data = self['container'].run(docker_command)
return load_json(data)
def salt_ssh(self, target, cmd):
roster = self['container']['config']['salt_config']['roster']
target_id = target['config']['name']
SSH = "salt-ssh -l quiet -i --out json --key-deploy --passwd {0} {1} {{0}}".format(
target['ssh_config']['password'], target_id)
data = self['container'].run(SSH.format(cmd))
return load_json(data)[target_id]
def update_roster(self):
roster = self['container']['config']['salt_config']['root'] / 'roster'
content = {}
for item in self['container']['config']['salt_config']['roster']:
content[item['config']['name']] = {
"host": item["ip"],
"user": item['ssh_config']['user'],
"password": item['ssh_config']['password']
}
roster.write(yaml.safe_dump(content, default_flow_style=False))
self['container']['config']['client'].copy_to(self, roster.strpath, '/etc/salt/')
class MinionModel(BaseModel):
def stop(self):
self['container'].run('pkill salt-minion')
| 1.90625 | 2 |
chronometer/test_rx.py | andrewgryan/bokeh-playground | 3 | 12796750 | import unittest
import rx
class History(rx.Stream):
def __init__(self):
self.events = []
super().__init__()
def notify(self, value):
self.events.append(value)
class TestRx(unittest.TestCase):
def test_combine_streams(self):
clicks = rx.Stream()
indices = rx.Stream()
result = rx.scan_reset(
clicks,
accumulator=lambda a, i: a + i,
reset=indices)
history = History()
result.register(history)
indices.emit(10)
indices.emit(20)
clicks.emit(+0)
indices.emit(30)
clicks.emit(+1)
result = history.events
expect = [20, 31]
self.assertEqual(expect, result)
def test_combine_streams_with_seed_values(self):
clicks = rx.Stream()
indices = rx.Stream()
result = rx.scan_reset_emit_seed(
clicks, lambda a, i: a + i,
reset=indices)
history = History()
result.register(history)
indices.emit(10)
indices.emit(20)
indices.emit(30)
result = history.events
expect = [10, 20, 30]
self.assertEqual(expect, result)
def test_general_case(self):
clicks = rx.Stream()
indices = rx.Stream()
result = rx.scan_reset_emit_seed(
clicks, lambda a, i: a + i,
reset=indices)
history = History()
result.register(history)
indices.emit(10)
clicks.emit(1)
clicks.emit(-1)
indices.emit(20)
clicks.emit(1)
indices.emit(30)
clicks.emit(-1)
result = history.events
expect = [10, 11, 10, 20, 21, 30, 29]
self.assertEqual(expect, result)
| 2.84375 | 3 |