id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (1 distinct value)
---|---|---|
11205931
|
__author__ = '<NAME>'
__license__ = 'MIT'
__version__ = '0.1'
__email__ = 'mail 64 cacodaemon 46 de'
from .OnSelectionModifiedListener import OnSelectionModifiedListener
from .WindowHelper import WindowHelper
from .Utils import Utils
|
StarcoderdataPython
|
8075138
|
<filename>exercises/networking_v2/roles/ansible-network.network-engine/lib/network_engine/plugins/__init__.py
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.loader import PluginLoader
template_loader = PluginLoader(
'TemplateEngine',
'network_engine.plugins.template',
None,
'template_plugins',
required_base_class='TemplateBase'
)
parser_loader = PluginLoader(
'ParserEngine',
'network_engine.plugins.parser',
None,
'parser_plugins',
# required_base_class='ParserBase'
)
|
StarcoderdataPython
|
6595979
|
"""
--- Day 19: Monster Messages ---
https://adventofcode.com/2020/day/19
"""
from aocd import data
import lark
def solve(rules, messages):
rules = rules.translate(str.maketrans("0123456789", "abcdefghij"))
parser = lark.Lark(rules, start="a")
result = 0
for message in messages.splitlines():
try:
parser.parse(message)
except lark.LarkError:
pass
else:
result += 1
return result
rules, messages = data.split("\n\n")
print("part a:", solve(rules, messages))
rules = rules.replace("8: 42", "8: 42 | 42 8").replace("11: 42 31", "11: 42 31 | 42 11 31")
print("part b:", solve(rules, messages))
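# Illustrative aside, not part of the original solution: the digit-to-letter
# translation above turns numeric AoC rule ids into names Lark accepts, so the
# puzzle rules can be fed to lark.Lark directly. A toy version of the same idea:
import lark
toy_rules = '0: 1 2\n1: "a"\n2: "b"'
toy_rules = toy_rules.translate(str.maketrans("0123456789", "abcdefghij"))
toy_parser = lark.Lark(toy_rules, start="a")
toy_parser.parse("ab")  # succeeds; a non-matching message would raise lark.LarkError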
|
StarcoderdataPython
|
9742831
|
<gh_stars>1-10
import argparse
import itertools
import numpy as np
from collections import defaultdict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculate scores for candidate cooccurrences using the results of an SVD decomposition')
parser.add_argument('--svdU',required=True,type=str,help='U component of SVD decomposition')
parser.add_argument('--svdV',required=True,type=str,help='V component of SVD decomposition')
parser.add_argument('--svdSV',required=True,type=str,help='SV component of SVD decomposition')
parser.add_argument('--relationsToIgnore',type=str,help='Relations to ignore in count')
parser.add_argument('--sv',required=True,type=int,help='Number of singular values to use from SVD')
parser.add_argument('--idsFile',required=True,type=str,help='File containing IDs to iterate over')
parser.add_argument('--threshold',required=True,type=float,help='Only count scores that are greater than this threshold')
parser.add_argument('--outFile',required=True,type=str,help='Path to output file')
args = parser.parse_args()
threshold = None
if args.threshold:
threshold = args.threshold
print "Loading SVD U"
svdU = np.loadtxt(args.svdU)
svdU_index = map(int,svdU[:,0].tolist())
svdU_lookup = { x:i for i,x in enumerate(svdU_index) }
svdU = svdU[:,1:]
print "len(svdU_index) = ", len(svdU_index)
print "svdU.shape = ", svdU.shape
print "Loading SVD V"
svdV = np.loadtxt(args.svdV)
svdV_index = map(int,svdV[:,0].tolist())
svdV_lookup = { x:i for i,x in enumerate(svdV_index) }
svdV = svdV[:,1:]
print "len(svdV_index) = ", len(svdV_index)
print "svdV.shape = ", svdV.shape
print "Loading SVD SV"
svdSV = np.loadtxt(args.svdSV, comments="%")
print "svdSV.shape = ", svdSV.shape
print "Truncating data..."
svdU = svdU[:,:args.sv]
svdV = svdV[:,:args.sv]
svdSV = svdSV[:args.sv]
print "svdU.shape = ", svdU.shape
print "svdV.shape = ", svdV.shape
print "svdSV.shape = ", svdSV.shape
print "Pre-multiplying svdV by svdSV"
svdV = np.dot(np.diag(svdSV),svdV.T)
print "svdV.shape = ", svdV.shape
relationsToIgnore = defaultdict(set)
if args.relationsToIgnore:
print "Loading relations to ignore..."
with open(args.relationsToIgnore) as f:
for line in f:
x,y = line.strip().split()[0:2]
x,y = int(x),int(y)
#relationsToScore.append((x,y))
relationsToIgnore[x].add(y)
relationsToIgnore[y].add(x)
translatedRelationsToIgnore_U = {}
translatedRelationsToIgnore_V = {}
for k in relationsToIgnore.keys():
relationsToIgnore[k] = sorted(list(relationsToIgnore[k]))
translatedRelationsToIgnore_U[k] = [ svdU_lookup[x] for x in relationsToIgnore[k] ]
translatedRelationsToIgnore_V[k] = [ svdV_lookup[x] for x in relationsToIgnore[k] ]
with open(args.idsFile) as f:
ids = [ int(line.strip()) for line in f ]
print "Calculating scores..."
predCount = 0
for row in ids:
# We only reconstruct one triangle of the matrix (where x<y)
# Hence the min/max functions
#xIndex = svdU_lookup[min(x,y)]
#xIndex = svdU_lookup[row]
yIndex = svdV_lookup[row]
#scores = np.dot(svdU[xIndex,:],svdV)
scores = np.dot(svdU,svdV[:,yIndex])
mask = np.zeros((len(ids)))
mask[range(svdU_lookup[row])] = 1
mask[translatedRelationsToIgnore_U[row]] = 0
maskedScores = scores * mask
tmpCount = (maskedScores > threshold).sum()
#if tmpCount > 0:
# where = np.where(maskedScores > threshold)
# print scores[where]
# print row,where
predCount += tmpCount
print "Scoring complete"
with open(args.outFile,'w') as outF:
outF.write("%d\n" % predCount)
print "Written to %s" % args.outFile
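# Illustrative sketch, separate from the script above (shapes are made up): each
# score is an entry of the truncated reconstruction U_k * diag(S_k) * V_k^T, which
# the loop computes one column at a time via np.dot(svdU, svdV[:, yIndex]) after
# svdV has been pre-multiplied by diag(svdSV).
import numpy as np
U = np.random.rand(5, 3)        # stand-in for the truncated svdU
S = np.array([3.0, 2.0, 1.0])   # stand-in for the truncated svdSV
V = np.random.rand(4, 3)        # stand-in for the truncated svdV (before transposing)
SVt = np.dot(np.diag(S), V.T)   # same pre-multiplication as in the script
full = np.dot(U, SVt)           # full 5x4 score matrix
assert np.allclose(np.dot(U, SVt[:, 2]), full[:, 2])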
|
StarcoderdataPython
|
5119243
|
from helper import *
try:
from flask import Flask
except ImportError as __ex:
print("Install Flask. Exception:", str(__ex))
app = Flask(__name__)
@app.route("/")
def mainpage():
return readFiletoMemory("show.html")
<EMAIL>("/0.png")
#def getimg0():
# return captureScreen()
<EMAIL>("/1.png")
#def getimg1():
# return captureScreen()
@app.route("/scr/<filename>")
def getscreen(filename):
return captureScreen()
# main
def main():
app.run(debug=True)
if __name__=="__main__":
main()
|
StarcoderdataPython
|
3257045
|
<gh_stars>10-100
"""
Tests for the :mod:`regression_tests.tools.decompiler_test_settings` module.
"""
import unittest
from regression_tests.test_settings import TestSettings
from regression_tests.tools.decompiler_arguments import DecompilerArguments
from regression_tests.tools.decompiler_runner import DecompilerRunner
from regression_tests.tools.decompiler_test import DecompilerTest
from regression_tests.tools.decompiler_test_settings import DecompilerTestSettings
class DecompilerTestSettingsTests(unittest.TestCase):
"""Tests for `DecompilerTestSettings`."""
def test_test_settings_creates_decompilation_test_settings_when_tool_is_not_specified(self):
settings = TestSettings(input='file.exe')
self.assertIsInstance(settings, DecompilerTestSettings)
def test_test_settings_creates_decompilation_test_settings_when_tool_is_specified(self):
settings = TestSettings(tool=DecompilerTestSettings.TOOL, input='file.exe')
self.assertIsInstance(settings, DecompilerTestSettings)
def test_tool_returns_correct_value(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual(settings.tool, DecompilerTestSettings.TOOL)
def test_input_passed_to_constructor_is_accessible(self):
INPUT = 'file.exe'
settings = DecompilerTestSettings(input=INPUT)
self.assertEqual(INPUT, settings.input)
def test_args_passed_to_constructor_are_accessible(self):
ARGS = '--keep-unreachable-funcs'
settings = DecompilerTestSettings(input='file.exe', args=ARGS)
self.assertEqual(ARGS, settings.args)
def test_timeout_passed_to_constructor_is_accessible(self):
TIMEOUT = 100
settings = DecompilerTestSettings(input='file.exe', timeout=TIMEOUT)
self.assertEqual(settings.timeout, TIMEOUT)
def test_pdb_passed_to_constructor_is_accessible(self):
PDB = 'file.pdb'
settings = DecompilerTestSettings(input='file.exe', pdb=PDB)
self.assertEqual(PDB, settings.pdb)
def test_pdb_as_list_returns_empty_list_if_pdb_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe', pdb=None)
self.assertEqual([], settings.pdb_as_list)
def test_pdb_as_list_returns_pdb_when_pdb_is_list(self):
PDB = ['file1.pdb', 'file2.pdb']
settings = DecompilerTestSettings(input='file.exe', pdb=PDB)
self.assertEqual(PDB, settings.pdb_as_list)
def test_pdb_as_list_returns_list_when_pdb_is_single_file(self):
PDB = 'file.pdb'
settings = DecompilerTestSettings(input='file.exe', pdb=PDB)
self.assertEqual([PDB], settings.pdb_as_list)
def test_has_multiple_pdbs_returns_true_when_there_are_multiple_pdbs(self):
settings = DecompilerTestSettings(input='file.exe', pdb=['file1.pdb', 'file2.pdb'])
self.assertTrue(settings.has_multiple_pdbs())
def test_has_multiple_pdbs_returns_false_when_there_is_just_single_pdb(self):
settings = DecompilerTestSettings(input='file.exe', pdb='file.pdb')
self.assertFalse(settings.has_multiple_pdbs())
def test_duplicate_pdbs_are_merged(self):
settings = DecompilerTestSettings(input='file.exe', pdb=['file.pdb', 'file.pdb'])
self.assertEqual(settings.pdb, 'file.pdb')
settings = DecompilerTestSettings(
input='file.exe',
pdb=['file.pdb', 'other.pdb', 'file.pdb']
)
self.assertEqual(settings.pdb, ['file.pdb', 'other.pdb'])
def test_config_passed_to_constructor_is_accessible(self):
CONFIG = 'file.json'
settings = DecompilerTestSettings(input='file.exe', config=CONFIG)
self.assertEqual(CONFIG, settings.config)
def test_config_as_list_returns_empty_list_if_config_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe', config=None)
self.assertEqual([], settings.config_as_list)
def test_config_as_list_returns_config_when_config_is_list(self):
CONFIG = ['file1.json', 'file2.json']
settings = DecompilerTestSettings(input='file.exe', config=CONFIG)
self.assertEqual(CONFIG, settings.config_as_list)
def test_config_as_list_returns_list_when_config_is_single_file(self):
CONFIG = 'file.json'
settings = DecompilerTestSettings(input='file.exe', config=CONFIG)
self.assertEqual([CONFIG], settings.config_as_list)
def test_has_multiple_configs_returns_true_when_there_are_multiple_configs(self):
settings = DecompilerTestSettings(input='file.exe', config=['file1.json', 'file2.json'])
self.assertTrue(settings.has_multiple_configs())
def test_has_multiple_configs_returns_false_when_there_is_just_single_config(self):
settings = DecompilerTestSettings(input='file.exe', config='file.json')
self.assertFalse(settings.has_multiple_configs())
def test_duplicate_configs_are_merged(self):
settings = DecompilerTestSettings(input='file.exe', config=['file.json', 'file.json'])
self.assertEqual(settings.config, 'file.json')
settings = DecompilerTestSettings(
input='file.exe',
config=['file.json', 'other.json', 'file.json']
)
self.assertEqual(settings.config, ['file.json', 'other.json'])
def test_static_code_archive_passed_to_constructor_is_accessible(self):
STATIC_CODE_ARCHIVE = 'file.a'
settings = DecompilerTestSettings(input='file.exe', static_code_archive=STATIC_CODE_ARCHIVE)
self.assertEqual(STATIC_CODE_ARCHIVE, settings.static_code_archive)
def test_static_code_archive_as_list_returns_empty_list_if_static_code_archive_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe', static_code_archive=None)
self.assertEqual([], settings.static_code_archive_as_list)
def test_static_code_archive_as_list_returns_static_code_archive_when_static_code_archive_is_list(self):
STATIC_CODE_ARCHIVE = ['file1.a', 'file2.a']
settings = DecompilerTestSettings(input='file.exe', static_code_archive=STATIC_CODE_ARCHIVE)
self.assertEqual(STATIC_CODE_ARCHIVE, settings.static_code_archive_as_list)
def test_static_code_archive_as_list_returns_list_when_static_code_archive_is_single_file(self):
STATIC_CODE_ARCHIVE = 'file.a'
settings = DecompilerTestSettings(input='file.exe', static_code_archive=STATIC_CODE_ARCHIVE)
self.assertEqual([STATIC_CODE_ARCHIVE], settings.static_code_archive_as_list)
def test_has_multiple_static_code_archives_returns_true_when_there_are_multiple_static_code_archives(self):
settings = DecompilerTestSettings(input='file.exe', static_code_archive=['file1.a', 'file2.a'])
self.assertTrue(settings.has_multiple_static_code_archives())
def test_has_multiple_static_code_archives_returns_false_when_there_is_just_single_static_code_archive(self):
settings = DecompilerTestSettings(input='file.exe', static_code_archive='file.a')
self.assertFalse(settings.has_multiple_static_code_archives())
def test_duplicate_static_code_archives_are_merged(self):
settings = DecompilerTestSettings(input='file.exe', static_code_archive=['file.a', 'file.a'])
self.assertEqual(settings.static_code_archive, 'file.a')
settings = DecompilerTestSettings(
input='file.exe',
static_code_archive=['file.a', 'other.a', 'file.a']
)
self.assertEqual(settings.static_code_archive, ['file.a', 'other.a'])
def test_static_code_sigfile_passed_to_constructor_is_accessible(self):
STATIC_CODE_SIGFILE = 'file.sig'
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile=STATIC_CODE_SIGFILE)
self.assertEqual(STATIC_CODE_SIGFILE, settings.static_code_sigfile)
def test_static_code_sigfile_as_list_returns_empty_list_if_static_code_sigfile_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile=None)
self.assertEqual([], settings.static_code_sigfile_as_list)
def test_static_code_sigfile_as_list_returns_static_code_sigfile_when_static_code_sigfile_is_list(self):
STATIC_CODE_SIGFILE = ['file1.sig', 'file2.sig']
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile=STATIC_CODE_SIGFILE)
self.assertEqual(STATIC_CODE_SIGFILE, settings.static_code_sigfile_as_list)
def test_static_code_sigfile_as_list_returns_list_when_static_code_sigfile_is_single_file(self):
STATIC_CODE_SIGFILE = 'file.sig'
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile=STATIC_CODE_SIGFILE)
self.assertEqual([STATIC_CODE_SIGFILE], settings.static_code_sigfile_as_list)
def test_has_multiple_static_code_sigfiles_returns_true_when_there_are_multiple_static_code_sigfiles(self):
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile=['file1.sig', 'file2.sig'])
self.assertTrue(settings.has_multiple_static_code_sigfiles())
def test_has_multiple_static_code_sigfiles_returns_false_when_there_is_just_single_static_code_sigfile(self):
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile='file.sig')
self.assertFalse(settings.has_multiple_static_code_sigfiles())
def test_duplicate_static_code_sigfiles_are_merged(self):
settings = DecompilerTestSettings(input='file.exe', static_code_sigfile=['file.sig', 'file.sig'])
self.assertEqual(settings.static_code_sigfile, 'file.sig')
settings = DecompilerTestSettings(
input='file.exe',
static_code_sigfile=['file.sig', 'other.sig', 'file.sig']
)
self.assertEqual(settings.static_code_sigfile, ['file.sig', 'other.sig'])
def test_arch_passed_to_constructor_is_accessible(self):
ARCH = 'x86'
settings = DecompilerTestSettings(input='file.exe', arch=ARCH)
self.assertEqual(ARCH, settings.arch)
def test_arch_as_list_returns_empty_list_if_arch_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual([], settings.arch_as_list)
def test_arch_as_list_returns_arch_when_arch_is_list(self):
ARCH = ['x86', 'arm']
settings = DecompilerTestSettings(input='file.exe', arch=ARCH)
self.assertEqual(ARCH, settings.arch_as_list)
def test_arch_as_list_returns_list_when_there_is_single_arch(self):
ARCH = 'x86'
settings = DecompilerTestSettings(input='file.exe', arch=ARCH)
self.assertEqual([ARCH], settings.arch_as_list)
def test_has_multiple_archs_returns_true_when_there_are_multiple_archs(self):
settings = DecompilerTestSettings(input='file.exe', arch=['x86', 'arm'])
self.assertTrue(settings.has_multiple_archs())
def test_has_multiple_archs_returns_false_when_there_is_just_single_arch(self):
settings = DecompilerTestSettings(input='file.exe', arch='x86')
self.assertFalse(settings.has_multiple_archs())
def test_duplicate_archs_are_merged(self):
settings = DecompilerTestSettings(input=['file.exe'], arch=['x86', 'x86'])
self.assertEqual(settings.arch, 'x86')
settings = DecompilerTestSettings(input=['file.exe'], arch=['x86', 'arm', 'x86'])
self.assertEqual(settings.arch, ['x86', 'arm'])
def test_mode_passed_to_constructor_is_accessible(self):
MODE = 'bin'
settings = DecompilerTestSettings(input='file.exe', mode=MODE)
self.assertEqual(MODE, settings.mode)
def test_mode_or_default_returns_mode_when_set(self):
mode = 'raw'
settings = DecompilerTestSettings(input='file.exe', mode=mode)
self.assertEqual(mode, settings.mode_or_default)
def test_mode_or_default_returns_default_mode_when_mode_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual(settings.mode_or_default, 'bin')
def test_mode_as_list_returns_empty_list_if_mode_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual([], settings.mode_as_list)
def test_mode_as_list_returns_mode_when_mode_is_list(self):
MODE = ['bin', 'raw']
settings = DecompilerTestSettings(input='file.exe', mode=MODE)
self.assertEqual(MODE, settings.mode_as_list)
def test_mode_as_list_returns_list_when_there_is_single_mode(self):
MODE = 'bin'
settings = DecompilerTestSettings(input='file.exe', mode=MODE)
self.assertEqual([MODE], settings.mode_as_list)
def test_has_multiple_modes_returns_true_when_there_are_multiple_modes(self):
settings = DecompilerTestSettings(input='file.exe', mode=['bin', 'raw'])
self.assertTrue(settings.has_multiple_modes())
def test_has_multiple_modes_returns_false_when_there_is_just_single_mode(self):
settings = DecompilerTestSettings(input='file.exe', mode='bin')
self.assertFalse(settings.has_multiple_modes())
def test_duplicate_modes_are_merged(self):
settings = DecompilerTestSettings(input=['file.exe'], mode=['bin', 'bin'])
self.assertEqual(settings.mode, 'bin')
def test_hll_passed_to_constructor_is_accessible(self):
HLL = 'c'
settings = DecompilerTestSettings(input='file.exe', hll=HLL)
self.assertEqual(HLL, settings.hll)
def test_hll_as_list_returns_empty_list_if_hll_is_not_set(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual([], settings.hll_as_list)
def test_hll_as_list_returns_hll_when_hll_is_list(self):
HLL = ['c', 'py']
settings = DecompilerTestSettings(input='file.exe', hll=HLL)
self.assertEqual(HLL, settings.hll_as_list)
def test_hll_as_list_returns_list_when_there_is_single_hll(self):
HLL = 'c'
settings = DecompilerTestSettings(input='file.exe', hll=HLL)
self.assertEqual([HLL], settings.hll_as_list)
def test_has_multiple_hlls_returns_true_when_there_are_multiple_hlls(self):
settings = DecompilerTestSettings(input='file.exe', hll=['c', 'py'])
self.assertTrue(settings.has_multiple_hlls())
def test_has_multiple_hlls_returns_false_when_there_is_just_single_hll(self):
settings = DecompilerTestSettings(input='file.exe', hll='c')
self.assertFalse(settings.has_multiple_hlls())
def test_duplicate_hlls_are_merged(self):
settings = DecompilerTestSettings(input=['file.exe'], hll=['c', 'c'])
self.assertEqual(settings.hll, 'c')
settings = DecompilerTestSettings(input=['file.exe'], hll=['c', 'py', 'c'])
self.assertEqual(settings.hll, ['c', 'py'])
def test_ar_index_passed_to_constructor_is_accessible(self):
AR_INDEX = 0
settings = DecompilerTestSettings(input='archive.a', ar_index=AR_INDEX)
self.assertEqual(AR_INDEX, settings.ar_index)
def test_ar_index_as_list_returns_empty_list_if_ar_index_is_not_set(self):
settings = DecompilerTestSettings(input='archive.a')
self.assertEqual([], settings.ar_index_as_list)
def test_ar_index_as_list_returns_ar_index_when_ar_index_is_list(self):
AR_INDEX = [0, 1]
settings = DecompilerTestSettings(input='archive.a', ar_index=AR_INDEX)
self.assertEqual(AR_INDEX, settings.ar_index_as_list)
def test_ar_index_as_list_returns_list_when_there_is_single_ar_index(self):
AR_INDEX = 0
settings = DecompilerTestSettings(input='archive.a', ar_index=AR_INDEX)
self.assertEqual([AR_INDEX], settings.ar_index_as_list)
def test_has_multiple_ar_indexes_returns_true_when_there_are_multiple_ar_indexes(self):
settings = DecompilerTestSettings(input='archive.a', ar_index=[0, 1])
self.assertTrue(settings.has_multiple_ar_indexes())
def test_has_multiple_ar_indexes_returns_false_when_there_is_just_single_ar_index(self):
settings = DecompilerTestSettings(input='archive.a', ar_index=0)
self.assertFalse(settings.has_multiple_ar_indexes())
def test_duplicate_ar_indexes_are_merged(self):
settings = DecompilerTestSettings(input=['archive.a'], ar_index=[0, 0])
self.assertEqual(settings.ar_index, 0)
settings = DecompilerTestSettings(input=['archive.a'], ar_index=[0, 1, 0])
self.assertEqual(settings.ar_index, [0, 1])
def test_ar_name_passed_to_constructor_is_accessible(self):
AR_NAME = 'file.o'
settings = DecompilerTestSettings(input='archive.a', ar_name=AR_NAME)
self.assertEqual(AR_NAME, settings.ar_name)
def test_ar_name_as_list_returns_empty_list_if_ar_name_is_not_set(self):
settings = DecompilerTestSettings(input='archive.a')
self.assertEqual([], settings.ar_name_as_list)
def test_ar_name_as_list_returns_ar_name_when_ar_name_is_list(self):
AR_NAME = ['file1.o', 'file2.o']
settings = DecompilerTestSettings(input='archive.a', ar_name=AR_NAME)
self.assertEqual(AR_NAME, settings.ar_name_as_list)
def test_ar_name_as_list_returns_list_when_there_is_single_ar_name(self):
AR_NAME = 'file.o'
settings = DecompilerTestSettings(input='archive.a', ar_name=AR_NAME)
self.assertEqual([AR_NAME], settings.ar_name_as_list)
def test_has_multiple_ar_names_returns_true_when_there_are_multiple_ar_names(self):
settings = DecompilerTestSettings(
input='archive.a',
ar_name=['file1.o', 'file2.o']
)
self.assertTrue(settings.has_multiple_ar_names())
def test_has_multiple_ar_names_returns_false_when_there_is_just_single_ar_name(self):
settings = DecompilerTestSettings(input='archive.a', ar_name='file.o')
self.assertFalse(settings.has_multiple_ar_names())
def test_duplicate_ar_names_are_merged(self):
settings = DecompilerTestSettings(
input=['archive.a'],
ar_name=['file.o', 'file.o']
)
self.assertEqual(settings.ar_name, 'file.o')
settings = DecompilerTestSettings(
input=['archive.a'],
ar_name=['file1.o', 'file2.o', 'file1.o']
)
self.assertEqual(settings.ar_name, ['file1.o', 'file2.o'])
def test_tool_arguments_class_returns_correct_value(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual(settings.tool_arguments_class, DecompilerArguments)
def test_tool_runner_class_returns_correct_value(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual(settings.tool_runner_class, DecompilerRunner)
def test_tool_test_class_returns_correct_value(self):
settings = DecompilerTestSettings(input='file.exe')
self.assertEqual(settings.tool_test_class, DecompilerTest)
|
StarcoderdataPython
|
6623172
|
import numpy as np
import sklearn as sk
import sklearn.model_selection
from rfc_worker import RFCWorker
from hb_optimizer import HBOptimizer
from metrics import compute_metrics
from koi_dataset import load_koi_dataset
# Set the LOCALHOST, PROJECT_NAME constants
LOCALHOST = '127.0.0.1'
PROJECT_NAME = 'exoplanet-detection'
# Set the parameters for hyperparameters optimization
eta = 3
min_budget = 8
max_budget = 216
n_iterations = 8
n_workers = 4
n_repetitions = 10
# Load the dataset
x_data, y_data = load_koi_dataset()
(n_samples, n_features) = x_data.shape
# Initialize the optimizer
optimizer = HBOptimizer(
LOCALHOST, PROJECT_NAME, RFCWorker,
eta, min_budget, max_budget, n_iterations
)
metrics = {
'precision': 0.0, 'recall': 0.0, 'f1': 0.0,
'confusion': [[0, 0], [0, 0]], 'importances': np.zeros(n_features)
}
# Repeat multiple times the test
for _ in range(n_repetitions):
# Split the dataset in train set and test set
x_train, x_test, y_train, y_test = sk.model_selection.train_test_split(
x_data, y_data, test_size=0.20, stratify=y_data
)
# Start the optimizer
optimizer.start()
# Run the optimizer
config = optimizer.run(n_workers, x_train, y_train)
# Build and train the best model
rfc = RFCWorker.build(config, max_budget)
rfc.fit(x_train, y_train)
# Compute some evaluation metrics
scores = compute_metrics(rfc, x_test, y_test)
for k in metrics:
metrics[k] = metrics[k] + scores[k]
# Close the optimizer
optimizer.close()
# Normalize the metrics
for k in metrics:
metrics[k] = metrics[k] / n_repetitions
# Print the metrics
print(metrics)
|
StarcoderdataPython
|
6437625
|
from collections import Counter
def main():
n, k = map(
int,
input().split(),
)
*c, = map(
int,
input().split(),
)
# counts of each value in the first window of size k
cnt = Counter(c[:k])
mx = len(cnt)
# slide the window forward one position at a time, keeping per-value counts up to date
for i in range(k, n):
cnt[c[i]] += 1
x = c[i - k]
cnt[x] -= 1
if cnt[x] == 0: cnt.pop(x)
mx = max(mx, len(cnt))
print(mx)
main()
|
StarcoderdataPython
|
11331458
|
<reponame>ajpmaclean/vtk-examples<gh_stars>10-100
#!/usr/bin/env python
import os
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersCore import vtkTubeFilter
from vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter
from vtkmodules.vtkFiltersModeling import (
vtkLinearExtrusionFilter,
vtkRibbonFilter
)
from vtkmodules.vtkIOLegacy import vtkPolyDataReader
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkFollower,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
from vtkmodules.vtkRenderingFreeType import vtkVectorText
def main():
colors = vtkNamedColors()
fileNames, useRibbons = get_program_parameters()
useTubes = not useRibbons
# Set up the stocks
renderers = list()
topRenderer = vtkRenderer()
bottomRenderer = vtkRenderer()
renderers.append(topRenderer)
renderers.append(bottomRenderer)
zPosition = 0.0
for fn in fileNames:
zPosition = AddStock(renderers, fn, os.path.basename((os.path.splitext(fn)[0])), zPosition, useTubes)
# Setup the render window and interactor.
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderers[0])
renderWindow.AddRenderer(renderers[1])
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderers[0].SetViewport(0.0, 0.4, 1.0, 1.0)
renderers[1].SetViewport(0.0, 0.0, 1.0, 0.4)
renderers[0].GetActiveCamera().SetViewAngle(5.0)
renderers[0].ResetCamera()
renderers[0].GetActiveCamera().Zoom(1.4)
renderers[0].ResetCameraClippingRange()
renderers[0].SetBackground(colors.GetColor3d("SteelBlue"))
renderers[1].GetActiveCamera().SetViewUp(0, 0, -1)
renderers[1].GetActiveCamera().SetPosition(0, 1, 0)
renderers[1].GetActiveCamera().SetViewAngle(5.0)
renderers[1].ResetCamera()
renderers[1].GetActiveCamera().Zoom(2.2)
renderers[1].ResetCameraClippingRange()
renderers[1].SetBackground(colors.GetColor3d("LightSteelBlue"))
renderWindow.SetSize(500, 800)
renderWindow.SetWindowName('Stocks')
renderWindow.Render()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'Two views from the stock visualization script.'
epilogue = '''
The top shows closing price over time; the bottom shows volume over time.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filenames', nargs='+',
help='List of one or more filenames corresponding to stocks. e.g. GE.vtk GM.vtk IBM.vtk DEC.vtk')
parser.add_argument('-r', dest='useRibbons', action='store_true', help='Use ribbons instead of tubes.')
args = parser.parse_args()
return args.filenames, args.useRibbons
def AddStock(renderers, filename, name, zPosition, useTubes):
print("Adding", name)
# Read the data
PolyDataRead = vtkPolyDataReader()
PolyDataRead.SetFileName(filename)
PolyDataRead.Update()
# Create the labels.
TextSrc = vtkVectorText()
TextSrc.SetText(name)
numberOfPoints = PolyDataRead.GetOutput().GetNumberOfPoints()
nameIndex = int((numberOfPoints - 1) * 0.8)
nameLocation = PolyDataRead.GetOutput().GetPoint(nameIndex)
x = nameLocation[0] * 0.15
y = nameLocation[1] + 5.0
z = zPosition
# Create a tube and ribbon filter. One or the other will be used.
TubeFilter = vtkTubeFilter()
TubeFilter.SetInputConnection(PolyDataRead.GetOutputPort())
TubeFilter.SetNumberOfSides(8)
TubeFilter.SetRadius(0.5)
TubeFilter.SetRadiusFactor(10000)
RibbonFilter = vtkRibbonFilter()
RibbonFilter.SetInputConnection(PolyDataRead.GetOutputPort())
RibbonFilter.VaryWidthOn()
RibbonFilter.SetWidthFactor(5)
RibbonFilter.SetDefaultNormal(0, 1, 0)
RibbonFilter.UseDefaultNormalOn()
Extrude = vtkLinearExtrusionFilter()
Extrude.SetInputConnection(RibbonFilter.GetOutputPort())
Extrude.SetVector(0, 1, 0)
Extrude.SetExtrusionType(1)
Extrude.SetScaleFactor(0.7)
Transform = vtkTransform()
Transform.Translate(0, 0, zPosition)
Transform.Scale(0.15, 1, 1)
TransformFilter = vtkTransformPolyDataFilter()
TransformFilter.SetInputConnection(Extrude.GetOutputPort())
TransformFilter.SetTransform(Transform)
# Select tubes or ribbons
if useTubes:
TransformFilter.SetInputConnection(TubeFilter.GetOutputPort())
else:
TransformFilter.SetInputConnection(Extrude.GetOutputPort())
for r in range(0, len(renderers)):
LabelMapper = vtkPolyDataMapper()
LabelMapper.SetInputConnection(TextSrc.GetOutputPort())
LabelActor = vtkFollower()
LabelActor.SetMapper(LabelMapper)
LabelActor.SetPosition(x, y, z)
LabelActor.SetScale(2, 2, 2)
LabelActor.SetOrigin(TextSrc.GetOutput().GetCenter())
# Increment zPosition.
zPosition += 8.0
StockMapper = vtkPolyDataMapper()
StockMapper.SetInputConnection(TransformFilter.GetOutputPort())
StockMapper.SetScalarRange(0, 8000)
StockActor = vtkActor()
StockActor.SetMapper(StockMapper)
renderers[r].AddActor(StockActor)
renderers[r].AddActor(LabelActor)
LabelActor.SetCamera(renderers[r].GetActiveCamera())
return zPosition
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9609949
|
<reponame>sndnyang/vat_chainer<gh_stars>0
import numpy as np
from chainer import Variable, cuda
import chainer.functions as cfunc
from dllib.chainer_functions.utils import distance, entropy
from .vat import at_loss, vat_loss
# baseline
XI = 1e-6
def loss_labeled(forward, x, t, args):
y = forward(x, update_batch_stats=True)
L = cfunc.softmax_cross_entropy(y, Variable(t))
if 'lat' in args.trainer:
one_hot = np.zeros(y.shape).astype(np.float32)
one_hot[np.arange(y.shape[0]), cuda.to_cpu(t)] = 1
L += at_loss(forward, x, Variable(cuda.to_gpu(one_hot, device=t.device)), train=True, eps=args.eps)
return L
def loss_unlabeled(forward, x, args):
if 'vat' in args.trainer:
# Virtual adversarial training loss
logit = forward(x, train=True, update_batch_stats=False)
lds = vat_loss(forward, distance, x, eps=args.eps, xi=args.xi, p_logit=logit.data, args=args)
if 'ent' in args.trainer:
ent_y_x = entropy(logit)
lds += ent_y_x
# Virtual adversarial training loss + Conditional Entropy loss
return lds
else:
raise NotImplementedError
def loss_test(forward, x, t):
logit = forward(x, train=False)
L, acc = cfunc.softmax_cross_entropy(logit, t).data, cfunc.accuracy(logit, t).data
return L, acc
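# Hedged aside (standalone sketch, not using chainer or this repo's entropy helper):
# the conditional entropy term added above for 'ent' trainers is
# H(p) = -sum_i p_i * log p_i of the model's softmax output, averaged over the batch.
import numpy as np
def softmax_entropy(logits):
    p = np.exp(logits - logits.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)
    return float(-(p * np.log(p + 1e-12)).sum(axis=1).mean())
# Uniform logits over 10 classes give entropy log(10) ~= 2.3026.
print(softmax_entropy(np.zeros((2, 10))))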
|
StarcoderdataPython
|
8045371
|
<reponame>ale-ben/AnimeUnityEngine
from AnimeUnityEngine import logging_aux, common_classes
import json
@logging_aux.logger_wraps()
def get_formatted_search_results(res_obj):
# For convenience, if it is not an array, wrap it in an array
if not isinstance(res_obj, type([])):
res_obj = [res_obj]
anime_arr = []
for anime_ob in res_obj:
# Create the anime object and assign its fields
anime = None
anime = common_classes.Anime(anime_ob['id'], anime_ob['title'], anime_ob['type'], anime_ob['episodes_length'])
anime.status = anime_ob['status']
anime.year = anime_ob['date']
anime.slug = anime_ob['slug']
anime.title_eng = anime_ob['title_eng']
anime.cover_image = anime_ob['imageurl_cover']
anime.thumbnail = anime_ob['imageurl']
anime.episodes = []
for ep in anime_ob['episodes']:
# Create the episode object and assign its fields
episode = None
episode = common_classes.Episode(ep['id'], ep['number'], ep['created_at'], ep['link'])
# Add the episode to the anime's episode list
anime.episodes.append(episode)
# Add the anime to the list
if 'related' in anime_ob:
anime.related = []
for rel in anime_ob['related']:
anime.related.append(common_classes.Related(rel['id'], rel['type'], rel['title'], rel['slug']))
anime_arr.append(anime)
return order_search_res(anime_arr)
# Look up an anime by id
@logging_aux.logger_wraps()
def get_selected_anime_obj_by_id(anime_arr, a_id=None):
for res in anime_arr:
if str(res.a_id) == a_id:
return res
return anime_arr[0]
def get_year(anime):
return anime.year
def order_search_res(anime_list):
anime_list.sort(key=get_year)
return anime_list
if __name__ == "__main__":
with open('./doc/test_dir/search_result_final.json') as f:
search_res = json.load(f)
anime_obj = get_formatted_search_results(search_res)
print(anime_obj)
print(order_search_res(anime_obj))
print(get_selected_anime_obj_by_id(anime_obj, 743))
|
StarcoderdataPython
|
1893791
|
import web
import energyServer
import datetime
import calendar
import json
urls = ("/", "personalFootprint"
)
class personalFootprint:
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
start = calendar.timegm(datetime.datetime(2019, 5, 9, 0).utctimetuple())
end = calendar.timegm(datetime.datetime(2019, 5, 15, 0).utctimetuple())
#end = calendar.timegm(datetime.datetime.now().utctimetuple())
ret = energyServer.db.retrieveStateParameters(start, end)
#footprint = ret["footprint"]
print("Scraped Database")
jsonDump = json.dumps(ret)
return jsonDump
footprint = web.application(urls, locals());
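# Quick standalone check (standard-library behaviour, not specific to this app):
# calendar.timegm interprets the struct_time as UTC, so the start value above is
# the Unix timestamp for 2019-05-09 00:00:00 UTC.
import calendar
import datetime
print(calendar.timegm(datetime.datetime(2019, 5, 9, 0).utctimetuple()))  # 1557360000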
|
StarcoderdataPython
|
6569715
|
<filename>smart/core.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Name: core
# Author: liangbaikai
# Date: 2020/12/22
# Desc: there is a python file description
# ------------------------------------------------------------------
import asyncio
import importlib
import inspect
import time
import traceback
import uuid
from asyncio import Lock
from collections import deque
from contextlib import suppress
from typing import Dict
from smart.log import log
from smart.downloader import Downloader
from smart.item import Item
from smart.pipline import Piplines
from smart.request import Request
from smart.scheduler import Scheduler
from smart.setting import gloable_setting_dict
class Engine:
def __init__(self, spider, middlewire=None, pipline: Piplines = None):
self.lock = None
self.task_dict: Dict[str, asyncio.Task] = {}
self.pip_task_dict: Dict[str, asyncio.Task] = {}
self.spider = spider
self.middlewire = middlewire
self.piplines = pipline
duplicate_filter_class = self._get_dynamic_class_setting("duplicate_filter_class")
scheduler_container_class = self._get_dynamic_class_setting("scheduler_container_class")
net_download_class = self._get_dynamic_class_setting("net_download_class")
self.scheduler = Scheduler(duplicate_filter_class(), scheduler_container_class())
req_per_concurrent = self.spider.cutome_setting_dict.get("req_per_concurrent") or gloable_setting_dict.get(
"req_per_concurrent")
self.downloader = Downloader(self.scheduler, self.middlewire, seq=req_per_concurrent,
downer=net_download_class())
self.request_generator_queue = deque()
self.stop = False
self.log = log
def _get_dynamic_class_setting(self, key):
class_str = self.spider.cutome_setting_dict.get(
key) or gloable_setting_dict.get(
key)
_module = importlib.import_module(".".join(class_str.split(".")[:-1]))
_class = getattr(_module, class_str.split(".")[-1])
return _class
def iter_request(self):
while True:
if not self.request_generator_queue:
yield None
continue
request_generator = self.request_generator_queue[0]
spider, real_request_generator = request_generator[0], request_generator[1]
try:
# execute and get a request from customer code
# request=real_request_generator.send(None)
request_or_item = next(real_request_generator)
if isinstance(request_or_item, Request):
request_or_item.__spider__ = spider
except StopIteration:
self.request_generator_queue.popleft()
continue
except Exception as e:
# exceptions can be handled here
self.request_generator_queue.popleft()
self._handle_exception(spider, e)
continue
yield request_or_item
def _check_complete_pip(self, task):
if task.cancelled():
self.log.debug(f"a task was cancelled")
return
if task and task.done() and task._key:
if task.exception():
self.log.error(f"a task raised an error in the pipeline: {task.exception()}")
else:
self.log.debug(f"a task done ")
result = task.result()
if result and isinstance(result, Item):
if hasattr(task, '_index'):
self._hand_piplines(task._spider, result, task._index + 1)
self.pip_task_dict.pop(task._key)
def _check_complete_callback(self, task):
if task.cancelled():
self.log.debug(f"a task was cancelled")
return
if task and task.done() and task._key:
self.log.debug(f"a task done ")
self.task_dict.pop(task._key)
async def start(self):
self.spider.on_start()
# self.spider
self.request_generator_queue.append((self.spider, iter(self.spider)))
# self.request_generator_queue.append( iter(self.spider))
# core implementation
while not self.stop:
# paused
if self.lock and self.lock.locked():
await asyncio.sleep(1)
continue
request_or_item = next(self.iter_request())
if isinstance(request_or_item, Request):
self.scheduler.schedlue(request_or_item)
if isinstance(request_or_item, Item):
self._hand_piplines(self.spider, request_or_item)
request = self.scheduler.get()
can_stop = self._check_can_stop(request)
# if request is None and not self.task_dict:
if can_stop:
# there are no pending requests and all tasks have completed, so end the loop
self.log.debug(
f"there are no pending requests and all tasks have completed, so the engine will stop")
self.stop = True
break
if isinstance(request, Request):
self._ensure_future(request)
resp = self.downloader.get()
if resp is None:
# let the downloader be scheduled; in testing, 0.001-0.0006 worked better
await asyncio.sleep(0.0005)
continue
custome_callback = resp.request.callback
if custome_callback:
request_generator = custome_callback(resp)
if request_generator:
self.request_generator_queue.append((custome_callback.__self__, request_generator))
# self.request_generator_queue.append( request_generator)
if self.spider.state != "runing":
self.spider.state = "runing"
self.spider.state = "closed"
self.spider.on_close()
self.log.debug(f"engine stopped")
await asyncio.sleep(0.15)
def pause(self):
self.log.info(f"pause was requested externally, so the engine will pause")
asyncio.create_task(self._lock())
self.spider.state = "pause"
def recover(self):
if self.lock and self.lock.locked():
self.log.info(f"recover was requested externally, so the engine will resume")
self.lock.release()
def close(self):
# can make external active end engine
self.stop = True
tasks = asyncio.all_tasks()
for it in tasks:
it.cancel()
asyncio.gather(*tasks, return_exceptions=True)
self.log.debug(f"stop was requested externally, so the engine will close")
async def _lock(self):
if self.lock is None:
self.lock = Lock()
await self.lock.acquire()
def _ensure_future(self, request: Request):
# compatible py_3.6
task = asyncio.ensure_future(self.downloader.download(request))
key = str(<KEY>)
task._key = key
self.task_dict[key] = task
task.add_done_callback(self._check_complete_callback)
def _handle_exception(self, spider, e):
if spider:
try:
self.log.error(f"an exception occurred: {e}", exc_info=True)
spider.on_exception_occured(e)
except BaseException:
pass
def _check_can_stop(self, request):
if request:
return False
if len(self.task_dict) > 0:
return False
if len(self.request_generator_queue) > 0:
return False
if self.downloader.response_queue.qsize() > 0:
return False
if len(self.pip_task_dict) > 0:
return False
return True
def _hand_piplines(self, spider_ins, item, index=0):
if self.piplines is None or len(self.piplines.piplines) <= 0:
self.log.info("got an item but no pipeline is configured to handle it, so it is ignored")
return
if len(self.piplines.piplines) < index + 1:
return
pip = self.piplines.piplines[index][1]
if not callable(pip):
return
if not inspect.iscoroutinefunction(pip):
task = asyncio.get_running_loop().run_in_executor(None, pip, spider_ins, item)
else:
task = asyncio.ensure_future(pip(spider_ins, item))
key = str(<KEY>())
task._key = key
task._index = index
task._spider = spider_ins
self.pip_task_dict[key] = task
task.add_done_callback(self._check_complete_pip)
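# Minimal standalone illustration (toy data, not the engine's real classes) of the
# pattern iter_request uses above: keep a deque of generators, always pull from the
# front one, and drop it once it raises StopIteration.
from collections import deque
gens = deque([iter([1, 2]), iter([3])])
drained = []
while gens:
    front = gens[0]
    try:
        drained.append(next(front))
    except StopIteration:
        gens.popleft()
print(drained)  # [1, 2, 3]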
|
StarcoderdataPython
|
1686
|
"""
Setup DB with example data for tests
"""
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand
from api import models
class Command(BaseCommand):
help = 'Setup DB with example data for tests'
def handle(self, *args, **options):
print('---- Creating Users ----')
User.objects.get_or_create(username='thomastest', password=make_password('<PASSWORD>'))
thomas = User.objects.get(username='thomastest')
User.objects.get_or_create(username='norbert', password=make_password('<PASSWORD>'))
norbert = User.objects.get(username='norbert')
User.objects.get_or_create(username='stefan', password=make_password('<PASSWORD>'))
stefan = User.objects.get(username='stefan')
superuser = Group.objects.get(name='superuser')
superuser.user_set.add(thomas)
netadmin = Group.objects.get(name='netadmin')
netadmin.user_set.add(norbert)
support = Group.objects.get(name='support')
support.user_set.add(stefan)
print('---- Creating Inventory ----')
models.Inventory.objects.create(name='Example', hosts_file='web_nornir/nornir_config/example_config/hosts.yaml',
groups_file='web_nornir/nornir_config/example_config/groups.yaml', type=1)
models.Inventory.objects.create(name='INS Lab', hosts_file='web_nornir/nornir_config/inslab_config/hosts.yaml',
groups_file='web_nornir/nornir_config/inslab_config/groups.yaml', type=1)
print('---- Creating Job Templates ----')
models.JobTemplate.objects.create(name='hello_world', description='This prints a hello world',
file_name='hello_world.py', created_by_id=1)
models.JobTemplate.objects.create(name='Get CDP Neighbors', description='Lists all CDP neighbors',
file_name='get_cdp_neighbors.py', created_by_id=1)
models.JobTemplate.objects.create(name='Get Interfaces',
description='Gets brief information about all interfaces, sh ip int br',
file_name='get_interfaces.py', created_by_id=1)
models.JobTemplate.objects.create(name='Ping Device',
description='Pings a chosen network device and reports if reachable',
file_name='ping.py', variables=['target'], created_by_id=1)
models.JobTemplate.objects.create(name='Get Configuration', description='Gets all configuration from device',
file_name='get_configuration.py', created_by_id=1)
print('---- Creating Tasks ----')
models.Task.objects.create(name='Get Hello World', created_by_id=1, template_id=1, inventory_id=1)
models.Task.objects.create(name='Get CDP neighbors of INS lab', created_by_id=2, template_id=2, inventory_id=2)
models.Task.objects.create(name='Get interfaces of INS lab', created_by_id=2, template_id=3, inventory_id=2)
print('---- ALL DONE!! ----')
|
StarcoderdataPython
|
6656222
|
from enum import Enum
import stringcase
from faker import Faker
from faker_extensions.abstract_providers import WeightedProvider
# https://www.pfma.org.uk/pet-population-2017
class Pets(Enum):
INDOOR_FISH = 1
OUTDOOR_FISH = 2
DOG = 3
CAT = 4
RABBIT = 5
INDOOR_BIRD = 6
REPTILE = 7
DOMESTIC_FOWL = 8
GUINEA_PIG = 9
HAMSTER = 10
def name_value(self):
return stringcase.sentencecase(self.name.lower())
pet_distributions = {
Pets.INDOOR_FISH: 0.08,
Pets.OUTDOOR_FISH: 0.05,
Pets.DOG: 0.24,
Pets.CAT: 0.17,
Pets.RABBIT: 0.02,
Pets.INDOOR_BIRD: 0.01,
Pets.REPTILE: 0.02,
Pets.DOMESTIC_FOWL: 0.01,
Pets.GUINEA_PIG: 0.02,
Pets.HAMSTER: 0.01,
None: 0.37
}
class PetProvider(WeightedProvider):
def __init__(self, generator):
super().__init__(pet_distributions, generator)
def pet(self):
return self.get_choice()
def main():
fake = Faker(['en_UK'])
fake.add_provider(PetProvider(fake))
pet = fake.pet()
print(pet)
if __name__ == '__main__':
main()
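# Hedged aside: WeightedProvider's internals are not shown in this file, but a
# weighted categorical draw like PetProvider's can be sketched with the standard
# library alone (None below stands for "household has no pet").
import random
def weighted_pet(distributions, rng=random):
    choices, weights = zip(*distributions.items())
    return rng.choices(choices, weights=weights, k=1)[0]
# Example: weighted_pet(pet_distributions) returns a Pets member or None,
# with Pets.DOG drawn roughly 24% of the time.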
|
StarcoderdataPython
|
1822265
|
<reponame>LocalGround/localground<gh_stars>1-10
from rest_framework import generics
from localground.apps.site.api import serializers, filters
from localground.apps.site.api.views.abstract_views import \
QueryableListCreateAPIView
from localground.apps.site import models
from localground.apps.site.api.permissions import CheckProjectPermissions
from django.db.models import Q
from localground.apps.lib.helpers import get_timestamp_no_milliseconds
class ProjectList(QueryableListCreateAPIView):
serializer_class = serializers.ProjectSerializer
filter_backends = (filters.SQLFilterBackend,)
model = models.Project
paginate_by = 100
def get_queryset(self):
if self.request.user.is_authenticated():
return models.Project.objects.get_objects(self.request.user)
else:
return models.Project.objects.get_objects_public(
access_key=self.request.GET.get('access_key')
)
def perform_create(self, serializer):
if serializer.validated_data.get("access_authority") is None:
d = {'access_authority': models.ObjectAuthority.objects.get(id=1)}
serializer.save(**d)
else:
serializer.save()
class ProjectInstance(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Project.objects.select_related('owner').all()
serializer_class = serializers.ProjectDetailSerializer
|
StarcoderdataPython
|
200126
|
# -*- coding: UTF-8 -*-
import unittest
import mock
from taskcat._client_factory import Boto3Cache
class TestBoto3Cache(unittest.TestCase):
@mock.patch("taskcat._client_factory.boto3", autospec=True)
def test_stable_concurrency(self, mock_boto3):
# Sometimes boto fails with KeyErrors under high concurrency
for key_error in ["endpoint_resolver", "credential_provider"]:
mock_boto3.Session.side_effect = [KeyError(key_error), mock.DEFAULT]
c = Boto3Cache(_boto3=mock_boto3)
c.session("default")
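# Hedged aside (standard unittest.mock behaviour, not taskcat-specific): a
# side_effect list makes successive calls raise/return its items in order, and
# mock.DEFAULT falls through to the mock's normal return value, which is how the
# KeyError-then-success retry is simulated above.
from unittest import mock
m = mock.Mock(return_value="ok")
m.side_effect = [KeyError("endpoint_resolver"), mock.DEFAULT]
try:
    m()
except KeyError:
    pass
assert m() == "ok"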
|
StarcoderdataPython
|
3210481
|
from regresspy.regression import Regression
from regresspy.loss import mae,sse,mse,rmse
|
StarcoderdataPython
|
1770386
|
# External Dependencies
import gatt
import queue
import time
import threading
# Internal Dependencies
from pyroot import RootPhy
class RootGATT(RootPhy):
root_identifier_uuid = '48c5d828-ac2a-442d-97a3-0c9822b04979'
def __init__(self, name = None, dev = 'hci0', wait_for_connect = True):
"""Sets up Bluetooth manager to look for robots.
Parameters
----------
name : str, optional
Name of the robot to connect to; if no name supplied, will connect
to the first robot it sees.
dev : str, optional
Name of the device to connect to; default is hci0
wait_for_connect : bool, optional
If true (default), blocks until a connection is made
"""
self._ble_manager = BluetoothDeviceManager(adapter_name = dev)
self._ble_manager.desired_name = name
self._ble_manager.start_discovery(service_uuids=[self.root_identifier_uuid])
self._ble_thread = threading.Thread(target = self._ble_manager.run)
self._ble_thread.start()
if wait_for_connect:
self.wait_for_connect()
def wait_for_connect(self, timeout = float('inf')):
"""Blocking function initializing robot connection.
Parameters
----------
timeout : float, optional
Time to wait for the connection; by default waits forever. Raises
TimeoutError if the timeout is exceeded.
"""
timeout += time.time()
while self._ble_manager.robot is None and time.time() < timeout:
time.sleep(0.1) # wait for a root robot to be discovered
if self._ble_manager.robot is None:
raise TimeoutError('Timed out waiting for ' + self._ble_manager.desired_name)
while not self._ble_manager.robot.service_resolution_complete:
time.sleep(0.1) # allow services to resolve before continuing
self.rx_q = self._ble_manager.robot.rx_q
def is_connected(self):
"""Utility function for determining state of bluetooth thread."""
return self._ble_thread.is_alive()
def disconnect(self):
"""Disconnects BLE from robot and stops comms thread."""
self._ble_manager.stop()
self._ble_manager.robot.disconnect()
self._ble_thread.join()
def send_raw(self, packet):
"""Helper method to send raw BLE packets to the robot.
Parameters
----------
packet : bytes
20-byte packet to send to the robot.
"""
if len(packet) == 20:
self._ble_manager.robot.tx_characteristic.write_value(packet)
else:
print('Error: send_raw_ble: Packet wrong length.')
class BluetoothDeviceManager(gatt.DeviceManager):
robot = None # root robot device
desired_name = None
def device_discovered(self, device):
print("[%s] Discovered: %s" % (device.mac_address, device.alias()))
if self.desired_name == None:
self.desired_name = device.alias()
if self.desired_name == device.alias():
self.stop_discovery() # Stop searching
self.robot = RootDevice(mac_address=device.mac_address, manager=self)
self.robot.connect()
class RootDevice(gatt.Device):
uart_service_uuid = '6e400001-b5a3-f393-e0a9-e50e24dcca9e'
tx_characteristic_uuid = '6e400002-b5a3-f393-e0a9-e50e24dcca9e' # Write
rx_characteristic_uuid = '6e400003-b5a3-f393-e0a9-e50e24dcca9e' # Notify
service_resolution_complete = False
def __init__(self, mac_address, manager, managed=True):
try:
self.rx_q = queue.SimpleQueue()
except AttributeError:
self.rx_q = queue.Queue()
super().__init__(mac_address, manager, managed)
def connect_succeeded(self):
super().connect_succeeded()
print("[%s] Connected" % (self.mac_address))
def connect_failed(self, error):
super().connect_failed(error)
print("[%s] Connection failed: %s" % (self.mac_address, str(error)))
def disconnect_succeeded(self):
super().disconnect_succeeded()
self.service_resolution_complete = False
print("[%s] Disconnected" % (self.mac_address))
def services_resolved(self):
super().services_resolved()
print("[%s] Resolved services" % (self.mac_address))
self.uart_service = next(
s for s in self.services
if s.uuid == self.uart_service_uuid)
self.tx_characteristic = next(
c for c in self.uart_service.characteristics
if c.uuid == self.tx_characteristic_uuid)
self.rx_characteristic = next(
c for c in self.uart_service.characteristics
if c.uuid == self.rx_characteristic_uuid)
self.rx_characteristic.enable_notifications() # listen to RX messages
self.service_resolution_complete = True
def characteristic_value_updated(self, characteristic, value):
self.rx_q.put(value)
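# Standalone sketch (generic pattern, no BLE hardware needed) of the
# poll-until-timeout loop that wait_for_connect implements above.
import time
def wait_until(predicate, timeout=5.0, interval=0.1):
    deadline = time.time() + timeout
    while not predicate():
        if time.time() >= deadline:
            raise TimeoutError("condition not met within %.1f s" % timeout)
        time.sleep(interval)
wait_until(lambda: True)  # a real predicate would check e.g. connection state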
|
StarcoderdataPython
|
11338498
|
<filename>extractEncodedWord.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import codecs
import os
import re
import sys
# Read and process MS Word documents, converting font encoding into
# Unicode characters.
# https://openpyxl.readthedocs.io/en/default/tutorial.html
OldFont = ''
FONTS_TO_CONVERT = {
'Arjyaban Normal': {'encoding': 'Arjyaban', 'replace': 'RibengUni'},
'Arjyaban CN': {'encoding': 'Arjyaban', 'replace': 'RibengUni'},
'RajgirCN': {'encoding': 'Arjyaban', 'replace': 'RibengUni'},
'Chakma(SuJoyan)': {'encoding': 'Sujoyan', 'replace': 'RibengUni'},
'SutonnyMJ': {'encoding': 'Sutonny'},
}
from docx import Document
import convertUtil
import conversion # The converter(s) to use.
# Flag for handling all characters in an Old font.
convertAllInOldFontRange = True
debugFlag = False
# Check for font-encoded text and convert it to Unicode.
# It assumes that the font has been detected.
def checkAndConvertText(textIn, encoding):
# Handle use characters.
result = conversion.convertToUnicode(textIn, encoding)
return result
def convertDoc(doc, unicodeFont, debugInfo=None, extractedFileName='extracted.tsv'):
fonts_found = {}
allMissingChars = {}
sections = doc.sections
print (' %d sections' % len(sections))
paragraphs = doc.paragraphs
print (' %d paragraphs' % len(doc.paragraphs))
if debugInfo:
print (' %d tables' % len(doc.tables))
if doc.inline_shapes:
print (' %d inline_shapes' % len(doc.inline_shapes))
if doc.part:
print (' %s part' % dir(doc.part))
print (' doc dir: %s' % dir(doc))
for section in sections:
print ('Section = %s' % section)
numConverts = 0
notConverted = 0
paraNum = 0
# Output line for extracted text and Python conversion
extractedLine = []
extractedFile = codecs.open(extractedFileName, 'w', 'utf-8')
extractedCount = 0
for para in paragraphs:
para_format = para.paragraph_format
para_style = para.style
para_alignment = para.alignment
para_part = para.part
if debugInfo:
print (' Paragraph %d' % paraNum)
print (' para format = %s' % para_format)
print (' para style = %s' % para_style)
print (' para alignment = %s' % para_alignment)
print (' para part = %s' % para_part)
if para_part:
print (' inline_shapes = %s' % para_part.inline_shapes)
runs = para.runs
if debugInfo:
print (' %d runs in paragraph' % (len(runs)))
print (' paragraph text = %s' % (para.text))
runNum = 0
runNum = 1
# Should I replace the encoding found in the paragraph, too?
for run in runs:
fontObj = run.font
fontName = fontObj.name
if fontName in FONTS_TO_CONVERT:
print ('Run font = %s. Text has %d chars' % (fontName, len(run.text)))
if len(run.text):
if debugInfo:
print (' Run %d text(%d) = >%s<' % (runNum, len(run.text), run.text))
thisText = run.text
fontObj = run.font
fontName = fontObj.name
if fontName is None:
# ??? Get the paragraph's style
fontName = para.style.font.name
fontObj = run.font
print ('*** Getting font %s from Paragraph ***' % (fontName))
if fontName not in fonts_found:
# Find the characters in each font.
fonts_found[fontName] = []
if debugInfo:
print ('FONT FOUND = %s' % fontName)
print (' text = %s' % thisText.encode('utf-8'))
for t in thisText:
if t not in fonts_found[fontName]:
fonts_found[fontName].append(t)
if fontName not in FONTS_TO_CONVERT:
continue
if thisText:
# Record code points found in this encoding.
encoding = FONTS_TO_CONVERT[fontName]['encoding']
# print ('Encoding %s for %s' % (encoding, thisText.encode('utf-8')))
(convertedText, missingChars) = conversion.convertToUnicode(thisText, encoding)
if thisText != convertedText:
numConverts += 1
try:
extractedFile.write('%s\t%s\t%s\n' % (
fontName, thisText, convertedText))
run.text = convertedText
except Exception as err:
print ('Error = %s' % err)
print ("Error in setting run.text with %s" % convertedText.encode('utf-8'))
else:
if debugInfo:
print ('******** Not converted encoding = %s, text = >%s<' % (
encoding, thisText.encode('utf-8')))
notConverted += 1
if missingChars:
for t in missingChars:
if t in allMissingChars:
allMissingChars[t] += 1
else:
allMissingChars[t] = 1
# Change the font, even if there was no text changed.
if fontName in FONTS_TO_CONVERT and 'replace' in FONTS_TO_CONVERT[fontName]:
try:
print (' Reset font %s to %s ' % (fontObj.name, FONTS_TO_CONVERT[fontName]['replace']))
fontObj.name = FONTS_TO_CONVERT[fontName]['replace']
except Exception as err:
print ('Problem replacing font in fontObj %s with %s' % (fontObj, fontName))
runNum += 1
paraNum += 1
extractedFile.close()
print (' %d values converted to Unicode' % numConverts)
for font in fonts_found:
print ('font = %s' % font)
print (' chars = %s' % sorted(fonts_found[font]))
print ('All Missing Chars = %s' % allMissingChars)
return (numConverts, notConverted)
# Process one DOCX, substituting font-encoded text with Unicode.
def convertOneDoc(path_to_doc, unicodeFont='RibengUni',
outpath=None, isString=False):
print ('Converting text in file: %s' % path_to_doc)
doc = Document(path_to_doc)
newName = os.path.splitext(path_to_doc)[0]
extractName = newName + '_extracted.tsv'
(numConverts, numNotConverted) = convertDoc(
doc, unicodeFont, debugInfo=debugFlag,
extractedFileName=extractName)
if numConverts:
unicode_path_to_doc = newName + '.unicode.docx'
doc.save(unicode_path_to_doc)
print (' ** Saved new version to file %s\n' % unicode_path_to_doc)
print (' unconverted = %d' % numNotConverted)
else:
print (' @@@ No conversion done, so no new file created.\n')
def processArgs(argv):
if len(sys.argv) <= 1:
print ('Usage:')
print (' convertWordChakma.py inputFile.docx')
print (' convertWordChakma.py inputFile1.docx inputFile2.docx ... ')
print (' convertWordChakma.py -i fileWithFileNames')
return None
path_to_docs = []
if len(argv) == 2:
path_to_docs.append(sys.argv[1])
else:
if len(argv) == 3 and argv[1] == '-f':
# Get the file containing conversion list and get all items.
path_to_docs = convertUtil.infileToList(argv[2])
if not path_to_docs:
print ('Error: no contents found in file %s' %
argv[2])
return
else:
# Expect a list of files in the remaining arguments
path_to_docs = [path for path in argv[1:]]
return path_to_docs
def main(argv):
print ('ARGS = %s' % argv)
if len(argv) > 1:
path_to_doc = sys.argv[1]
else:
print ('Please provide an input file of type .docx')
return
doc_list = processArgs(argv)
convertFileCount = 0
for doc_path in doc_list:
convertOneDoc(doc_path)
convertFileCount += 1
print ('%d processed' % convertFileCount)
if __name__ == "__main__":
main(sys.argv)
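# Hedged, self-contained sketch (the real conversion.convertToUnicode module is not
# in this file): a font-encoding conversion of this kind amounts to applying a
# per-character mapping table and collecting the characters it cannot map.
def convert_with_table(text, table):
    converted, missing = [], []
    for ch in text:
        if ch in table:
            converted.append(table[ch])
        else:
            converted.append(ch)
            missing.append(ch)
    return ''.join(converted), missing
# convert_with_table('ab', {'a': 'X'}) -> ('Xb', ['b'])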
|
StarcoderdataPython
|
9717583
|
import copy
import json
import os
import unittest
from unittest import TestCase
import requests
from mock import MagicMock, Mock, patch
import ingest.exporter.ingestexportservice as ingestexportservice
from ingest.api.dssapi import DssApi
from ingest.api.ingestapi import IngestApi
from ingest.exporter.staging import StagingService
from ingest.api.stagingapi import FileDescription
from ingest.exporter.ingestexportservice import IngestExporter, LinkSet
from ingest.utils.IngestError import ExporterError
BASE_PATH = os.path.dirname(__file__)
class TestExporter(TestCase):
def setUp(self):
self.longMessage = True
# Setup mocked APIs
self.mock_dss_api = MagicMock(spec=DssApi)
self.mock_ingest_api = MagicMock(spec=IngestApi)
self.mock_staging_service = MagicMock(spec=StagingService)
def test_get_input_bundle(self):
# given:
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
# and:
self.mock_ingest_api.get_related_entities.return_value = ['bundle1', 'bundle2']
process = {}
# when:
input_bundle = exporter.get_input_bundle(process)
# then:
self.assertEqual('bundle1', input_bundle)
def test__bundle_metadata__appends_provenance_block(self):
# Setup input metadata JSON
arbitrary_uuid = "1234-5678-9012"
arbitrary_submission_date = "2019-01-01T01:01:01.000Z"
arbitrary_update_date = "2019-02-02T02:02:02.000Z"
arbitrary_schema_url = "https://schema.humancellatlas.org/type/project/1.2.3/project"
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'fixtures', 'sample_metadata.json'))
with open(file_path) as f:
sample_metadata_json = json.load(f)
sample_metadata_json["submissionDate"] = arbitrary_submission_date
sample_metadata_json["dcpVersion"] = arbitrary_update_date
sample_metadata_json["content"]["describedBy"] = arbitrary_schema_url
# Execute test
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
provenance_filled_metadata = exporter.bundle_metadata(sample_metadata_json, arbitrary_uuid)
        # Verify the provenance block's existence and that its contents match as expected
self.assertTrue("provenance" in provenance_filled_metadata)
self.assertEqual(provenance_filled_metadata["provenance"]["document_id"], arbitrary_uuid)
self.assertEqual(provenance_filled_metadata["provenance"]["submission_date"], arbitrary_submission_date)
self.assertEqual(provenance_filled_metadata["provenance"]["update_date"], arbitrary_update_date)
self.assertEqual(provenance_filled_metadata["provenance"]["schema_major_version"], 1)
self.assertEqual(provenance_filled_metadata["provenance"]["schema_minor_version"], 2)
@unittest.skip
def test_upload_metadata_files(self):
# given:
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
# and:
file_desc = FileDescription('checksums', 'contentType', 'name', 'name', 'file_url')
exporter.upload_file = MagicMock(return_value=file_desc)
metadata_files_info = {
'project': {
'dss_filename': 'project.json',
'dss_uuid': 'uuid',
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'biomaterial': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'process': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'protocol': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'file': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'links': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
}
}
# when:
exporter.upload_metadata_files('sub_uuid', metadata_files_info)
# then:
        for metadata_file in metadata_files_info.values():
            self.assertEqual(metadata_file['dss_uuid'], 'uuid')
            self.assertEqual(metadata_file['upload_file_url'], 'file_url')
def test_upload_metadata_files_error(self):
# given:
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
# and:
exporter.upload_file = Mock(side_effect=Exception('test upload file error'))
metadata_files_info = {
'project': {
'dss_filename': 'project.json',
'dss_uuid': 'uuid',
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'biomaterial': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'process': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'protocol': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'file': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
},
'links': {
'dss_uuid': None,
'content': {},
'content_type': 'type',
'upload_filename': 'filename'
}
}
# when, then:
with self.assertRaises(ingestexportservice.BundleFileUploadError):
exporter.upload_metadata_files('sub_uuid', metadata_files_info)
def test_put_bundle_in_dss_error(self):
# given:
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
# and:
self.mock_dss_api.put_bundle.side_effect = Exception('test create bundle error')
# when, then:
with self.assertRaises(ingestexportservice.BundleDSSError):
exporter.put_bundle_in_dss('bundle_uuid', 'bundle_version', [])
# mocks linked entities in the ingest API, attempts to build a bundle by crawling from an assay
# process, asserts that the bundle created is equivalent to a known bundle
@unittest.skip
@patch('ingest.api.dssapi.DssApi')
def test_create_bundle_manifest(self, dss_api_constructor):
# given:
dss_api_constructor.return_value = MagicMock('mock_dss_api')
class MockRequestResponse:
def __init__(self, json_payload, status_code):
self.payload = json_payload
self.status_code = status_code
self.text = json.dumps(json_payload)
def json(self):
return self.payload
def mock_entities_url_to_file_dict():
mock_entity_url_to_file_dict = dict()
# analysis process
mock_entity_url_to_file_dict["processes/mock-analysis-process-id"] = "/processes/mock_analysis_process.json"
mock_entity_url_to_file_dict[
"processes/mock-analysis-process-id/derivedFiles"] = \
"/processes/mock_analysis_process_derived_files.json"
mock_entity_url_to_file_dict[
"processes/mock-analysis-process-id/inputFiles"] = "/processes/mock_analysis_process_input_files.json"
mock_entity_url_to_file_dict[
"processes/mock-analysis-process-id/inputBundleManifests"] = \
"/processes/mock_analysis_process_input_bundle_manifests.json"
# input bundle manifests
mock_entity_url_to_file_dict[
"bundleManifests/mock-input-bundle-manifest-id"] = "/processes/mock_bundle_manifest.json"
# files
mock_entity_url_to_file_dict["files/mock-fastq-read1-id"] = "/processes/mock_fastq_read1.json"
mock_entity_url_to_file_dict[
"files/mock-fastq-read1-id/derivedByProcesses"] = "/files/mock_fastq_read1_derived_by_processes.json"
mock_entity_url_to_file_dict["files/mock-fastq-read2-id"] = "/processes/mock_fastq_read2.json"
mock_entity_url_to_file_dict[
"files/mock-fastq-read2-id/derivedByProcesses"] = "/files/mock_fastq_read2_derived_by_processes.json"
# wrapper process(lib prep -> sequencing)
mock_entity_url_to_file_dict[
"processes/mock-assay-process-id"] = "/processes/wrapper_process_lib_prep_and_sequencing.json"
mock_entity_url_to_file_dict[
"processes/mock-assay-process-id/chainedProcesses"] = \
"/processes/wrapper_process_lib_prep_and_sequencing_chained_processes.json"
mock_entity_url_to_file_dict[
"processes/mock-assay-process-id/inputBiomaterials"] = \
"/processes/wrapper_process_lib_prep_and_sequencing_input_biomaterial.json"
mock_entity_url_to_file_dict[
"processes/mock-assay-process-id/derivedFiles"] = \
"/processes/wrapper_process_lib_prep_and_sequencing_derived_files.json"
# lib prep process
mock_entity_url_to_file_dict["processes/mock-lib-prep-process-id"] = "/processes/mock_lib_prep_process.json"
mock_entity_url_to_file_dict[
"processes/mock-lib-prep-process-id/protocols"] = "/processes/mock_lib_prep_process_protocols.json"
# sequencing process
mock_entity_url_to_file_dict[
"processes/mock-sequencing-process-id"] = "/processes/mock_sequencing_process.json"
mock_entity_url_to_file_dict[
"processes/mock-sequencing-process-id/protocols"] = "/processes/mock_sequencing_process_protocols.json"
# cell suspension
mock_entity_url_to_file_dict[
"biomaterials/mock-cell-suspension-id"] = "/biomaterials/mock_cell_suspension.json"
mock_entity_url_to_file_dict[
"biomaterials/mock-cell-suspension-id/derivedByProcesses"] = \
"/biomaterials/mock_cell_suspension_derived_by_processes.json"
# wrapper process(dissociation -> enrichment)
mock_entity_url_to_file_dict[
"processes/mock-dissociation-enrichment-process-id"] = \
"/processes/wrapper_process_dissociation_and_enrichment.json"
mock_entity_url_to_file_dict[
"processes/mock-dissociation-enrichment-process-id/chainedProcesses"] = \
"/processes/wrapper_process_dissociation_and_enrichment_chained_processes.json"
mock_entity_url_to_file_dict[
"processes/mock-dissociation-enrichment-process-id/inputBiomaterials"] = \
"/processes/wrapper_process_dissociation_and_enrichment_input_biomaterial.json"
mock_entity_url_to_file_dict[
"processes/mock-dissociation-enrichment-process-id/derivedBiomaterials"] = \
"/processes/wrapper_process_dissociation_and_enrichment_derived_biomaterial.json"
# dissociation process
mock_entity_url_to_file_dict[
"processes/mock-dissociation-process-id"] = "/processes/mock_dissociation_process.json"
mock_entity_url_to_file_dict[
"processes/mock-dissociation-process-id/protocols"] = \
"/processes/mock_dissociation_process_protocols.json"
# enrichment process
mock_entity_url_to_file_dict[
"processes/mock-enrichment-process-id"] = "/processes/mock_encrichment_process.json"
mock_entity_url_to_file_dict[
"processes/mock-enrichment-process-id/protocols"] = "/processes/mock_enrichment_process_protocols.json"
# specimen
mock_entity_url_to_file_dict["biomaterials/mock-specimen-id"] = "/biomaterials/mock_specimen.json"
mock_entity_url_to_file_dict[
"biomaterials/mock-specimen-id/derivedByProcesses"] = \
"/biomaterials/mock_specimen_derived_by_processes.json"
# sampling process
mock_entity_url_to_file_dict["processes/mock-sampling-process-id"] = "/processes/mock_sampling_process.json"
mock_entity_url_to_file_dict[
"processes/mock-sampling-process-id/inputBiomaterials"] = \
"/processes/mock_sampling_process_input_biomaterial.json"
mock_entity_url_to_file_dict[
"processes/mock-sampling-process-id/derivedBiomaterials"] = \
"/processes/mock_sampling_process_derived_biomaterials.json"
# donor
mock_entity_url_to_file_dict["biomaterials/mock-donor-id"] = "/biomaterials/mock_donor.json"
# project
mock_entity_url_to_file_dict["projects/mock-project-id"] = "/projects/mock_project.json"
return mock_entity_url_to_file_dict
regular_requests_get = copy.deepcopy(requests.get)
def mock_entities_retrieval(*args, **kwargs):
test_ingest_dir = BASE_PATH + '/bundles/ingest-data'
mock_entity_url_to_file_dict = mock_entities_url_to_file_dict()
url = args[0]
if 'mock-ingest-api' not in url:
return regular_requests_get(*args, **kwargs)
else: # mockville
entity_relative_url = url.replace('http://mock-ingest-api/', '')
if entity_relative_url in mock_entity_url_to_file_dict:
entity_file_location = mock_entity_url_to_file_dict[entity_relative_url]
with open(test_ingest_dir + entity_file_location, 'rb') as entity_file:
entity_json = json.load(entity_file)
else: # don't have a mock for this entity; if it's a request for an empty input
# biomaterials/files/protocols, return a suitable empty _embedded
entity_json = {'_embedded': dict(),
'_links': {
'self': {
'href': url
}}}
if 'derivedByProcesses' in entity_relative_url or 'chainedProcesses' in entity_relative_url:
entity_json['_embedded'] = {'processes': list()}
elif 'inputBiomaterials' in entity_relative_url or 'derivedBiomaterials' in entity_relative_url:
entity_json['_embedded'] = {'biomaterials': list()}
elif 'inputBundleManifests' in entity_relative_url:
entity_json['_embedded'] = {'bundleManifests': list()}
elif 'inputFiles' in entity_relative_url or 'derivedFiles' in entity_relative_url:
entity_json['_embedded'] = {'files': list()}
elif 'protocols' in entity_relative_url:
entity_json['_embedded'] = {'protocols': list()}
elif 'projects' in entity_relative_url:
with open(test_ingest_dir + '/projects/mock_project.json', 'rb') as project_file:
entity_json['_embedded'] = {'projects': [json.load(project_file)]}
else:
raise Exception("Unknown resource in mock entities tests:" + url)
return MockRequestResponse(entity_json, 200)
# mock the calls to the ingest API for the entities in the bundle
get_requests_mock = Mock()
get_requests_mock.side_effect = mock_entities_retrieval
requests.get = get_requests_mock
exporter = ingestexportservice.IngestExporter()
process_info = exporter.get_all_process_info('http://mock-ingest-api/processes/mock-assay-process-id')
metadata_by_type = exporter.get_metadata_by_type(process_info)
bundle_metadata_info = exporter.prepare_metadata_files(metadata_by_type)
# assert that the contents of the bundle metadata info match that of the expected bundle
self.assertEqual( # biomaterials...
frozenset([biomaterial['hca_ingest']['document_id'] for biomaterial in
bundle_metadata_info['biomaterial']['content']['biomaterials']]),
frozenset([biomaterial['hca_ingest']['document_id'] for biomaterial in
json_from_expected_bundle_file('assay/expected/Mouse Melanoma_biomaterial_bundle.json')[
'biomaterials']]))
self.assertEqual( # processes...
frozenset([process['hca_ingest']['document_id'] for process in
bundle_metadata_info['process']['content']['processes']]),
frozenset([process['hca_ingest']['document_id'] for process in
json_from_expected_bundle_file('assay/expected/Mouse Melanoma_process_bundle.json')[
'processes']]))
self.assertEqual( # protocols...
frozenset([protocol['hca_ingest']['document_id'] for protocol in
bundle_metadata_info['protocol']['content']['protocols']]),
frozenset([protocol['hca_ingest']['document_id'] for protocol in
json_from_expected_bundle_file('assay/expected/Mouse Melanoma_protocol_bundle.json')[
'protocols']]))
self.assertEqual( # files...
frozenset([file['hca_ingest']['document_id'] for file in bundle_metadata_info['file']['content']['files']]),
frozenset([file['hca_ingest']['document_id'] for file in
json_from_expected_bundle_file('assay/expected/Mouse Melanoma_file_bundle.json')['files']]))
self.assertEqual( # links...
frozenset([tuple(sorted(link.items())) for link in bundle_metadata_info['links']['content']['links']]),
frozenset([tuple(sorted(link.items())) for link in
json_from_expected_bundle_file('assay/expected/Mouse Melanoma_links_bundle.json')['links']])
)
self.assertEqual( # projects...
bundle_metadata_info['project']['content']['hca_ingest']['document_id'],
json_from_expected_bundle_file('assay/expected/Mouse Melanoma_project_bundle.json')['hca_ingest'][
'document_id']
)
# now run it on analysis
process_info = exporter.get_all_process_info('http://mock-ingest-api/processes/mock-analysis-process-id')
bundle_metadata_info = exporter.prepare_metadata_files(process_info)
# assert that the contents of the bundle metadata info match that of the expected bundle
self.assertEqual( # biomaterials...
frozenset([biomaterial['hca_ingest']['document_id'] for biomaterial in
bundle_metadata_info['biomaterial']['content']['biomaterials']]),
frozenset([biomaterial['hca_ingest']['document_id'] for biomaterial in
json_from_expected_bundle_file('analysis/expected/Mouse Melanoma_biomaterial_bundle.json')[
'biomaterials']]))
self.assertEqual( # processes...
frozenset([process['hca_ingest']['document_id'] for process in
bundle_metadata_info['process']['content']['processes']]),
frozenset([process['hca_ingest']['document_id'] for process in
json_from_expected_bundle_file('analysis/expected/Mouse Melanoma_process_bundle.json')[
'processes']]))
self.assertEqual( # protocols...
frozenset([protocol['hca_ingest']['document_id'] for protocol in
bundle_metadata_info['protocol']['content']['protocols']]),
frozenset([protocol['hca_ingest']['document_id'] for protocol in
json_from_expected_bundle_file('analysis/expected/Mouse Melanoma_protocol_bundle.json')[
'protocols']]))
self.assertEqual( # files...
frozenset([file['hca_ingest']['document_id'] for file in bundle_metadata_info['file']['content']['files']]),
frozenset([file['hca_ingest']['document_id'] for file in
json_from_expected_bundle_file('analysis/expected/Mouse Melanoma_file_bundle.json')['files']]))
self.assertEqual( # links...
frozenset([tuple(sorted(link.items())) for link in bundle_metadata_info['links']['content']['links']]),
frozenset([tuple(sorted(link.items())) for link in
json_from_expected_bundle_file('analysis/expected/Mouse Melanoma_links_bundle.json')['links']])
)
self.assertEqual( # projects...
bundle_metadata_info['project']['content']['hca_ingest']['document_id'],
json_from_expected_bundle_file('analysis/expected/Mouse Melanoma_project_bundle.json')['hca_ingest'][
'document_id']
)
def test_add_links_no_duplicates(self):
# given
mock_link = {
"process": "4674424e-3ab1-491c-8295-a68c7bb04b61",
"inputs": ["aaa4424e-3ab1-491c-8295-a68c7bb04b61", "bbb4424e-3ab1-491c-8295-a68c7bb04b61"],
"input_type": "file",
"outputs": ["ccc4424e-3ab1-491c-8295-a68c7bb04b61", "ddd4424e-3ab1-491c-8295-a68c7bb04b61"],
"protocols": []
}
another_mock_link = copy.deepcopy(mock_link)
another_mock_link["process"] = "5554424e-3ab1-491c-8295-a68c7bb04b61"
# when
links = LinkSet()
links.add_link(mock_link)
links.add_link(another_mock_link)
links.add_link(mock_link)
# then
self.assertTrue(len(links.get_links()) == 2)
self.assertTrue(links.get_links()[0] == mock_link)
self.assertTrue(links.get_links()[1] == another_mock_link)
SUBMISSION = {
"triggersAnalysis": False,
"_links": {
"self": {
"href": "http://api.ingest.data.humancellatlas.org/SubmissionEnvelope/1234"
}
}
}
def test_upload_error_posted_to_ingest_api(self):
# given:
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
self.mock_staging_service.staging_area_exists = Mock(return_value=True)
# and:
self.mock_ingest_api.get_entity_by_uuid = MagicMock(return_value=self.SUBMISSION)
exporter.logger.info = MagicMock()
exporter.get_all_process_info = MagicMock()
exporter.get_metadata_by_type = MagicMock()
exporter.prepare_metadata_files = MagicMock()
exporter.bundle_links = MagicMock()
exporter.create_bundle_manifest = MagicMock()
error = Exception('Error thrown for Unit Test')
error_json = ExporterError(str(error)).getJSON()
exporter.upload_metadata_files = MagicMock(side_effect=error)
# when:
self.assertRaises(Exception, lambda: exporter.export_bundle(bundle_uuid=None, bundle_version=None, submission_uuid=None, process_uuid=None))
# then:
self.mock_ingest_api.create_submission_error.assert_called_once_with(
self.SUBMISSION.get("_links").get("self").get("href"),
error_json
)
def test_dss_upload_error_posted_to_ingest_api(self):
# given:
exporter = IngestExporter(ingest_api=self.mock_ingest_api, dss_api=self.mock_dss_api,
staging_service=self.mock_staging_service)
self.mock_staging_service.staging_area_exists = Mock(return_value=True)
# and:
self.mock_ingest_api.get_entity_by_uuid = MagicMock(return_value=self.SUBMISSION)
exporter.logger.info = MagicMock()
exporter.get_all_process_info = MagicMock()
exporter.get_metadata_by_type = MagicMock()
exporter.prepare_metadata_files = MagicMock()
exporter.bundle_links = MagicMock()
exporter.create_bundle_manifest = MagicMock()
exporter.upload_metadata_files = MagicMock()
exporter.get_metadata_files = MagicMock(return_value=list())
exporter.get_data_files = MagicMock(return_value=list())
error = Exception('Error thrown for Unit Test')
error_json = ExporterError(str(error)).getJSON()
exporter.put_bundle_in_dss = MagicMock(side_effect=error)
# when:
with self.assertRaises(Exception) as context:
exporter.export_bundle(bundle_uuid=None, bundle_version=None, submission_uuid=None, process_uuid=None)
# then:
self.assertIsNotNone(context.exception)
self.mock_ingest_api.create_submission_error.assert_called_once_with(
self.SUBMISSION.get("_links").get("self").get("href"),
error_json
)
def json_from_expected_bundle_file(relative_dir):
# relative dir is relative to test/bundles
with open(BASE_PATH + '/bundles/' + relative_dir, 'rb') as expected_bundle_file:
return json.load(expected_bundle_file)
|
StarcoderdataPython
|
4897913
|
<reponame>parampavar/localstack
import io
import logging
import os
import tarfile
import zipfile
from subprocess import Popen
from typing import Optional, Union
from .run import run
from .strings import truncate
LOG = logging.getLogger(__name__)
def is_zip_file(content):
stream = io.BytesIO(content)
return zipfile.is_zipfile(stream)
def get_unzipped_size(path: str):
"""Returns the size of the unzipped file."""
with zipfile.ZipFile(path, "r") as zip_ref:
return sum(f.file_size for f in zip_ref.infolist())
def unzip(path: str, target_dir: str, overwrite: bool = True) -> Optional[Union[str, Popen]]:
from localstack.utils.platform import is_debian
is_in_debian = is_debian()
if is_in_debian:
        # Running the native "unzip" command can be an order of magnitude faster than the zipfile module
flags = "-o" if overwrite else ""
flags += " -q"
try:
return run("cd %s; unzip %s %s" % (target_dir, flags, path), print_error=False)
except Exception as e:
error_str = truncate(str(e), max_length=200)
LOG.info(
'Unable to use native "unzip" command (using fallback mechanism): %s', error_str
)
try:
zip_ref = zipfile.ZipFile(path, "r")
except Exception as e:
LOG.warning("Unable to open zip file: %s: %s", path, e)
raise e
def _unzip_file_entry(zip_ref, file_entry, target_dir):
"""Extracts a Zipfile entry and preserves permissions"""
out_path = os.path.join(target_dir, file_entry.filename)
if is_in_debian and os.path.exists(out_path) and os.path.getsize(out_path) > 0:
# this can happen under certain circumstances if the native "unzip" command
# fails with a non-zero exit code, yet manages to extract parts of the zip file
return
zip_ref.extract(file_entry.filename, path=target_dir)
perm = file_entry.external_attr >> 16
# Make sure to preserve file permissions in the zip file
# https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module
os.chmod(out_path, perm or 0o777)
try:
for file_entry in zip_ref.infolist():
_unzip_file_entry(zip_ref, file_entry, target_dir)
finally:
zip_ref.close()
def untar(path: str, target_dir: str):
mode = "r:gz" if path.endswith("gz") else "r"
with tarfile.open(path, mode) as tar:
tar.extractall(path=target_dir)
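# Minimal usage sketch (illustrative only; the archive paths and target directories
# below are assumptions, not part of localstack itself):
#
#   with open("/tmp/function.zip", "rb") as f:
#       if is_zip_file(f.read()):
#           unzip("/tmp/function.zip", "/tmp/function-extracted")
#   untar("/tmp/layer.tar.gz", "/tmp/layer-extracted")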
|
StarcoderdataPython
|
328074
|
from netifaces import AF_INET, AF_INET6
import netifaces as ni
ip_version = "IPv4"
default_interface = None
default_ip_address_4 = None
default_ip_address_6 = None
# Find the first default interface with an IPv4 and an IPv6 address
interfaces = ni.interfaces()
for interface in interfaces:
if interface.startswith("lo"):
continue
if AF_INET in ni.ifaddresses(interface):
if "addr" in ni.ifaddresses(interface)[AF_INET][0]:
default_interface = str(interface)
default_ip_address_4 = str(ni.ifaddresses(default_interface)[AF_INET][0]['addr'])
if AF_INET6 in ni.ifaddresses(interface):
if "addr" in ni.ifaddresses(interface)[AF_INET6][0]:
            default_ip_address_6 = str(ni.ifaddresses(interface)[AF_INET6][0]['addr'])
break
print ("default IP interface: " + str(default_interface))
print ('default IPv4 address: ' + str(default_ip_address_4))
print ("default IPv6 address: " + str(default_ip_address_6))
def set_ip_version(new_ip_version):
global ip_version
ip_version = new_ip_version
def get_ip_version():
return ip_version
def set_interface(interface):
global default_interface
global default_ip_address_4
global default_ip_address_6
default_interface = interface
if AF_INET in ni.ifaddresses(interface):
default_ip_address_4 = ni.ifaddresses(default_interface)[AF_INET][0]['addr']
    if AF_INET6 in ni.ifaddresses(interface) and 'addr' in ni.ifaddresses(interface)[AF_INET6][0]:
default_ip_address_6 = ni.ifaddresses(default_interface)[AF_INET6][0]['addr']
def get_interface():
return default_interface
def set_ip_address(ip):
global default_ip_address_4
default_ip_address_4 = ip
def set_ip_address6(ip):
global default_ip_address_6
default_ip_address_6 = ip
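# Usage sketch (assumed): callers read the auto-detected defaults via the getters and
# may override them, e.g.
#   set_ip_version("IPv6")
#   set_interface("eth0")   # "eth0" is only an example interface name
#   print(get_interface(), get_ip_version())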
|
StarcoderdataPython
|
8029928
|
<filename>voiceplay/datasources/lastfm.py<gh_stars>1-10
#-*- coding: utf-8 -*-
""" Last.FM API module with retries and caching """
import datetime
import json
import logging
import random
random.seed()
import sys
import time
from copy import deepcopy
# works after installing `future` package
from queue import Queue # pylint:disable=import-error
import pylast
from tqdm import tqdm
from voiceplay.config import Config
from voiceplay.database import voiceplaydb
from voiceplay.logger import logger
from voiceplay.utils.helpers import debug_traceback
from voiceplay.utils.track import TrackNormalizer
def lfm_retry(retry_count=1):
"""
Retry + cache decorator
"""
def lfm_retry_func(func):
"""
retry function
"""
def func_wrapper(*args, **kwargs):
"""
function wrapper
"""
rargs = list(args)
rargs.pop(0)
rargs = str(rargs) + str(kwargs)
func_name = str(func.__name__)
result = voiceplaydb.get_lastfm_method(func_name, rargs)
result = json.loads(result) if result else None
if result:
return result
for _ in range(1, retry_count + 1):
try:
result = func(*args, **kwargs)
if result:
voiceplaydb.set_lastfm_method(func_name, rargs, json.dumps(result))
break
except Exception as exc:
message = 'Method/function %r failed with %r, retrying...' % (func_name, exc)
debug_traceback(sys.exc_info(), __file__, message=message)
return result
return func_wrapper
return lfm_retry_func
class VoicePlayLastFm(object):
"""
Last.Fm API
"""
def __init__(self):
cfg_data = Config.cfg_data()
try:
self.network = pylast.LastFMNetwork(api_key=cfg_data['lastfm']['key'],
api_secret=cfg_data['lastfm']['secret'],
username=cfg_data['lastfm']['username'],
password_hash=cfg_data['lastfm']['password'])
self.scrobble_enabled = True
except Exception as _:
# last.fm network registration failed, possibly due to scrobbling/API issue, try data only
self.scrobble_enabled = False
self.network = pylast.LastFMNetwork(api_key=cfg_data['lastfm']['key'],
api_secret=cfg_data['lastfm']['secret'])
@lfm_retry(retry_count=3)
def get_top_tracks_geo(self, country_code):
"""
Get top tracks based on country of origin.
Country name: ISO 3166-1
"""
tracks = self.network.get_geo_top_tracks(country_code)
return self.trackarize(tracks)
@lfm_retry(retry_count=3)
def get_top_tracks_global(self):
"""
Global top tracks (chart)
"""
tracks = self.network.get_top_tracks()
return self.trackarize(tracks)
@lfm_retry(retry_count=3)
def get_top_tracks(self, artist):
"""
Get top tracks by artist
"""
artist = self.get_corrected_artist(artist)
aobj = pylast.Artist(artist, self.network)
tracks = self.trackarize(aobj.get_top_tracks())
return [item for item in tracks if not TrackNormalizer.is_locally_blacklisted(item)]
@lfm_retry(retry_count=3)
def get_station(self, query):
"""
Get station based on artist/tag
"""
if self.get_query_type(query) != 'artist':
aobj = pylast.Tag(query, self.network)
tracks = self.trackarize(aobj.get_top_tracks())
else:
tracks = []
for artist in self.get_similar_artists(query):
tracks += self.get_top_tracks(artist)
return tracks
@lfm_retry(retry_count=3)
def get_top_albums(self, artist):
"""
Get top albums for provided artist
"""
album_list = []
artist = self.get_corrected_artist(artist)
aobj = pylast.Artist(artist, self.network)
albums = aobj.get_top_albums()
for album in albums:
album_list.append(album.item.title)
return album_list
@lfm_retry(retry_count=3)
def get_tracks_for_album(self, artist, album):
"""
Get top tracks for artist + album
"""
result = []
artist = self.get_corrected_artist(artist)
tracks = pylast.Album(artist, album.title(), self.network).get_tracks()
for track in tracks:
pretty_track = track.artist.name + ' - ' + track.title
if not TrackNormalizer.is_locally_blacklisted(pretty_track):
result.append(pretty_track)
return result
@lfm_retry(retry_count=3)
def get_corrected_artist(self, artist):
"""
Get corrected artist
"""
a_s = pylast.ArtistSearch(artist, self.network)
reply = a_s.get_next_page()
if isinstance(reply, list) and reply:
return reply[0].name
else:
return artist
@lfm_retry(retry_count=3)
def get_query_type(self, query):
"""
Detect whether query is just artist or artist - track or tag (dull smiley)
"""
query = query.lower()
text = query.capitalize()
# known issue, see http://www.last.fm/music/Vocal+Trance
if query == 'vocal trance':
reply = 'artist_track'
elif self.get_corrected_artist(text).lower() == text.lower():
reply = 'artist'
else:
reply = 'artist_track'
return reply
@lfm_retry(retry_count=3)
def get_artist_icon(self, artist, image_size=pylast.COVER_SMALL):
"""
Get artist icon
supported sizes: small, medium, large
"""
artist = self.get_corrected_artist(artist)
aobj = pylast.Artist(artist, self.network)
return aobj.get_cover_image(image_size)
@lfm_retry(retry_count=3)
def get_artist_tags(self, artist, limit=10):
"""
Get artist tags
"""
tags = []
artist = self.get_corrected_artist(artist)
aobj = pylast.Artist(artist, self.network)
# make sure this is sorted
for tag in sorted(aobj.get_top_tags(), key=lambda item: int(item.weight), reverse=True):
tags.append(tag.item.get_name().lower())
return tags[:limit]
@lfm_retry(retry_count=3)
def get_similar_artists(self, artist, limit=10):
"""
Get similar artists
"""
artist = self.get_corrected_artist(artist)
result = []
aobj = pylast.Artist(artist, self.network)
for artist in aobj.get_similar():
result.append(artist.item.name)
return result[:limit]
def get_recent_tracks(self, limit=20):
"""
Get list of recently played tracks
"""
tracklist = []
user = self.network.get_user(self.network.username)
for track in user.get_recent_tracks(limit=limit):
artist = track.track.get_artist().name
title = track.track.get_title()
if sys.version_info.major == 2:
artist = artist.encode('utf8')
title = title.encode('utf8')
tracklist.append('{0!s} - {1!s}'.format(artist, title))
return tracklist
def scrobble(self, artist, track):
"""
Scrobble track
"""
if sys.version_info.major == 2:
artist = artist.encode('utf8')
track = track.encode('utf8')
full_track = '{0!s} - {1!s}'.format(artist, track)
if not self.scrobble_enabled:
logger.debug('Scrobbling disabled, track %r not sent', full_track)
return
recent_tracks = self.get_recent_tracks(limit=1)
if full_track in recent_tracks:
logger.debug('Scrobbling skipped, track %r already scrobbled', full_track)
return
logger.debug('Scrobbling track: %r', full_track)
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
self.network.scrobble(artist=artist, title=track, timestamp=timestamp)
@staticmethod
def trackarize(array):
"""
Convert lastfm track entities to track names
TODO: find better name for this method
"""
top_tracks = []
for track in array:
top_tracks.append(track.item.artist.name + ' - ' + track.item.title)
return top_tracks
@staticmethod
def numerize(array):
"""
        Prefix each track name with its 1-based index
"""
reply = []
for idx, element in enumerate(array):
reply.append('%s: %s' % (idx + 1, element))
return reply
class StationCrawl(object):
"""
Last.FM station crawler (recursive search)
"""
playlist_get_timeout = 60
artist_genre_blacklist = {'black metal': ['<NAME>', '<NAME>', 'One Direction',
'<NAME>', '<NAME>', '<NAME>', 'Muse'],
'vocal trance': ['Groove Coverage', 'Sylver', 'Fragma', 'Franky Tunes',
'Paffendorf', '<NAME>', '<NAME>',
'<NAME>', 'Lasgo', '<NAME>',
'<NAME>', '<NAME>']}
def __init__(self):
self.lfm = VoicePlayLastFm()
self.exit = False
self.genre_queue = Queue()
self.playlist_queue = Queue()
self.session_playlist = []
def artist_blacklisted_for_genre(self, artist, genre):
"""
Check if artist is blacklisted for specific genre
"""
blacklisted = False
blacklisted_artists = self.artist_genre_blacklist.get(genre.lower(), [])
if artist.encode('utf-8') in blacklisted_artists:
logger.debug('Artist %s is blacklisted for genre %s', artist, genre.lower())
blacklisted = True
return blacklisted
def similar_artists(self, artist, genre):
"""
Find similar artists for artist/genre combination
"""
sm_artists = []
similar = self.lfm.get_similar_artists(artist)
iterator = tqdm(similar) if logger.level == logging.DEBUG else similar
for similar_artist in iterator:
            if (genre.lower() in self.lfm.get_artist_tags(similar_artist)
                    and similar_artist not in sm_artists
                    and not self.artist_blacklisted_for_genre(similar_artist, genre)):
logger.debug('Genre match for %s', similar_artist)
sm_artists.append(similar_artist)
return sm_artists
def for_genre(self, genre):
"""
Search tracks for a specific genre and add them to playlist
"""
sm_artists = []
# seed data
logger.debug(genre)
tracks = self.lfm.get_station(genre)
random.shuffle(tracks)
for track in tracks:
artist = track.split(' - ')[0]
# check station blacklist
if self.artist_blacklisted_for_genre(artist, genre):
continue
# check history /banned/ blacklist
if TrackNormalizer.is_locally_blacklisted(track):
logger.debug('Track %s is blacklisted using "ban" command', track)
continue
self.playlist_queue.put(track)
for atmp in self.similar_artists(artist, genre):
if not atmp in sm_artists:
# new one
sm_artists.append(atmp)
[self.playlist_queue.put(tr) for tr in self.lfm.get_top_tracks(atmp)[:3]]
# operate on dataset
result = sm_artists
for artist in sm_artists:
tmp = self.similar_artists(artist, genre)
for aname in tmp:
if not aname in sm_artists and not aname in result:
# new one
result.append(aname)
[self.playlist_queue.put(tr) for tr in self.lfm.get_top_tracks(aname)[:3]]
def put_genre(self, genre):
"""
Add genre to queue
"""
self.genre_queue.put(genre)
def genre_loop(self):
"""
Poll queue for newly added genres
"""
while not self.exit:
if self.genre_queue.empty():
time.sleep(0.01)
continue
else:
item = self.genre_queue.get()
self.for_genre(item)
def playlist_loop(self):
"""
Poll playlist for newly added tracks
"""
while not self.exit:
if self.playlist_queue.empty():
time.sleep(0.01)
continue
else:
item = self.playlist_queue.get()
if not item in self.session_playlist:
self.session_playlist.append(item)
session_playlist = deepcopy(self.session_playlist)
random.shuffle(session_playlist)
self.session_playlist = session_playlist
def set_exit(self, status):
"""
Set exit flag
"""
self.exit = status
@property
def playlist(self):
"""
Return current playlist
"""
start = time.time()
while time.time() - start <= self.playlist_get_timeout:
if not self.session_playlist:
time.sleep(0.1)
else:
break
return self.session_playlist
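# Minimal usage sketch (assumes valid Last.fm credentials in the voiceplay config;
# the thread handling here is illustrative, the real player wires these loops up itself):
#
#   import threading
#   crawler = StationCrawl()
#   threading.Thread(target=crawler.genre_loop, daemon=True).start()
#   threading.Thread(target=crawler.playlist_loop, daemon=True).start()
#   crawler.put_genre('vocal trance')
#   print(crawler.playlist[:10])   # blocks up to playlist_get_timeout seconds
#   crawler.set_exit(True)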
|
StarcoderdataPython
|
6572369
|
import tuio
import threading
from tuio.objects import Tuio2DCursor
from tuio.objects import Tuio2DObject
class _TuioCallbackListener(tuio.observer.AbstractListener):
"""
    Private helper class to react to TUIO events we get from pytuio.
    Sorry, this is very hackish to get things done...
    notify is called when an object is added or updated.
    notifyRemove is called whenever a TUIO alive message arrives. If this
    happens, we compare which cursors or objects are still alive and then decide
    to call the objectRemoved/cursorRemoved callbacks. This is a hack into pytuio as
    the original implementation does not deal with remove-stuff very well.
    If errors occur, please don't hesitate to ask!
"""
_callback = None
#stores whether a object is active
_obj_cache = {}
#stores whether a cursor is active
_cur_cache = {}
def __init__(self, name, subject, callback):
tuio.observer.AbstractListener.__init__(self, name, subject)
self._callback = callback
def notify(self, event):
"""
This function will be called each time we get a set/update message via
TUIO. It adds the given object to the cache and calls the right callback
appropriately
"""
# get the object we have to process
obj = event.object
# distinguish between Object and Cursor here
if isinstance(event.object, Tuio2DObject):
# check if object has been known before and either call added or
# updated callbacks
if obj.sessionid not in self._obj_cache:
#finally add it to the cache anyway
self._obj_cache[obj.sessionid] = obj
self._callback.objectAdded(obj)
else:
self._callback.objectUpdated(obj)
if isinstance(event.object, Tuio2DCursor):
# check if object has been known before and either call added or
# updated callbacks
if obj.sessionid not in self._cur_cache:
self._callback.cursorAdded(obj)
else:
self._callback.cursorUpdated(obj)
# finally add it to the cache anyway
self._cur_cache[obj.sessionid] = obj
# print "NOTIFY", self._cur_cache.keys(), self._obj_cache.keys()
def notifyRemove(self, event):
"""
        This function will be called each time we receive a TUIO "alive"
        message, which tells us about object removal
"""
profile, message = event
alive = message[3:]
if str(type(profile)) == "<class 'tuio.profiles.Tuio2DcurProfile'>":
# compare the alive message with current cache and remove all
# cursors that are not in alive message anymore
diff = list(set(self._cur_cache.keys())-set(alive))
for d in diff:
self._callback.cursorRemoved(self._cur_cache[d])
del self._cur_cache[d]
else:
# compare the alive message with current object cache and remove all
# objects that are not in the alive message anymore
diff = list(set(self._obj_cache.keys())-set(alive))
for d in diff:
self._callback.objectRemoved(self._obj_cache[d])
del self._obj_cache[d]
#print "NOTIFY_REMOVE: ", self._cur_cache.keys(), self._obj_cache.keys()
class TuioCallback(threading.Thread):
"""
    This class handles I/O to reacTIVision or a TUIO simulator.
    It is designed in such a way that it can run in a different thread, so that
    you can use another event loop in the main thread (e.g. the "main()" thread).
    This is necessary, as Qt/PySide needs its own event loop later on.
    We then might inject object events into Qt's event loop from this different
    thread.
    Call run() in order to start processing events. Mind, however, that this
    blocks until a TUIO message has been received.
    You can make this class run in its own thread. See the docs for __init__()
    to understand how. Then you have to call start() in order to start
    processing in a different thread. You can call stop() to leave.
    As the CPython implementation uses a global interpreter lock, no race
    conditions should actually happen.
    You finally have to derive from this class and reimplement the callback
    handlers. There you should implement the way you want to react to TUIO
    messages.
"""
def __init__(self, host = '127.0.0.1', port = 3333, threaded = False):
"""
sets up the TuioCallback instance
parameters:
host: which address to listen on. localhost by default
port: the port to listen on
threaded: decide whether it should be threaded or not.
"""
# according to python docs we have to call this first
threading.Thread.__init__(self,group=None, target=None, name=None, args=(), kwargs={})
self._event = threading.Event()
self._event.set()
self._host = host
self._port = port
self._tracking = tuio.Tracking(host=host,port=port)
self._callback = _TuioCallbackListener("Callback",
self._tracking.eventManager,
self)
self.daemon = False
if threaded:
self.start()
def __del__(self):
self.stop()
def stop(self):
"""
Call this function to exit the eventloop and let the thread finish.
"""
self._event.clear()
def run(self):
"""
        This is the function that will be run inside another thread. We need
        this in order for our TUIO listener to be event-based (it can block when
        reading from sockets).
        You can use this function as a simple event loop as well.
        """
        # loop until stop() clears the event
        while self._event.is_set():
            self._tracking.update()
#### Callbacks you have to reimplement
def objectAdded(self, obj):
""" implement me """
raise NotImplementedError("Must subclass me")
def objectUpdated(self, obj):
""" implement me """
raise NotImplementedError("Must subclass me")
def objectRemoved(self, obj):
""" implement me """
raise NotImplementedError("Must subclass me")
def cursorAdded(self, obj):
""" implement me """
raise NotImplementedError("Must subclass me")
def cursorUpdated(self, obj):
""" implement me """
raise NotImplementedError("Must subclass me")
def cursorRemoved(self, obj):
""" implement me """
raise NotImplementedError("Must subclass me")
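# A minimal subclass sketch, as the class docstring above requires (the print calls are
# placeholders for real application logic; "PrintingTuioCallback" is just an example name):
#
#   class PrintingTuioCallback(TuioCallback):
#       def objectAdded(self, obj):   print("object added", obj.sessionid)
#       def objectUpdated(self, obj): print("object updated", obj.sessionid)
#       def objectRemoved(self, obj): print("object removed", obj.sessionid)
#       def cursorAdded(self, obj):   print("cursor added", obj.sessionid)
#       def cursorUpdated(self, obj): print("cursor updated", obj.sessionid)
#       def cursorRemoved(self, obj): print("cursor removed", obj.sessionid)
#
#   tc = PrintingTuioCallback(threaded=True)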
if __name__ == "__main__":
# a simple test if threading does work as desired
tc = TuioCallback(threaded=True)
try:
# make the main thread looping forever
while True:
pass
except KeyboardInterrupt:
# if you interrupt by keyboard input, stop the thread
# aswell
tc.stop()
|
StarcoderdataPython
|
4925738
|
<reponame>youaresherlock/PythonPractice
#!usr/bin/python
# -*- coding:utf8 -*-
class Student:
def __init__(self, name):
self.name = name
def __contains__(self, item):
return item.name in self.name
s1 = Student('clarence')
s2 = Student('cla')
print(s2 in s1) # True
|
StarcoderdataPython
|
5066128
|
from sklearn.base import TransformerMixin
import pandas as pd
import numpy as np
class DataFrameImputer(TransformerMixin):
def __init__(self):
"""Impute missing values.
Columns of dtype object are imputed with the most frequent value
in column.
Columns of other types are imputed with mean of column.
"""
def fit(self, X, y=None):
# Grab list of object column names before doing imputation
self.obj_list = X.select_dtypes(include=['object']).columns.values
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].mean() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
X = X.fillna(self.fill)
for i in self.obj_list:
X[i] = X[i].astype(object)
return X
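if __name__ == "__main__":
    # Quick usage example on made-up toy data (not part of the original module):
    # the object column's NaN is filled with the most frequent value ('apple'),
    # the numeric column's NaN is filled with the column mean.
    toy = pd.DataFrame({
        'fruit': ['apple', None, 'apple', 'banana'],
        'weight': [1.0, 2.0, None, 4.0],
    })
    print(DataFrameImputer().fit_transform(toy))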
|
StarcoderdataPython
|
66877
|
<filename>khel_wgs_sc2/workflow/ui.py<gh_stars>0
import time
import datetime
from tkinter import filedialog
from tkinter import *
import re
def progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
        iterable    - Required  : iterable to wrap; total iterations are taken from len(iterable) (Iterable)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
total = len(iterable)
# Progress Bar Printing Function
def printProgressBar (iteration):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Initial Call
printProgressBar(0)
# Update Progress Bar
for i, item in enumerate(iterable):
yield item
printProgressBar(i + 1)
# Print New Line on Complete
print()
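# Example usage (illustrative; the sleep stands in for real per-item work):
#
#   for record in progressBar(range(200), prefix='Processing:', suffix='done', length=40):
#       time.sleep(0.01)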
def get_run_data():
# seq run order first
ask = True
while ask:
platform = input("\nSelect a platform.\n'c' for ClearLabs\n'i' for ISeq (Illumina)\n--> ")
if (platform.lower() == "c" or platform.lower() == "i"):
ask = False
# logic for clearlabs data
if platform.lower() == "c":
# get seq run id next
platform = "ClearLabs"
seq_run_id = ""
ask = True
while ask:
seq_run_id = input("\nPlease copy/paste the seq_run_id value from the ClearLabs website below\nExample: Run BB1L12.2021-06-16.01\n--> ")
# check that input is valid
if not re.search("Run BB\dL\d{2}.\d{4}-\d{2}-\d{2}.\d{2}", seq_run_id):
print("Invalid input, try again.")
else:
ask = False
# now, pull meaningful information out of supplied data
machine_num = seq_run_id[8:10]
run_date = datetime.datetime.strptime(seq_run_id[11:21], '%Y-%m-%d').strftime("%m/%d/%Y")
day_run_num = int(seq_run_id[-2:])
# get the run data from clearlabs21
ask = True
print("\nPlease copy/paste all run data from the clearlabs website below\n")
c = 0
pos_dict = {"A":1, "B":2, "C":3, "D":4, "E":5, "F":6, "G":7, "H":8}
run_data = {"hsn":[], "position":[], "avg_depth":[], "percent_cvg":[]}
while c < 224:
u_input = input("")
if c % 7 == 0: # it is a seq_run_position
# format input first
pos = (int(u_input[-1])*8 - 8) + pos_dict[u_input[0]]
run_data["position"].append(pos)
elif c % 7 == 1: # it is an hsn
hsn = ""
if re.search("\d{7}..", u_input):
hsn = u_input[0:-2]
else:
hsn = u_input
run_data["hsn"].append(hsn)
elif c % 7 == 3: # it is depth
depth = u_input.replace("x", "")
run_data["avg_depth"].append(int(depth))
elif c % 7 == 4: # it is coverage
coverage = u_input.replace("%", "")
coverage = float(coverage)/100
run_data["percent_cvg"].append(coverage)
else:
pass
c += 1
        return run_data, machine_num, run_date, day_run_num, platform
    # if not 'c' (ClearLabs), it is ISeq
else:
print("ISeq section not yet implemented.")
print("Closing in 10 seconds...")
time.sleep(10)
raise ValueError("ISeq functionality not yet implemented!")
def get_path():
time.sleep(1)
print("Opening dialog box...")
time.sleep(1)
root = Tk()
root.withdraw()
path_read = filedialog.askopenfilename()
return path_read
def get_path_folder():
time.sleep(1)
print("Opening dialog box...")
time.sleep(1)
root = Tk()
root.withdraw()
path = filedialog.askdirectory()
return path
|
StarcoderdataPython
|
1764629
|
import numpy as np
import probtorch
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
valid_data_loader=None, lr_scheduler=None, len_epoch=None,
num_particles=1):
super().__init__(model, criterion, metric_ftns, optimizer, config)
self.config = config
self.data_loader = data_loader
if len_epoch is None:
# epoch-based training
self.len_epoch = len(self.data_loader)
else:
# iteration-based training
self.data_loader = inf_loop(data_loader)
self.len_epoch = len_epoch
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = int(np.sqrt(data_loader.batch_size))
self.num_particles = num_particles
self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
def validate(self, epochs=1):
"""
Full validation logic
"""
metrics = []
for epoch in range(epochs):
result = self._valid_epoch(epoch)
# save logged informations into log dict
log = {'epoch': epoch}
log.update(result)
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
metrics.append(result)
return metrics
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
self.model.train()
self.train_metrics.reset()
for batch_idx, (data, target) in enumerate(self.data_loader):
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
self.train_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.train_metrics.update(met.__name__, met(target))
if batch_idx % self.log_step == 0:
self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(
epoch,
self._progress(batch_idx),
loss))
self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
if batch_idx == self.len_epoch:
break
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_'+k : v for k, v in val_log.items()})
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
self.valid_metrics.update('loss', loss.item())
valid_metric_metadata = {
'dataset': self.valid_data_loader.dataset,
'model': self.model,
'device': self.device
}
for met in self.metric_ftns:
self.valid_metrics.update(met.__name__, met(output, target, metadata=valid_metric_metadata))
self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# add histogram of model parameters to the tensorboard
for name, p in self.model.named_parameters():
self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def _progress(self, batch_idx):
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loader, 'n_samples'):
current = batch_idx * self.data_loader.batch_size
total = self.data_loader.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
|
StarcoderdataPython
|
3529379
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from sys import argv
from desert import Desert
from desert import box
from desert import bzspl
from desert import circle
from desert import stroke
from desert.color import rgb
from desert.color import white
from desert.color import black
from desert.helpers import filename
VERBOSE = 'vv'
def main(arg):
imsize = 1000
with Desert(imsize, verbose=VERBOSE)\
.init(fg=rgb(1.0, 0.0, 0.0, 0.1),
bg=white()) as desert:
draw = desert.draw
draw([box(0.15, (0.3, 0.3), 2.0),
box(0.2, (0.2, 0.5), 0.1),
box((0.05, 0.5), (0.25, 0.9), 1.0),
box((0.3, 0.2), (0.1, 0.9), 2.0)])
desert.show()
desert.set_fg(rgb(0, 0.5, 0.5, 0.1))
draw([box(0.15, (0.5, 0.3), 2.0),
box(0.2, (0.5, 0.5), 0.1),
box((0.05, 0.5), (0.6, 0.9), 1.0),
box((0.1, 0.2), (0.3, 0.9), 1.0)])
desert.show()
desert.set_fg(rgb(0, 0.0, 0.8, 0.1))
draw([box(0.05, ((0.7, 0.3), (0.7, 0.8)), 1.0)])
desert.show()
draw([stroke(((0.1, 0.1),
(0.1, 0.1),
(0.1, 0.9),
(0.1, 0.1)),
((0.9, 0.9),
(0.1, 0.9),
(0.9, 0.9),
(0.2, 0.15)), 2)])
desert.set_fg(rgb(0, 0.7, 0.2, 0.1))
draw([circle(0.05, ((0.5, 0.4), (0.8, 0.4)), 1.0)])
draw([circle(0.05, ((0.9, 0.1),
(0.9, 0.15),
(0.9, 0.2),
(0.9, 0.25),
(0.9, 0.3)), 2)\
.rgb([rgb(0.2, 0.2, 0.9, 0.3),
rgb(0.9, 0.2, 0.2, 0.3),
rgb(0.2, 0.9, 0.2, 0.3),
rgb(0.9, 0.9, 0.2, 0.3),
rgb(0.2, 0.9, 0.9, 0.3)]),
circle(0.05, ((0.85, 0.1),
(0.85, 0.3)), 2)\
.rgb([rgb(0.5, 0.2, 0.9, 0.3),
rgb(0.9, 0.5, 0.2, 0.3)])]).show()
desert.set_fg(black())
draw([bzspl([[(0.1, 0.2),
(0.3, 0.4),
(0.5, 0.6)],
[(0.15, 0.25),
(0.35, 0.45),
(0.55, 0.65)]], 2)]).show()
draw([bzspl([[(0.1, 0.2),
(0.8, 0.3),
(0.3, 0.9)]], 2)]).show()
draw([bzspl([[(0.1, 0.2),
(0.8, 0.3),
(0.3, 0.9)],
[(0.35, 0.25),
(0.85, 0.35),
(0.35, 0.95)]], 2)]).show()
draw([bzspl([[(0.1, 0.2),
(0.4, 0.25),
(0.9, 0.15),
(0.9, 0.3),
(0.95, 0.45),
(0.8, 0.9),
(0.1, 0.87)]], 2, closed=True)]).show()
draw([bzspl([[(0.15, 0.2),
(0.45, 0.25),
(0.95, 0.15),
(0.95, 0.3),
(0.98, 0.45),
(0.85, 0.9),
(0.15, 0.87)]], 2)]).show()
desert.set_fg(rgb(1, 0, 1, 1))
draw([bzspl([[(0.5, 0.1),
(0.3, 0.2),
(0.1, 0.3),
(0.4, 0.3),
(0.95, 0.45),
(0.8, 0.9),
(0.1, 0.87)]], 2)]).gforce().show(3).save(filename(arg))
if __name__ == '__main__':
main(argv)
|
StarcoderdataPython
|
1918474
|
"""
Saves the command line to a file.
The command then may be repeated and should produce the same output.
"""
from gna.ui import basecmd
import pipes
from packages.env.lib.cwd import update_namespace_cwd
from sys import argv
class cmd(basecmd):
@classmethod
def initparser(cls, parser, env):
parser.add_argument('output', nargs='*', help='filename to save cmd')
parser.add_argument('-v', '--verbose', action='store_true', help='print the command line')
def init(self):
update_namespace_cwd(self.opts, 'output')
self.level = 0
cmd = argv[0]
opts = argv[1:]
self.out = cmd
self.inc()
for opt in opts:
self.append(opt)
if self.opts.verbose:
print('Command line:')
print(self.out)
header = '#!/usr/bin/bash\n\n'
if self.opts.output:
for opath in self.opts.output:
with open(opath, 'w') as f:
f.writelines([header, self.out, '\n'])
print('Command line saved to:', opath)
def newline(self):
self.out+=' \\\n'+self.level*' '
def inc(self):
self.level+=1
def append(self, opt):
if opt=='--':
self.newline()
self.out+=pipes.quote(opt)+' '
__tldr__ = """\
The main argument is the output file name to save the command.
Save the whole command to the file 'command.sh':
```sh
./gna \\
-- comment Initialize a gaussian peak with default configuration and 50 bins \\
-- gaussianpeak --name peak_MC --nbins 50 \\
-- cmd-save command.sh
```
In the verbose mode it also prints the command to the stdout.
"""
|
StarcoderdataPython
|
5101635
|
import numpy as np
from sificc_lib import utils
class DataModelQlty():
'''Data model for the features and targets to train SiFi-CC Quality
    Neural Network. The training data should be generated separately
from a trained SiFi-CC Neural Network.
Features R_n*(9*clusters_limit) format: {
cluster entries,
cluster energy,
cluster energy uncertainty,
cluster position (x,y,z),
cluster position uncertainty (x,y,z)
} * clusters_limit
Targets R_n*11 format: {
event type (is ideal Compton or not),
e energy,
p energy,
e position (x,y,z),
p position (x,y,z),
e cluster index,
p cluster index,
}
Reco R_n*9 format: {
event type (is ideal Compton or not),
e energy,
p energy,
e position (x,y,z),
p position (x,y,z),
}
Quality R_n*4 format: {
e energy quality,
p energy quality,
e position quality,
p position quality,
}
'''
def __init__(self, file_name, *, batch_size = 64, validation_percent = .05, test_percent = .1,
weight_compton = 1, weight_non_compton = 1):
self.__validation_percent = validation_percent
self.__test_percent = test_percent
self.batch_size = batch_size
self.weight_compton = weight_compton
self.weight_non_compton = weight_non_compton
self.cluster_size = 9
self.append_dim = True
self.__std_factor = 15
self.__balanced_training = False
# loading training matrices
with open(file_name, 'rb') as f_train:
npz = np.load(f_train)
self._features = npz['features']
self._targets = npz['targets']
self._reco = npz['reco']
self._seq = npz['sequence']
self._qlty = npz['quality']
# assert number of columns is correct
assert self._features.shape[1] % self.cluster_size == 0
# define clusters limit
self.clusters_limit = self._features.shape[1] // self.cluster_size
#normalize features, targets, and reco
self._features = (self._features - self.__mean_features) / self.__std_features
self._targets = (self._targets - self.__mean_targets) / self.__std_targets
self._reco = (self._reco - self.__mean_targets[:-2]) / self.__std_targets[:-2]
# compute the starting position of the validation and test sets
self.validation_start_pos = int(self.length * (1-self.validation_percent-self.test_percent))
self.test_start_pos = int(self.length * (1-self.test_percent))
def _denormalize_features(self, data):
if data.shape[-1] == self._features.shape[-1]:
return (data * self.__std_features) + self.__mean_features
raise Exception('data has invalid shape of {}'.format(data.shape))
def _denormalize_targets(self, data):
if data.shape[-1] == self._targets.shape[-1]:
return (data * self.__std_targets) + self.__mean_targets
elif data.shape[-1] == self._reco.shape[-1]:
return (data * self.__std_targets[:-2]) + self.__mean_targets[:-2]
else:
raise Exception('data has invalid shape of {}'.format(data.shape))
def normalize_targets(self, data):
if data.shape[-1] == self._targets.shape[-1]:
return (data - self.__mean_targets) / self.__std_targets
elif data.shape[-1] == self._reco.shape[-1]:
return (data - self.__mean_targets[:-2]) / self.__std_targets[:-2]
else:
raise Exception('data has invalid shape of {}'.format(data.shape))
def get_targets_dic(self, start=None, end=None):
start = start if start is not None else 0
end = end if end is not None else self.length
return {
'type': self._target_type[start:end],
'e_cluster': self._target_e_cluster[start:end],
'p_cluster': self._target_p_cluster[start:end],
'pos_x': self._target_pos_x[start:end],
'pos_y': self._target_pos_y[start:end],
'pos_z': self._target_pos_z[start:end],
'energy': self._target_energy[start:end],
'quality': self._target_qlty[start:end]
}
def get_features(self, start=None, end=None):
start = start if start is not None else 0
end = end if end is not None else self.length
if self.append_dim:
return self._features[start:end].reshape((-1, self._features.shape[1], 1))
else:
return self._features[start:end]
def shuffle(self, only_train=True):
# if balancing the data is activated, select another random sample from the
# background events
if self.__balanced_training:
non_comptons = np.random.choice(self.__background_pool, self.__n_comptons)
index = np.concatenate([non_comptons, self.__base_index], axis=0)
self._features = self.__features_all[index]
self._targets = self.__targets_all[index]
self._reco = self.__reco_all[index]
self._seq = self.__seq_all[index]
self._qlty = self.__qlty_all[index]
limit = self.validation_start_pos if only_train else self.length
sequence = np.arange(self.length)
sequence[:limit] = np.random.permutation(limit)
self._features = self._features[sequence]
self._targets = self._targets[sequence]
self._reco = self._reco[sequence]
self._seq = self._seq[sequence]
self._qlty = self._qlty[sequence]
@property
def steps_per_epoch(self):
return int(np.ceil(self.validation_start_pos/self.batch_size))
@property
def balance_training(self):
'''Balance the samples in the training set in order to make the number of Compton
samples equal to the number of background samples. Default value is False.'''
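# Usage sketch (illustrative instance name): setting `ds.balance_training = True` draws a
# fresh random sample of background events on every shuffle, while setting it back to
# False restores the full, unbalanced training set.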
return self.__balanced_training
@balance_training.setter
def balance_training(self, value):
# when balancing is activated
if (not self.__balanced_training) and (value==True):
# copy the original datasets
self.__features_all = self._features.copy()
self.__targets_all = self._targets.copy()
self.__reco_all = self._reco.copy()
self.__seq_all = self._seq.copy()
self.__qlty_all = self._qlty.copy()
# compute the list of background events to choose a sample from and
# the list of base features (all the comptons + validation set + test set)
train_type = self.train_y['type'].ravel().astype(bool)
self.__n_comptons = train_type.sum()
comptons = np.where(train_type)[0]
self.__background_pool = np.where(~train_type)[0]
valid_test = np.arange(self.validation_start_pos, self.length)
self.__base_index = np.concatenate([comptons, valid_test], axis=0)
# select a sample from the background and add it to the base features to
# compose a balanced training set
non_comptons = np.random.choice(self.__background_pool, self.__n_comptons)
index = np.concatenate([non_comptons, self.__base_index], axis=0)
self._features = self.__features_all[index]
self._targets = self.__targets_all[index]
self._reco = self.__reco_all[index]
self._seq = self.__seq_all[index]
self._qlty = self.__qlty_all[index]
# fix the position of the validation and test starting positions
diff = self.__targets_all.shape[0] - self._targets.shape[0]
self.validation_start_pos = self.validation_start_pos - diff
self.test_start_pos = self.test_start_pos - diff
# shuffle the training part
self.shuffle(only_train=True)
# when balancing is deactivated
elif self.__balanced_training and (value==False):
# compute the difference in size
diff = self.__targets_all.shape[0] - self._targets.shape[0]
# restore the original values
self._features = self.__features_all
self._targets = self.__targets_all
self._reco = self.__reco_all
self._seq = self.__seq_all
self._qlty = self.__qlty_all
# fix the position of the validation and test starting positions
self.validation_start_pos = self.validation_start_pos - diff
self.test_start_pos = self.test_start_pos - diff
self.__balanced_training = value
def generate_batch(self, augment=False):
while True:
self.shuffle(only_train=True)
for step in range(self.steps_per_epoch):
start = step * self.batch_size
end = (step+1) * self.batch_size
# end should not enter the validation range
end = end if end <= self.validation_start_pos else self.validation_start_pos
features_batch = self.get_features(start, end)
targets_batch = self.get_targets_dic(start, end)
if augment:
sequence, expanded_sequence = self.__get_augmentation_sequence()
features_batch = features_batch[:,expanded_sequence]
targets_batch['e_cluster'][:,1] = np.where(np.equal(targets_batch['e_cluster'][:,[1]], sequence))[1]
targets_batch['p_cluster'][:,1] = np.where(np.equal(targets_batch['p_cluster'][:,[1]], sequence))[1]
yield (
features_batch,
targets_batch,
targets_batch['type'] * self.weight_compton + \
(1-targets_batch['type']) * self.weight_non_compton
)
def __get_augmentation_sequence(self):
num_clusters = self.clusters_limit
sequence = np.random.permutation(num_clusters)
expanded_sequence = np.repeat(sequence * self.cluster_size, self.cluster_size) + \
np.tile(np.arange(self.cluster_size), num_clusters)
return sequence, expanded_sequence
def shuffle_training_clusters(self):
# e_pos = 9
# p_pos = 10
for i in range(self.length):
sequence, expanded_sequence = self.__get_augmentation_sequence()
self._features[i] = self._features[i, expanded_sequence]
self._targets[i,9] = np.where(np.equal(self._targets[i,9], sequence))[0]
self._targets[i,10] = np.where(np.equal(self._targets[i,10], sequence))[0]
################# Properties #################
@property
def length(self):
return self._targets.shape[0]
@property
def validation_percent(self):
return self.__validation_percent
@property
def test_percent(self):
return self.__test_percent
@property
def train_x(self):
return self.get_features(None, self.validation_start_pos)
@property
def train_y(self):
return self.get_targets_dic(None, self.validation_start_pos)
@property
def train_row_y(self):
return self._targets[:self.validation_start_pos]
@property
def validation_x(self):
return self.get_features(self.validation_start_pos, self.test_start_pos)
@property
def validation_y(self):
return self.get_targets_dic(self.validation_start_pos, self.test_start_pos)
@property
def validation_row_y(self):
return self._targets[self.validation_start_pos: self.test_start_pos]
@property
def test_x(self):
return self.get_features(self.test_start_pos, None)
@property
def test_y(self):
return self.get_targets_dic(self.test_start_pos, None)
@property
def test_row_y(self):
return self._targets[self.test_start_pos:]
@property
def reco_valid(self):
return self._reco[self.validation_start_pos: self.test_start_pos]
@property
def reco_test(self):
return self._reco[self.test_start_pos:]
@property
def __mean_features(self):
# define normalization factors
mean_entries = [1.7874760910930447]
mean_energies = [1.3219832176828306]
mean_energies_unc = [0.03352665535144364]
mean_positions = [3.08466733e+02, 8.30834656e-02, -8.41913642e-01]
mean_positions_unc = [1.05791671, 12.8333989, 0.94994155]
# declare the mean of a single cluster and repeat it throughout the clusters
mean = np.concatenate((
mean_entries,
mean_energies,
mean_energies_unc,
mean_positions,
mean_positions_unc
))
mean = np.tile(mean, self.clusters_limit)
return mean
@property
def __std_features(self):
# define normalization factors
std_entries = [1.6479899119958636]
std_energies = [1.8812291744163367]
std_energies_unc = [0.025137531990537407]
std_positions = [97.44675577, 30.56710605, 27.5600849]
std_positions_unc = [1.01437355, 6.11019272, 0.76225179]
std = np.concatenate((
std_entries,
std_energies,
std_energies_unc,
std_positions,
std_positions_unc
))
std = np.tile(std, self.clusters_limit)
return std
@property
def __mean_targets(self):
mean_e_energy = [1.207963305458394]
mean_p_energy = [2.081498278344268]
mean_e_position = [2.02256879e+02, 1.00478623e-02, -3.36698613e+00]
mean_p_position = [3.93714750e+02, 1.02343097e-01, 1.31962800e+00]
mean = np.concatenate((
[0],
mean_e_energy,
mean_p_energy,
mean_e_position,
mean_p_position,
[0,0]
))
return mean
@property
def __std_targets(self):
std_e_energy = [1.7854439595674854] / np.array(self.__std_factor)
std_p_energy = [1.675908762593649] / np.array(self.__std_factor)
std_e_position = [20.45301063, 27.74893174, 27.19126733] / np.array(self.__std_factor)
std_p_position = [23.59772062, 28.41093766, 28.10100634] / np.array(self.__std_factor)
std = np.concatenate((
[1],
std_e_energy,
std_p_energy,
std_e_position,
std_p_position,
[1,1]
))
return std
@property
def _target_type(self):
# [t]
return self._targets[:,[0]]
@property
def _target_e_cluster(self):
# [t, e_clus]
return self._targets[:,[0,9]]
@property
def _target_p_cluster(self):
# [t, p_clus]
return self._targets[:,[0,10]]
@property
def _target_pos_x(self):
# [t, e_clus, e_pos_x, p_clus, p_pos_x]
return self._targets[:,[0,9,3,10,6]]
@property
def _target_pos_y(self):
# [t, e_clus, e_pos_y, p_clus, p_pos_y]
return self._targets[:,[0,9,4,10,7]]
@property
def _target_pos_z(self):
# [t, e_clus, e_pos_z, p_clus, p_pos_z]
return self._targets[:,[0,9,5,10,8]]
@property
def _target_energy(self):
# [t, e_enrg, p_enrg]
return self._targets[:,[0,1,2]]
@property
def _target_qlty(self):
# [t, e_energy_qlty, p_energy_qlty, e_pos_qlty, p_pos_qlty]
return np.concatenate([self._targets[:,[0]], self._qlty], axis=1)
|
StarcoderdataPython
|
5001964
|
<filename>pygrank/measures/__init__.py
from pygrank.measures.unsupervised import *
from pygrank.measures.supervised import *
from pygrank.measures.combination import *
from pygrank.measures.multigroup import *
from pygrank.measures.utils import *
|
StarcoderdataPython
|
11300964
|
'''
Test NetDev()
'''
from lnxproc import netdev
from .basetestcase import BaseTestCase
class TestNetDev(BaseTestCase):
'''
Test NetDev class
'''
key = 'NetDev'
module = netdev
def test_netdev(self):
'''
Test normal instantiation
'''
self.generic_test()
|
StarcoderdataPython
|
1854973
|
<reponame>AryaGuo/cadical<filename>synthesis/config.py
class Config:
def __init__(self):
# file paths
self.output_root = '../result'
self.meta_file = '../grammars/bnf.bnf'
self.grammar_file = '../grammars/expr.bnf'
# GP params
self.pop_size = 30
self.depth_lim = 10
self.tournament_size = 2
self.elitism = True
self.seed = None
self.epoch = 50
self.mutation_rate = 0.1
self.crossover_rate = 0.9
self.gen_restart = 50
# checkpoint
self.save = 1
self.report = 5
# evaluation
self.eval_time = 60
self.eval_threshold = 60
self.test_time = 5000
self.test_threshold = 6000
self.eval = None
# method
self.score = 'par-2'
self.STGP = False
self.monkeys = None # 'grammars.typed'
self.load = None
self.config = '"--sat"' # passed to Cadical
self.wt = dict()
self.wt['LHS'] = 1
self.OP_prob = 0.7
cfg = Config()
|
StarcoderdataPython
|
11275688
|
# from mypackage.mypackage import *
|
StarcoderdataPython
|
6642504
|
<gh_stars>1-10
#!/usr/bin/env python3
# Read input
dfs = [int(line) for line in open('01_input.txt', 'r')]
# Part 1
print(f"Part 1: {sum(dfs)}")
# Part 2
f = 0
seen = {f}
found = False
while not found:
for df in dfs:
f += df
if f in seen:
print(f"Part 2: {f}")
found = True
break
seen.add(f)
|
StarcoderdataPython
|
8026608
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import csv
import sys
font = {'size': 35}
matplotlib.rc('font', **font)
def cmToInches(cm):
return cm / 2.54
def plot_position_over_time():
csv_data_file = sys.argv[2]
timestamps = []
x_positions = []
y_positions = []
z_positions = []
with open(csv_data_file, 'r', newline='') as file:
myreader = csv.reader(file, delimiter=',')
is_first_row = True
for row in myreader:
if is_first_row:
is_first_row = False
else:
timestamps.append(float(row[0]))
x_positions.append(float(row[1]))
y_positions.append(float(row[2]))
z_positions.append(float(row[3]))
fig, ax = plt.subplots()
ax.plot(timestamps, x_positions)
ax.plot(timestamps, y_positions)
ax.plot(timestamps, z_positions)
legend = plt.legend(('x', 'y', 'z'), loc=(
0.02, 0.8), prop={'size': 30})
for legendObj in legend.legendHandles:
legendObj.set_linewidth(4.2)
ax.set(xlabel='time (s)', ylabel='Position (cm)',
title='Position over time')
ax.grid()
name = csv_data_file.split("_position")[0]
ratio = 30 / 18
width_in_inches = 65
w = cmToInches(width_in_inches)
h = cmToInches(width_in_inches / ratio)
fig.set_size_inches(w, h)
fig.savefig(f"{name}_position_over_time.png", format='png', dpi=72)
def plot_yaw_over_time():
csv_data_file = sys.argv[2]
timestamps = []
yaws = []
with open(csv_data_file, 'r', newline='') as file:
myreader = csv.reader(file, delimiter=',')
is_first_row = True
for row in myreader:
if is_first_row:
is_first_row = False
else:
timestamps.append(float(row[0]))
yaws.append(float(row[1]))
fig, ax = plt.subplots()
ax.plot(timestamps, yaws)
ax.set(xlabel='time (s)', ylabel='Yaw (°)',
title='Yaw over time')
ax.grid()
name = csv_data_file.split("_yaw")[0]
ratio = 30 / 18
width_in_inches = 65
w = cmToInches(width_in_inches)
h = cmToInches(width_in_inches / ratio)
fig.set_size_inches(w, h)
fig.savefig(f"{name}_yaw_over_time.png", format='png', dpi=72)
def plot_pos_and_yaw_over_time():
positions_csv_data_file, yaws_csv_data_file = sys.argv[2], sys.argv[3]
timestamps = []
x_positions = []
y_positions = []
z_positions = []
yaws = []
with open(positions_csv_data_file, 'r', newline='') as file:
myreader = csv.reader(file, delimiter=',')
is_first_row = True
for row in myreader:
if is_first_row:
is_first_row = False
else:
timestamps.append(float(row[0]))
x_positions.append(float(row[1]))
y_positions.append(float(row[2]))
z_positions.append(float(row[3]))
with open(yaws_csv_data_file, 'r', newline='') as file:
myreader = csv.reader(file, delimiter=',')
is_first_row = True
for row in myreader:
if is_first_row:
is_first_row = False
else:
yaws.append(float(row[1]))
fig, axs = plt.subplots(2)
axs[0].set_ylim([-150, 200])
axs[1].set_ylim([0, 200])
axs[0].plot(timestamps, x_positions)
axs[0].plot(timestamps, y_positions)
axs[0].plot(timestamps, z_positions)
legend = axs[0].legend(('x', 'y', 'z'), loc=(
0.02, 0.8), prop={'size': 30})
for legendObj in legend.legendHandles:
legendObj.set_linewidth(4.2)
axs[0].set(xlabel='time (s)', ylabel='Position (cm)',
title='Position over time')
axs[0].grid()
axs[1].plot(timestamps, yaws)
axs[1].set(xlabel='time (s)', ylabel='Yaw (°)',
title='Yaw over time')
axs[1].grid()
name = positions_csv_data_file.split("_position")[0]
ratio = 30 / 30
width_in_inches = 65
w = cmToInches(width_in_inches)
h = cmToInches(width_in_inches / ratio)
fig.set_size_inches(w, h)
fig.savefig(f"{name}_position_and_yaw_over_time.png", format='png', dpi=72)
def plot_flights():
plot_keyword_to_function_mapper = {
"position": plot_position_over_time,
"yaw": plot_yaw_over_time,
"both": plot_pos_and_yaw_over_time
}
plot_function_to_use = plot_keyword_to_function_mapper[sys.argv[1]]
plot_function_to_use()
def heuristic_function(x):
# x: distance in meters
# y: velocity in meters / second
if x < 0.45:
y = 0
elif 0.45 <= x and x < 1.2:
y = 1
elif 1.2 <= x and x < 3.6:
y = 2
elif 3.6 <= x and x < 7.6:
y = 3
else:
y = 5
return y
def plot_heuristic_function():
distance_step = 0
distance_steps = [0]
for _ in range(1000):
distance_step += 0.01
distance_steps.append(distance_step)
velocities = []
for distance_step in distance_steps:
current_velocity = heuristic_function(distance_step)
velocities.append(current_velocity)
fig, ax = plt.subplots()
ax.plot(distance_steps, velocities)
ax.set(xlabel='distance (m)', ylabel='velocity (m/s)',
title='Heuristic controller')
ax.grid()
ax.set_xticks([0, 0.45, 1.2, 3.6, 7.6, 10])
ax.set_xticklabels(["0", "0.45", "1.2", "3.6", "7.6", "10"])
ax.tick_params(labelsize=25)
name = "Heuristic controller"
ratio = 30 / 15
width_in_inches = 60
w = cmToInches(width_in_inches)
h = cmToInches(width_in_inches / ratio)
fig.set_size_inches(w, h)
fig.savefig(f"{name}.png", format='png', dpi=72)
plot_heuristic_function()
|
StarcoderdataPython
|
3412375
|
import datetime
import uuid
import ckan.model as model
from sqlalchemy import Column, MetaData, or_, types
from sqlalchemy.ext.declarative import declarative_base
log = __import__('logging').getLogger(__name__)
Base = declarative_base()
metadata = MetaData()
def make_uuid():
return str(uuid.uuid4())
class Recommendation(Base):
__tablename__ = 'recommendation'
id = Column(types.UnicodeText, primary_key=True, default=make_uuid, nullable=False)
created_at = Column(types.DateTime, default=datetime.datetime.now, nullable=False)
ip_address = Column(types.UnicodeText, nullable=False)
package_id = Column(types.UnicodeText, nullable=False)
user_id = Column(types.UnicodeText)
@classmethod
def get_package_recommendations(cls, package_id):
'''Get all recommendations for a specific package'''
return model.Session.query(cls).filter(cls.package_id == package_id).all()
@classmethod
def create_package_recommendation(cls, package_id, ip_address, user_id=None):
recommendation = Recommendation(
package_id=package_id,
ip_address=ip_address,
user_id=user_id,
)
model.Session.add(recommendation)
model.repo.commit()
@classmethod
def get_package_recommendations_count_for_user(cls, ip_address, package_id, user_id=None):
'''Get the amount of recommendations created by a specific user or IP address for a package'''
return model.Session.query(cls).filter(
or_(cls.user_id == user_id, cls.ip_address == ip_address),
cls.package_id == package_id
).count()
def init_tables(engine):
Base.metadata.create_all(engine)
log.info('Recommendation tables are initialized.')
|
StarcoderdataPython
|
140345
|
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Base class for working with records.
vectorbt works with two different representations of data: matrices and records.
A matrix, in this context, is just an array of one-dimensional arrays, each corresponding
to a separate feature. The matrix itself holds only one kind of information (one attribute).
For example, one can create a matrix for entry signals, with columns being different strategy
configurations. But what if the matrix is huge and sparse? What if there is more
information we would like to represent by each element? Creating multiple matrices would be
a waste of memory.
Records make possible representing complex, sparse information in a dense format. They are just
an array of one-dimensional arrays of fixed schema. You can imagine records being a DataFrame,
where each row represents a record and each column represents a specific attribute.
```plaintext
a b
0 1.0 5.0
attr1 = 1 2.0 NaN
2 NaN 7.0
3 4.0 8.0
a b
0 9.0 13.0
attr2 = 1 10.0 NaN
2 NaN 15.0
3 12.0 16.0
|
v
id col idx attr1 attr2
0 0 0 0 1 9
1 1 0 1 2 10
2 2 0 3 4 12
3 3 1 0 5 13
4 4 1 1 7 15
5 5 1 3 8 16
```
Another advantage of records is that they are not constrained by size. Multiple records can map
to a single element in a matrix. For example, one can define multiple orders at the same time step,
which is impossible to represent in a matrix form without using complex data types.
Consider the following example:
```pycon
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from collections import namedtuple
>>> import vectorbt as vbt
>>> example_dt = np.dtype([
... ('id', np.int_),
... ('col', np.int_),
... ('idx', np.int_),
... ('some_field', np.float_)
... ])
>>> records_arr = np.array([
... (0, 0, 0, 10.),
... (1, 0, 1, 11.),
... (2, 0, 2, 12.),
... (3, 1, 0, 13.),
... (4, 1, 1, 14.),
... (5, 1, 2, 15.),
... (6, 2, 0, 16.),
... (7, 2, 1, 17.),
... (8, 2, 2, 18.)
... ], dtype=example_dt)
>>> wrapper = vbt.ArrayWrapper(index=['x', 'y', 'z'],
... columns=['a', 'b', 'c'], ndim=2, freq='1 day')
>>> records = vbt.Records(wrapper, records_arr)
```
## Printing
There are two ways to print records:
* Raw dataframe that preserves field names and data types:
```pycon
>>> records.records
id col idx some_field
0 0 0 0 10.0
1 1 0 1 11.0
2 2 0 2 12.0
3 3 1 0 13.0
4 4 1 1 14.0
5 5 1 2 15.0
6 6 2 0 16.0
7 7 2 1 17.0
8 8 2 2 18.0
```
* Readable dataframe that takes into consideration `Records.field_config`:
```pycon
>>> records.records_readable
Id Column Timestamp some_field
0 0 a x 10.0
1 1 a y 11.0
2 2 a z 12.0
3 3 b x 13.0
4 4 b y 14.0
5 5 b z 15.0
6 6 c x 16.0
7 7 c y 17.0
8 8 c z 18.0
```
## Mapping
`Records` are just [structured arrays](https://numpy.org/doc/stable/user/basics.rec.html) with a bunch
of methods and properties for processing them. Their main feature is to map the records array and
to reduce it by column (similar to the MapReduce paradigm). The main advantage is that it all happens
without converting to the matrix form, which would waste memory resources.
`Records` can be mapped to `vectorbt.records.mapped_array.MappedArray` in several ways:
* Use `Records.map_field` to map a record field:
```pycon
>>> records.map_field('some_field')
<vectorbt.records.mapped_array.MappedArray at 0x7ff49bd31a58>
>>> records.map_field('some_field').values
array([10., 11., 12., 13., 14., 15., 16., 17., 18.])
```
* Use `Records.map` to map records using a custom function.
```pycon
>>> @njit
... def power_map_nb(record, pow):
... return record.some_field ** pow
>>> records.map(power_map_nb, 2)
<vectorbt.records.mapped_array.MappedArray at 0x7ff49c990cf8>
>>> records.map(power_map_nb, 2).values
array([100., 121., 144., 169., 196., 225., 256., 289., 324.])
```
* Use `Records.map_array` to convert an array to `vectorbt.records.mapped_array.MappedArray`.
```pycon
>>> records.map_array(records_arr['some_field'] ** 2)
<vectorbt.records.mapped_array.MappedArray object at 0x7fe9bccf2978>
>>> records.map_array(records_arr['some_field'] ** 2).values
array([100., 121., 144., 169., 196., 225., 256., 289., 324.])
```
* Use `Records.apply` to apply a function on each column/group:
```pycon
>>> @njit
... def cumsum_apply_nb(records):
... return np.cumsum(records.some_field)
>>> records.apply(cumsum_apply_nb)
<vectorbt.records.mapped_array.MappedArray at 0x7ff49c990cf8>
>>> records.apply(cumsum_apply_nb).values
array([10., 21., 33., 13., 27., 42., 16., 33., 51.])
>>> group_by = np.array(['first', 'first', 'second'])
>>> records.apply(cumsum_apply_nb, group_by=group_by, apply_per_group=True).values
array([10., 21., 33., 46., 60., 75., 16., 33., 51.])
```
Notice how cumsum resets at each column in the first example and at each group in the second example.
## Filtering
Use `Records.apply_mask` to filter elements per column/group:
```pycon
>>> mask = [True, False, True, False, True, False, True, False, True]
>>> filtered_records = records.apply_mask(mask)
>>> filtered_records.count()
a 2
b 1
c 2
dtype: int64
>>> filtered_records.values['id']
array([0, 2, 4, 6, 8])
```
## Grouping
One of the key features of `Records` is that you can perform reducing operations on a group
of columns as if they were a single column. Groups can be specified by `group_by`, which
can be anything from positions or names of column levels, to a NumPy array with actual groups.
There are multiple ways to define grouping:
* When creating `Records`, pass `group_by` to `vectorbt.base.array_wrapper.ArrayWrapper`:
```pycon
>>> group_by = np.array(['first', 'first', 'second'])
>>> grouped_wrapper = wrapper.replace(group_by=group_by)
>>> grouped_records = vbt.Records(grouped_wrapper, records_arr)
>>> grouped_records.map_field('some_field').mean()
first 12.5
second 17.0
dtype: float64
```
* Regroup an existing `Records`:
```pycon
>>> records.regroup(group_by).map_field('some_field').mean()
first 12.5
second 17.0
dtype: float64
```
* Pass `group_by` directly to the mapping method:
```pycon
>>> records.map_field('some_field', group_by=group_by).mean()
first 12.5
second 17.0
dtype: float64
```
* Pass `group_by` directly to the reducing method:
```pycon
>>> records.map_field('some_field').mean(group_by=group_by)
a 11.0
b 14.0
c 17.0
dtype: float64
```
!!! note
Grouping applies only to reducing operations; the underlying arrays are not changed.
## Indexing
Like any other class subclassing `vectorbt.base.array_wrapper.Wrapping`, we can do pandas indexing
on a `Records` instance, which forwards indexing operation to each object with columns:
```pycon
>>> records['a'].records
id col idx some_field
0 0 0 0 10.0
1 1 0 1 11.0
2 2 0 2 12.0
>>> grouped_records['first'].records
id col idx some_field
0 0 0 0 10.0
1 1 0 1 11.0
2 2 0 2 12.0
3 3 1 0 13.0
4 4 1 1 14.0
5 5 1 2 15.0
```
!!! note
Changing index (time axis) is not supported. The object should be treated as a Series
rather than a DataFrame; for example, use `some_field.iloc[0]` instead of `some_field.iloc[:, 0]`.
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.
For example, if `group_select` is enabled indexing will be performed on groups,
otherwise on single columns.
## Caching
`Records` supports caching. If a method or a property requires heavy computation, it's wrapped
with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`
respectively. Caching can be disabled globally via `caching` in `vectorbt._settings.settings`.
!!! note
Because of caching, the class is meant to be immutable and all properties are read-only.
To change any attribute, use the `copy` method and pass the attribute as a keyword argument.
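A minimal sketch of this pattern, using the `Records.replace` method defined further below
(the replacement mechanics come from `vectorbt.utils.config.Configured`, so treat the
snippet as an assumption of typical usage rather than the canonical recipe):
```pycon
>>> new_records = records.replace(records_arr=records_arr)
>>> new_records is records
False
```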
## Saving and loading
Like any other class subclassing `vectorbt.utils.config.Pickleable`, we can save a `Records`
instance to the disk with `Records.save` and load it with `Records.load`.
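A minimal sketch, assuming `save` and `load` accept a file path (the exact signature comes
from `vectorbt.utils.config.Pickleable`):
```pycon
>>> records.save('records.pickle')
>>> loaded = vbt.Records.load('records.pickle')
```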
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `Records.metrics`.
```pycon
>>> records.stats(column='a')
Start x
End z
Period 3 days 00:00:00
Total Records 3
Name: a, dtype: object
```
`Records.stats` also supports (re-)grouping:
```pycon
>>> grouped_records.stats(column='first')
Start x
End z
Period 3 days 00:00:00
Total Records 6
Name: first, dtype: object
```
## Plots
!!! hint
See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots` and `Records.subplots`.
This class is too generic to have any subplots, but feel free to add custom subplots to your subclass.
## Extending
`Records` class can be extended by subclassing.
In case some of our fields have the same meaning but different naming (such as the base field `idx`)
or other properties, we can override `field_config` using `vectorbt.records.decorators.override_field_config`.
It will look for configs of all base classes and merge our config on top of them. This preserves
any base class property that is not explicitly listed in our config.
```pycon
>>> from vectorbt.records.decorators import override_field_config
>>> my_dt = np.dtype([
... ('my_id', np.int_),
... ('my_col', np.int_),
... ('my_idx', np.int_)
... ])
>>> my_fields_config = dict(
... dtype=my_dt,
... settings=dict(
... id=dict(name='my_id'),
... col=dict(name='my_col'),
... idx=dict(name='my_idx')
... )
... )
>>> @override_field_config(my_fields_config)
... class MyRecords(vbt.Records):
... pass
>>> records_arr = np.array([
... (0, 0, 0),
... (1, 0, 1),
... (2, 1, 0),
... (3, 1, 1)
... ], dtype=my_dt)
>>> wrapper = vbt.ArrayWrapper(index=['x', 'y'],
... columns=['a', 'b'], ndim=2, freq='1 day')
>>> my_records = MyRecords(wrapper, records_arr)
>>> my_records.id_arr
array([0, 1, 2, 3])
>>> my_records.col_arr
array([0, 0, 1, 1])
>>> my_records.idx_arr
array([0, 1, 0, 1])
```
Alternatively, we can override the `_field_config` class attribute.
```pycon
>>> @override_field_config
... class MyRecords(vbt.Records):
... _field_config = dict(
... dtype=my_dt,
... settings=dict(
... id=dict(name='my_id'),
... idx=dict(name='my_idx'),
... col=dict(name='my_col')
... )
... )
```
!!! note
Don't forget to decorate the class with `@override_field_config` to inherit configs from base classes.
You can stop inheritance by not decorating or passing `merge_configs=False` to the decorator.
"""
import inspect
import string
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.base.reshape_fns import to_1d_array
from vectorbt.generic.plots_builder import PlotsBuilderMixin
from vectorbt.generic.stats_builder import StatsBuilderMixin
from vectorbt.records import nb
from vectorbt.records.col_mapper import ColumnMapper
from vectorbt.records.mapped_array import MappedArray
from vectorbt.utils import checks
from vectorbt.utils.attr_ import get_dict_attr
from vectorbt.utils.config import merge_dicts, Config, Configured
from vectorbt.utils.decorators import cached_method
__pdoc__ = {}
RecordsT = tp.TypeVar("RecordsT", bound="Records")
IndexingMetaT = tp.Tuple[ArrayWrapper, tp.RecordArray, tp.MaybeArray, tp.Array1d]
class MetaFields(type):
"""Meta class that exposes a read-only class property `MetaFields.field_config`."""
@property
def field_config(cls) -> Config:
"""Field config."""
return cls._field_config
class RecordsWithFields(metaclass=MetaFields):
"""Class exposes a read-only class property `RecordsWithFields.field_config`."""
@property
def field_config(self) -> Config:
"""Field config of `${cls_name}`.
```json
${field_config}
```
"""
return self._field_config
class MetaRecords(type(StatsBuilderMixin), type(PlotsBuilderMixin), type(RecordsWithFields)):
pass
class Records(Wrapping, StatsBuilderMixin, PlotsBuilderMixin, RecordsWithFields, metaclass=MetaRecords):
"""Wraps the actual records array (such as trades) and exposes methods for mapping
it to some array of values (such as PnL of each trade).
Args:
wrapper (ArrayWrapper): Array wrapper.
See `vectorbt.base.array_wrapper.ArrayWrapper`.
records_arr (array_like): A structured NumPy array of records.
Must have the fields `id` (record index) and `col` (column index).
col_mapper (ColumnMapper): Column mapper if already known.
!!! note
It depends on `records_arr`, so make sure to invalidate `col_mapper` upon creating
a `Records` instance with a modified `records_arr`.
`Records.replace` does it automatically.
**kwargs: Custom keyword arguments passed to the config.
Useful if any subclass wants to extend the config.
"""
_field_config: tp.ClassVar[Config] = Config(
dict(
dtype=None,
settings=dict(
id=dict(
name='id',
title='Id'
),
col=dict(
name='col',
title='Column',
mapping='columns'
),
idx=dict(
name='idx',
title='Timestamp',
mapping='index'
)
)
),
readonly=True,
as_attrs=False
)
@property
def field_config(self) -> Config:
"""Field config of `${cls_name}`.
```json
${field_config}
```
"""
return self._field_config
def __init__(self,
wrapper: ArrayWrapper,
records_arr: tp.RecordArray,
col_mapper: tp.Optional[ColumnMapper] = None,
**kwargs) -> None:
Wrapping.__init__(
self,
wrapper,
records_arr=records_arr,
col_mapper=col_mapper,
**kwargs
)
StatsBuilderMixin.__init__(self)
# Check fields
records_arr = np.asarray(records_arr)
checks.assert_not_none(records_arr.dtype.fields)
field_names = {
dct.get('name', field_name)
for field_name, dct in self.field_config.get('settings', {}).items()
}
dtype = self.field_config.get('dtype', None)
if dtype is not None:
for field in dtype.names:
if field not in records_arr.dtype.names:
if field not in field_names:
raise TypeError(f"Field '{field}' from {dtype} cannot be found in records or config")
self._records_arr = records_arr
if col_mapper is None:
col_mapper = ColumnMapper(wrapper, self.col_arr)
self._col_mapper = col_mapper
def replace(self: RecordsT, **kwargs) -> RecordsT:
"""See `vectorbt.utils.config.Configured.replace`.
Also, makes sure that `Records.col_mapper` is not passed to the new instance."""
if self.config.get('col_mapper', None) is not None:
if 'wrapper' in kwargs:
if self.wrapper is not kwargs.get('wrapper'):
kwargs['col_mapper'] = None
if 'records_arr' in kwargs:
if self.records_arr is not kwargs.get('records_arr'):
kwargs['col_mapper'] = None
return Configured.replace(self, **kwargs)
def get_by_col_idxs(self, col_idxs: tp.Array1d) -> tp.RecordArray:
"""Get records corresponding to column indices.
Returns new records array."""
if self.col_mapper.is_sorted():
new_records_arr = nb.record_col_range_select_nb(
self.values, self.col_mapper.col_range, to_1d_array(col_idxs)) # faster
else:
new_records_arr = nb.record_col_map_select_nb(
self.values, self.col_mapper.col_map, to_1d_array(col_idxs))
return new_records_arr
def indexing_func_meta(self, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> IndexingMetaT:
"""Perform indexing on `Records` and return metadata."""
new_wrapper, _, group_idxs, col_idxs = \
self.wrapper.indexing_func_meta(pd_indexing_func, column_only_select=True, **kwargs)
new_records_arr = self.get_by_col_idxs(col_idxs)
return new_wrapper, new_records_arr, group_idxs, col_idxs
def indexing_func(self: RecordsT, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> RecordsT:
"""Perform indexing on `Records`."""
new_wrapper, new_records_arr, _, _ = self.indexing_func_meta(pd_indexing_func, **kwargs)
return self.replace(
wrapper=new_wrapper,
records_arr=new_records_arr
)
@property
def records_arr(self) -> tp.RecordArray:
"""Records array."""
return self._records_arr
@property
def values(self) -> tp.RecordArray:
"""Records array."""
return self.records_arr
def __len__(self) -> int:
return len(self.values)
@property
def records(self) -> tp.Frame:
"""Records."""
return pd.DataFrame.from_records(self.values)
@property
def recarray(self) -> tp.RecArray:
return self.values.view(np.recarray)
@property
def col_mapper(self) -> ColumnMapper:
"""Column mapper.
See `vectorbt.records.col_mapper.ColumnMapper`."""
return self._col_mapper
@property
def records_readable(self) -> tp.Frame:
"""Records in readable format."""
df = self.records.copy()
field_settings = self.field_config.get('settings', {})
for col_name in df.columns:
if col_name in field_settings:
dct = field_settings[col_name]
if dct.get('ignore', False):
df = df.drop(columns=col_name)
continue
field_name = dct.get('name', col_name)
if 'title' in dct:
title = dct['title']
new_columns = dict()
new_columns[field_name] = title
df.rename(columns=new_columns, inplace=True)
else:
title = field_name
if 'mapping' in dct:
if isinstance(dct['mapping'], str) and dct['mapping'] == 'index':
df[title] = self.get_map_field_to_index(col_name)
else:
df[title] = self.get_apply_mapping_arr(col_name)
return df
def get_field_setting(self, field: str, setting: str, default: tp.Any = None) -> tp.Any:
"""Resolve any setting of the field. Uses `Records.field_config`."""
return self.field_config.get('settings', {}).get(field, {}).get(setting, default)
def get_field_name(self, field: str) -> str:
"""Resolve the name of the field. Uses `Records.field_config`.."""
return self.get_field_setting(field, 'name', field)
def get_field_title(self, field: str) -> str:
"""Resolve the title of the field. Uses `Records.field_config`."""
return self.get_field_setting(field, 'title', field)
def get_field_mapping(self, field: str) -> tp.Optional[tp.MappingLike]:
"""Resolve the mapping of the field. Uses `Records.field_config`."""
return self.get_field_setting(field, 'mapping', None)
def get_field_arr(self, field: str) -> tp.Array1d:
"""Resolve the array of the field. Uses `Records.field_config`."""
return self.values[self.get_field_name(field)]
def get_map_field(self, field: str, **kwargs) -> MappedArray:
"""Resolve the mapped array of the field. Uses `Records.field_config`."""
return self.map_field(self.get_field_name(field), mapping=self.get_field_mapping(field), **kwargs)
def get_apply_mapping_arr(self, field: str, **kwargs) -> tp.Array1d:
"""Resolve the mapped array on the field, with mapping applied. Uses `Records.field_config`."""
return self.get_map_field(field, **kwargs).apply_mapping().values
def get_map_field_to_index(self, field: str, **kwargs) -> tp.Index:
"""Resolve the mapped array on the field, with index applied. Uses `Records.field_config`."""
return self.get_map_field(field, **kwargs).to_index()
@property
def id_arr(self) -> tp.Array1d:
"""Get id array."""
return self.values[self.get_field_name('id')]
@property
def col_arr(self) -> tp.Array1d:
"""Get column array."""
return self.values[self.get_field_name('col')]
@property
def idx_arr(self) -> tp.Optional[tp.Array1d]:
"""Get index array."""
idx_field_name = self.get_field_name('idx')
if idx_field_name is None:
return None
return self.values[idx_field_name]
@cached_method
def is_sorted(self, incl_id: bool = False) -> bool:
"""Check whether records are sorted."""
if incl_id:
return nb.is_col_idx_sorted_nb(self.col_arr, self.id_arr)
return nb.is_col_sorted_nb(self.col_arr)
def sort(self: RecordsT, incl_id: bool = False, group_by: tp.GroupByLike = None, **kwargs) -> RecordsT:
"""Sort records by columns (primary) and ids (secondary, optional).
!!! note
Sorting is expensive. A better approach is to append records already in the correct order."""
if self.is_sorted(incl_id=incl_id):
return self.replace(**kwargs).regroup(group_by)
if incl_id:
ind = np.lexsort((self.id_arr, self.col_arr)) # expensive!
else:
ind = np.argsort(self.col_arr)
return self.replace(records_arr=self.values[ind], **kwargs).regroup(group_by)
def apply_mask(self: RecordsT, mask: tp.Array1d, group_by: tp.GroupByLike = None, **kwargs) -> RecordsT:
"""Return a new class instance, filtered by mask."""
mask_indices = np.flatnonzero(mask)
return self.replace(
records_arr=np.take(self.values, mask_indices),
**kwargs
).regroup(group_by)
def map_array(self,
a: tp.ArrayLike,
idx_arr: tp.Optional[tp.ArrayLike] = None,
mapping: tp.Optional[tp.MappingLike] = None,
group_by: tp.GroupByLike = None,
**kwargs) -> MappedArray:
"""Convert array to mapped array.
The length of the array should match that of the records."""
if not isinstance(a, np.ndarray):
a = np.asarray(a)
checks.assert_shape_equal(a, self.values)
if idx_arr is None:
idx_arr = self.idx_arr
return MappedArray(
self.wrapper,
a,
self.col_arr,
id_arr=self.id_arr,
idx_arr=idx_arr,
mapping=mapping,
col_mapper=self.col_mapper,
**kwargs
).regroup(group_by)
def map_field(self, field: str, **kwargs) -> MappedArray:
"""Convert field to mapped array.
`**kwargs` are passed to `Records.map_array`."""
mapped_arr = self.values[field]
return self.map_array(mapped_arr, **kwargs)
def map(self,
map_func_nb: tp.RecordMapFunc, *args,
dtype: tp.Optional[tp.DTypeLike] = None,
**kwargs) -> MappedArray:
"""Map each record to a scalar value. Returns mapped array.
See `vectorbt.records.nb.map_records_nb`.
`**kwargs` are passed to `Records.map_array`."""
checks.assert_numba_func(map_func_nb)
mapped_arr = nb.map_records_nb(self.values, map_func_nb, *args)
mapped_arr = np.asarray(mapped_arr, dtype=dtype)
return self.map_array(mapped_arr, **kwargs)
def apply(self,
apply_func_nb: tp.RecordApplyFunc, *args,
group_by: tp.GroupByLike = None,
apply_per_group: bool = False,
dtype: tp.Optional[tp.DTypeLike] = None,
**kwargs) -> MappedArray:
"""Apply function on records per column/group. Returns mapped array.
Applies per group if `apply_per_group` is True.
See `vectorbt.records.nb.apply_on_records_nb`.
`**kwargs` are passed to `Records.map_array`."""
checks.assert_numba_func(apply_func_nb)
if apply_per_group:
col_map = self.col_mapper.get_col_map(group_by=group_by)
else:
col_map = self.col_mapper.get_col_map(group_by=False)
mapped_arr = nb.apply_on_records_nb(self.values, col_map, apply_func_nb, *args)
mapped_arr = np.asarray(mapped_arr, dtype=dtype)
return self.map_array(mapped_arr, group_by=group_by, **kwargs)
@cached_method
def count(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Return count by column."""
wrap_kwargs = merge_dicts(dict(name_or_index='count'), wrap_kwargs)
return self.wrapper.wrap_reduced(
self.col_mapper.get_col_map(group_by=group_by)[1],
group_by=group_by, **wrap_kwargs)
# ############# Stats ############# #
@property
def stats_defaults(self) -> tp.Kwargs:
"""Defaults for `Records.stats`.
Merges `vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults` and
`records.stats` from `vectorbt._settings.settings`."""
from vectorbt._settings import settings
records_stats_cfg = settings['records']['stats']
return merge_dicts(
StatsBuilderMixin.stats_defaults.__get__(self),
records_stats_cfg
)
_metrics: tp.ClassVar[Config] = Config(
dict(
start=dict(
title='Start',
calc_func=lambda self: self.wrapper.index[0],
agg_func=None,
tags='wrapper'
),
end=dict(
title='End',
calc_func=lambda self: self.wrapper.index[-1],
agg_func=None,
tags='wrapper'
),
period=dict(
title='Period',
calc_func=lambda self: len(self.wrapper.index),
apply_to_timedelta=True,
agg_func=None,
tags='wrapper'
),
count=dict(
title='Count',
calc_func='count',
tags='records'
)
),
copy_kwargs=dict(copy_mode='deep')
)
@property
def metrics(self) -> Config:
return self._metrics
# ############# Plotting ############# #
@property
def plots_defaults(self) -> tp.Kwargs:
"""Defaults for `Records.plots`.
Merges `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots_defaults` and
`records.plots` from `vectorbt._settings.settings`."""
from vectorbt._settings import settings
records_plots_cfg = settings['records']['plots']
return merge_dicts(
PlotsBuilderMixin.plots_defaults.__get__(self),
records_plots_cfg
)
@property
def subplots(self) -> Config:
return self._subplots
# ############# Docs ############# #
@classmethod
def build_field_config_doc(cls, source_cls: tp.Optional[type] = None) -> str:
"""Build field config documentation."""
if source_cls is None:
source_cls = Records
return string.Template(
inspect.cleandoc(get_dict_attr(source_cls, 'field_config').__doc__)
).substitute(
{'field_config': cls.field_config.to_doc(), 'cls_name': cls.__name__}
)
@classmethod
def override_field_config_doc(cls, __pdoc__: dict, source_cls: tp.Optional[type] = None) -> None:
"""Call this method on each subclass that overrides `field_config`."""
__pdoc__[cls.__name__ + '.field_config'] = cls.build_field_config_doc(source_cls=source_cls)
Records.override_field_config_doc(__pdoc__)
Records.override_metrics_doc(__pdoc__)
Records.override_subplots_doc(__pdoc__)
|
StarcoderdataPython
|
9600487
|
from signal import signal, SIGTERM
from threading import Lock
from readerwriterlock import rwlock
class StrictIndex:
def __init__(self, capacity: int = int(1e5), segmentation_size: int = 25) -> None:
self.lock = rwlock.RWLockFair()
def write(self, batch):
pass
def read(self, bat_size):
pass
|
StarcoderdataPython
|
1820953
|
<gh_stars>10-100
from polyphony import testbench
def while08(n):
x = 1
y = 2
while True:
#z = y
y = x
x = 5
n -= 1
if n < 0:
break
print(x, y)
return x + y
@testbench
def test():
assert 6 == while08(0)
assert 10 == while08(1)
test()
|
StarcoderdataPython
|
3492418
|
<gh_stars>0
import math
import svgwrite
from pyplot import Point, ShapeFiller
def draw_big_a(d):
paper_centre = Point(102.5, 148)
fontsize = 96*8*0.5
family="Arial"
text = "ﷺ"
ext = d.text_bound(text, fontsize=fontsize, family=family)
text_place = Point(paper_centre.x - ext.width/2, paper_centre.y + ext.height/2)
letter_paths = d.make_text(text, text_place, fontsize=fontsize, family=family)
sf = ShapeFiller(letter_paths)
paths = []
for path in sf.get_paths(0.4*d.pen_type.pen_width, angle=math.pi/2): # math.pi/2):
paths.append(path)
d.add_polylines(paths, container=d.add_layer("1"), stroke=svgwrite.rgb(30, 100, 30, '%'))
closed_letter_paths = []
for letter_path in letter_paths:
x = [_ for _ in letter_path]
x.append(x[0])
closed_letter_paths.append(x)
d.add_polylines(closed_letter_paths, container=d.add_layer("0"))
def draw_word_square(d):
d.add_polylines(d.make_word_square((20, 20), 96, 'Cas<NAME>', ["SATOR","AREPO","TENET","OPERA","ROTAS"], angle=0))
def test_text_sizes(d):
family='HersheyScript1smooth'
fontsize=8
d.add_text(f"{family}: {d.pen_type.name}", (20, 20), fontsize=fontsize, family=family)
for fontsize in range(4, 13):
d.add_text(f"{fontsize}pt: abcdefg", (20, 20 + 20 * (fontsize-3)), fontsize=fontsize, family=family)
def test_text_and_shape(d):
letter_paths = d.make_text("TEST", (20, 80), 96, family="Arial")
circle = d.make_circle((50, 70), 15)
letter_paths.append(circle)
sf = ShapeFiller(letter_paths)
for path in sf.get_paths(4*d.pen_type.pen_width / 5, angle=math.pi/2):
d.add_polyline(path)
def draw_text_by_letter_and_whole_for_comparison(drawing, family='Arial', s=None):
fontsize = 20
# family = 'CNC Vector' # good machine font
# family = 'CutlingsGeometric' # spaces too big!
# family = 'CutlingsGeometricRound' # spaces too big!
# family = 'HersheyScript1smooth' # good "handwriting" font
# family = 'Stymie Hairline' # a bit cutsey, but ok
ys = 80
s = "all work and no play makes Jack a dull boy" if s is None else s
(x, y) = (20, ys)
for c in s:
drawing.add_text(c, (x, y), fontsize, family=family)
(w, h) = drawing.text_bound_letter(c, fontsize, family=family)
(x, y) = (x + w, y)
drawing.add_text(s, (20, ys+20), fontsize, family=family)
def test_boxed_text(d):
family='CutlingsGeometricRound'
family='HersheyScript1smooth'
family='CNC Vector'
position = (20, 40)
for i in range(0, 10):
fontsize = 12 + i
ext = d.add_text(f"WAKEFIELD: {fontsize}pt", position, fontsize=fontsize, family=family)
d.add_rect((position[0] - 2, position[1] + ext.y_bearing - 2), ext.width + 4, ext.height + 4)
d.add_rect((position[0] - 2.2, position[1] + ext.y_bearing - 2.2), ext.width + 4.4, ext.height + 4.4)
position = (position[0], position[1] + ext.height + 10)
family='HersheyScript1smooth'
position = (120, 40)
for i in range(0, 10):
fontsize = 12 + i
ext = d.add_text(f"WAKEFIELD: {fontsize}pt", position, fontsize=fontsize, family=family)
d.add_rect((position[0] - 2, position[1] + ext.y_bearing - 2), ext.width + 4, ext.height + 4)
d.add_rect((position[0] - 2.2, position[1] + ext.y_bearing - 2.2), ext.width + 4.4, ext.height + 4.4)
position = (position[0], position[1] + ext.height + 10)
|
StarcoderdataPython
|
8187149
|
<gh_stars>10-100
import torch
def _Add_DifferentChannels(tensorA, tensorB):
"""Add two NCHW tensors whose channel counts may differ: the shared channels are summed,
and the extra channels of the wider tensor are concatenated back unchanged."""
_, ac, _, _ = tensorA.shape
_, bc, _, _ = tensorB.shape
if ac == bc:
return tensorA + tensorB
# find the tensor with fewer channels; only that many channels overlap
partiralchannels, shorttensor, longtensor = ac, tensorA, tensorB
if bc < ac:
partiralchannels, shorttensor, longtensor = bc, tensorB, tensorA
# sum the overlapping channels, then re-append the remaining channels of the wider tensor
return torch.cat([shorttensor + longtensor[:, :partiralchannels, ...], longtensor[:, partiralchannels:, ...]], dim=1)
|
StarcoderdataPython
|
1617528
|
<reponame>yogeshwari-vs/2D-Paramotoring-Pygame
import sys
import multiprocessing
from level_1 import main_level_1
from level_2 import main_level_2
from level_3 import main_level_3
def main():
"""
Runs all 3 levels of the game
"""
volume_button_on_status = main_level_1.main()
main_level_2.main(volume_button_on_status)
main_level_3.main(volume_button_on_status)
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
sys.exit()
|
StarcoderdataPython
|
8080476
|
<filename>Square/Sqthing.py<gh_stars>0
#!/usr/bin/env python3
from ev3dev.ev3 import *
from time import *
m1 = Motor('outA')
m2 = Motor('outB')
sp = 1000
ts = 250
timeforward = 1.7 #1.7
def moveForward(t):
m1.run_forever(speed_sp = sp)
m2.run_forever(speed_sp = sp)
sleep(t)
def turnRight(t):
m1.run_forever(speed_sp = -1*ts)
m2.run_forever(speed_sp = ts)
sleep(t)
def turnLeft(t):
m1.run_forever(speed_sp = ts)
m2.run_forever(speed_sp = -1*ts)
sleep(t)
def stopMotor():
m1.stop()
m2.stop()
Sound.speak("3,2,1, Drop it!").wait()
for i in range(3):
moveForward(timeforward)
turnLeft(.52)
moveForward(1.5)
stopMotor()
Sound.speak("2 plus 2 is 4, minus one is three, quick maths. Ski, rah, pow pow pow pow pow. Yuh ").wait()
|
StarcoderdataPython
|
111324
|
<filename>vivareal/vivareal/spiders/vivareal.py
import scrapy,yaml,json
from vivareal.vivareal.items import Anuncio
class vivarealSpider(scrapy.Spider):
name = "vivareal"
allowed_domains = ["glue-api.vivareal.com"]
custom_settings = {'DOWNLOAD_DELAY': 0.50}  # a bare DOWNLOAD_DELAY class attribute is ignored by Scrapy
def __init__ (self, directory='',params='',*args):
super(vivarealSpider,self).__init__(*args)
self.directory = directory
self.params = params
def start_requests(self):
for p in self.params:
var = yaml.safe_load(p)
locationId = var["locationId"]
minima = var["minima"]
maxima = var["maxima"]
businessType = var["negocio"]
if businessType == "VENDA":
businessType = "SALE"
if businessType == "ALUGUEL":
businessType = "RENTAL"
unitType = var["imovel"]
if unitType == "APARTAMENTO":
unitType = "APARTMENT"
if unitType == "CASA":
unitType = "HOME"
if unitType == "CASA DE CONDOMINIO":
unitType = "CONDOMINIUM"
if unitType == "CHÁCARA":
unitType = "COUNTRY_HOUSE"
if unitType == "COBERTURA":
unitType = "PENTHOUSE"
if unitType == "LOTE/TERRENO":
unitType = "RESIDENTIAL_ALLOTMENT_LAND"
if unitType == "SOBRADO":
unitType = "TWO_STORY_HOUSE"
if unitType == "CONSULTORIO":
unitType = "CLINIC"
if unitType == "RESIDENTIAL_BUILDING":
unitType = "EDIFICIO RESIDENCIAL"
if unitType == "FAZENDA/SITIO":
unitType = "FARM"
if unitType == "GALPÃO/DEPÓSITO/ARMAZEM":
unitType = "SHED_DEPOSIT_WAREHOUSE"
if unitType == "IMOVEL COMERCIAL":
unitType = "COMMERCIAL_PROPERTY"
if unitType == "LOJA":
unitType = "STORE"
if unitType == "LOTE/TERRENO (COMERCIAL)":
unitType = "COMMERCIAL_ALLOTMENT_LAND"
if unitType == "PONTO COMERCIAL":
unitType = "BUSINESS"
if unitType == "SALA COMERCIAL":
unitType = "OFFICE"
url = 'https://glue-api.vivareal.com/v1/listings?filter=((address.locationId LIKE "{0}%3E%25" OR address.locationId:"{0}")) '.format(locationId)
if minima:
url = url + "AND usableAreas >= {0}".format(minima)
if maxima:
url = url + "AND usableAreas <= {0}".format(maxima)
url = url + ' AND pricingInfos.businessType:"{0}" AND unitTypes:"{1}"&includeFields=addresses%2ClistingsLocation%2Cseo%2Csearch%2Curl%2Cexpansion%2Cfacets%2Cdevelopments&size=36&from=0'.format(businessType, unitType)
yield scrapy.Request(url)
def parse(self, response):
data = json.loads(response.body)
result = data["search"]["result"]
listings = result['listings']
if listings:
for i in listings:
item = Anuncio()
listing = i["listing"]
item["_id"] = listing.get("id")
item["amenities"] = listing.get("amenities")
item["feedsId"] = listing.get("feedsId")
item["usableAreas"] = listing.get("usableAreas")
item["description"] = listing.get("description")
item["listingType"] = listing.get("listingType")
item["videos"] = listing.get("videos")
item["title"] = listing.get("title")
item["createdAt"] = listing.get("createdAt")
item["publisherId"] = listing.get("publisherId")
item["unitTypes"] = listing.get("unitTypes")
item["providerId"] = listing.get("providerId")
item["condominiumName"] = listing.get("condominiumName")
item["propertyType"] = listing.get("propertyType")
item["suites"] = listing.get("suites")
item["publicationType"] = listing.get("publicationType")
item["externalId"] = listing.get("externalId")
item["bathrooms"] = listing.get("bathrooms")
item["totalAreas"] = listing.get("totalAreas")
item["logoUrl"] = listing.get("logoUrl")
item["bedrooms"] = listing.get("bedrooms")
item["promotions"] = listing.get("promotions")
item["highlights"] = listing.get("highlights")
pricingInfos = listing.get("pricingInfos")
if pricingInfos :
item["yearlyIptu"] = pricingInfos[0].get("yearlyIptu")
item["monthlyCondoFee"] = pricingInfos[0].get("monthlyCondoFee")
item["businessType"] = ','.join([x.get("businessType") for x in pricingInfos])
for p in pricingInfos:
if p.get("businessType") == "SALE":
item["sale_price"] = p.get("price")
elif p.get("businessType") == "RENTAL":
item["rental_price"] = p.get("price")
item["showPrice"] = listing.get("showPrice")
item["displayAddress"] = listing.get("displayAddress")
contact = listing.get("contact")
item["phones"] = contact.get("phones")
item["listingStatus"] = listing.get("listingStatus")
item["parkingSpaces"] = listing.get("parkingSpaces")
item["updatedAt"] = listing.get("updatedAt")
item["images"] = listing.get("images")
address = listing.get("address")
item["country"] = address.get("country")
item["state"] = address.get("state")
item["city"] = address.get("city")
item["neighborhood"] = address.get("neighborhood")
item["street"] = address.get("street")
item["streetNumber"] = address.get("streetNumber")
item["unitNumber"] = address.get("unitNumber")
item["zipCode"] = address.get("zipCode")
item["locationId"] = address.get("locationId")
item["zone"] = address.get("zone")
item["district"] = address.get("district")
geoLocation = address.get("geoLocation")
if geoLocation:
item["precision"] = geoLocation.get("precision")
location = geoLocation.get("location")
item["latitude"] = location.get("lat")
item["longitude"] = location.get("lon")
url = i["url"]
link = url.get("link")
item["href"] = link.get("href")
item["properAddress"] = i["properAddress"]
publisherUrl = i["publisherUrl"]
link = publisherUrl.get("link")
item["publisherUrl"] = link.get("href")
yield item
url = response.url.split('&from=')
next_page = int(url[1]) + 36
url = url[0] + "&from=" + str(next_page)
yield scrapy.Request(url=url)
|
StarcoderdataPython
|
6484252
|
# -*- coding: utf-8 -*-
"""
revision.cli
~~~~~~~~~~~~
:copyright: (c) 2018 by SENSY Inc.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import
import json
import os
import sys
import click
from revision.config import (
DEFAULT_CONFIG_PATH,
DEFAULT_CONFIG_TMPL,
DEFAULT_REVISION_FILEPATH
)
from revision.constants import (
CONSOLE_ERROR,
CONSOLE_INFO,
CONSOLE_WARNING,
MESSAGE_LINE_SEPARATOR,
MESSAGE_NEW_LINE,
MESSAGE_TEMPLATE
)
from revision.data import Revision
from revision.decorators import pass_orchestrator
__all__ = (
"main",
)
def exception_handler(exception_type, exception, traceback):
click.echo(
"{} {}: {}".format(
CONSOLE_ERROR,
exception_type.__name__,
exception
),
err=True
)
def create_default_config():
with open(DEFAULT_CONFIG_PATH, "w") as f:
json.dump(DEFAULT_CONFIG_TMPL, f, indent=2)
@click.group()
@click.option("--config", default=None)
@click.option("--debug", is_flag=True)
def cli(config, debug):
if config:
ctx = click.get_current_context()
ctx.obj.update({
'config_path': config
})
if debug:
sys.excepthook = exception_handler
@cli.command()
def init():
if os.path.exists(DEFAULT_CONFIG_PATH):
click.echo("{} {} file always exist.".format(
CONSOLE_WARNING,
DEFAULT_REVISION_FILEPATH
))
else:
create_default_config()
click.echo("{} {} file is created.".format(
CONSOLE_INFO,
DEFAULT_CONFIG_PATH
))
@cli.command()
@click.option("--amend", is_flag=True)
@pass_orchestrator
def commit(orchestrator, amend):
#: Because the click checks the `VISUAL` environment variable first.
editor = os.environ.get('EDITOR')
message = click.edit(MESSAGE_TEMPLATE, editor=editor)
if message is None:
return
lines = message.split(MESSAGE_LINE_SEPARATOR)
description = lines[1].strip(MESSAGE_NEW_LINE)
message = lines[2].strip(MESSAGE_NEW_LINE)
revision = Revision.create(
description=description,
message=message
)
orchestrator.commit(revision, amend)
click.echo("{} created new commit: \n\n{}".format(
CONSOLE_INFO,
revision.to_markdown()
))
@cli.command()
@pass_orchestrator
def push(orchestrator):
orchestrator.push()
@cli.command()
@pass_orchestrator
def pull(orchestrator):
orchestrator.pull()
def main():
client_key = None
if len(sys.argv) >= 2:
i = 0
subcmd = cli.commands.keys()
for arg in sys.argv:
i += 1
if i == 1:
continue
if len(arg) and arg[:2] == '--':
continue
if arg in subcmd:
continue
client_key = arg
sys.argv.pop(i - 1)
break
cli(obj={
"client_key": client_key
})
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9732369
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework_jwt.session.session import SessionStore
from rest_framework_jwt.settings import api_settings
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
User = get_user_model()
class BaseTestCase(TestCase):
def setUp(self):
self.email = '<EMAIL>'
self.username = 'jpueblo'
self.password = 'password'
self.user = User.objects.create_user(
self.username, self.email, self.password)
self.data = {
'username': self.username,
'password': self.password
}
def test_payload(self):
user = self.user
payload = jwt_payload_handler(user)
assert payload['user_id'] == user.pk
assert payload['username'] == user.username
payload = jwt_payload_handler(extra_data={'cart_id': 123})
assert payload['user_id'] == ''
assert payload['username'] == ''
assert payload['cart_id'] == 123
payload = jwt_payload_handler(extra_data={'cart_id': 123, 'user_id': 4, 'username': 'test'})
assert payload['user_id'] == 4
assert payload['username'] == 'test'
assert payload['cart_id'] == 123
payload = jwt_payload_handler(user,
extra_data={'cart_id': 123, 'user_id': 4, 'username': 'test'})
assert payload['user_id'] == user.pk
assert payload['username'] == user.username
assert payload['cart_id'] == 123
def test_new_session(self):
store = SessionStore()
store['cart_id'] = 123
store.save()
assert store.session_key
assert jwt_decode_handler(store.session_key)['cart_id'] == 123
def test_with_exists_token(self):
user = self.user
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
store = SessionStore(token)
assert store['user_id'] == user.id
def test_with_exists_session(self):
user = self.user
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
store = SessionStore(token)
store['cart_id'] = 123
store.save()
store2 = SessionStore(store.session_key)
assert store2['user_id'] == user.id
assert store2['cart_id'] == 123
|
StarcoderdataPython
|
209391
|
#!/usr/bin/python3
import threading
import time
import json
import random
from websocket import create_connection
# Ideally fetch the canvas once first and get these
edge = 512
pixels = edge * edge
connections = []
users = 50
target_url = "ws://localhost:3001/ws"
for _ in range(users):
try:
ws = create_connection(target_url)
except:
print('Failed to create connection')
continue
ws.send(json.dumps({'requestType': 'initialAuth'}))
while True:
response = ws.recv()
print("Got response: {}".format(response))
resp = json.loads(response)
if resp['rt'] == 'authSuccessful':
print("Got user {}".format(resp['uuid']))
connections.append((resp['uuid'], ws))
break
# Each user runs this func on a bg thread
def random_pixels(uuid, socket):
while True:
time.sleep(random.uniform(0.2, 30.0))
        # randint is inclusive on both ends, so cap at edge - 1 to stay on the canvas
        x = random.randint(0, edge - 1)
        y = random.randint(0, edge - 1)
id = random.randint(0,16)
socket.send(json.dumps({'requestType': 'postTile', 'userID': uuid, 'X': x, 'Y': y, 'colorID': str(id)}))
def getcanvas(uuid, socket):
while True:
time.sleep(random.uniform(10,120))
socket.send(json.dumps({'requestType': 'getCanvas', 'userID': uuid}))
threads = []
for thing in connections:
print("Booting getcanvas thread for {}".format(thing[0]))
thread = threading.Thread(target=getcanvas, name=thing[0], args=thing)
thread.start()
threads.append(thread)
print("Booting pixel thread for {}".format(thing[0]))
pixthread = threading.Thread(target=random_pixels, name=thing[0], args=thing)
pixthread.start()
threads.append(pixthread)
time.sleep(20)
|
StarcoderdataPython
|
1637658
|
from discord.ext import commands
import aiohttp, discord, os, traceback, aiosqlite
class SeaWake(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def start(self, *args, **kwargs):
self.session = aiohttp.ClientSession()
self.sus_users = await aiosqlite.connect('sus_users.db')
await super().start(*args, **kwargs)
async def close(self):
await self.session.close()
await self.sus_users.close()
await super().close()
bot = SeaWake(command_prefix = commands.when_mentioned_or("s!", "s.", "S.", "S!", "!") ,intents = discord.Intents.all())
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
try:
bot.load_extension(f'cogs.{filename[:-3]}')
except commands.errors.ExtensionError:
traceback.print_exc()
|
StarcoderdataPython
|
6567361
|
<reponame>lisa-1010/dkt
# path_names.py
# @author: <NAME>
# @created: Oct 1 2016
#
#===============================================================================
# DESCRIPTION:
#
# Exports path names for files and directories, so paths are consistent across
# modules.
#
#===============================================================================
# CURRENT STATUS: Working
#===============================================================================
# USAGE: from path_names import *
def trajectories_dir_path(hoc_num):
return '../data/hoc{}/trajectories/'.format(hoc_num)
def next_problem_dir_path(hoc_num):
return '../data/hoc{}/nextProblem/'.format(hoc_num)
def student_to_traj_path(hoc_num):
return '../preprocessed_data/hoc{}/student_id_to_trajectory_id_map.pickle'.format(hoc_num)
def traj_to_asts_path(hoc_num):
return '../preprocessed_data/hoc{}/trajectory_id_to_asts_map.pickle'.format(hoc_num)
def traj_to_score_path(hoc_num):
return '../preprocessed_data/hoc{}/trajectory_id_to_score_map.pickle'.format(hoc_num)
def ast_id_to_program_embedding_path(hoc_num):
return '../preprocessed_data/hoc{}/ast_id_to_program_embedding_map.pickle'.format(hoc_num)
def traj_to_total_steps_path(hoc_num):
return '../preprocessed_data/hoc{}/trajectory_id_to_total_steps_map.pickle'.format(hoc_num)
def ast_to_poisson_rate_path(hoc_num):
return '../preprocessed_data/hoc{}/ast_id_to_poisson_rate_map.pickle'.format(hoc_num)
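
# Hypothetical usage sketch (not part of the original module): hoc_num is assumed
# to be an integer identifying a Hour-of-Code exercise, so each helper simply
# interpolates it into a fixed relative path.
if __name__ == "__main__":
    print(trajectories_dir_path(4))    # ../data/hoc4/trajectories/
    print(traj_to_score_path(18))      # ../preprocessed_data/hoc18/trajectory_id_to_score_map.pickle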
|
StarcoderdataPython
|
236962
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import panflute as pf
is_in_block = None
def inlatex(text: str) -> pf.RawInline:
return pf.RawInline(text=text, format='latex')
def action(elem, doc):
global is_in_block
if isinstance(elem, pf.Header):
ret = list()
if is_in_block is not None:
ret.append(pf.Para(inlatex(r"\end{%s}" % is_in_block)))
is_in_block = None
if elem.level == 3:
blocktype = ''
blocktitle = ''
for cname in elem.classes:
if cname in [
'block', 'alertblock', 'exampleblock',
'theorem', 'example', 'definition', 'proof'
]:
blocktype = cname
blocktitle = ''.join([
pf.stringify(x) for x in elem.content.list
])
if blocktype != '':
is_in_block = blocktype
latexstr = r'\begin{%s}' % blocktype
if blocktitle != '':
latexstr += '[%s]' % blocktitle
if ':' in elem.identifier:
latexstr += r' \label{%s}' % elem.identifier
ret.append(pf.Para(inlatex(latexstr)))
else:
ret.append(elem)
else:
if len(ret) == 0:
return
ret.append(elem)
return ret
elif isinstance(elem, pf.CodeBlock):
ret = list()
if is_in_block is not None:
ret.append(pf.Para(inlatex(r'\end{%s}' % is_in_block)))
is_in_block = None
caption = elem.attributes.get('caption')
if caption is None:
ret.append(pf.Para(inlatex(r'\begin{codeblock}')))
else:
ret.append(pf.Para(inlatex(r'\begin{codeblock}[%s]' % caption)))
ret.append(elem)
ret.append(pf.Para(inlatex(r'\end{codeblock}')))
return ret
elif isinstance(elem, pf.RawBlock):
if elem.text.startswith('<!--') and is_in_block is not None:
ret = list()
ret.append(pf.Para(inlatex(r'\end{%s}' % is_in_block)))
ret.append(elem)
is_in_block = None
return ret
elif isinstance(elem, pf.Math):
if elem.format == 'InlineMath':
return
ret = list()
math_content = elem.text
if '&' in math_content or r'\\' in math_content:
math_environ = 'align'
else:
math_environ = 'equation'
new_content = r'\begin{%s} ' % math_environ
new_content += math_content
new_content += r'\end{%s}' % math_environ
return pf.RawInline(new_content, format='latex')
return
def main(doc=None):
pf.run_filter(action=action, doc=doc)
return
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3225283
|
# -*- coding: utf-8 -*-
"""Implementation of ProjE."""
from typing import Optional
import numpy
import torch
import torch.autograd
from torch import nn
from ..base import EntityRelationEmbeddingModel
from ...losses import Loss
from ...nn.init import xavier_uniform_
from ...regularizers import Regularizer
from ...triples import TriplesFactory
from ...typing import DeviceHint
__all__ = [
'ProjE',
]
class ProjE(EntityRelationEmbeddingModel):
r"""An implementation of ProjE from [shi2017]_.
ProjE is a neural network-based approach with a *combination* and a *projection* layer. The interaction model
first combines $h$ and $r$ by following combination operator:
.. math::
\textbf{h} \otimes \textbf{r} = \textbf{D}_e \textbf{h} + \textbf{D}_r \textbf{r} + \textbf{b}_c
where $\textbf{D}_e, \textbf{D}_r \in \mathbb{R}^{k \times k}$ are diagonal matrices which are used as shared
parameters among all entities and relations, and $\textbf{b}_c \in \mathbb{R}^{k}$ represents the candidate bias
vector shared across all entities. Next, the score for the triple $(h,r,t) \in \mathbb{K}$ is computed:
.. math::
f(h, r, t) = g(\textbf{t} \ z(\textbf{h} \otimes \textbf{r}) + \textbf{b}_p)
where $g$ and $z$ are activation functions, and $\textbf{b}_p$ represents the shared projection bias vector.
.. seealso::
- Official Implementation: https://github.com/nddsg/ProjE
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default = dict(
embedding_dim=dict(type=int, low=50, high=350, q=25),
)
#: The default loss function class
loss_default = nn.BCEWithLogitsLoss
#: The default parameters for the default loss function class
loss_default_kwargs = dict(reduction='mean')
def __init__(
self,
triples_factory: TriplesFactory,
embedding_dim: int = 50,
automatic_memory_optimization: Optional[bool] = None,
loss: Optional[Loss] = None,
preferred_device: DeviceHint = None,
random_seed: Optional[int] = None,
inner_non_linearity: Optional[nn.Module] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
super().__init__(
triples_factory=triples_factory,
embedding_dim=embedding_dim,
automatic_memory_optimization=automatic_memory_optimization,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
entity_initializer=xavier_uniform_,
relation_initializer=xavier_uniform_,
)
# Global entity projection
self.d_e = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global relation projection
self.d_r = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global combination bias
self.b_c = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
        # Global projection bias
self.b_p = nn.Parameter(torch.empty(1, device=self.device), requires_grad=True)
if inner_non_linearity is None:
inner_non_linearity = nn.Tanh()
self.inner_non_linearity = inner_non_linearity
def _reset_parameters_(self): # noqa: D102
super()._reset_parameters_()
bound = numpy.sqrt(6) / self.embedding_dim
nn.init.uniform_(self.d_e, a=-bound, b=bound)
nn.init.uniform_(self.d_r, a=-bound, b=bound)
nn.init.uniform_(self.b_c, a=-bound, b=bound)
nn.init.uniform_(self.b_p, a=-bound, b=bound)
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hrt_batch[:, 0])
r = self.relation_embeddings(indices=hrt_batch[:, 1])
t = self.entity_embeddings(indices=hrt_batch[:, 2])
# Compute score
hidden = self.inner_non_linearity(self.d_e[None, :] * h + self.d_r[None, :] * r + self.b_c[None, :])
scores = torch.sum(hidden * t, dim=-1, keepdim=True) + self.b_p
return scores
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hr_batch[:, 0])
r = self.relation_embeddings(indices=hr_batch[:, 1])
t = self.entity_embeddings(indices=None)
# Rank against all entities
hidden = self.inner_non_linearity(self.d_e[None, :] * h + self.d_r[None, :] * r + self.b_c[None, :])
scores = torch.sum(hidden[:, None, :] * t[None, :, :], dim=-1) + self.b_p
return scores
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=None)
r = self.relation_embeddings(indices=rt_batch[:, 0])
t = self.entity_embeddings(indices=rt_batch[:, 1])
# Rank against all entities
hidden = self.inner_non_linearity(
self.d_e[None, None, :] * h[None, :, :]
+ (self.d_r[None, None, :] * r[:, None, :] + self.b_c[None, None, :]),
)
scores = torch.sum(hidden * t[:, None, :], dim=-1) + self.b_p
return scores
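
# Illustrative sketch (added here, not part of the original module): with D_e and
# D_r stored as vectors, the combination operator h (x) r = D_e h + D_r r + b_c
# from the class docstring becomes an element-wise expression, which is what
# score_hrt computes above. Shapes and values below are made up for demonstration.
if __name__ == '__main__':
    k = 4
    h, r, t = torch.rand(k), torch.rand(k), torch.rand(k)
    d_e, d_r, b_c, b_p = torch.rand(k), torch.rand(k), torch.rand(k), torch.rand(1)
    combined = torch.tanh(d_e * h + d_r * r + b_c)   # z(h (x) r) with z = tanh
    score = torch.sum(combined * t) + b_p            # f(h, r, t)
    print(score)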
|
StarcoderdataPython
|
1808530
|
<reponame>DanielCamachoFonseca/Flask-app-demo<filename>Backend-Frontend/main.py
# This module runs the application; it imports the app instance from src.app
from src.app import app
HOST='localhost'
PORT=4000
DEBUG=True
if(__name__ == '__main__'):
app.run(HOST, PORT, DEBUG)
|
StarcoderdataPython
|
6641255
|
<reponame>oolorg/opencenter<filename>tests/test_solver.py<gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
import copy
import json
import sys
from util import OpenCenterTestCase
from opencenter.webapp import ast
from opencenter.webapp import solver
from opencenter.db import api as db_api
import opencenter.backends
api = db_api.api_from_models()
class SolverTestCase(OpenCenterTestCase):
def setUp(self):
if opencenter.backends.primitive_by_name('test.set_test_fact') is None:
opencenter.backends.load_specific_backend('tests.test',
'TestBackend')
if opencenter.backends.primitive_by_name('test2.add_backend') is None:
opencenter.backends.load_specific_backend('tests.test2',
'Test2Backend')
self._clean_all()
self.interfaces = {}
self.adv = self._stub_node(
'adventurator',
facts={'backends': ['node', 'agent']})
self.container = self._stub_node(
'container',
facts={'backends': ['node', 'container']})
self.node = self._stub_node('node-1')
chef_expr = '(facts.chef_server_uri != None) and ' \
'(facts.chef_server_pem != None)'
# some of our current primitives require this
self.interfaces['chef'] = self._model_create('filters',
name='chef-server',
filter_type='interface',
expr=chef_expr)
self.api = db_api.api_from_models()
self.assertEquals(len(self._model_get_all('tasks')), 0)
def tearDown(self):
self._clean_all()
def _make_adventurator(self):
self._model_create('attrs', node_id=self.adv['id'],
key='opencenter_agent_output_modules',
value=['adventurator'])
def _plan_includes(self, plan, primitive):
return primitive in [x['primitive'] for x in plan]
def _plan_entry(self, plan, primitive, nth=0):
return [x for x in plan if x['primitive'] == primitive][nth]
def _assert_task(self, plan, nth):
tasks = self._model_get_all('tasks')
self.assertTrue(len(tasks) > nth)
task_plan = tasks[nth]['payload']['adventure_dsl']
self.assertEquals(task_plan, plan)
def _run_plan_safe(self, plan, node_id):
# we'll run a plan, skipping those that are harmful
# node.run_task, for example
for step in plan:
primitive = step['primitive']
ns = {}
if 'ns' in step:
ns = step['ns']
if not '.' in primitive:
continue
f = opencenter.backends.primitive_by_name(primitive)
f({}, self.api, node_id, **ns)
def test_no_adventurator(self):
# trying to run any solved thing should fail
resp = self._model_create('facts', node_id=self.node['id'],
key='parent_id',
value=self.container['id'],
please=True,
raw=True, expect_code=403)
# this is somewhat bogus... point is really that it should 403
self.assertEquals(resp['message'], 'no adventurator')
def test_reparent(self):
# make sure setting parent results in an unambiguous solve
# with node.set_parent and a ns of parent=id
#
self._make_adventurator()
resp = self._model_create('facts', node_id=self.node['id'],
key='parent_id',
value=self.container['id'],
please=True,
raw=True, expect_code=202)
self.assertTrue('plan' in resp)
self.assertTrue(self._plan_includes(resp['plan'], 'node.set_parent'))
entry = self._plan_entry(resp['plan'], 'node.set_parent')
self.assertTrue('ns' in entry)
self.assertEquals(str(entry['ns']['parent']),
str(self.container['id']))
# we should have a task... make sure 0th task has same plan
self._assert_task(resp['plan'], 0)
# roll the task forward and make sure the actual consequence happens
self._run_plan_safe(resp['plan'], self.node['id'])
node = self._model_get_by_id('nodes', self.node['id'])
self.assertEquals(str(node['facts']['parent_id']),
str(self.container['id']))
def test_bogusfact(self):
# make sure that setting a fact not present in the backend
# results in a 403
self._make_adventurator()
self._model_create('facts', node_id=self.node['id'],
key='bogus_value',
value=self.container['id'],
please=True,
raw=True, expect_code=403)
def test_implied_backend(self):
self._make_adventurator()
resp = self._model_create('facts', node_id=self.node['id'],
key='solved_fact',
value='test_value',
please=True,
raw=True, expect_code=202)
# make sure this is up to expectations
self.assertTrue('plan' in resp)
plan = resp['plan']
self.assertTrue(self._plan_includes(plan, 'node.add_backend'))
self.assertTrue(self._plan_includes(plan, 'node.set_fact'))
# we should have a task... make sure 0th task has same plan
self._assert_task(resp['plan'], 0)
# roll the task forward and make sure the actual consequence happens
self._run_plan_safe(resp['plan'], self.node['id'])
node = self._model_get_by_id('nodes', self.node['id'])
self.assertEquals(node['facts']['solved_fact'], 'test_value')
self.assertTrue('test' in node['facts']['backends'])
    def test_implied_backend_not_duplicated(self):
self._make_adventurator()
resp = self._model_create('facts', node_id=self.node['id'],
key='solved_fact',
value='test_value',
please=True,
raw=True, expect_code=202)
# make sure this is up to expectations
self.assertTrue('plan' in resp)
plan = resp['plan']
self.assertTrue(self._plan_includes(plan, 'node.add_backend'))
self.assertTrue(self._plan_includes(plan, 'node.set_fact'))
# we should have a task... make sure 0th task has same plan
self._assert_task(resp['plan'], 0)
# roll the task forward and make sure the actual consequence happens
self._run_plan_safe(resp['plan'], self.node['id'])
node = self._model_get_by_id('nodes', self.node['id'])
self.assertEquals(node['facts']['solved_fact'], 'test_value')
self.assertTrue('test' in node['facts']['backends'])
# try the same fact change and make sure we don't drag in
# the backend now.
resp = self._model_create('facts', node_id=self.node['id'],
key='solved_fact',
value='test_value2',
please=True,
raw=True, expect_code=202)
self.assertTrue('plan' in resp)
plan = resp['plan']
self.assertFalse(self._plan_includes(plan, 'node.add_backend'))
def test_required_args(self):
self._make_adventurator()
resp = self._model_create('facts', node_id=self.node['id'],
key='unsettable_fact',
value='blah',
please=True,
raw=True, expect_code=409)
# make sure this is up to expectations
self.assertTrue('plan' in resp)
plan = resp['plan']
self.assertTrue(self._plan_includes(plan, 'test.set_test_fact'))
self.assertTrue(self._plan_includes(plan, 'node.add_backend'))
# we should have a plan with args, as specified by test backend..
entry = self._plan_entry(resp['plan'], 'test.set_test_fact')
self.assertTrue('args' in entry)
self.assertTrue('other_thing' in entry['args'])
self.assertEquals(len(entry['args']), 1)
# here, we should pump in another thing.
def test_pre_applied_consequences(self):
# make sure that when we do a subsolver for additional constraints,
# we apply the consequence of the primitive for which we are
# subsolving in the ephemeral. That is, we assume that the
# condition we are solving for will be successful, something
# like a disproof by counterexample, I guess.
import logging
        solver_log = logging.getLogger('opencenter.webapp.solver')
        solver_log.setLevel(logging.INFO)
newcontainer = self._model_create('nodes', name='newcontainer')
self._model_create('facts', node_id=newcontainer['id'], key='backends',
value=['node', 'container', 'test2'])
self._model_create('facts', node_id=newcontainer['id'],
key='test2_otherfact',
value='force_inherit_backend')
self._model_create('facts', node_id=self.node['id'],
key='parent_id', value=self.container['id'])
self._model_create('facts', node_id=self.node['id'],
key='backends', value=['node', 'agent'])
self._make_adventurator()
self.logger.debug('Current limit: %s' % sys.getrecursionlimit())
# without rolling forward consequences, this loops and
# crashes with max recursion
resp = self._model_create('facts', node_id=self.node['id'],
key='parent_id', value=newcontainer['id'],
please=True, raw=True, expect_code=202)
self.assertTrue('plan' in resp)
self.logger.debug('plan is: %s' % resp['plan'])
self._run_plan_safe(resp['plan'], self.node['id'])
node = self._model_get_by_id('nodes', self.node['id'])
self.assertEquals(int(node['facts']['parent_id']),
int(newcontainer['id']))
# This asserts that the consequences were pre-applied on the
# subsolve
self.assertEquals(int(node['facts']['test2_fact']),
int(newcontainer['id']))
# def test_nova_backend(self):
# # make sure adding a nova backend pulls in chef-client
# self._make_adventurator()
# # pop in a nova fact, which should pull in both
# # a nova backend and a chef-client backend
# resp = self._model_create('facts', node_id=self.node['id'],
# key='nova_az',
# value='nova',
# please=True,
# raw=True, expect_code=202)
# self.assertTrue('plan' in resp)
# plan = resp['plan']
# entry = self._plan_entry(plan, 'node.add_backend')
# self.assertTrue('ns' in entry)
# self.assertTrue('backend' in entry['ns'])
# self.assertTrue('chef-client' == entry['ns']['backend'])
# after we get scaffolding by default
# def test_install_chef_server(self):
# self._make_adventurator()
# resp = self.client.post('/adventures/3/execute',
# content_type='application/json',
# data={'node': self.node['id']})
# self.assertEquals(resp.status_code, 402)
|
StarcoderdataPython
|
394480
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Provides all derived/non-derived inputs of to be used later in performance calculations """
from utils import Attribute
__author__ = '<NAME>'
__all__ = ['Constants', 'Attribute']
# working_dir = os.path.dirname(os.path.realpath(__file__))
# TODO consider making use of https://docs.python.org/2/library/trace.html for dependency tracking
class Constants(object):
# TODO re-document class
""" An OOP Version of the above constants to use for the following part of this assignment, supporting lazy
evaluation where not every attribute or property will be triggered at run-time, thus increasing performance """
# TODO re-incorporate singleton after code verification
# __instance__ = None
#
# def __new__(cls, *args, **kwargs):
# """ Stops the :py:class:`Base` from instantiating more than once, if an instance exists in the current process
# that instance is then returned as a pointer for all other sub-classes. """
# if cls.__instance__ is None:
# cls.__instance__ = super(Constants, cls).__new__(cls, *args, **kwargs)
# return cls.__instance__
@Attribute
def g(self):
""" Gravitational Acceleration in SI meter per second [m /s]"""
return 9.81
@Attribute
def rho_sl(self):
""" ISA Sea Level Atmospheric Density in SI kilogram per meter cubed [kg/m^3]"""
return 1.225
@Attribute
def temperature_sl(self):
""" ISA Sea Level Temperature in SI Kelvin [K] """
return 288.15
@Attribute
def pressure_sl(self):
""" ISA Sea Level Pressure in SI Pascal [Pa] """
return 101325.
@Attribute
def specific_heat_air(self):
""" Specific Heat of Air at Constant Pressure c_p in SI Joule per kilogram Kelvin [J/kg K] """
return 1000.
@Attribute
def specific_heat_gas(self):
""" Specific Heat of Fuel/Air Mixture at Constant Pressure c_p in SI Joule per kilogram Kelvin [J/kg K] """
return 1150.
@Attribute
def kappa_air(self):
""" Ratio of Specific Heats of Air [-] """
return 1.4
@Attribute
def kappa_gas(self):
""" Ratio of Specific Heats for the Fuel/Air Mixture [-] """
return 1.33
@Attribute
def lower_heating_value(self):
""" Lower Heating Value (LHV) of Kerosene in SI Joule [J] """
return 43. * 1e6
@Attribute
def gas_constant(self):
""" Universal Gas Constant in SI Joule per kilogram Kelvin [J/kg K] """
return 287.05
@Attribute
def afr_stoichiometric(self):
""" Determines the dimensionless stoichiometric air-to-fuel ratio (AFR) of Kerosene (Reader pg. 95) """
return 14.66
if __name__ == '__main__':
obj = Constants()
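    # Hedged sketch (assumes the Attribute decorator behaves like a lazy property,
    # as the class docstring suggests): the ISA sea-level speed of sound
    # a = sqrt(kappa * R * T) follows directly from the constants above.
    speed_of_sound_sl = (obj.kappa_air * obj.gas_constant * obj.temperature_sl) ** 0.5
    print('Sea-level speed of sound: %.1f m/s' % speed_of_sound_sl)  # ~340.3 m/s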
|
StarcoderdataPython
|
3277105
|
<reponame>XSoyOscar/Algorithms<gh_stars>100-1000
# https://leetcode.com/problems/binary-search/
class Solution:
def search(self, nums, target):
l, r = 0, len(nums) - 1
while l <= r:
mid = (l + r) // 2
            if nums[mid] < target:
                l = mid + 1
            elif nums[mid] > target:
                r = mid - 1
else:
return mid
return -1
# Example usage:
obj = Solution()
index = obj.search([2], 2)
print("Index: ", index)
|
StarcoderdataPython
|
3244825
|
'''Utility functions for performing fast SVD.'''
import scipy.linalg as linalg
import numpy as np
from EigenPro import utils
def nystrom_kernel_svd(samples, kernel_fn, top_q):
"""Compute top eigensystem of kernel matrix using Nystrom method.
Arguments:
samples: data matrix of shape (n_sample, n_feature).
kernel_fn: tensor function k(X, Y) that returns kernel matrix.
top_q: top-q eigensystem.
Returns:
eigvals: top eigenvalues of shape (top_q).
eigvecs: (rescaled) top eigenvectors of shape (n_sample, top_q).
"""
n_sample, _ = samples.shape
kmat = kernel_fn(samples, samples).cpu().data.numpy()
scaled_kmat = kmat / n_sample
vals, vecs = linalg.eigh(scaled_kmat,
eigvals=(n_sample - top_q, n_sample - 1))
eigvals = vals[::-1][:top_q]
eigvecs = vecs[:, ::-1][:, :top_q] / np.sqrt(n_sample)
return utils.float_x(eigvals), utils.float_x(eigvecs)
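
# Hypothetical usage sketch (not part of the original module): kernel_fn is
# expected to return a torch tensor, since the code above calls
# .cpu().data.numpy() on its result. A Gaussian kernel is assumed here purely
# for illustration; expected shapes are (top_q,) and (n_sample, top_q).
if __name__ == '__main__':
    import torch

    def gaussian_kernel(x, y, bandwidth=1.0):
        sq_dists = torch.cdist(x, y) ** 2
        return torch.exp(-sq_dists / (2 * bandwidth ** 2))

    samples = torch.randn(200, 5)
    eigvals, eigvecs = nystrom_kernel_svd(samples, gaussian_kernel, top_q=10)
    print(eigvals.shape, eigvecs.shape)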
|
StarcoderdataPython
|
1862995
|
sentences1 = ["alice and bob love leetcode", "i think so too", "this is great thanks very much"]
sentences2 = ["please wait", "continue to fight", "continue to win"]
sentences3 = ["w jrpihe zsyqn l dxchifbxlasaehj", "nmmfrwyl jscqyxk a xfibiooix xolyqfdspkliyejsnksfewbjom",
"xnleojowaxwpyogyrayfgyuzhgtdzrsyococuqexggigtberizdzlyrdsfvryiynhg",
"krpwiazoulcixkkeyogizvicdkbrsiiuhizhkxdpssynfzuigvcbovm",
"rgmz rgztiup wqnvbucfqcyjivvoeedyxvjsmtqwpqpxmzdupfyfeewxegrlbjtsjkusyektigr",
"o lgsbechr lqcgfiat pkqdutzrq iveyv iqzgvyddyoqqmqerbmkxlbtmdtkinlk",
"hrvh efqvjilibdqxjlpmanmogiossjyxepotezo", "qstd zui nbbohtuk", "qsdrerdzjvhxjqchvuewevyzlkyydpeeblpc"]
class Solution:
    @staticmethod
    def mostWordsFound(sentences: list[str]):
count = 0
counts = []
for i in range(0, len(sentences)): # O(N)
for j in sentences[i].split(): # O(N)
count += 1
counts.append(count)
count = 0
max_value = counts[0]
for total in counts: # O(N)
if total > max_value:
max_value = total
return max_value
# TC: O(N)
print(Solution.mostWordsFound(sentences1))
print(Solution.mostWordsFound(sentences2))
print(Solution.mostWordsFound(sentences3))
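# Note (added for illustration): the same result can be computed in a single pass
# with a generator expression, assuming only the maximum word count is needed.
print(max(len(sentence.split()) for sentence in sentences1))  # 6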
|
StarcoderdataPython
|
352967
|
<filename>code/data.py
# Converting Exploring data into a script.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.coordinates as coord
from dustmaps.bayestar import BayestarQuery
from stardate.lhf import age_model
import aviary as av
from tools import getDust
from photometric_teff import bprp_to_teff
plotpar = {'axes.labelsize': 30,
'font.size': 30,
'legend.fontsize': 15,
'xtick.labelsize': 30,
'ytick.labelsize': 30,
'text.usetex': True}
plt.rcParams.update(plotpar)
def load_and_merge_data():
# Load Gaia-Kepler crossmatch.
with fits.open("../data/kepler_dr2_1arcsec.fits") as data:
gaia = pd.DataFrame(data[1].data, dtype="float64")
m = gaia.parallax.values > 0
gaia = gaia.iloc[m]
# Load Santos stars
santos = pd.read_csv("../data/santos.csv", skiprows=41)
santos["kepid"] = santos.KIC
# Load McQuillan stars
mc1 = pd.read_csv("../data/Table_1_Periodic.txt")
mc1["kepid"] = mc1.KID.values
# Merge santos, mcquillan and Gaia
mc_santos = pd.merge(santos, mc1, how="outer", on="kepid",
suffixes=["_santos", ""])
mc_sant_gaia = pd.merge(mc_santos, gaia, how="left", on="kepid",
suffixes=["KIC", ""])
df0 = mc_sant_gaia.drop_duplicates(subset="kepid")
# Add LAMOST RVs
lamost = pd.read_csv("../data/KeplerRot-LAMOST.csv")
lamost["kepid"] = lamost.KIC.values
lam = pd.merge(df0, lamost, on="kepid", how="left",
suffixes=["", "_lamost"])
df = lam.drop_duplicates(subset="kepid")
# Add <NAME>'s masses
travis = pd.read_csv("../data/Ruth_McQuillan_Masses_Out.csv")
masses = pd.DataFrame(dict({"kepid": travis.KIC.values,
"Mass": travis.iso_mass.values}))
masses.head()
df = pd.merge(masses, df, how="right", on="kepid",
suffixes=["_berger", ""])
df = df.drop_duplicates(subset="kepid")
return df
def load_and_merge_aperiodic():
# Load Gaia-Kepler crossmatch.
with fits.open("../data/kepler_dr2_1arcsec.fits") as data:
gaia = pd.DataFrame(data[1].data, dtype="float64")
m = gaia.parallax.values > 0
gaia = gaia.iloc[m]
# Load McQuillan stars
mc2 = pd.read_csv("../data/Table_2_Non_Periodic.txt")
mc2["kepid"] = mc2.KID.values
mc2 = mc2.iloc[np.isfinite(mc2.Prot.values)]
# Merge mcquillan and Gaia
mc_gaia = pd.merge(mc2, gaia, how="left", on="kepid",
suffixes=["KIC", ""])
df0 = mc_gaia.drop_duplicates(subset="kepid")
# Add LAMOST RVs
lamost = pd.read_csv("../data/KeplerRot-LAMOST.csv")
lamost["kepid"] = lamost.KIC.values
lam = pd.merge(df0, lamost, on="kepid", how="left",
suffixes=["", "_lamost"])
df = lam.drop_duplicates(subset="kepid")
return df
def combine_rv_measurements(df):
rv, rv_err = [np.ones(len(df))*np.nan for i in range(2)]
ml = np.isfinite(df.RV_lam.values)
rv[ml] = df.RV_lam.values[ml]
rv_err[ml] = df.e_RV_lam.values[ml]
print(sum(ml), "stars with LAMOST RVs")
mg = (df.radial_velocity.values != 0)
mg &= np.isfinite(df.radial_velocity.values)
rv[mg] = df.radial_velocity.values[mg]
rv_err[mg] = df.radial_velocity_error.values[mg]
print(sum(mg), "stars with Gaia RVs")
df["rv"] = rv
df["rv_err"] = rv_err
return df
# S/N cuts
def sn_cuts(df):
sn = df.parallax.values/df.parallax_error.values
m = (sn > 10)
m &= (df.parallax.values > 0) * np.isfinite(df.parallax.values)
m &= df.astrometric_excess_noise.values < 5
print(len(df.iloc[m]), "stars after S/N cuts")
# Jason's wide binary cuts
# m &= df.astrometric_excess_noise.values > 0
# m &= df.astrometric_excess_noise_sig.values > 6
# Jason's short-period binary cuts
# m &= radial_velocity_error < 4
# print(len(df.iloc[m]), "stars after Jason's binary cuts")
# assert 0
df = df.iloc[m]
return df
def deredden(df):
print("Loading Dustmaps")
bayestar = BayestarQuery(max_samples=2, version='bayestar2019')
print("Calculating Ebv")
coords = SkyCoord(df.ra.values*u.deg, df.dec.values*u.deg,
distance=df.r_est.values*u.pc)
ebv, flags = bayestar(coords, mode='percentile', pct=[16., 50., 84.],
return_flags=True)
# Calculate Av
Av_bayestar = 2.742 * ebv
print(np.shape(Av_bayestar), "shape")
Av = Av_bayestar[:, 1]
Av_errm = Av - Av_bayestar[:, 0]
Av_errp = Av_bayestar[:, 2] - Av
Av_std = .5*(Av_errm + Av_errp)
# Catch places where the extinction uncertainty is zero and default to an
# uncertainty of .05
m = Av_std == 0
Av_std[m] = .05
df["ebv"] = ebv[:, 1] # The median ebv value.
df["Av"] = Av
df["Av_errp"] = Av_errp
df["Av_errm"] = Av_errm
df["Av_std"] = Av_std
# Calculate dereddened photometry
AG, Abp, Arp = getDust(df.phot_g_mean_mag.values,
df.phot_bp_mean_mag.values,
df.phot_rp_mean_mag.values, df.ebv.values)
df["bp_dered"] = df.phot_bp_mean_mag.values - Abp
df["rp_dered"] = df.phot_rp_mean_mag.values - Arp
df["bprp_dered"] = df["bp_dered"] - df["rp_dered"]
df["G_dered"] = df.phot_g_mean_mag.values - AG
abs_G = mM(df.G_dered.values, df.r_est)
df["abs_G"] = abs_G
return df
# Calculate Absolute magntitude
def mM(m, D):
return 5 - 5*np.log10(D) + m
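# Worked example (added for illustration): this is the distance modulus
# M = m - 5*log10(D) + 5 for D in parsecs, so a star with apparent magnitude
# m = 10 at D = 100 pc has absolute magnitude M = 10 - 5*2 + 5 = 5.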
def remove_nans_binaries_subgiants(df, plot=False):
# Remove NaNs
m2 = np.isfinite(df.abs_G.values)
df = df.iloc[m2]
# Remove binaries
x = df.bp_dered - df.rp_dered
y = df.abs_G
AT = np.vstack((x**6, x**5, x**4, x**3, x**2, x, np.ones_like(x)))
ATA = np.dot(AT, AT.T)
w = np.linalg.solve(ATA, np.dot(AT, y))
minb, maxb, extra = 0, 2.2, .27
xs = np.linspace(minb, maxb, 1000)
subcut = 4.
m = (minb < x) * (x < maxb)
m &= (y < np.polyval(w, x) - extra) + (subcut > y)
flag = np.zeros(len(df))
flag[~m] = np.ones(len(flag[~m]))
df["flag"] = flag
test = df.iloc[df.flag.values == 1]
if plot:
plt.plot(df.bp_dered - df.rp_dered, df.abs_G, ".", alpha=.1)
plt.plot(test.bp_dered - test.rp_dered, test.abs_G, ".", alpha=.1)
plt.ylim(10, 1)
plt.savefig("test")
return df
def add_phot_teff(df):
# Calculate photometric Teff
teffs = bprp_to_teff(df.bp_dered - df.rp_dered)
df["color_teffs"] = teffs
return df
def add_gyro_ages(df, plot=False):
print("Calculating gyro ages")
logages = []
for i, p in enumerate(df.Prot.values):
logages.append(age_model(np.log10(p), df.phot_bp_mean_mag.values[i] -
df.phot_rp_mean_mag.values[i]))
df["log_age"] = np.array(logages)
df["age"] = (10**np.array(logages))*1e-9
if plot:
plt.figure(figsize=(16, 9), dpi=200)
singles = df.flag.values == 1
plt.scatter(df.bprp_dered.values[singles], df.abs_G.values[singles],
c=df.age.values[singles], vmin=0, vmax=5, s=50, alpha=.2,
cmap="viridis", rasterized=True, edgecolor="none")
plt.xlabel("$\mathrm{G_{BP}-G_{RP}~[dex]}$")
plt.ylabel("$\mathrm{G~[dex]}$")
plt.colorbar(label="$\mathrm{Gyrochronal~age~[Gyr]}$")
plt.ylim(11, 5.5)
plt.xlim(.8, 2.7);
plt.savefig("age_gradient")
return df
def add_velocities(df):
xyz, vxyz = av.simple_calc_vxyz(df.ra.values, df.dec.values,
1./df.parallax.values, df.pmra.values,
df.pmdec.values,
df.rv.values)
vx, vy, vz = vxyz
x, y, z = xyz
df["vx"] = vxyz[0].value
df["vy"] = vxyz[1].value
df["vz"] = vxyz[2].value
df["x"] = xyz[0].value
df["y"] = xyz[1].value
df["z"] = xyz[2].value
return df
def calc_vb(df):
d = coord.Distance(parallax=df.parallax.values*u.mas)
vra = (df.pmra.values*u.mas/u.yr * d).to(u.km/u.s,
u.dimensionless_angles())
vdec = (df.pmdec.values*u.mas/u.yr * d).to(u.km/u.s,
u.dimensionless_angles())
c = coord.SkyCoord(ra=df.ra.values*u.deg, dec=df.dec.values*u.deg,
distance=d, pm_ra_cosdec=df.pmra.values*u.mas/u.yr,
pm_dec=df.pmdec.values*u.mas/u.yr)
gal = c.galactic
v_b = (gal.pm_b * gal.distance).to(u.km/u.s, u.dimensionless_angles())
df["vb"] = v_b
return df
if __name__ == "__main__":
print("Loading data...")
df = load_and_merge_data()
print(len(df), "stars")
print("Combining RV measurements...")
df = combine_rv_measurements(df)
print(len(df), "stars")
print("S/N cuts")
df = sn_cuts(df)
print(len(df), "stars")
print("Get dust and redenning...")
df = deredden(df)
print(len(df), "stars")
print("Flag subgiants and binaries.")
df = remove_nans_binaries_subgiants(df)
print(len(df), "stars")
print("Calculate photometric temperatures.")
df = add_phot_teff(df)
print(len(df), "stars")
print("Calculate gyro ages")
df = add_gyro_ages(df)
print(len(df), "stars")
print("Calculating velocities")
df = add_velocities(df)
print(len(df), "stars")
print("Calculating vb velocities")
df = calc_vb(df)
print(len(df), "stars")
print(len(df.iloc[np.isfinite(df.rv.values) & (df.rv.values != 0.)]),
"of those have RV measurements.")
print("Saving file")
fname = "../aviary/mc_san_gaia_lam.csv"
print(fname)
# df.to_csv(fname)
##-APERIODIC-STARS---------------------------------------------------------
#print("Loading data...")
#df = load_and_merge_aperiodic()
#print(len(df), "stars")
#print("Combining RV measurements...")
#df = combine_rv_measurements(df)
#print(len(df), "stars")
#print("S/N cuts")
#df = sn_cuts(df)
#print(len(df), "stars")
#print("Get dust and redenning...")
#df = deredden(df)
#print(len(df), "stars")
#print("Calculate photometric temperatures.")
#df = add_phot_teff(df)
#print(len(df), "stars")
#print("Calculate gyro ages")
#df = add_gyro_ages(df)
#print(len(df), "stars")
#print("Calculating velocities")
#df = add_velocities(df)
#print(len(df), "stars")
#print("Calculating vb velocities")
#df = calc_vb(df)
#print(len(df), "stars")
#print("Saving file")
#fname = "../data/aperiodic.csv"
#print(fname)
#df.to_csv(fname)
|
StarcoderdataPython
|
5175523
|
from PYB11Generator import *
#-------------------------------------------------------------------------------
# GeometryRegistrar
#-------------------------------------------------------------------------------
@PYB11singleton
class GeometryRegistrar:
# The instance attribute. We expose this as a property of the class.
@PYB11static
@PYB11returnpolicy("reference")
def instance(self):
return "GeometryRegistrar&"
# The coordinate system
@PYB11static
@PYB11pycppname("coords")
@PYB11returnpolicy("reference")
#@PYB11ignore
def get_coords(self):
return "CoordinateType"
@PYB11static
@PYB11pycppname("coords")
#@PYB11ignore
def set_coords(self,
x = "const CoordinateType"):
return "void"
#coords = property(get_coords, set_coords, doc="The coordinate system")
|
StarcoderdataPython
|
291184
|
<filename>WeatherCrawler/HausruckWatherProvider.py
#!usr/bin/env python
# -*-coding:utf-8 -*-
from bs4 import BeautifulSoup
from html.parser import HTMLParser
import urllib3
import re
import time
import datetime
import locale
from WeatherData import WeatherData
import subprocess
class HausruckWatherProvider(object):
"""description of class"""
    @staticmethod
    def _getTextFromTr(tableRows, idx, subIdx = 1, tag = 'font'):
return tableRows[idx].findChildren(tag)[subIdx].text
def _parseInt(self, str):
return int(re.search("(-?\d+)", str)[1])
def _parseFloat(self, str):
return float(re.search("(-?\d+,\d+)", str)[1].replace(',','.'))
def _parseTime(self, str):
timePart = datetime.datetime.strptime(re.search("(\d+:\d+)", str)[1], '%H:%M')
return datetime.datetime.now().replace(hour=timePart.hour, minute=timePart.minute, second=0, microsecond=0)
def _parseDirection(self, str):
search = re.search(".+ / (.+)", str)
if search == None:
return None
return self._parseInt(str)
def _parseStringDirectionAndValue(self, str):
return re.search("(.+) (\d+.+)", str)
def _stringDirectionToInt(self, str):
if (str == 'N'):
return 0
if (str == 'N-NO'):
return 22.5
if (str == 'NO'):
return 45
if (str == 'O-NO'):
return 67.5
if (str == 'O'):
return 90
if (str == 'O-SO'):
return 112.5
if (str == 'SO'):
return 135
if (str == 'S-SO'):
return 157.5
if (str == 'S'):
return 180
if (str == 'S-SW'):
return 202.5
if (str == 'SW'):
return 225
if (str == 'W-SW'):
return 247.5
if (str == 'W'):
return 270
if (str == 'W-NW'):
return 292.5
if (str == 'NW'):
return 315
if (str == 'N-NW'):
return 337.5
return None
def _monthTextToNr(self, str):
if (str.startswith('J')):
return '1'
if (str.startswith('F')):
return '2'
if (str.startswith('Mä')):
return '3'
if (str.startswith('Ap')):
return '4'
if (str.startswith('Ma')):
return '5'
if (str.startswith('Jun')):
return '6'
if (str.startswith('Jul')):
return '7'
if (str.startswith('Au')):
return '8'
if (str.startswith('S')):
return '9'
if (str.startswith('O')):
return '10'
if (str.startswith('N')):
return '11'
if (str.startswith('D')):
return '12'
raise Exception('Failed to parse month: ' + str)
def getWatherData(self):
#header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}
#http = urllib3.PoolManager(1, header)
#response = http.request('GET', 'http://wetter-hausruckviertel.at/wetter_wolfsegg/current.html')
#soup = BeautifulSoup(response.data.decode('utf-8'))
output = subprocess.Popen(['FetchPageWorkaround.exe', 'http://wetter-hausruckviertel.at/wetter_wolfsegg/current.html'], stdout=subprocess.PIPE).communicate()[0]
soup = BeautifulSoup(output)
tableRows = soup.findAll("tr")
dataDateGrp = re.search("(\d{1,2}\.) (.+)( \d{4})", HausruckWatherProvider._getTextFromTr(tableRows, 2))
dataTime = datetime.datetime.strptime(HausruckWatherProvider._getTextFromTr(tableRows, 3) + ' ' + dataDateGrp[1] + self._monthTextToNr(dataDateGrp[2]) + dataDateGrp[3], '%H:%M %d.%m %Y')
elevation = int(re.search("(\d+) m", tableRows[0].findChildren('b')[0].text)[1])
temp2m = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 4, 2))
temp2mMin = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 4, 4))
temp2mMinTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 4, 3))
temp2mMax = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 4, 6))
temp2mMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 4, 5))
humidity = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 6, 2))
humidityMin = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 6, 4))
humidityMinTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 6, 3))
humidityMax = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 6, 6))
humidityMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 6, 5))
dewPoint = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 7, 2))
dewPointMin = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 7, 4))
dewPointMinTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 7, 3))
dewPointMax = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 7, 6))
dewPointMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 7, 5))
pressure = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 8, 2))
pressure3hTrend = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 8, 3))
if tableRows[8].findChildren('td')[3].img.attrs['src']=='p_do.gif':
pressure3hTrend = -pressure3hTrend
wellness = HausruckWatherProvider._getTextFromTr(tableRows, 9)
forecastShort = HausruckWatherProvider._getTextFromTr(tableRows, 10)
forecastLong = HausruckWatherProvider._getTextFromTr(tableRows, 11).strip()
snowLine = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 12))
cloudBase = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 13))
uvIndex = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 14, 2))
solarRadiation = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 15, 2))
solarRadiationMax = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 15, 4))
solarRadiationMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 15, 3))
evapotranspiration = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 16))
windchill = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 17, 2))
windchillMin = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 17, 4))
windchillMinTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 17, 3))
windchillMax = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 17, 6))
windchillMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 17, 5))
windSpeed = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 19, 2))
windDirection = self._parseDirection(HausruckWatherProvider._getTextFromTr(tableRows, 20))
windDominatingDirection = self._stringDirectionToInt(HausruckWatherProvider._getTextFromTr(tableRows, 20, 3))
windMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 19, 3))
windMaxGrp = self._parseStringDirectionAndValue(HausruckWatherProvider._getTextFromTr(tableRows, 19, 4))
windMax = self._parseFloat(windMaxGrp[2])
windMaxDirection = self._stringDirectionToInt(windMaxGrp[1])
gust = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 18, 2))
gustDirection = self._parseDirection(HausruckWatherProvider._getTextFromTr(tableRows, 21))
gustMaxTime = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 18, 3))
gustMaxGrp = self._parseStringDirectionAndValue(HausruckWatherProvider._getTextFromTr(tableRows, 18, 4))
gustMax = self._parseFloat(gustMaxGrp[2])
gustMaxDirection = self._stringDirectionToInt(gustMaxGrp[1])
lastFrost = datetime.datetime.strptime(HausruckWatherProvider._getTextFromTr(tableRows, 22, 3), '(%H:%M\xa0\xa0%d.%m.%Y)')
lastFrostDuration = HausruckWatherProvider._getTextFromTr(tableRows, 22, 2)[7:]
rainLastHour = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 23, 1))
rainDay = self._parseFloat(HausruckWatherProvider._getTextFromTr(tableRows, 23, 2))
rainLast = datetime.datetime.strptime(tableRows[26].findChildren('b')[0].contents[0], '%H:%M\xa0\xa0%d.%m.%Y')
sunrise = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 33, 0, 'b'))
sunZenith = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 34, 0, 'b'))
sunset = self._parseTime(HausruckWatherProvider._getTextFromTr(tableRows, 35, 0, 'b'))
cloudiness = 100 - self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 37, 0, 'b'))
moonPhase = self._parseInt(HausruckWatherProvider._getTextFromTr(tableRows, 41, 0, 'b'))
moonNextFullRaw = HausruckWatherProvider._getTextFromTr(tableRows, 42, 0, 'b').replace('\xa0', '')
if len(moonNextFullRaw) <= 5:
moonNextFull = self._parseTime(moonNextFullRaw)
else:
moonNextFullGrp = re.search("(.+\.) (.+)( \d{4})", moonNextFullRaw)
moonNextFull = datetime.datetime.strptime(moonNextFullGrp[1] + self._monthTextToNr(dataDateGrp[2]) + moonNextFullGrp[3], '%H:%M %d.%m %Y')
return WeatherData('Wolfsegg',
dataTime, elevation, temp2m, temp2mMin, temp2mMinTime, temp2mMax, temp2mMaxTime,
humidity, humidityMin, humidityMinTime, humidityMax, humidityMaxTime,
dewPoint, dewPointMin, dewPointMinTime, dewPointMax, dewPointMaxTime,
pressure, pressure3hTrend,
wellness, forecastShort, forecastLong, snowLine, cloudBase, uvIndex,
solarRadiation, solarRadiationMax, solarRadiationMaxTime,
evapotranspiration,
windchill, windchillMin, windchillMinTime, windchillMax, windchillMaxTime,
windSpeed, windDirection, windDominatingDirection, windMaxTime, windMax, windMaxDirection,
gust, gustDirection, gustMaxTime, gustMax, gustMaxDirection,
lastFrost, lastFrostDuration,
rainLastHour, rainDay, rainLast,
sunrise, sunZenith, sunset, cloudiness,
moonPhase, moonNextFull
)
|
StarcoderdataPython
|
1687097
|
# TO DO: implement difficulties and text box
# TO DO: resize sprites using pygame.sprite
# TO DO: implement menu
# TO DO: sound effects
import pygame, sys, os
from pygame.locals import * # @UnusedWildImport
import minesweeper
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
# fonts
pygame.font.init()
smallFont = pygame.font.SysFont('arial', 14)
medFont = pygame.font.SysFont('arial', 24)
bigFont = pygame.font.SysFont('arial', 96, True)
# graphics
pygame.display.init()
pygame.display.set_caption('Minesweeper v0.1')
screen = pygame.display.set_mode((640,640),0,32)
coveredTile = pygame.image.load('sprites/spr_covered_tile.png').convert_alpha()
uncoveredTiles = {}
for num in xrange(9):
uncoveredTiles[num] = pygame.image.load('sprites/spr_uncovered_tile_' + `num` + '.png').convert_alpha()
flaggedTile = pygame.image.load('sprites/spr_flagged_tile.png').convert_alpha()
minedTile = pygame.image.load('sprites/spr_mined_tile.png').convert_alpha()
selectedMinedTile = pygame.image.load('sprites/spr_selected_mined_tile.png').convert_alpha()
correctTile = pygame.image.load('sprites/spr_correct_tile.png').convert_alpha()
# constants and globals
X_OFFSET = 70
Y_OFFSET = 70
NUM_ROWS_IN_GRID = 10
NUM_COLS_IN_GRID = 10
NUM_MINES = 10
LINE_WIDTH = 3
RECT_SIZE = 50
BG_COLOR = (64,64,64)
notifications = {0:'', 1:'Unflag tile first.', 2:'Flag limit reached.', 3:'Press r to restart or e to exit.'}
# game-specific
def initializeGame():
global mouseButtonStates, imageReprs, notification, b, gameOver, won, time, timeOffset
mouseButtonStates = (False, False, False) # left, middle, right
imageReprs = [[None for col in xrange(10)] for row in xrange(10)] # @UnusedVariable
notification = notifications[0]
b = minesweeper.Board(NUM_ROWS_IN_GRID, NUM_COLS_IN_GRID, NUM_MINES)
gameOver = False
won = False
timeOffset = 0
pygame.init()
def exitGame():
pygame.quit()
sys.exit()
initializeGame()
while True:
mouseButtonJustPressed = [False, False, False]
for event in pygame.event.get():
if event.type == QUIT:
exitGame()
if event.type == MOUSEBUTTONDOWN:
mouseButtonStates = pygame.mouse.get_pressed()
if mouseButtonStates[0]:
mouseButtonJustPressed[0] = True
if mouseButtonStates[1]:
mouseButtonJustPressed[1] = True
if mouseButtonStates[2]:
mouseButtonJustPressed[2] = True
if event.type == KEYDOWN and gameOver:
if event.key == K_r:
initializeGame()
timeOffset = pygame.time.get_ticks()
if event.key == K_e:
exitGame()
x,y = pygame.mouse.get_pos()
# debugLabel = smallFont.render('mouse coords: ' + str(x) + ', ' + str(y), 1, (0,128,255))
screen.fill(BG_COLOR)
for row in xrange(NUM_ROWS_IN_GRID):
for col in xrange(NUM_COLS_IN_GRID):
tile = b.getTileAt(row, col)
rect = Rect((X_OFFSET+RECT_SIZE*col,Y_OFFSET+RECT_SIZE*row), (RECT_SIZE,RECT_SIZE))
if not gameOver:
if rect.collidepoint(x,y) and not tile.isUncovered():
if mouseButtonJustPressed[0]: # left mouse: uncover
try:
gameOver = b.uncoverTileAt(row, col)
except minesweeper.UncoverError:
notification = notifications[1]
else:
notification = notifications[0]
if b.getNumCoveredTiles() == b.getNumMines(): # check if won
gameOver = True
won = True
elif mouseButtonJustPressed[2]: # right mouse: flag/unflag
try:
b.flagTileAt(row, col)
except minesweeper.FlagError:
notification = notifications[2]
else:
notification = notifications[0]
if tile.isUncovered():
if tile.isMined(): # selected mined tile
imageReprs[row][col] = selectedMinedTile
else:
imageReprs[row][col] = uncoveredTiles[tile.getNumber()]
elif tile.isFlagged():
if gameOver and tile.isMined(): # guessed correctly
imageReprs[row][col] = correctTile
else: # guessed incorrectly or game is not over
imageReprs[row][col] = flaggedTile
else: # tile is unturned
if gameOver and tile.isMined():
if won:
imageReprs[row][col] = coveredTile
else: # reveal mines when game over and lost
imageReprs[row][col] = minedTile
else:
imageReprs[row][col] = coveredTile
screen.blit(imageReprs[row][col], (rect.x,rect.y))
if not gameOver:
rawTime = pygame.time.get_ticks()
timeLabel = medFont.render('Time: ' + `(rawTime - timeOffset)/1000.`, 1, (0,128,155))
screen.blit(timeLabel, (10,10))
if gameOver:
if won:
resultLabel = bigFont.render('YOU WIN!', 1, (0,128,255))
else:
resultLabel = bigFont.render('GAME OVER!', 1, (255,32,0))
notification = notifications[3]
screen.blit(resultLabel, (320-resultLabel.get_width()/2,320-resultLabel.get_height()/2))
if notification != '':
notificationLabel = medFont.render(notification, 1, (0,128,255))
screen.blit(notificationLabel, (320-notificationLabel.get_width()/2,600-notificationLabel.get_height()/2))
pygame.display.update()
|
StarcoderdataPython
|
3483340
|
import logging
import os
import sys
import aiohttp
from discord.ext.commands import Bot
from plugins.plugin_manager import PluginManager
TOKEN = os.environ.get('MEIORDEL_TOKEN')
MEI_CHANNEL = os.environ.get("MEIORDEL_CHANNEL")
MEI_VOICE_CHANNEL = os.environ.get("MEIORDEL_VOICE_CHANNEL")
COMMAND_PREFIX = "m!"
DESCRIPTION = "A personal assistant disguised as a Discord bot"
VERSION = "0.0.1"
LOGGER = logging.getLogger("discord_mei.%s" % __name__)
class Mei(Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.plugin_manager = PluginManager(self)
self.plugin_manager.load_all()
self.voice_channel_id = MEI_VOICE_CHANNEL
async def on_ready(self):
LOGGER.info('Logged in as')
LOGGER.info(self.user.name)
LOGGER.info(self.user.id)
LOGGER.info('------')
LOGGER.info("I will be your shield! Ready for combat!")
def configure_logging(is_debug=False):
log_format = "%(asctime)s [%(name)s] [%(levelname)s] %(message)s"
logging.basicConfig(format=log_format,
filename='discord_mei.log',
level=logging.DEBUG if is_debug else logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(logging.Formatter(log_format))
console_handler.setLevel(logging.DEBUG)
# Add it to the root logger
logging.getLogger('').addHandler(console_handler)
LOGGER.info("******* Mei Ordel - I'll be your Discord bot! *******")
LOGGER.debug("Ready to DEBUG!")
def main():
configure_logging()
bot = Mei(command_prefix=COMMAND_PREFIX,
description=DESCRIPTION,
pm_help=True,
connector=aiohttp.TCPConnector(verify_ssl=False))
LOGGER.info("Running Mei!")
bot.run(TOKEN)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6597436
|
from tests.system.action.base import BaseActionTestCase
class UserDeleteTemporaryActionTest(BaseActionTestCase):
def test_delete_correct(self) -> None:
self.create_model("meeting/1", {"temporary_user_ids": [111]})
self.create_model(
"user/111", {"username": "username_srtgb123", "meeting_id": 1}
)
response = self.client.post(
"/",
json=[{"action": "user.delete_temporary", "data": [{"id": 111}]}],
)
self.assert_status_code(response, 200)
self.assert_model_deleted("user/111")
def test_delete_not_temporary(self) -> None:
self.create_model("user/111", {"username": "username_srtgb123"})
response = self.client.post(
"/",
json=[{"action": "user.delete_temporary", "data": [{"id": 111}]}],
)
self.assert_status_code(response, 400)
self.assert_model_exists("user/111")
def test_delete_wrong_id(self) -> None:
self.create_model("user/112", {"username": "username_srtgb123"})
response = self.client.post(
"/",
json=[{"action": "user.delete_temporary", "data": [{"id": 111}]}],
)
self.assert_status_code(response, 400)
model = self.get_model("user/112")
assert model.get("username") == "username_srtgb123"
|
StarcoderdataPython
|
12840379
|
<filename>test.py
import _logging as logging
logger = logging.logging()
logger.DEBUG('TEST')
logger.ERROR('TEST')
logger.INFO('TEST')
logger.WARNING('TEST')
|
StarcoderdataPython
|
5163360
|
import sys
from datetime import datetime
from string import ascii_letters, digits
from random import choice
class MockOPSigninResponse:
TOKEN_LEN = 43
ERROR_STATUS = 1
SUCCESS_STATUS = 0
ERROR_TEMPLATE = "[ERROR] {} Authentication: DB: 401: Unauthorized\n"
SUCCESS_TEMPLATE = (
"export OP_SESSION_{}=\"{}\"\n"
"# This command is meant to be used with your shell's eval function.\n"
"# Run 'eval $(op signin {})' to sign in to your 1Password account.\n"
"# Use the --raw flag to only output the session token.\n"
)
def __init__(self, shorthand, signin_success=True, raw=True):
if signin_success:
token = self._generate_token()
if raw:
self._output = token
else:
self._output = self.SUCCESS_TEMPLATE.format(shorthand, token, shorthand)
self._error_output = None
self.exit_status = self.SUCCESS_STATUS
else:
_timestamp = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
self._output = None
self._error_output = self.ERROR_TEMPLATE.format(_timestamp)
self.exit_status = self.ERROR_STATUS
def _generate_token(self):
string_format = ascii_letters + digits
_token = "".join(choice(string_format) for x in range(0, self.TOKEN_LEN))
return _token
def respond(self, *args):
if self._output is not None:
sys.stdout.write(self._output)
if self._error_output is not None:
sys.stderr.write(self._error_output)
return self.exit_status
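
# Hypothetical usage sketch (not part of the original module): the class is
# presumably used to stand in for the `op signin` CLI in tests, so respond()
# is called in place of the real process and its exit status is inspected.
if __name__ == '__main__':
    ok = MockOPSigninResponse('my_account', signin_success=True, raw=True)
    assert ok.respond() == MockOPSigninResponse.SUCCESS_STATUS  # prints a fake token
    bad = MockOPSigninResponse('my_account', signin_success=False)
    assert bad.respond() == MockOPSigninResponse.ERROR_STATUS   # writes the error line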
|
StarcoderdataPython
|
1635053
|
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2015,2018 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
#exit-command, does nothing except provides help text to help-command and cmd name
# for command completion
#
from ..utils.utils import *
from ..utils.functions import *
from .SuperCommand import *
from ..globals import *
from ..globals import GlobalVariables
class ExitCommand(SuperCommand):
def __init__(self,cmd_handler):
super().__init__(cmd_handler)
def parseCommandArgs(self,userInputList):
#implement in command class
#parse arguments like in this method
cmd_parser = ThrowingArgumentParser(prog="exit",description='Exit program.')
(self.cmd_args,self.help_text)=parseCommandArgs(cmd_parser,userInputList)
def execute(self):
#implement command here
print("Exit program.")
|
StarcoderdataPython
|
13011
|
<gh_stars>1-10
#!/usr/bin/env python
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the Revised BSD License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Revised BSD License for more details.
Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k
Copyright 2011-2016 <NAME> - https://github.com/KazukiPrzyborowski
$FileInfo: pypkg-gen.py - Last Update: 6/1/2016 Ver. 0.2.0 RC 1 - Author: cooldude2k $
'''
from __future__ import absolute_import, division, print_function, unicode_literals;
import re, os, sys, time, platform, datetime, argparse, subprocess;
__version_info__ = (0, 2, 0, "rc1");
if(__version_info__[3]!=None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])+"+"+str(__version_info__[3]);
if(__version_info__[3]==None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2]);
proname = "pypkg-gen";
prover = __version__;
profullname = proname+" "+prover;
def which_exec(execfile):
for path in os.environ["PATH"].split(":"):
if os.path.exists(path + "/" + execfile):
return path + "/" + execfile;
linuxdist = [None];
try:
linuxdist = platform.linux_distribution();
except AttributeError:
linuxdist = [None];
getlinuxdist = linuxdist;
setdistroname = "debian";
setdistrocname = "jessie";
if(getlinuxdist[0] is not None and (getlinuxdist[0].lower()=="debian" or getlinuxdist[0].lower()=="ubuntu" or getlinuxdist[0].lower()=="linuxmint")):
setdistroname = getlinuxdist[0].lower();
setdistrocname = getlinuxdist[2].lower();
if(setdistrocname==""):
lsblocatout = which_exec("lsb_release");
pylsblistp = subprocess.Popen([lsblocatout, "-c"], stdout=subprocess.PIPE, stderr=subprocess.PIPE);
pylsbout, pylsberr = pylsblistp.communicate();
if(sys.version[0]=="3"):
pylsbout = pylsbout.decode("utf-8");
pylsb_esc = re.escape("Codename:")+'([a-zA-Z\t+\s+]+)';
pylsbname = re.findall(pylsb_esc, pylsbout)[0].lower();
setdistrocname = pylsbname.strip();
if(getlinuxdist[0] is not None and getlinuxdist[0].lower()=="archlinux"):
setdistroname = getlinuxdist[0].lower();
setdistrocname = None;
parser = argparse.ArgumentParser(conflict_handler = "resolve", add_help = True);
parser.add_argument("-v", "--version", action = "version", version = profullname);
parser.add_argument("-s", "--source", default = os.path.realpath(os.getcwd()), help = "source dir");
parser.add_argument("-d", "--distro", default = setdistroname, help = "enter linux distribution name");
parser.add_argument("-c", "--codename", default = setdistrocname, help = "enter release code name");
parser.add_argument("-p", "--pyver", default = sys.version[0], help = "enter version of python to use");
getargs = parser.parse_args();
bashlocatout = which_exec("bash");
getargs.source = os.path.realpath(getargs.source);
getargs.codename = getargs.codename.lower();
getargs.distro = getargs.distro.lower();
if(getargs.pyver=="2"):
getpyver = "python2";
if(getargs.pyver=="3"):
getpyver = "python3";
if(getargs.pyver!="2" and getargs.pyver!="3"):
if(sys.version[0]=="2"):
getpyver = "python2";
if(sys.version[0]=="3"):
getpyver = "python3";
get_pkgbuild_dir = os.path.realpath(getargs.source+os.path.sep+"pkgbuild");
get_pkgbuild_dist_pre_list = [d for d in os.listdir(get_pkgbuild_dir) if os.path.isdir(os.path.join(get_pkgbuild_dir, d))];
get_pkgbuild_dist_list = [];
for dists in get_pkgbuild_dist_pre_list:
tmp_pkgbuild_python = os.path.realpath(get_pkgbuild_dir+os.path.sep+dists+os.path.sep+getpyver);
if(os.path.exists(tmp_pkgbuild_python) and os.path.isdir(tmp_pkgbuild_python)):
get_pkgbuild_dist_list.append(dists);
if(not getargs.distro in get_pkgbuild_dist_list):
print("Could not build for "+getargs.distro+" distro.");
sys.exit();
if(getargs.distro=="debian" or getargs.distro=="ubuntu" or getargs.distro=="linuxmint"):
pypkgpath = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"+os.path.sep+getargs.distro+os.path.sep+getpyver+os.path.sep+"pydeb-gen.sh");
pypkgenlistp = subprocess.Popen([bashlocatout, pypkgpath, getargs.source, getargs.codename], stdout=subprocess.PIPE, stderr=subprocess.PIPE);
pypkgenout, pypkgenerr = pypkgenlistp.communicate();
if(sys.version[0]=="3"):
pypkgenout = pypkgenout.decode("utf-8");
print(pypkgenout);
pypkgenlistp.wait();
if(getargs.distro=="archlinux"):
pypkgpath = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"+os.path.sep+getargs.distro+os.path.sep+getpyver+os.path.sep+"pypac-gen.sh");
pypkgenlistp = subprocess.Popen([bashlocatout, pypkgpath, getargs.source, getargs.codename], stdout=subprocess.PIPE, stderr=subprocess.PIPE);
pypkgenout, pypkgenerr = pypkgenlistp.communicate();
if(sys.version[0]=="3"):
pypkgenout = pypkgenout.decode("utf-8");
print(pypkgenout);
pypkgenlistp.wait();
|
StarcoderdataPython
|
3550826
|
<filename>packages/vaex-server/vaex/server/_version.py
__version_tuple__ = (0, 4, 0, 'dev.0')
__version__ = '0.4.0-dev.0'
|
StarcoderdataPython
|
3244706
|
from django.db.models import Prefetch
from django.shortcuts import get_object_or_404
from rest_framework.generics import RetrieveAPIView, CreateAPIView, ListAPIView
from rest_framework.viewsets import ModelViewSet
from rest_framework import permissions
from chat.models import Chat, Message, Profile
from .serializers import MessageSerializer, ParticipantListSerializer, InitialChatDetailSerializer, CommonChatDetailSerializer, MainProfileSerializer
from chat.services import get_friend_list_of_given_user
class FriendListView(ListAPIView):
'''A ListView that returns profiles of friends of requested user'''
model = Profile
serializer_class = ParticipantListSerializer
permission_classes = (permissions.IsAuthenticated, )
def get_queryset(self):
user_id = self.request.user.id
if user_id:
return get_friend_list_of_given_user(user_id)
class AllProfilesListView(ListAPIView):
'''A ListView that returns profiles of all users'''
model = Profile
serializer_class = ParticipantListSerializer
permission_classes = (permissions.IsAuthenticated, )
def get_queryset(self):
return Profile.objects.exclude(id=self.request.user.id)
class ChatDetailView(RetrieveAPIView):
model = Chat
permission_classes = (permissions.IsAuthenticated, )
def get_serializer_class(self):
        try:
            last_message_index = int(
                self.request.query_params.get('last_message_index', 0))
        except ValueError:
            last_message_index = 0
if last_message_index == 0:
return InitialChatDetailSerializer
return CommonChatDetailSerializer
def get_queryset(self):
user_id = self.request.user.id
return Chat.objects.filter(participant_list=user_id)
def get_serializer_context(self):
# context = super().get_serializer_context()
# context.update({'user_id': self.request.user.id})
try:
last_message_index = int(
self.request.query_params.get('last_message_index', 0))
except ValueError:
last_message_index = 0
return {
'user_id': self.request.user.id,
'last_message_index': last_message_index,
}
class ProfileDetailView(RetrieveAPIView):
model = Profile
serializer_class = MainProfileSerializer
permission_classes = (permissions.IsAuthenticated, )
def get_queryset(self):
user_id = self.request.user.id
return Profile.objects.filter(pk=user_id).prefetch_related(
'chat_list__last_message__author', 'chat_list__participant_list')
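# Minimal URL wiring sketch for the views above. In a real project this would
# live in urls.py; the route paths and names here are illustrative assumptions,
# not taken from the actual project configuration.
from django.urls import path
urlpatterns = [
    path('friends/', FriendListView.as_view(), name='friend-list'),
    path('profiles/', AllProfilesListView.as_view(), name='profile-list'),
    path('profiles/<int:pk>/', ProfileDetailView.as_view(), name='profile-detail'),
    path('chats/<int:pk>/', ChatDetailView.as_view(), name='chat-detail'),
]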
|
StarcoderdataPython
|
11344067
|
"""This module contains the electric results class ."""
from ansys.dpf.post.common import _AvailableKeywords
from ansys.dpf.post.scalar import Scalar
from ansys.dpf.post.vector import Vector
class ElectricField(Vector):
"""Defines the temperature object for thermal/electric analysis, that is a scalar object."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._operator_name = "EF"
# disable element scoping
if _AvailableKeywords.element_scoping in kwargs:
raise Exception(
"Element scoping is not available with thermal/electric results."
)
self.definition._Definition__element_scoping_locked = True
def __str__(self):
txt = super().__str__()
txt += "\n"
txt += "This is an electric field object."
return txt
class ElectricPotential(Scalar):
"""Defines the temperature object for thermal/electric analysis, that is a scalar object."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._operator_name = "VOLT"
# disable element scoping
if _AvailableKeywords.element_scoping in kwargs:
raise Exception(
"Element scoping is not available with thermal/electric results."
)
self.definition._Definition__element_scoping_locked = True
def __str__(self):
txt = super().__str__()
txt += "\n"
txt += "This is an electric potential object."
return txt
|
StarcoderdataPython
|
4869804
|
<reponame>crowmurk/mallenom<filename>mallenom/workcal/views.py
import datetime
from django.forms.models import model_to_dict
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse_lazy
from django.views.generic import (
CreateView,
DetailView,
UpdateView,
DeleteView
)
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django_tables2 import SingleTableMixin
from django_filters.views import FilterView
from core.views import (
ActionTableDeleteMixin,
DeleteMessageMixin,
)
from core.logger import log
from .models import DayType, Day
from .forms import DayTypeForm, DayForm, CalendarUploadForm
from .tables import DayTypeTable, DayTable
from .filters import DayTypeFilter, DayFilter
from .utils import WorkCalendarParser
# Create your views here.
class DayTypeList(SingleTableMixin, ActionTableDeleteMixin, FilterView):
model = DayType
table_class = DayTypeTable
table_pagination = False
filterset_class = DayTypeFilter
template_name = 'workcal/daytype_list.html'
action_table_model = DayType
def get_table_kwargs(self):
if self.request.user.is_superuser:
return {}
return {
'exclude': ('delete', ),
}
class DayTypeCreate(CreateView):
model = DayType
form_class = DayTypeForm
class DayTypeDetail(DetailView):
model = DayType
class DayTypeUpdate(UpdateView):
model = DayType
form_class = DayTypeForm
class DayTypeDelete(DeleteMessageMixin, DeleteView):
model = DayType
success_url = reverse_lazy('workcal:daytype:list')
class DayList(SingleTableMixin, ActionTableDeleteMixin, FilterView):
model = Day
table_class = DayTable
table_pagination = False
filterset_class = DayFilter
template_name = 'workcal/day_list.html'
action_table_model = Day
def get_table_kwargs(self):
if self.request.user.is_superuser:
return {}
return {
'exclude': ('delete', ),
}
class DayCreate(CreateView):
model = Day
form_class = DayForm
def get_initial(self):
"""Получает дату из аргументов запроса
и добавяет к начальным данным формы.
"""
initial = super().get_initial()
if self.request.method == 'GET':
date = self.request.GET.get('date', '')
try:
date = datetime.date.fromisoformat(date)
except ValueError:
date = None
except AttributeError:
try:
date = datetime.date(*map(int, date.split('-')))
except ValueError:
date = None
if date is not None:
initial.update({'date': date})
return initial
class DayDetail(DetailView):
model = Day
class DayUpdate(UpdateView):
model = Day
form_class = DayForm
class DayDelete(DeleteMessageMixin, DeleteView):
model = Day
success_url = reverse_lazy('workcal:day:list')
class CalendarDetail(TemplateView):
template_name = "workcal/calendar_detail.html"
def get_context_data(self, **kwargs):
"""Изменяет контекст, контролируя год календаря
для отображения (должен быть в допустимых пределах).
"""
context = super().get_context_data(**kwargs)
year = context.get('year')
if not year or not datetime.MINYEAR <= year <= datetime.MAXYEAR:
year = datetime.date.today().year
context['year'] = year
return context
class CalendarUpload(FormView):
template_name = 'workcal/calendar_upload_form.html'
form_class = CalendarUploadForm
success_url = reverse_lazy('workcal:calendar:current')
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST, request.FILES)
if not form.is_valid():
return self.form_invalid(form)
csv_file = request.FILES["file"]
        # The uploaded file is too big
if csv_file.multiple_chunks():
form.add_error(
'file',
_("Uploaded file is too big ({0:.2f} MB).").format(
csv_file.size / (1000 * 1000),
),
)
return self.form_invalid(form)
        # Load and parse the data
try:
data = WorkCalendarParser(
csv_file.read().decode("utf-8"),
).get_days_list(form.cleaned_data['year'])
except Exception as error:
form.add_error(
'file',
_("Parsing error ({type}): {error}").format(
error=error,
type=type(error).__name__,
))
return self.form_invalid(form)
        # Nothing to process
if not data:
form.add_error(
'__all__',
_("Nothing to upload: data set is empty"),
)
return self.form_invalid(form)
        # Proceed to the data import
return self.form_valid(form, data)
def form_valid(self, form, data):
        # Days that already exist in the database
days_exists = list(Day.objects.filter(
date__year=form.cleaned_data['year'],
).values_list('date', flat=True))
        # Days to be added
days_raw = filter(
lambda x: x['date'] not in days_exists,
data,
)
log.info("Starting import CSV...")
errors = False
for day_raw in days_raw:
            # Add the days using the form
day_form = DayForm(day_raw)
if day_form.is_valid():
day_form.save()
else:
errors = True
log.error("{}: {} ".format(
model_to_dict(day_form.instance),
day_form.errors.as_data(),
))
if errors:
log.error("CSV import finished with errors")
messages.error(
self.request,
_("CSV import finished with errors (see more in logs)"),
)
else:
log.info("CSV import finished without errors")
messages.success(
self.request,
_("CSV import finished without errors"),
)
return super().form_valid(form)
|
StarcoderdataPython
|
8079606
|
<filename>certstream_analytics/transformers/base.py
"""
Transform the certificate data from certstream before passing it to the
processing pipeline.
"""
from abc import ABCMeta, abstractmethod
# pylint: disable=no-init,too-few-public-methods
class Transformer:
"""
Define the template of all transformer class.
"""
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, raw):
"""
Move along, nothing to see here.
"""
class PassthroughTransformer(Transformer):
"""
A dummy transformer that doesn't do anything.
"""
def apply(self, raw):
"""
Move along, nothing to see here.
"""
return raw
class CertstreamTransformer(Transformer):
"""
Transform data from certstream into something readily consumable by the
processing pipeline.
"""
def apply(self, raw):
"""
The format of the message from certstream can be found at their github
documentation.
So far, we are only interested in the domain names, the timestamps, and
        probably the content of the subject. So the returned structure is as
follows:
{
# These fields are extracted from certstream
cert_index: INTEGER,
seen: TIMESTAMP,
chain: [
ORGANIZATION
],
not_before: TIMESTAMP,
not_after: TIMESTAMP,
all_domains: [
SAN
],
# This is a place holder field which are used later by the
# analysers. Each analyser will append its result here.
analysers: [
{
analyser: ANALYSER NAME,
                    output: ANYTHING GOES HERE,
},
],
}
"""
filtered = {
'cert_index': raw['data']['cert_index'],
'seen': raw['data']['seen'],
'chain': [],
# The analyser result will be stored here later on
'analysers': [],
}
interested_fields = ['not_before', 'not_after', 'all_domains']
if raw['data']['leaf_cert']['all_domains']:
filtered.update({k: raw['data']['leaf_cert'][k] for k in interested_fields})
return filtered
return None
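# Minimal usage sketch for the transformers above. The `sample_raw` dict is a
# hand-made stand-in for a certstream message, trimmed to the fields this module
# reads; it is an illustrative assumption, not real certstream output.
if __name__ == '__main__':
    sample_raw = {
        'data': {
            'cert_index': 12345,
            'seen': 1546300800.0,
            'leaf_cert': {
                'not_before': 1546300800.0,
                'not_after': 1554076800.0,
                'all_domains': ['example.com', 'www.example.com'],
            },
        },
    }
    print(PassthroughTransformer().apply(sample_raw) is sample_raw)
    print(CertstreamTransformer().apply(sample_raw)['all_domains'])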
|
StarcoderdataPython
|
1946367
|
<filename>src/gpyts/syncGpyts/__init__.py
#!/usr/bin/python3
#MIT License
#Copyright (c) 2021 Ripe
import requests, random, json, time, os, io, re
from .. import config, errors
from typing import Union, List
from .. types import Translation, TextToSpeech
class Gpyts():
"""Gpyts is a library for Google translation and gTTS using Google Translation API.
"""
def __init__(self, tld: Union[str, List[str]] = None, endpoint: Union[str, List[str]] = None, client: str = None, labled: bool = True, proxy: str = None) -> None:
"""Configuration for Service Url and Client.
Note:
Provide tld, endpoint, client only if you know valid combination of values.
Example of tld(s):
co.uk, tl
Example of endpoint(s):
translate.google.com, client0.google.com, translate.googleapis.com
Example of client(s):
gtx, t
        Either use `tld` or `endpoint`; they won't work together. Just `tld` is enough for the most part, and even that is optional.
Args:
tld (str | List[str], Optional): Custom tld's you can provide like `com` or `co.uk`.
            endpoint (str | List[str], Optional): Custom endpoint url to be used (randomly chosen if multiple provided) instead of the default `endpoint`.
            client (str, Optional) : Custom client to be used instead of the default `client`.
            labled (bool, Optional): Return either labeled or indexed json instead of the default `method`.
proxy (str, optional) : Proxy to be used like `http://user:pass@ip:port`.
"""
self.__ioses = None
self.__tld = tld or ''
self.endpoint = config.tdlpoint if tld else endpoint or config.endpoint
self.client = client or config.client
self.__method = config.method[labled]
self.proxy = {re.match(r'^(http|https)://',proxy).group(1) : proxy} if proxy and re.match(r'^(http|https)://',proxy) else None
def translate(self, text: str, to_lang: str, from_lang: str = 'auto', i_enc: str = 'UTF-8', o_enc: str = 'UTF-8', web: bool = False) -> Translation:
"""Translate given text to target langauge.
Args:
text (str): Text to be translated.
to_lang (str): Target language code to be translated.
            from_lang (str, Optional): Source language code to be translated.
i_enc (str, Optional): Input encoding.
            o_enc (str, Optional): Output encoding.
web (bool, Optional) : Uses (scrap) mini version of google translate web instead of api.
Returns:
Translation (obj): Result class object of translation.
Raises:
FloodError: If google translation api gives http 503.
ConfigError: If `endpoint` or `client` is invalid.
InvalidLanguage: If given `to_lang` or `from_lang` is an unlisted language code.
"""
cfgvar = {
'q' : text,
'hl' : 'en',
'sl' : from_lang,
'tl' : to_lang,
'dt' : ['t','at','rm'],
'ie' : i_enc,
'oe' : o_enc,
'sp' : 'pbmt',
'client' : self.client
}
result = self.__request('https://{endpoint}{tld}/{method}'.format(
endpoint = random.choice(self.endpoint) if type(self.endpoint) == list else self.endpoint,
tld = random.choice(self.__tld) if type(self.__tld) == list else self.__tld,
method = 'm' if web else '%s_a/%s' % (config.key[1], self.__method)
),
var = self.__isvalid(cfgvar),
proxy = self.proxy
)
return Translation(self.__parsets(result) if web else json.loads(result))
def tts(self, text: str, lang: str, download: Union[str, bool, io.BytesIO] = './', slow: bool = False, i_enc: str = 'UTF-8') -> TextToSpeech:
"""Converts given Text to speech in target langauge.
Args:
text (str): Text to be converted.
lang (str): Target language code to be converted.
download (str, Optional) : Downloads to a specified path.
slow (bool, Optional) : Slow down the speech.
i_enc (str, Optional): Input encoding.
Returns:
TextToSpeech (obj): Result class object of tts.
Raises:
FloodError: If google translation api gives http 503.
ConfigError: If `endpoint` or `client` is invalid.
InvalidLanguage: If given `lang` is an unlisted language code.
"""
cfgvar = {
'q' : text,
'ie' : i_enc,
'hl' : 'en',
'tl' : lang,
'client': self.client or 'tw-ob',
'ttsspeed': 1.-slow or .3,
'total' : 1,
'idx': 0,
}
result = self.__request('https://{endpoint}{tld}/{method}'.format(
endpoint = random.choice(self.endpoint) if type(self.endpoint) == list else self.endpoint,
tld = random.choice(self.__tld) if type(self.__tld) == list else self.__tld,
method = '%s_tts' % config.key[1],
),
var = self.__isvalid(cfgvar),
proxy = self.proxy,
full = True
)
return TextToSpeech({'lang' : lang, 'text' : text, 'file' : self.__savetts(download, result.content) or result.url})
def iso(self, full: bool = False) -> dict:
"""Lists all supported iso langauge codes for both google translate (gts) and text2speech (tts).
Returns:
langs (dict of list[str]) : Having both `gts` and `tts`.
"""
return {'gts' : config.supported_gts_lang if full else config.supported_gts_lang.values(), 'tts' : config.supported_tts_lang}
def __isvalid(self, var: dict) -> dict:
"""Validates var
Args:
var (dict): Var to be validated,
"""
if not var['q']:
raise ValueError("Text can't be empty")
if not var.get('sl') and var['tl'] not in config.supported_tts_lang:
raise errors.InvalidLanguage("Unlisted target language code given. tts")
if var.get('tl') and var['tl'] not in config.supported_gts_lang.values():
raise errors.InvalidLanguage("Unlisted target language code given. gts")
if var.get('sl') and var['sl'] not in config.supported_gts_lang.values() and var['sl'] != 'auto':
raise errors.InvalidLanguage("Unlisted source language code given. gts")
return var
def __parsets(self, content: str) -> dict:
"""Parses translation from content
Args:
content (str): Content from which to be extracted.
"""
match = re.search(r"aria-label=\"Source text\".+value=\"(.*)\"><div class=\"translate-button-container\">.+<div class=\"result-container\">(.*)</div><div class=\"links-container\">", content, re.MULTILINE)
result = {}
if match:
result = {
'src' : match.group(1),
'sentences' : [{'trans' : match.group(2)}]
}
return result
def __savetts(self, path: Union[str, bool, io.BytesIO], payload: Union[bytes, str]):
"""Saves tts to local file
Args:
path (str): Path to save file.
payload (byte): Content of the tts output.
"""
if type(path) == io.BytesIO:
path.write(payload)
        elif path or path is None:
            path = path or './'
            paths = path.rsplit('/', 1)
if len(paths)> 1:
os.makedirs(paths[0], exist_ok=True)
if len(paths)> 1 and not paths[1]:
path += 'text2speech.mp3'
open(path, 'wb').write(payload)
else:
path = False
return path
def __request(self, url: str, var: dict, proxy: dict, full: bool = False) -> dict:
"""Request to google translator api
Args:
var (dict): Configuration arguemnts for translator.
"""
self.__ioses = self.__ioses or requests.Session()
response = self.__ioses.get(url, params = var, proxies = proxy, headers = config.headers)
if response.status_code == 200:
return response if full else response.content.decode('UTF-8')
elif response.status_code in [404, 403, 408, 504]:
raise errors.ConfigError('Invalid endpoint url or client given.')
elif response.status_code in [503]:
raise errors.FloodError('Too many requests please try later.')
else:
response.raise_for_status()
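# Minimal usage sketch (requires network access). The text, language codes and
# output path are arbitrary illustration values and are assumed to be present in
# the library's supported language lists.
if __name__ == '__main__':
    gp = Gpyts()
    print(gp.translate('Hello, world!', to_lang='fr'))
    gp.tts('Hello, world!', lang='en', download='./hello.mp3')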
|
StarcoderdataPython
|
284067
|
<filename>KongFuPanda/Classification/KNN/KNN.py
from numpy import *
import operator
class knn:
    # KNN: compute distances to all training samples, tally the k nearest labels, and sort
    '''
    inX: input vector to classify
    dataSet: training data set
    labels: label list
    k: number of nearest neighbours
    '''
def classify(self, inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = tile(inX, (dataSetSize, 1)) - dataSet
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
    '''
    Process the data: format the offline text file into standard data.
    Returns: the training data set and the label list.
    '''
def getFile(self, fileName):
fr = open(fileName)
numberOfLines = len(fr.readlines()) # get the number of lines in the file
returnMat = zeros((numberOfLines, 3)) # prepare matrix to return
classLabelVector = [] # prepare labels return
fr = open(fileName)
index = 0
for line in fr.readlines():
line = line.strip()
listFromLine = line.split('\t')
returnMat[index, :] = listFromLine[0:3]
classLabelVector.append(int(listFromLine[-1]))
index += 1
return returnMat, classLabelVector
    '''
    Normalize feature values:
    scale every value into the range (0, 1).
    '''
def autoNorm(self, dataSet):
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
ranges = maxVals - minVals
m = dataSet.shape[0]
normDataSet = dataSet - tile(minVals, (m, 1))
normDataSet = normDataSet / tile(ranges, (m, 1)) # element wise divide
return normDataSet, ranges, minVals
    '''
    Image processing: flatten a 32x32 text image into a 1x1024 vector.
    '''
def imgVector(self,fileName):
returnVect = zeros((1, 1024))
fr = open(fileName)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0, 32 * i + j] = int(lineStr[j])
return returnVect
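# Minimal usage sketch. 'datingTestSet2.txt' is a hypothetical tab-separated file
# (three numeric features plus an integer label per line) in the layout that
# getFile() expects; point it at a real data file before running.
if __name__ == '__main__':
    clf = knn()
    data, labels = clf.getFile('datingTestSet2.txt')
    norm_data, ranges, min_vals = clf.autoNorm(data)
    # classify the first sample against the remaining samples with k = 3
    predicted = clf.classify(norm_data[0, :], norm_data[1:, :], labels[1:], 3)
    print('predicted label:', predicted)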
|
StarcoderdataPython
|
9697041
|
<filename>utils/algo_utils.py
"""Common functions for the algorithms.
"""
__author__ = "<NAME>"
__version__ = "1.0"
from crack.utils.structures import merge_dicts
def init_algos_stats():
records = {
"time": 0,
"operations": 0,
"per_algo": [],
"keys": {},
"levels": [0]
}
return records
def record_algos_stats(models, records, algo, cut=None, imb=None, t=None, operations=None, **algopt):
stats = {
"algo": algo,
"cut" : cut,
"imb" : imb,
"time": t,
"operations": operations,
}
if t is not None:
records["time"] += t
if operations is not None:
records["operations"] += operations
records["per_algo"].append(merge_dicts(stats, algopt))
|
StarcoderdataPython
|
1969985
|
from .doc_cache import DocCache
|
StarcoderdataPython
|
1837092
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
#The Data
#kdd = pd.read_csv('kddcup99_csv', names=kdd_cols)
#kdd_t = pd.read_csv('KDDTest+.txt', names=kdd_cols)
kdd = pd.read_csv('kddcup99_csv.csv')
# Test set: 'KDDTest+.txt' is assumed to share the training CSV's column layout
kdd_t = pd.read_csv('KDDTest+.txt', names=kdd.columns)
kdd.head()
# Column order used to re-align the train and test frames after one-hot encoding
kdd_cols = [kdd.columns[0]] + sorted(list(set(kdd.protocol_type.values))) + sorted(list(set(kdd.service.values))) + sorted(list(set(kdd.flag.values))) + kdd.columns[4:].tolist()
attack_map = [x.strip().split() for x in open('training_attack_types_binary', 'r')]
attack_map = {k:v for (k,v) in attack_map}
attack_map
kdd['label'] = kdd['label'].replace(attack_map)
#kdd_t['class'] = kdd_t['class'].replace(attack_map)
##############################################
def cat_encode(df, col):
return pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col].values)], axis=1)
def log_trns(df, col):
return df[col].apply(np.log1p)
cat_lst = ['protocol_type', 'service', 'flag']
for col in cat_lst:
kdd = cat_encode(kdd, col)
kdd_t = cat_encode(kdd_t, col)
log_lst = ['duration', 'src_bytes', 'dst_bytes']
for col in log_lst:
kdd[col] = log_trns(kdd, col)
kdd_t[col] = log_trns(kdd_t, col)
kdd = kdd[kdd_cols]
for col in kdd_cols:
if col not in kdd_t.columns:
kdd_t[col] = 0
kdd_t = kdd_t[kdd_cols]
kdd.head()
##############################################
difficulty = kdd.pop('difficulty')
target = kdd.pop('class')
y_diff = kdd_t.pop('difficulty')
y_test = kdd_t.pop('class')
target = pd.get_dummies(target)
y_test = pd.get_dummies(y_test)
target
y_test
target = target.values
train = kdd.values
test = kdd_t.values
y_test = y_test.values
#Normalization
min_max_scaler = MinMaxScaler()
train = min_max_scaler.fit_transform(train)
test = min_max_scaler.transform(test)
train.shape
for idx, col in enumerate(list(kdd.columns)):
print(idx, col)
#The Model
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers import Dense, Activation, Reshape, Dropout, BatchNormalization
from keras.layers.embeddings import Embedding
from keras.callbacks import TensorBoard
def build_network():
models = []
model = Sequential()
model.add(Dense(1024, input_dim=122, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(.01))
model.add(Dense(768, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(.01))
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(.01))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(.01))
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(.01))
model.add(Dense(2))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
NN = build_network()
tsb = TensorBoard(log_dir='./logs')
NN.summary()
#early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
#NN.fit(x=train, y=target, epochs=500, validation_split=0.1, batch_size=128, callbacks=[early_stopping, tsb])
NN.fit(x=train, y=target, epochs=10, validation_split=0.1, batch_size=64, callbacks=[tsb])
# anaconda terminal directory D:\> tensorboard --logdir=logs
#http://localhost:6006/
#The Performance
from sklearn.metrics import confusion_matrix
preds = NN.predict(test)
pred_lbls = np.argmax(preds, axis=1)
true_lbls = np.argmax(y_test, axis=1)
NN.evaluate(test, y_test)
confusion_matrix(true_lbls, pred_lbls)
from sklearn.metrics import f1_score
f1_score(true_lbls, pred_lbls, average='weighted')
#output
NN.save("DNN_NSLKDD_model_epochs_500_001v2.h5")
#new pre
|
StarcoderdataPython
|
3277082
|
# -*- coding:utf-8 -*-
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser, PermissionsMixin
from django.db.models import Model
from django.db.models.fields import (
BigIntegerField, BooleanField, EmailField, BigAutoField, DateTimeField,
CharField, GenericIPAddressField, IPAddressField
)
from apps.util import FixedCharField
class UserManager(BaseUserManager):
def _create_user(self, email, password, **kwargs):
user = self.model(email=email, **kwargs)
user.set_password(password)
user.save()
return user
def create_user(self, email, password, **kwargs):
kwargs['is_superuser'] = False
return self._create_user(email, password, **kwargs)
def create_superuser(self, email, password, **kwargs):
kwargs['is_superuser'] = True
return self._create_user(email, password, **kwargs)
class User(AbstractUser, PermissionsMixin):
username = FixedCharField(
max_length=20,
null=False,
blank=False,
charset='utf8mb4',
collade='utf8mb4_general_ci',
verbose_name='昵称'
)
first_name = None
last_name = None
is_author = BooleanField(
null=False,
blank=True,
default=False,
verbose_name='是否可以创作'
)
is_superuser = BooleanField(null=False, default=True)
is_active = BooleanField(null=False, default=True)
email = EmailField(unique=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', ]
LOGIN_USERNAME_FIELDS = ['email', ]
objects = UserManager()
class UserDetails(Model):
id = BigIntegerField(
null=False, blank=False, verbose_name='账号', primary_key=True
    ) # foreign key to the User record
motto = CharField(max_length=255, verbose_name='个性签名')
img = CharField(
null=True, max_length=255, blank=False, verbose_name='头像'
)
favorite_color = FixedCharField(
max_length=6,
null=False,
blank=False,
charset='ascii',
verbose_name='页面颜色'
)
gender = BooleanField(
null=True,
blank=True,
verbose_name='性别',
choices=((True, '女'), (False, '男'), (None, '未指定'))
)
class SignInInfo(Model):
id = BigAutoField(verbose_name='ID', primary_key=True)
create_time = DateTimeField(
null=False, blank=True,
verbose_name='登录时间', auto_now_add=True
)
ipv4 = FixedCharField(
max_length=4, null=True, blank=True, default=None, verbose_name='IPv4'
)
mac = FixedCharField(
max_length=6, blank=True, null=True,
default=None, verbose_name='mac地址'
)
gps = CharField(
max_length=255, blank=True, null=True,
default=None, verbose_name='地理位置'
)
agent = CharField(
max_length=255, blank=True,
default=None, verbose_name='设备信息'
)
|
StarcoderdataPython
|
5115827
|
#
# Class for constant active material
#
import pybamm
from .base_active_material import BaseModel
class Constant(BaseModel):
"""Submodel for constant active material
Parameters
----------
param : parameter class
The parameters to use for this submodel
domain : str
The domain of the model either 'Negative' or 'Positive'
options : dict
Additional options to pass to the model
**Extends:** :class:`pybamm.active_material.BaseModel`
"""
def get_fundamental_variables(self):
if self.domain == "Negative":
x_n = pybamm.standard_spatial_vars.x_n
eps_solid = self.param.epsilon_s_n(x_n)
deps_solid_dt = pybamm.FullBroadcast(
0, "negative electrode", "current collector"
)
elif self.domain == "Positive":
x_p = pybamm.standard_spatial_vars.x_p
eps_solid = self.param.epsilon_s_p(x_p)
deps_solid_dt = pybamm.FullBroadcast(
0, "positive electrode", "current collector"
)
variables = self._get_standard_active_material_variables(eps_solid)
variables.update(
self._get_standard_active_material_change_variables(deps_solid_dt)
)
return variables
|
StarcoderdataPython
|
11349265
|
"""Test that names are kept unique in data."""
# --- import -------------------------------------------------------------------------------------
import pytest
import numpy as np
import WrightTools as wt
# --- test ---------------------------------------------------------------------------------------
@pytest.mark.skip()
def test_exception():
d = wt.Data()
points = np.linspace(0, 1, 51)
d.create_axis(name="w1", points=points, units="eV")
try:
d.create_channel(name="w1")
except wt.exceptions.NameNotUniqueError:
assert True
else:
assert False
|
StarcoderdataPython
|
11299868
|
# -*- coding: utf-8 -*-
"""Master Controller Service.
This version polls REDIS Events rather than the database directly.
"""
import argparse
import random
import time
from typing import List
import urllib
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from sip_config_db._events.event import Event
from sip_config_db.states import SDPState, ServiceState
from sip_config_db.states.services import get_service_state_list
from sip_logging import init_logger
from .__init__ import LOG, __service_id__, __service_name__
# Create a collector registry for alarm gauges
COLLECTOR_REGISTRY = CollectorRegistry()
# Create a gauge for service state alarms. Its normal value is zero and
# we set it to 1 if there is a service in the alarm state.
SIP_STATE_ALARM = Gauge('sip_state', 'Gauge for generating SIP state alarms',
registry=COLLECTOR_REGISTRY)
def _update_service_current_state(service: ServiceState):
"""Update the current state of a service.
Updates the current state of services after their target state has changed.
Args:
service (ServiceState): Service state object to update
"""
LOG.debug("Setting current state from target state for %s", service.id)
service.update_current_state(service.target_state)
def _update_services_instant_gratification(sdp_target_state: str):
"""For demonstration purposes only.
This instantly updates the services current state with the
target state, rather than wait on them or schedule random delays
in bringing them back up.
"""
service_states = get_service_state_list()
# Set the target state of services
for service in service_states:
if service.current_state != sdp_target_state:
LOG.debug('Setting the current state of %s to be %s', service.id,
sdp_target_state)
service.update_current_state(sdp_target_state)
# Should we be picking up the events?
def _update_services_target_state(sdp_target_state: str):
"""Update the target states of services based on SDP target state.
When we get a new target state this function is called to ensure
components receive the target state(s) and/or act on them.
Args:
sdp_target_state (str): Target state of SDP
"""
service_states = get_service_state_list()
# Set the target state of services
for service in service_states:
if service.current_state != sdp_target_state:
LOG.debug('Setting the target state of %s to be %s', service.id,
sdp_target_state)
service.update_target_state(sdp_target_state)
# The function below should not be called here as it is updating the
# **CURRENT** state of services!
# LOG.debug("Simulate services changing state ...")
# _update_services_instant_gratification(sdp_target_state)
def _handle_sdp_target_state_updated(sdp_state: SDPState):
"""Respond to an SDP target state change event.
This function sets the current state of SDP to the target state if that is
possible.
TODO(BMo) This cant be done as a blocking function as it is here!
"""
LOG.info('Handling SDP target state updated event...')
LOG.info('SDP target state: %s', sdp_state.target_state)
# Map between the SDP target state and the service target state?
if sdp_state.target_state == 'off':
_update_services_target_state('off')
# TODO: Work out if the state of SDP has reached the target state.
# If yes, update the current state.
sdp_state.update_current_state(sdp_state.target_state)
def _parse_args():
"""Command line parser."""
parser = argparse.ArgumentParser(description='{} service.'.
format(__service_id__))
parser.add_argument('--random_errors', action='store_true',
help='Enable random errors')
parser.add_argument('-v', action='store_true',
help='Verbose mode (enable debug printing)')
parser.add_argument('-vv', action='store_true', help='Extra verbose mode')
args = parser.parse_args()
if args.vv:
init_logger(log_level='DEBUG', show_log_origin=True)
elif args.v:
init_logger(logger_name='sip.ec.master_controller', log_level='DEBUG')
else:
init_logger(log_level='INFO')
return args
def _init(sdp_state: SDPState):
"""Initialise the Master Controller Service.
Performs the following actions:
1. Registers ServiceState objects into the Config Db.
2. If initialising for the first time (unknown state),
sets the SDPState to 'init'
3. Initialises the state of Services, if running for the first time
(their state == unknown)
4. Waits some time and sets the Service states to 'on'. This emulates
waiting for Services to become available.
5. Once all services are 'on', sets the SDP state to 'standby'.
"""
# Parse command line arguments.
LOG.info("Initialising: %s", __service_id__)
# FIXME(BMo) There is a bug when SDP or services 'start' in the 'off'
# state. At the moment it is impossible to transition out of this.
# FIXME(BMo) **Hack** Register all services or if already registered do
# nothing (this is handled by the ServiceState object).
_services = [
"ExecutionControl:AlarmReceiver:1.0.0",
"ExecutionControl:AlertManager:1.0.0",
"ExecutionControl:ConfigurationDatabase:5.0.1",
"ExecutionControl:MasterController:1.3.0",
"ExecutionControl:ProcessingController:1.2.6",
"ExecutionControl:ProcessingBlockController:1.3.0",
"TangoControl:Database:1.0.4",
"TangoControl:MySQL:1.0.3",
"TangoControl:SDPMaster:1.2.1",
"TangoControl:Subarrays:1.2.0",
"TangoControl:ProcessingBlocks:1.2.0",
"Platform:Kafka:2.1.1",
"Platform:Prometheus:1.0.0",
"Platform:PrometheusPushGateway:0.7.0",
"Platform:RedisCommander:210.0.0",
"Platform:Zookeeper:3.4.13"
]
for service_id in _services:
subsystem, name, version = service_id.split(':')
ServiceState(subsystem, name, version)
# If the SDP state is 'unknown', mark the SDP state as init.
# FIXME(BMo) This is not right as we want to allow for recovery from
    # failure without just reinitialising...!? ie. respect the old state
# NOTE: If the state is 'off' we will want to reset the database
# with 'skasip_config_db_init --clear'
if sdp_state.current_state in ['unknown', 'off']:
try:
LOG.info("Setting the SDPState to 'init'")
sdp_state.update_current_state('init', force=True)
except ValueError as error:
LOG.critical('Unable to set the State of SDP to init! %s',
str(error))
LOG.info("Updating Service States")
service_state_list = get_service_state_list()
# FIXME(BMo) **Hack** Mark all Services in the 'unknown' state as
# initialising.
for service_state in service_state_list:
if service_state.current_state in ['unknown', 'off']:
service_state.update_current_state('init', force=True)
# FIXME(BMo) **Hack** After 'checking' that the services are 'on' set
# their state on 'on' after a short delay.
# FIXME(BMo) This check should not be serialised!!! (should be part of the
# event loop)
for service_state in service_state_list:
if service_state.current_state == 'init':
time.sleep(random.uniform(0, 0.2))
service_state.update_current_state('on')
    # FIXME(BMo): **Hack** Now that all services are on, set the state of SDP to
# 'standby'
# FIXME(BMo) This should also be part of the event loop.
services_on = [service.current_state == 'on'
for service in service_state_list]
if all(services_on):
LOG.info('All Services are online!.')
sdp_state.update_current_state('standby')
else:
LOG.critical('Master Controller failed to initialise.')
return service_state_list
def _process_event(event: Event, sdp_state: SDPState,
service_states: List[ServiceState]):
"""Process a SDP state change event."""
LOG.debug('Event detected! (id : "%s", type: "%s", data: "%s")',
event.object_id, event.type, event.data)
if event.object_id == 'SDP' and event.type == 'current_state_updated':
LOG.info('SDP current state updated, no action required!')
if event.object_id == 'SDP' and event.type == 'target_state_updated':
LOG.info("SDP target state changed to '%s'",
sdp_state.target_state)
# If the sdp is already in the target state do nothing
if sdp_state.target_state == sdp_state.current_state:
LOG.warning('SDP already in %s state',
sdp_state.current_state)
return
# Check that a transition to the target state is allowed in the
# current state.
if not sdp_state.is_target_state_allowed(sdp_state.target_state):
LOG.error('Transition to %s is not allowed when in state %s',
sdp_state.target_state, sdp_state.current_state)
sdp_state.target_state = sdp_state.current_state
return
_update_services_target_state(sdp_state.target_state)
# If asking SDP to turn off, also turn off services.
if sdp_state.target_state == 'off':
LOG.info('Turning off services!')
for service_state in service_states:
service_state.update_target_state('off')
service_state.update_current_state('off')
LOG.info('Processing target state change request ...')
time.sleep(0.1)
LOG.info('Done processing target state change request!')
        # Assuming that the SDP has responded to the target
        # state command by now, set the current state
# to the target state.
sdp_state.update_current_state(sdp_state.target_state)
if sdp_state.current_state == 'alarm':
LOG.debug('raising SDP state alarm')
SIP_STATE_ALARM.set(1)
else:
SIP_STATE_ALARM.set(0)
try:
# FIXME(BMo) the pushgateway host should not be hardcoded!
push_to_gateway('platform_pushgateway:9091', job='SIP',
registry=COLLECTOR_REGISTRY)
except urllib.error.URLError:
LOG.warning("Unable to connect to the Alarms service!")
# TODO(BMo) function to watch for changes in the \
# current state of services and update the state of SDP
# accordingly.
def _process_state_change_events():
"""Process events relating to the overall state of SDP.
This function starts and event loop which continually checks for
and responds to SDP state change events.
"""
sdp_state = SDPState()
service_states = get_service_state_list()
state_events = sdp_state.get_event_queue(subscriber=__service_name__)
state_is_off = sdp_state.current_state == 'off'
counter = 0
while True:
time.sleep(0.1)
if not state_is_off:
# *Hack* to avoid problems with historical events not being
# correctly handled by EventQueue.get(), replay old events every
# 10s
# - see issue #54
if counter % 1000 == 0:
LOG.debug('Checking published events ... %d', counter / 1000)
_published_events = state_events.get_published_events(
process=True)
for _state_event in _published_events:
_process_event(_state_event, sdp_state, service_states)
else:
_state_event = state_events.get()
if _state_event:
_process_event(_state_event, sdp_state, service_states)
state_is_off = sdp_state.current_state == 'off'
counter += 1
def main():
"""Merge temp_main and main."""
# Parse command line args.
_parse_args()
LOG.info("Starting: %s", __service_id__)
# Subscribe to state change events.
# FIXME(BMo) This API is unfortunate as it looks like we are only
# subscribing to sdp_state events.
LOG.info('Subscribing to state change events (subscriber = %s)',
__service_name__)
sdp_state = SDPState()
_ = sdp_state.subscribe(subscriber=__service_name__)
# Initialise the service.
_ = _init(sdp_state)
LOG.info('Finished initialising!')
# Enter a pseudo event-loop (using Sched) to monitor for state change
# events
# (Also random set services into a fault or alarm state if enabled)
LOG.info('Responding to state change events ...')
try:
_process_state_change_events()
except ValueError as error:
LOG.critical('Value error: %s', str(error))
except KeyboardInterrupt as err:
LOG.debug('Keyboard Interrupt %s', err)
LOG.info('Exiting!')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6607766
|
<reponame>JackKelly/slicedpy
from __future__ import print_function, division
from pda.channel import Channel
from slicedpy.appliance import Appliance
import matplotlib.pyplot as plt
from os import path
DATA_DIR = '/data/mine/domesticPowerData/BellendenRd/wattsUp'
def train_appliance(label, sig_data_filenames):
"""
Args:
* label (str): e.g. 'washing machine'
* sig_data_filenames (list of strings): filenames of signature data files
"""
app = Appliance(label)
# Train
for f_name in sig_data_filenames:
print('Loading', f_name)
chan = Channel()
chan.load_wattsup(path.join(DATA_DIR, f_name))
sps = app.train_on_single_example(chan)
# chan.series.diff().hist()
# Plot raw power data
fig1, ax1 = plt.subplots()
chan.plot(ax1, date_format='%H:%M:%S')
ax1.set_title(f_name)
# Plot power segments
for ps in sps: # power segment in signature power segment
ps.plot(ax1)
# Draw power state graph
app.draw_power_state_graph()
# Print out some useful info
for node in app.power_state_graph.nodes():
print(node)
print("node", node.power.get_model().mean, "essential=", node.essential)
# train_appliance('washing machine', ['washingmachine1.csv', 'washingmachine2.csv'])
# train_appliance('tv', ['tv1.csv'])
# train_appliance('toaster', ['toaster1.csv'])
# train_appliance('kettle', ['kettle1.csv'])
train_appliance('breadmaker', ['breadmaker1.csv'])
# train_appliance('fridge', ['fridge8May2012.csv'])
plt.show()
|
StarcoderdataPython
|
5187242
|
# -*- coding: utf-8 -*-
"""生成初始的 kMandarin_8105.txt"""
from merge_unihan import parse_pinyins, code_to_hanzi
def parse_china_x():
with open('tools/china-8105-06062014.txt') as fp:
for line in fp:
line = line.strip()
if line.startswith('#') or not line:
continue
yield line.split()[0]
def parse_zdic():
with open('zdic.txt') as fp:
return parse_pinyins(fp)
def parse_kmandain():
with open('pinyin.txt') as fp:
return parse_pinyins(fp)
def diff(kmandarin, zdic, commons):
for key in commons:
hanzi = code_to_hanzi(key)
if key in kmandarin:
value = kmandarin[key][0]
if key in zdic and value != zdic[key][0]:
yield '{0}: {1} # {2} -> {3}'.format(
key, value, hanzi, zdic[key][0]
)
else:
yield '{0}: {1} # {2}'.format(key, value, hanzi)
elif key in zdic:
value = zdic[key][0]
yield '{0}: {1} # {2}'.format(key, value, hanzi)
else:
yield '# {0}: {1} # {2}'.format(key, '<-', hanzi)
if __name__ == '__main__':
zdic = parse_zdic()
kmandarin = parse_kmandain()
commons = parse_china_x()
lst = diff(kmandarin, zdic, commons)
for x in lst:
print(x)
|
StarcoderdataPython
|
6692802
|
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.db import IntegrityError
import json
from Poem.api.internal_views.utils import one_value_inline, two_value_inline, \
inline_metric_for_db
from Poem.api.views import NotFound
from Poem.helpers.history_helpers import create_history, create_comment, \
update_comment
from Poem.poem.models import Metric, TenantHistory
from Poem.poem_super_admin import models as admin_models
from Poem.tenants.models import Tenant
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from tenant_schemas.utils import get_public_schema_name, schema_context
def update_metrics(metrictemplate, name, probekey):
schemas = list(Tenant.objects.all().values_list('schema_name', flat=True))
schemas.remove(get_public_schema_name())
for schema in schemas:
with schema_context(schema):
try:
met = Metric.objects.get(name=name, probekey=probekey)
met.name = metrictemplate.name
met.probeexecutable = metrictemplate.probeexecutable
met.parent = metrictemplate.parent
met.attribute = metrictemplate.attribute
met.dependancy = metrictemplate.dependency
met.flags = metrictemplate.flags
met.files = metrictemplate.files
met.parameter = metrictemplate.parameter
met.fileparameter = metrictemplate.fileparameter
if metrictemplate.config:
for item in json.loads(metrictemplate.config):
if item.split(' ')[0] == 'path':
objpath = item
metconfig = []
for item in json.loads(met.config):
if item.split(' ')[0] == 'path':
metconfig.append(objpath)
else:
metconfig.append(item)
met.config = json.dumps(metconfig)
met.save()
history = TenantHistory.objects.filter(
object_id=met.id,
content_type=ContentType.objects.get_for_model(Metric)
)[0]
history.serialized_data = serializers.serialize(
'json', [met],
use_natural_foreign_keys=True,
use_natural_primary_keys=True
)
history.object_repr = met.__str__()
history.save()
except Metric.DoesNotExist:
continue
class ListMetricTemplates(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, name=None):
if name:
metrictemplates = admin_models.MetricTemplate.objects.filter(
name=name
)
if metrictemplates.count() == 0:
raise NotFound(status=404, detail='Metric template not found')
else:
metrictemplates = admin_models.MetricTemplate.objects.all()
results = []
for metrictemplate in metrictemplates:
config = two_value_inline(metrictemplate.config)
parent = one_value_inline(metrictemplate.parent)
probeexecutable = one_value_inline(metrictemplate.probeexecutable)
attribute = two_value_inline(metrictemplate.attribute)
dependency = two_value_inline(metrictemplate.dependency)
flags = two_value_inline(metrictemplate.flags)
files = two_value_inline(metrictemplate.files)
parameter = two_value_inline(metrictemplate.parameter)
fileparameter = two_value_inline(metrictemplate.fileparameter)
ostag = []
if metrictemplate.probekey:
for repo in metrictemplate.probekey.package.repos.all():
ostag.append(repo.tag.name)
if metrictemplate.probekey:
probeversion = metrictemplate.probekey.__str__()
else:
probeversion = ''
results.append(dict(
id=metrictemplate.id,
name=metrictemplate.name,
mtype=metrictemplate.mtype.name,
ostag=ostag,
probeversion=probeversion,
parent=parent,
probeexecutable=probeexecutable,
config=config,
attribute=attribute,
dependency=dependency,
flags=flags,
files=files,
parameter=parameter,
fileparameter=fileparameter
))
results = sorted(results, key=lambda k: k['name'])
if name:
del results[0]['ostag']
return Response(results[0])
else:
return Response(results)
def post(self, request):
if request.data['parent']:
parent = json.dumps([request.data['parent']])
else:
parent = ''
if request.data['probeexecutable']:
probeexecutable = json.dumps([request.data['probeexecutable']])
else:
probeexecutable = ''
try:
if request.data['mtype'] == 'Active':
mt = admin_models.MetricTemplate.objects.create(
name=request.data['name'],
mtype=admin_models.MetricTemplateType.objects.get(
name=request.data['mtype']
),
probekey=admin_models.ProbeHistory.objects.get(
name=request.data['probeversion'].split(' ')[0],
package__version=request.data['probeversion'].split(
' '
)[1][1:-1]
),
parent=parent,
probeexecutable=probeexecutable,
config=inline_metric_for_db(request.data['config']),
attribute=inline_metric_for_db(request.data['attribute']),
dependency=inline_metric_for_db(request.data['dependency']),
flags=inline_metric_for_db(request.data['flags']),
files=inline_metric_for_db(request.data['files']),
parameter=inline_metric_for_db(request.data['parameter']),
fileparameter=inline_metric_for_db(
request.data['fileparameter']
)
)
else:
mt = admin_models.MetricTemplate.objects.create(
name=request.data['name'],
mtype=admin_models.MetricTemplateType.objects.get(
name=request.data['mtype']
),
parent=parent,
flags=inline_metric_for_db(request.data['flags'])
)
if request.data['cloned_from']:
clone = admin_models.MetricTemplate.objects.get(
id=request.data['cloned_from']
)
comment = 'Derived from ' + clone.name
create_history(mt, request.user.username, comment=comment)
else:
create_history(mt, request.user.username)
return Response(status=status.HTTP_201_CREATED)
except IntegrityError:
return Response(
{'detail':
'Metric template with this name already exists.'},
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request):
metrictemplate = admin_models.MetricTemplate.objects.get(
id=request.data['id']
)
old_name = metrictemplate.name
old_probekey = metrictemplate.probekey
if request.data['parent']:
parent = json.dumps([request.data['parent']])
else:
parent = ''
if request.data['probeexecutable']:
probeexecutable = json.dumps([request.data['probeexecutable']])
else:
probeexecutable = ''
if request.data['probeversion']:
new_probekey = admin_models.ProbeHistory.objects.get(
name=request.data['probeversion'].split(' ')[0],
package__version=request.data['probeversion'].split(
' '
)[1][1:-1]
)
else:
new_probekey = None
try:
if request.data['mtype'] == 'Active' and \
old_probekey != new_probekey:
metrictemplate.name = request.data['name']
metrictemplate.probekey = new_probekey
metrictemplate.parent = parent
metrictemplate.probeexecutable = probeexecutable
metrictemplate.config = inline_metric_for_db(
request.data['config']
)
metrictemplate.attribute = inline_metric_for_db(
request.data['attribute']
)
metrictemplate.dependency = inline_metric_for_db(
request.data['dependency']
)
metrictemplate.flags = inline_metric_for_db(
request.data['flags']
)
metrictemplate.files = inline_metric_for_db(
request.data['files']
)
metrictemplate.parameter = inline_metric_for_db(
request.data['parameter']
)
metrictemplate.fileparameter = inline_metric_for_db(
request.data['fileparameter']
)
metrictemplate.save()
create_history(metrictemplate, request.user.username)
else:
new_data = {
'name': request.data['name'],
'probekey': new_probekey,
'mtype': admin_models.MetricTemplateType.objects.get(
name=request.data['mtype']
),
'parent': parent,
'probeexecutable': probeexecutable,
'config': inline_metric_for_db(request.data['config']),
'attribute': inline_metric_for_db(
request.data['attribute']
),
'dependency': inline_metric_for_db(
request.data['dependency']
),
'flags': inline_metric_for_db(request.data['flags']),
'files': inline_metric_for_db(request.data['files']),
'parameter': inline_metric_for_db(
request.data['parameter']
),
'fileparameter': inline_metric_for_db(
request.data['fileparameter']
)
}
admin_models.MetricTemplate.objects.filter(
id=request.data['id']
).update(**new_data)
new_data.update({
'version_comment': update_comment(
admin_models.MetricTemplate.objects.get(
id=request.data['id']
)
)
})
admin_models.MetricTemplateHistory.objects.filter(
name=old_name, probekey=old_probekey
).update(**new_data)
mt = admin_models.MetricTemplate.objects.get(
pk=request.data['id']
)
update_metrics(mt, old_name, old_probekey)
return Response(status=status.HTTP_201_CREATED)
except IntegrityError:
return Response(
{'detail': 'Metric template with this name already exists.'},
status=status.HTTP_400_BAD_REQUEST
)
def delete(self, request, name=None):
schemas = list(Tenant.objects.all().values_list('schema_name',
flat=True))
schemas.remove(get_public_schema_name())
if name:
try:
mt = admin_models.MetricTemplate.objects.get(name=name)
for schema in schemas:
with schema_context(schema):
try:
admin_models.History.objects.filter(
object_id=mt.id,
content_type=ContentType.objects.get_for_model(
mt)
).delete()
m = Metric.objects.get(name=name)
TenantHistory.objects.filter(
object_id=m.id,
content_type=ContentType.objects.get_for_model(
m
)
).delete()
m.delete()
except Metric.DoesNotExist:
pass
mt.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except admin_models.MetricTemplate.DoesNotExist:
raise NotFound(status=404, detail='Metric template not found')
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ListMetricTemplatesForImport(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request):
metrictemplates = admin_models.MetricTemplate.objects.all().order_by(
'name'
)
results = []
for mt in metrictemplates:
vers = admin_models.MetricTemplateHistory.objects.filter(
object_id=mt
).order_by('-date_created')
if mt.probekey:
probeversion = mt.probekey.__str__()
tags = set()
probeversion_dict = dict()
for ver in vers:
for repo in ver.probekey.package.repos.all():
tags.add(repo.tag.name)
if repo.tag.name not in probeversion_dict:
probeversion_dict.update(
{
repo.tag.name: ver.probekey.__str__()
}
)
tags = list(tags)
if 'CentOS 6' in probeversion_dict:
centos6_probeversion = probeversion_dict['CentOS 6']
else:
centos6_probeversion = ''
if 'CentOS 7' in probeversion_dict:
centos7_probeversion = probeversion_dict['CentOS 7']
else:
centos7_probeversion = ''
else:
tags = list(admin_models.OSTag.objects.all().values_list(
'name', flat=True
))
probeversion = ''
centos6_probeversion = ''
centos7_probeversion = ''
tags.sort()
results.append(
dict(
name=mt.name,
mtype=mt.mtype.name,
probeversion=probeversion,
centos6_probeversion=centos6_probeversion,
centos7_probeversion=centos7_probeversion,
ostag=tags
)
)
return Response(results)
class ListMetricTemplatesForProbeVersion(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, probeversion):
if probeversion:
metrics = admin_models.MetricTemplate.objects.filter(
probekey__name=probeversion.split('(')[0],
probekey__package__version=probeversion.split('(')[1][0:-1]
)
if metrics.count() == 0:
raise NotFound(status=404, detail='Metrics not found')
else:
return Response(
metrics.order_by('name').values_list('name', flat=True)
)
class ListMetricTemplateTypes(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request):
types = admin_models.MetricTemplateType.objects.all().values_list(
'name', flat=True
)
return Response(types)
|
StarcoderdataPython
|
6409283
|
<filename>main.py
import sys, random
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
quit = False
max_number = 15
while not quit:
    random_number = random.randint(1, max_number)
    count = 0
    number = -1
    while number != random_number:
        number = input("Go ahead and guess a number between 1 and {}: ".format(max_number))
        if not number.isdigit():
            print("Hey! Guess a number!")
        else:
            number = int(number)
            count = count + 1
            if number > random_number:
                print("Sorry, that's not correct. That's okay, try again!")
                print("That's too high!")
            elif number < random_number:
                print("Sorry, that's not correct. That's okay, try again!")
                print("That's too low!")
    print("Great job! You got it right!")
print("You got it in {} tries! Good job!".format(count))
play_again = input("\nWant to try again (yes or no)? ")
play_again = play_again.lower()
if play_again == "yes" or play_again == "y":
quit = False
else:
quit = True
print("\n\nThanks for playing! Catch you on the flip side!")
|
StarcoderdataPython
|
5129249
|
<reponame>paysonwallach/envplus<filename>venn/commands/edit.py<gh_stars>1-10
#
# Venn
#
# Copyright (c) 2019 <NAME>
#
# Released under the terms of the Hippocratic License
# (https://firstdonoharm.dev/version/1/1/license.html)
#
import os
import cleo
import venn.env
from venn import command
class EditCommand(command.BaseCommand):
"""
Edit the active virtual environment
edit
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def handle(self):
super().handle()
venn.env.execute(self.pf, [os.environ["EDITOR"], self.pf.filepath])
|
StarcoderdataPython
|
8018659
|
<reponame>pytask-dev/pytask-parallel
"""Configure pytask."""
import os
from _pytask.config import hookimpl
from _pytask.shared import get_first_non_none_value
from pytask_parallel.backends import PARALLEL_BACKENDS_DEFAULT
from pytask_parallel.callbacks import delay_callback
from pytask_parallel.callbacks import n_workers_callback
from pytask_parallel.callbacks import parallel_backend_callback
@hookimpl
def pytask_parse_config(config, config_from_cli, config_from_file):
"""Parse the configuration."""
config["n_workers"] = get_first_non_none_value(
config_from_cli,
config_from_file,
key="n_workers",
default=1,
callback=n_workers_callback,
)
if config["n_workers"] == "auto":
config["n_workers"] = max(os.cpu_count() - 1, 1)
config["delay"] = get_first_non_none_value(
config_from_cli,
config_from_file,
key="delay",
default=0.1,
callback=delay_callback,
)
config["parallel_backend"] = get_first_non_none_value(
config_from_cli,
config_from_file,
key="parallel_backend",
default=PARALLEL_BACKENDS_DEFAULT,
callback=parallel_backend_callback,
)
@hookimpl
def pytask_post_parse(config):
"""Disable parallelization if debugging is enabled."""
if config["pdb"] or config["trace"]:
config["n_workers"] = 1
|
StarcoderdataPython
|
13354
|
#!/usr/bin/env python
# filename: pair.py
#
# Copyright (c) 2015 <NAME>
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import copy
import sys
import traceback
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from abtools import germlines
from abtools.alignment import global_alignment
from abtools.sequence import Sequence
class Pair(object):
'''
Holds a pair of sequences, corresponding to HC and LC of a single mAb.
Input is a list of dicts, with each dict containing sequence information from a single
chain, formatted as would be returned from a query on a MongoDB database containing
AbStar output.
'''
def __init__(self, seqs, name=None, h_selection_func=None, l_selection_func=None):
self._seqs = seqs
self._heavy = None
self._light = None
self._heavies = [s for s in seqs if s['chain'] == 'heavy']
self._lights = [s for s in seqs if s['chain'] in ['kappa', 'lambda']]
self._name = name
self._fasta = None
self._sample = None
self._subject = None
self._group = None
self._experiment = None
self._timepoint = None
self._is_pair = None
self._vrc01_like = None
self._lineage = None
self._select_heavy = h_selection_func
self._select_light = l_selection_func
def __eq__(self, other):
return (self.heavy, self.light) == (other.heavy, other.light)
def __ne__(self, other):
return not self == other
    def __hash__(self):
return hash((self.heavy, self.light))
@property
def heavy(self):
if self._heavy is None:
# self._heavies = [s for s in self._seqs if s['chain'] == 'heavy']
if len(self._heavies) > 0:
if self._select_heavy is not None:
self._heavy = Sequence(self._select_heavy(self._heavies))
else:
self._heavy = Sequence(self._heavies[0])
else:
self._heavy = None
return self._heavy
@heavy.setter
def heavy(self, heavy):
self._heavy = heavy
@property
def light(self):
if self._light is None:
# self._lights = [s for s in self._seqs if s['chain'] in ['kappa', 'lambda']]
if len(self._lights) > 0:
if self._select_light is not None:
self._light = Sequence(self._select_light(self._lights))
else:
self._light = Sequence(self._lights[0])
else:
self._light = None
return self._light
@light.setter
def light(self, light):
self._light = light
@property
def is_pair(self):
if all([self.heavy is not None, self.light is not None]):
return True
return False
@property
def lineage(self):
if self._lineage is None:
self._lineage = self.heavy['clonify']['id']
return self._lineage
@property
def vrc01_like(self):
if self._vrc01_like is None:
if any([self.heavy is None, self.light is None]):
self._vrc01_like = False
else:
self._vrc01_like = all([self.heavy['v_gene']['gene'] == 'IGHV1-2', self.light['cdr3_len'] == 5])
return self._vrc01_like
@property
def name(self):
if self._name is None:
if self.heavy is not None:
self._name = self.heavy['seq_id']
elif self.light is not None:
self._name = self.light['seq_id']
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def sample(self):
if self._sample is None:
slist = []
if self.experiment is not None:
slist.append(str(self.experiment))
if self.group is not None:
slist.append(str(self.group))
if self.subject is not None:
slist.append(str(self.subject))
if self.timepoint is not None:
slist.append(str(self.timepoint))
if slist:
self._sample = '|'.join(slist)
return self._sample
@property
def subject(self):
if self._subject is None:
if self.heavy is not None and 'subject' in list(self.heavy.keys()):
self._subject = self.heavy['subject']
elif self.light is not None and 'subject' in list(self.light.keys()):
self._subject = self.light['subject']
return self._subject
@subject.setter
def subject(self, subject):
self._subject = subject
@property
def group(self):
if self._group is None:
if self.heavy is not None and 'group' in list(self.heavy.keys()):
self._group = self.heavy['group']
elif self.light is not None and 'group' in list(self.light.keys()):
self._group = self.light['group']
return self._group
@group.setter
def group(self, group):
self._group = group
@property
def experiment(self):
if self._experiment is None:
if self.heavy is not None and 'experiment' in list(self.heavy.keys()):
self._experiment = self.heavy['experiment']
elif self.light is not None and 'experiment' in list(self.light.keys()):
self._experiment = self.light['experiment']
return self._experiment
@experiment.setter
def experiment(self, experiment):
self._experiment = experiment
@property
def timepoint(self):
if self._timepoint is None:
if self.heavy is not None and 'timepoint' in list(self.heavy.keys()):
self._timepoint = self.heavy['timepoint']
elif self.light is not None and 'timepoint' in list(self.light.keys()):
self._timepoint = self.light['timepoint']
return self._timepoint
@timepoint.setter
def timepoint(self, timepoint):
self._timepoint = timepoint
def refine(self, heavy=True, light=True, species='human'):
for seq in [s for s in [self.heavy, self.light] if s is not None]:
try:
self.remove_ambigs(seq)
self._refine_v(seq, species)
self._refine_j(seq, species)
self._retranslate(seq)
            except Exception:
                print('REFINEMENT FAILED: {}, {} chain'.format(seq['seq_id'], seq['chain']))
print(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1]))
@staticmethod
def remove_ambigs(seq):
# fix Ns in the nucleotide sequence
vdj = ''
for s, g in zip(seq['vdj_nt'], seq['vdj_germ_nt']):
if s.upper() == 'N':
vdj += g
else:
vdj += s
seq['vdj_nt'] = vdj
# fix Xs in the amino acid sequence
vdj = ''
for s, g in zip(seq['vdj_aa'], seq['vdj_germ_aa']):
if s.upper() == 'X':
vdj += g
else:
vdj += s
seq['vdj_aa'] = vdj
@staticmethod
def _refine_v(seq, species):
'''
        Completes the 5' end of a truncated sequence with germline nucleotides.
Input is a MongoDB dict (seq) and the species.
'''
vgerm = germlines.get_germline(seq['v_gene']['full'], species)
aln = global_alignment(seq['vdj_nt'], vgerm)
prepend = ''
for s, g in zip(aln.aligned_query, aln.aligned_target):
if s != '-':
break
else:
prepend += g
seq['vdj_nt'] = prepend + seq['vdj_nt']
@staticmethod
def _refine_j(seq, species):
'''
        Completes the 3' end of a truncated sequence with germline nucleotides.
Input is a MongoDB dict (seq) and the species.
'''
jgerm = germlines.get_germline(seq['j_gene']['full'], species)
aln = global_alignment(seq['vdj_nt'], jgerm)
append = ''
for s, g in zip(aln.aligned_query[::-1], aln.aligned_target[::-1]):
if s != '-':
break
else:
append += g
seq['vdj_nt'] = seq['vdj_nt'] + append[::-1]
@staticmethod
def _retranslate(seq):
'''
Retranslates a nucleotide sequence following refinement.
Input is a Pair sequence (basically a dict of MongoDB output).
'''
if len(seq['vdj_nt']) % 3 != 0:
trunc = len(seq['vdj_nt']) % 3
seq['vdj_nt'] = seq['vdj_nt'][:-trunc]
seq['vdj_aa'] = Seq(seq['vdj_nt'], generic_dna).translate()
def fasta(self, key='vdj_nt', append_chain=True):
'''
Returns the sequence pair as a fasta string. If the Pair object contains
both heavy and light chain sequences, both will be returned as a single string.
By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change,
use the <key> option to select an alternate sequence.
By default, the chain (heavy or light) will be appended to the sequence name:
>MySequence_heavy
To just use the pair name (which will result in duplicate sequence names for Pair objects
with both heavy and light chains), set <append_chain> to False.
'''
fastas = []
for s, chain in [(self.heavy, 'heavy'), (self.light, 'light')]:
if s is not None:
c = '_{}'.format(chain) if append_chain else ''
fastas.append('>{}{}\n{}'.format(s['seq_id'], c, s[key]))
return '\n'.join(fastas)
def get_pairs(db, collection, experiment=None, subject=None, group=None, name='seq_id',
delim=None, delim_occurance=1, pairs_only=False):
'''
Gets sequences and assigns them to the appropriate mAb pair, based on the sequence name.
Inputs:
::db:: is a pymongo database connection object
::collection:: is the collection name, as a string
If ::subject:: is provided, only sequences with a 'subject' field matching ::subject:: will
be included. ::subject:: can be either a single subject (as a string) or an iterable
(list or tuple) of subject strings.
If ::group:: is provided, only sequences with a 'group' field matching ::group:: will
be included. ::group:: can be either a single group (as a string) or an iterable
(list or tuple) of group strings.
::name:: is the dict key of the field to be used to group the sequences into pairs.
Default is 'seq_id'
::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
Default is None, which results in no name truncation.
    ::delim_occurance:: is the occurrence of the delimiter at which to trim. Trimming is performed
    as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
    truncate after the last occurrence of delim. Default is 1.
    ::pairs_only:: if set to True, only truly paired sequences (pair.is_pair == True)
    will be returned. Default is False.
Returns a list of Pair objects, one for each mAb pair.
'''
match = {}
if subject is not None:
        if isinstance(subject, (list, tuple)):
            match['subject'] = {'$in': subject}
        elif isinstance(subject, str):
            match['subject'] = subject
if group is not None:
        if isinstance(group, (list, tuple)):
            match['group'] = {'$in': group}
        elif isinstance(group, str):
            match['group'] = group
if experiment is not None:
        if isinstance(experiment, (list, tuple)):
            match['experiment'] = {'$in': experiment}
        elif isinstance(experiment, str):
            match['experiment'] = experiment
seqs = list(db[collection].find(match))
return assign_pairs(seqs, name=name, delim=delim,
delim_occurance=delim_occurance, pairs_only=pairs_only)
def assign_pairs(seqs, name='seq_id', delim=None, delim_occurance=1, pairs_only=False):
'''
Assigns sequences to the appropriate mAb pair, based on the sequence name.
Inputs:
::seqs:: is a list of dicts, of the format returned by querying a MongoDB containing
Abstar output.
::name:: is the dict key of the field to be used to group the sequences into pairs.
Default is 'seq_id'
::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
Default is None, which results in no name truncation.
    ::delim_occurance:: is the occurrence of the delimiter at which to trim. Trimming is performed
    as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
    truncate after the last occurrence of delim. Default is 1.
    ::pairs_only:: if set to True, only truly paired sequences (pair.is_pair == True)
    will be returned. Default is False.
Returns a list of Pair objects, one for each mAb pair.
'''
pdict = {}
for s in seqs:
if delim is not None:
pname = delim.join(s[name].split(delim)[:delim_occurance])
else:
pname = s[name]
if pname not in pdict:
pdict[pname] = [s, ]
else:
pdict[pname].append(s)
pairs = [Pair(pdict[n], name=n) for n in list(pdict.keys())]
if pairs_only:
pairs = [p for p in pairs if p.is_pair]
return pairs
def deduplicate(pairs, aa=False, ignore_primer_regions=False):
'''
Removes duplicate sequences from a list of Pair objects.
If a Pair has heavy and light chains, both chains must identically match heavy and light chains
from another Pair to be considered a duplicate. If a Pair has only a single chain,
identical matches to that chain will cause the single chain Pair to be considered a duplicate,
even if the comparison Pair has both chains.
Note that identical sequences are identified by simple string comparison, so sequences of
different length that are identical over the entirety of the shorter sequence are not
considered duplicates.
By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead,
set aa=True.
'''
nr_pairs = []
just_pairs = [p for p in pairs if p.is_pair]
single_chains = [p for p in pairs if not p.is_pair]
_pairs = just_pairs + single_chains
for p in _pairs:
duplicates = []
for nr in nr_pairs:
identical = True
vdj = 'vdj_aa' if aa else 'vdj_nt'
offset = 4 if aa else 12
if p.heavy is not None:
if nr.heavy is None:
identical = False
else:
heavy = p.heavy[vdj][offset:-offset] if ignore_primer_regions else p.heavy[vdj]
nr_heavy = nr.heavy[vdj][offset:-offset] if ignore_primer_regions else nr.heavy[vdj]
if heavy != nr_heavy:
identical = False
if p.light is not None:
if nr.light is None:
identical = False
else:
light = p.light[vdj][offset:-offset] if ignore_primer_regions else p.light[vdj]
nr_light = nr.light[vdj][offset:-offset] if ignore_primer_regions else nr.light[vdj]
if light != nr_light:
identical = False
duplicates.append(identical)
if any(duplicates):
continue
else:
nr_pairs.append(p)
return nr_pairs
def refine(pairs, heavy=True, light=True, species='human'):
refined_pairs = copy.deepcopy(pairs)
for p in refined_pairs:
p.refine(heavy, light, species)
return refined_pairs
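if __name__ == '__main__':
    # Minimal usage sketch: pull pairs from a MongoDB collection of AbStar
    # output, refine and deduplicate them, then print FASTA. The database and
    # collection names below are hypothetical placeholders.
    from pymongo import MongoClient

    db = MongoClient()['my_abstar_db']
    example_pairs = get_pairs(db, 'my_collection', pairs_only=True)
    example_pairs = deduplicate(refine(example_pairs))
    print('\n'.join(p.fasta() for p in example_pairs))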
|
StarcoderdataPython
|
3265166
|
import unittest
import sys
sys.path.append(r'CS_DOWNLOADER\cs_downloader\downloader')
import daterange_processor as drp
|
StarcoderdataPython
|
1691357
|
from .flatten_params_wrapper import FlatParameter
from .fully_sharded_data_parallel import FullyShardedDataParallel
from .fully_sharded_data_parallel import (
CPUOffload,
BackwardPrefetch,
ShardingStrategy,
MixedPrecision,
FullStateDictConfig,
LocalStateDictConfig,
)
from .fully_sharded_data_parallel import StateDictType, OptimStateKeyType
from .wrap import ParamExecOrderWrapPolicy
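# Illustrative use of the re-exported API (a sketch that assumes a process
# group has already been initialised and a CUDA device is available; the
# wrapped model below is a made-up example):
#
#     import torch.nn as nn
#     sharded = FullyShardedDataParallel(
#         nn.Linear(8, 8).cuda(),
#         sharding_strategy=ShardingStrategy.FULL_SHARD,
#         cpu_offload=CPUOffload(offload_params=True),
#     )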
|
StarcoderdataPython
|
3267127
|
<reponame>lclarko/GDX-Analytics
# See https://github.com/snowplow/snowplow/wiki/Python-Tracker
# and https://github.com/snowplow-proservices/ca.bc.gov-schema-registry
import time
import random
from snowplow_tracker import Subject, Tracker, AsyncEmitter
from snowplow_tracker import SelfDescribingJson
# Set up core Snowplow environment
s = Subject()
e = AsyncEmitter("spm.apps.gov.bc.ca", protocol="https")
t = Tracker(e, encode_base64=False, app_id='orgbook_api')
# Example Snowplow for an external API V3 call to "/search/topic?name=BC0772006"
search_json = SelfDescribingJson( 'iglu:ca.bc.gov.orgbook/api_call/jsonschema/1-0-0', {
'internal_call': False,
'api_version': 'v3',
'endpoint': 'search/topic',
'total': 1,
'response_time': 67,
'parameters': ['name']
})
# Example Snowplow for an external API V3 call to "/credentialtype"
credentialtype_json = SelfDescribingJson( 'iglu:ca.bc.gov.orgbook/api_call/jsonschema/1-0-0', {
'internal_call': False,
'api_version': 'v3',
'endpoint': 'credentialtype',
'response_time': 102,
'total': 6
})
# Example Snowplow for an external API V3 call to "/credentialtype/1/language"
credentialtype_language_json = SelfDescribingJson( 'iglu:ca.bc.gov.orgbook/api_call/jsonschema/1-0-0', {
'internal_call': False,
'api_version': 'v3',
'endpoint': 'credentialtype/{id}/language',
'response_time': 302,
'total': 1,
'parameters': ['id']
})
t.track_self_describing_event(search_json)
time.sleep(5)
t.track_self_describing_event(credentialtype_json)
time.sleep(5)
t.track_self_describing_event(credentialtype_language_json)
time.sleep(5)
|
StarcoderdataPython
|
11313840
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from . import DynamoDbExpressionParser, AttributeValueEncoder, DaxCborTypes
from .CborEncoder import CborEncoder
from .DaxError import DaxClientError, DaxErrorCode, DaxValidationError
import six
if six.PY2:
from .grammar2.DynamoDbGrammarListener import DynamoDbGrammarListener
else:
from .grammar.DynamoDbGrammarListener import DynamoDbGrammarListener
import antlr4
# TODO I feel like this should be able to take a CborEncoder and fill it
# directly, rather than build a bunch of bytes for appending. But maybe
# in this case it's better to be consistent with other clients than
# clever.
class Expressions(object):
def __init__(self):
self.Condition = None
self.KeyCondition = None
self.Filter = None
self.Update = None
self.Projection = None
self.ExpressionAttributeNames = None
self.ExpressionAttributeValues = None
def encode_condition_expression(condition_expr, expr_attr_names, expr_attr_values):
return encode_expressions(condition_expr, None, None, None, None, expr_attr_names, expr_attr_values).Condition
def encode_key_condition_expression(key_condition_expr, expr_attr_names, expr_attr_values):
return encode_expressions(None, key_condition_expr, None, None, None, expr_attr_names, expr_attr_values).KeyCondition
def encode_filter_expression(filter_expr, expr_attr_names, expr_attr_values):
return encode_expressions(None, None, filter_expr, None, None, expr_attr_names, expr_attr_values).Filter
def encode_update_expression(update_expr, expr_attr_names, expr_attr_values):
return encode_expressions(None, None, None, update_expr, None, expr_attr_names, expr_attr_values).Update
def encode_projection_expression(projection_expr, expr_attr_names, expr_attr_values):
return encode_expressions(None, None, None, None, projection_expr, expr_attr_names, expr_attr_values).Projection
def encode_expressions(condition_expr, key_condition_expr, filter_expr, update_expr, projection_expr,
expr_attr_names, expr_attr_values):
exprs = [
('Condition', condition_expr),
('KeyCondition', key_condition_expr),
('Filter', filter_expr),
('Update', update_expr),
('Projection', projection_expr)
]
output = Expressions()
generator = CborSExprGenerator(expr_attr_names, expr_attr_values)
for _type, expr in exprs:
if not expr:
setattr(output, _type, None)
continue
typestr = _type + 'Expression'
try:
if _type in ('Condition', 'KeyCondition', 'Filter'):
expr_array_len = 3
tree = DynamoDbExpressionParser.parse_condition(expr, ExpressionErrorListener(expr, typestr))
elif _type == 'Projection':
expr_array_len = 2
tree = DynamoDbExpressionParser.parse_projection(expr, ExpressionErrorListener(expr, typestr))
elif _type == 'Update':
expr_array_len = 3
tree = DynamoDbExpressionParser.parse_update(expr, ExpressionErrorListener(expr, typestr))
else:
                raise DaxClientError('Unknown expression type ' + str(_type), DaxErrorCode.Validation)
except Exception:
# TODO Something, eventually
raise
generator._reset(_type)
antlr4.tree.Tree.ParseTreeWalker.DEFAULT.walk(generator, tree)
generator._validate_intermediate_state()
spec = generator.stack.pop()
enc = CborEncoder()
enc.append_array_header(expr_array_len)
enc.append_int(CborSExprGenerator.ENCODING_FORMAT)
enc.append_raw(spec)
if _type != 'Projection':
enc.append_array_header(len(generator.var_values))
for var_val in generator.var_values:
enc.append_raw(var_val)
setattr(output, _type, enc.as_bytes())
generator._validate_final_state()
output.ExpressionAttributeNames = expr_attr_names
output.ExpressionAttributeValues = expr_attr_values
return output
class CborSExprGenerator(DynamoDbGrammarListener):
ENCODING_FORMAT = 1
ATTRIBUTE_VALUE_PREFIX = ':'
ATTRIBUTE_NAME_PREFIX = '#'
def __init__(self, expression_attribute_names, expression_attribute_values):
self.stack = []
self._reset(None)
self.expr_attr_names = expression_attribute_names
self.expr_attr_values = expression_attribute_values
self.unused_expr_attr_names = set(self.expr_attr_names.keys()) if self.expr_attr_names else set()
self.unused_expr_attr_values = set(self.expr_attr_values.keys()) if self.expr_attr_values else set()
def _reset(self, _type):
self._type = _type
self.nesting_level = 0
self.var_name_by_id = {}
self.var_values = []
def _validate_intermediate_state(self):
if len(self.stack) != 1:
raise DaxValidationError('Invalid {}Expression, Stack size = {}'.format(self._type, len(self.stack)))
if self.nesting_level != 0:
raise DaxValidationError('Invalid {}Expression, Nesting level = {}'.format(self._type, self.nesting_level))
def _validate_final_state(self):
if self.unused_expr_attr_names:
names = self._join_missing_names(self.unused_expr_attr_names)
raise DaxValidationError('Value provided in ExpressionAttributeNames unused in expressions: keys: {' + names + '}')
if self.unused_expr_attr_values:
names = self._join_missing_names(self.unused_expr_attr_values)
raise DaxValidationError('Value provided in ExpressionAttributeValues unused in expressions: keys: {' + names + '}')
def _validate_not_equals(self, expr_type, actual, not_expected):
for n in not_expected:
if actual.lower() == n.lower():
exp_type_str = expr_type or ''
raise DaxValidationError(
"Invalid {}Expression: The function '{}' is not allowed in a {} expression".format(
exp_type_str, actual, exp_type_str.lower()))
def _join_missing_names(self, names):
return ', '.join(names)
def enterComparator(self, ctx):
self.nesting_level += 1
def exitComparator(self, ctx):
arg2 = self.stack.pop()
func = self.stack.pop()
arg1 = self.stack.pop()
self.stack.append(self._encode_array([func, arg1, arg2]))
self.nesting_level -= 1
def exitComparator_symbol(self, ctx):
text = ctx.getText()
try:
func = OPERATORS[text]
except KeyError:
raise DaxClientError('Invalid function ' + text, DaxErrorCode.Validation, False)
self.stack.append(self._encode_function_code(func))
def exitPath(self, ctx):
n = ctx.getChildCount()
components = self.stack[-n:]; del self.stack[-n:]
self.stack.append(self._encode_function(Func.DocumentPath, components))
def exitListAccess(self, ctx):
value = ctx.getText()
ordinal = int(value[1:-1]) # get rid of []
self.stack.append(self._encode_list_access(ordinal))
def exitId_(self, ctx):
_id = ctx.getText()
if _id[0] == CborSExprGenerator.ATTRIBUTE_NAME_PREFIX:
try:
sub = self.expr_attr_names[_id]
except KeyError:
                raise DaxValidationError('Invalid {}Expression. Substitution value not provided for {}'.format(self._type, _id))
self.unused_expr_attr_names.discard(_id)
self.stack.append(self._encode_attribute_value({'S': sub}))
else:
self.stack.append(self._encode_document_path_element(_id))
def exitLiteralSub(self, ctx):
literal = ctx.getText()
self.stack.append(self._encode_variable(literal[1:]))
def exitAnd(self, ctx):
arg2 = self.stack.pop()
arg1 = self.stack.pop()
self.stack.append(self._encode_function(Func.And, [arg1, arg2]))
def exitOr(self, ctx):
arg2 = self.stack.pop()
arg1 = self.stack.pop()
self.stack.append(self._encode_function(Func.Or, [arg1, arg2]))
def exitNegation(self, ctx):
arg = self.stack.pop()
self.stack.append(self._encode_function(Func.Not, [arg]))
def enterIn(self, ctx):
self.nesting_level += 1
def exitIn(self, ctx):
numargs = (ctx.getChildCount() - 3) // 2 # arg + IN + ( + args*2-1 + )
args = self.stack[-numargs:]; del self.stack[-numargs:]
arg1 = self.stack.pop()
# a in (b,c,d) => (In a (b c d))
self.stack.append(self._encode_function(Func.In, [arg1, self._encode_array(args)]))
self.nesting_level -= 1
def enterBetween(self, ctx):
self.nesting_level += 1
def exitBetween(self, ctx):
args = self.stack[-3:]; del self.stack[-3:]
# a between b and c => (Between a b c)
self.stack.append(self._encode_function(Func.Between, args))
self.nesting_level -= 1
def enterFunctionCall(self, ctx):
funcname = ctx.ID().getText()
if self._type == 'Update':
self._validate_not_equals(self._type, funcname, ['attribute_exists', 'attribute_not_exists',
'attribute_type', 'begins_with', 'contains', 'size'])
if self.nesting_level > 0 and funcname.lower() != 'if_not_exists':
raise DaxValidationError('Only if_not_exists() function can be nested (got ' + funcname.lower() + ')')
elif self._type == 'Filter' or self._type == 'Condition':
self._validate_not_equals(self._type, funcname, ['if_not_exists', 'list_append'])
if self.nesting_level == 0 and funcname.lower() == 'size':
raise DaxValidationError(
"Invalid {}Expression: The function '{}' is not allowed to be used this way in an expression".format(
self._type, funcname))
elif self.nesting_level > 0 and funcname.lower() != 'size':
raise DaxValidationError('Only size() function can be nested (got ' + funcname.lower() + ')')
self.nesting_level += 1
def exitFunctionCall(self, ctx):
funcname = ctx.ID().getText().lower()
try:
func = FUNCS[funcname]
except KeyError:
raise DaxValidationError(
'Invalid {}Expression: Invalid function name: function: {}'.format(self._type, funcname))
numargs = (ctx.getChildCount() - 2) // 2 # children = fname + ( + numOperands*2-1 +)
args = self.stack[-numargs:]; del self.stack[-numargs:]
# func(a,b,c,..) => (func a b c ..)
self.stack.append(self._encode_function(func, args))
self.nesting_level -= 1
def exitProjection(self, ctx):
numpaths = (ctx.getChildCount() + 1) // 2 # path, path, ... path
paths = self.stack[-numpaths:]; del self.stack[-numpaths:]
self.stack.append(self._encode_array(paths))
def exitUpdate(self, ctx):
updates = self.stack[:]; del self.stack[:]
self.stack.append(self._encode_array(updates))
def exitSet_action(self, ctx):
operand = self.stack.pop()
path = self.stack.pop()
self.stack.append(self._encode_function(Func.SetAction, [path, operand]))
def exitRemove_action(self, ctx):
path = self.stack.pop()
self.stack.append(self._encode_function(Func.RemoveAction, [path]))
def exitAdd_action(self, ctx):
value = self.stack.pop()
path = self.stack.pop()
self.stack.append(self._encode_function(Func.AddAction, [path, value]))
def exitDelete_action(self, ctx):
value = self.stack.pop()
path = self.stack.pop()
self.stack.append(self._encode_function(Func.DeleteAction, [path, value]))
def enterPlusMinus(self, ctx):
self.nesting_level += 1
def exitPlusMinus(self, ctx):
op2 = self.stack.pop()
op1 = self.stack.pop()
operator = ctx.getChild(1).getText()
try:
func = OPERATORS[operator]
except KeyError:
raise DaxClientError('Must be +/-', DaxErrorCode.Validation, False)
self.stack.append(self._encode_function(func, [op1, op2]))
self.nesting_level -= 1
def _encode_document_path_element(self, s):
return CborEncoder().append_string(s).as_bytes()
def _encode_attribute_value(self, val):
return AttributeValueEncoder.encode_attribute_value(val)
def _encode_array(self, array):
enc = CborEncoder()
enc.append_array_header(len(array))
for item in array:
# Array items are already CBOR encoded
enc.append_raw(item)
return enc.as_bytes()
def _encode_function_code(self, func):
return CborEncoder().append_int(func).as_bytes()
def _encode_function(self, func, args):
enc = CborEncoder()
enc.append_array_header(len(args) + 1)
enc.append_int(func)
for arg in args:
# Args are already CBOR encoded
enc.append_raw(arg)
return enc.as_bytes()
def _encode_list_access(self, ordinal):
enc = CborEncoder()
enc.append_tag(DaxCborTypes.TAG_DDB_DOCUMENT_PATH_ORDINAL)
enc.append_int(ordinal)
return enc.as_bytes()
def _encode_variable(self, var_name):
fullname = CborSExprGenerator.ATTRIBUTE_VALUE_PREFIX + var_name
try:
val = self.expr_attr_values[fullname]
except KeyError:
raise DaxClientError(
'Invalid {}Expression: An expression attribute value used in expression is not defined: {}'.format(
self._type, fullname),
DaxErrorCode.Validation, False)
self.unused_expr_attr_values.discard(fullname)
try:
var_id = self.var_name_by_id[var_name]
except KeyError:
var_id = len(self.var_values)
self.var_name_by_id[var_name] = var_id
self.var_values.append(self._encode_attribute_value(val))
enc = CborEncoder()
enc.append_array_header(2)
enc.append_int(Func.Variable)
enc.append_int(var_id)
return enc.as_bytes()
from antlr4.error.ErrorListener import ErrorListener
class ExpressionErrorListener(ErrorListener):
def __init__(self, expr, expr_type):
super(ExpressionErrorListener, self).__init__()
self.expr = expr
self.expr_type = expr_type
def syntaxError(self, recognizer, offendingSymbol, line, column, message, exception):
raise DaxClientError(
'Invalid {}: Syntax error; token: "{}", near: line {} char {}'.format(self.expr_type, offendingSymbol.text, line, column),
DaxErrorCode.Validation, False)
class Func:
    # NOTE: Ordinals are used as identifiers in the CBOR-encoded format
# Comparison operators #
Equal = 0
NotEqual = 1
LessThan = 2
GreaterEqual = 3
GreaterThan = 4
LessEqual = 5
# Logical operators #
And = 6
Or = 7
Not = 8
# Range operators #
Between = 9
# Enumeration operators #
In = 10
# Functions #
AttributeExists = 11
AttributeNotExists = 12
AttributeType = 13
BeginsWith = 14
Contains = 15
Size = 16
# Document path elements #
    Variable = 17  # takes 1 argument which is a placeholder for a value; the function substitutes the argument with the corresponding value
DocumentPath = 18 # maps a CBOR object to a document path
# Update Actions #
SetAction = 19
AddAction = 20
DeleteAction = 21
RemoveAction = 22
# Update operations #
IfNotExists = 23
ListAppend = 24
Plus = 25
Minus = 26
OPERATORS = {
'+': Func.Plus,
'-': Func.Minus,
'=': Func.Equal,
'<>': Func.NotEqual,
'<': Func.LessThan,
'<=': Func.LessEqual,
'>': Func.GreaterThan,
'>=': Func.GreaterEqual,
'and': Func.And,
'or': Func.Or,
'not': Func.Not,
'between': Func.Between,
'in': Func.In,
':': Func.Variable,
'.': Func.DocumentPath,
}
ACTIONS = {
'SET': Func.SetAction,
'ADD': Func.AddAction,
'DELETE': Func.DeleteAction,
'REMOVE': Func.RemoveAction,
}
FUNCS = {
'attribute_exists': Func.AttributeExists,
'attribute_not_exists': Func.AttributeNotExists,
'attribute_type': Func.AttributeType,
'begins_with': Func.BeginsWith,
'contains': Func.Contains,
'size': Func.Size,
'if_not_exists': Func.IfNotExists,
'list_append': Func.ListAppend
}
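# Illustrative call of the helpers above (a sketch; the expression string and
# the substitution maps are made-up examples of the expected argument shapes):
#
#     encoded = encode_condition_expression(
#         '#n = :v and attribute_exists(#n)',
#         {'#n': 'name'},
#         {':v': {'S': 'example'}},
#     )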
|
StarcoderdataPython
|
343972
|
<gh_stars>1-10
"""
This module contains methods that model the properties of galaxy cluster
populations.
"""
|
StarcoderdataPython
|
6561222
|
import os
import json
import unittest
from mock import Mock
from dmcontent.content_loader import ContentLoader
from werkzeug.datastructures import MultiDict
from app.presenters.search_presenters import filters_for_lot, set_filter_states
content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('g6', 'data', 'manifest')
questions_builder = content_loader.get_builder('g6', 'manifest')
def _get_fixture_data():
test_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")
)
fixture_path = os.path.join(
test_root, 'fixtures', 'search_results_fixture.json'
)
with open(fixture_path) as fixture_file:
return json.load(fixture_file)
def _get_fixture_multiple_pages_data():
test_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")
)
fixture_path = os.path.join(
test_root, 'fixtures', 'search_results_multiple_pages_fixture.json'
)
with open(fixture_path) as fixture_file:
return json.load(fixture_file)
class TestSearchFilters(unittest.TestCase):
def _get_filter_group_by_label(self, lot, label):
filter_groups = filters_for_lot(lot, questions_builder)
for filter_group in filter_groups:
if filter_group['label'] == label:
return filter_group
def _get_request_for_params(self, params):
return Mock(args=MultiDict(params))
def test_get_filter_groups_from_questions_with_radio_filters(self):
radios_filter_group = self._get_filter_group_by_label(
'saas', 'Radios example'
)
self.assertEqual({
'label': 'Radios example',
'filters': [
{
'label': 'Option 1',
'name': 'radiosExample',
'id': 'radiosExample-option-1',
'value': 'option 1',
},
{
'label': 'Option 2',
'name': 'radiosExample',
'id': 'radiosExample-option-2',
'value': 'option 2',
}
]
}, radios_filter_group)
def test_get_filter_groups_from_questions_with_checkbox_filters(self):
checkboxes_filter_group = self._get_filter_group_by_label(
'saas', 'Checkboxes example'
)
self.assertEqual({
'label': 'Checkboxes example',
'filters': [
{
'label': 'Option 1',
'name': 'checkboxesExample',
'id': 'checkboxesExample-option-1',
'value': 'option 1',
},
{
'label': 'Option 2',
'name': 'checkboxesExample',
'id': 'checkboxesExample-option-2',
'value': 'option 2',
}
]
}, checkboxes_filter_group)
def test_get_filter_groups_from_questions_with_boolean_filters(self):
booleans_filter_group = self._get_filter_group_by_label(
'saas', 'Booleans example'
)
self.assertEqual({
'label': 'Booleans example',
'filters': [
{
'label': 'Option 1',
'name': 'booleanExample1',
'id': 'booleanExample1',
'value': 'true',
},
{
'label': 'Option 2',
'name': 'booleanExample2',
'id': 'booleanExample2',
'value': 'true',
}
]
}, booleans_filter_group)
def test_request_filters_are_set(self):
search_filters = filters_for_lot('saas', questions_builder)
request = self._get_request_for_params({
'q': 'email',
'booleanExample1': 'true'
})
set_filter_states(search_filters, request)
self.assertEqual(search_filters[0]['filters'][0]['name'],
'booleanExample1')
self.assertEqual(search_filters[0]['filters'][0]['checked'], True)
self.assertEqual(search_filters[0]['filters'][1]['name'],
'booleanExample2')
self.assertEqual(search_filters[0]['filters'][1]['checked'], False)
def test_filter_groups_have_correct_default_state(self):
request = self._get_request_for_params({
'q': 'email',
'lot': 'paas'
})
search_filters = filters_for_lot('paas', questions_builder)
set_filter_states(search_filters, request)
self.assertEqual(
search_filters[0],
{
'label': 'Booleans example',
'filters': [
{
'checked': False,
'label': 'Option 1',
'name': 'booleanExample1',
'id': 'booleanExample1',
'value': 'true',
},
{
'checked': False,
'label': 'Option 2',
'name': 'booleanExample2',
'id': 'booleanExample2',
'value': 'true',
}
]
}
)
def test_filter_groups_have_correct_state_when_changed(self):
request = self._get_request_for_params({
'q': 'email',
'lot': 'paas',
'booleanExample1': 'true'
})
search_filters = filters_for_lot('paas', questions_builder)
set_filter_states(search_filters, request)
self.assertEqual(
search_filters[0],
{
'label': 'Booleans example',
'filters': [
{
'checked': True,
'label': 'Option 1',
'name': 'booleanExample1',
'id': 'booleanExample1',
'value': 'true',
},
{
'checked': False,
'label': 'Option 2',
'name': 'booleanExample2',
'id': 'booleanExample2',
'value': 'true',
}
]
}
)
def test_no_lot_is_the_same_as_all(self):
all_filters = self._get_filter_group_by_label(
'all', 'Radios example'
)
no_lot_filters = self._get_filter_group_by_label(
None, 'Radios example'
)
self.assertTrue(all_filters)
self.assertEqual(all_filters, no_lot_filters)
def test_instance_has_correct_filter_groups_for_paas(self):
search_filters = filters_for_lot('paas', questions_builder)
filter_group_labels = [
group['label'] for group in search_filters
]
self.assertTrue('Booleans example' in filter_group_labels)
self.assertTrue('Checkboxes example' in filter_group_labels)
self.assertTrue('Radios example' in filter_group_labels)
def test_instance_has_correct_filter_groups_for_iaas(self):
search_filters = filters_for_lot('iaas', questions_builder)
filter_group_labels = [
group['label'] for group in search_filters
]
self.assertFalse('Booleans example' in filter_group_labels)
self.assertTrue('Checkboxes example' in filter_group_labels)
self.assertTrue('Radios example' in filter_group_labels)
|
StarcoderdataPython
|
307598
|
<filename>idea_fare/urls.py<gh_stars>0
import debug_toolbar
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path
from users import views as user_views
from utils.decorators import require_superuser
def dec_patterns(patterns):
decorated_patterns = []
for pattern in patterns:
callback = pattern.callback
pattern.callback = require_superuser(callback)
pattern._callback = require_superuser(callback)
decorated_patterns.append(pattern)
return decorated_patterns
urlpatterns = [
path('admin/', (dec_patterns(admin.site.urls[0]),) + admin.site.urls[1:]),
]
urlpatterns += [
path('', include('ideas.urls')),
path('register/', user_views.register, name='register'),
path('profile/', user_views.profile, name='profile'),
path('login/', auth_views.LoginView.as_view(
redirect_authenticated_user=True,
template_name='users/login.html'),
name='login'
),
path('logout/', auth_views.LogoutView.as_view(
template_name='users/logout.html'),
name='logout'
),
path('password-change/', user_views.password_change, name='password-change'),
path('password-reset/',
auth_views.PasswordResetView.as_view(
template_name='users/password_reset.html'),
name='password_reset'
),
path('password-reset/done/',
auth_views.PasswordResetDoneView.as_view(
template_name='users/password_reset_done.html'),
name='password_reset_done'
),
path('password-reset-confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name='users/password_reset_confirm.html'),
name='password_reset_confirm'
),
path('password-reset-complete/',
auth_views.PasswordResetCompleteView.as_view(
template_name='users/password_reset_complete.html'),
name='password_reset_complete'
),
path('flag/', include('flag.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += [path('__debug__', include(debug_toolbar.urls))]
|
StarcoderdataPython
|
6664077
|
from django.conf import settings
def get_disabled_features():
"""
Load the disabled features from the settings file
"""
return settings.FEATUREFLAGS_DISABLE
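# Illustrative use (a sketch; the flag name, the settings entry and the caller
# shown here are assumptions):
#
#     # settings.py
#     FEATUREFLAGS_DISABLE = ['beta_dashboard']
#
#     # elsewhere
#     if 'beta_dashboard' not in get_disabled_features():
#         enable_beta_dashboard()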
|
StarcoderdataPython
|
6476449
|
<gh_stars>1-10
from tkinter import ttk, PhotoImage
from Components.CustomEntry import CustomEntry
from os.path import basename, join
from threading import Thread
class MainPage(ttk.Frame):
    def __init__(self: object, parent: object, props: dict) -> None:
super().__init__(parent)
# variables
theme: str = props['theme'].get_theme()
self.hash_manager: object = props['hash_manager']
self.about_page: object = props['about_page']
# icons cache
self.icons: dict = {
'add': PhotoImage(file=join('Resources', 'Icons', theme, 'add.png')),
'info': PhotoImage(file=join('Resources', 'Icons', theme, 'info.png'))
}
# page layout
# left frame
self.left_frame: ttk.Frame = ttk.Frame(self, style='dark.TFrame')
ttk.Button(self.left_frame, image=self.icons['add'], text='Add file', compound='left', command=self.__open_file).pack(
side='top', anchor='c', padx=10, pady=10)
ttk.Button(self.left_frame, image=self.icons['info'], text='About', compound='left', command=self.__open_about).pack(
side='bottom', anchor='c', padx=10, pady=(0, 10))
self.left_frame.pack(side='left', fill='y')
# right frame
self.right_frame: ttk.Frame = ttk.Frame(self)
# entry
# sha 256 entry
self.sha256_entry: CustomEntry = CustomEntry(
self.right_frame, text='Sha256', theme=theme)
self.sha256_entry.pack(side='top', pady=10, fill='x', padx=10)
# sha1 entry
self.sha1_entry: CustomEntry = CustomEntry(
self.right_frame, text='Sha1', theme=theme)
self.sha1_entry.pack(side='top', pady=(0, 10), fill='x', padx=10)
# md5 entry
self.md5_entry: CustomEntry = CustomEntry(
self.right_frame, text='Md5', theme=theme)
self.md5_entry.pack(side='top', pady=(0, 10), fill='x', padx=10)
# pack right frame
self.right_frame.pack(side='right', fill='both', expand=True)
def __open_file(self: object) -> None:
if self.hash_manager.open_file():
self.master.title(f'PyHash -> {basename(self.hash_manager.file)}')
self.sha256_entry.set('Calculating ...')
self.sha1_entry.set('Calculating ...')
self.md5_entry.set('Calculating ...')
self.__get_hashes()
else:
self.master.title('PyHash')
self.sha256_entry.set('')
self.sha1_entry.set('')
self.md5_entry.set('')
def __get_hashes(self: object) -> None:
Thread(target=lambda: self.sha256_entry.set(
self.hash_manager.get_sha256()), daemon=True).start()
Thread(target=lambda: self.sha1_entry.set(
self.hash_manager.get_sha1()), daemon=True).start()
Thread(target=lambda: self.md5_entry.set(
self.hash_manager.get_md5()), daemon=True).start()
def __open_about(self: object) -> None:
self.about_page.tkraise()
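# Illustrative wiring (a sketch; the theme, hash_manager and about_page objects
# are placeholders for whatever the real application supplies):
#
#     import tkinter as tk
#     root = tk.Tk()
#     page = MainPage(root, {'theme': theme, 'hash_manager': hashes, 'about_page': about})
#     page.pack(fill='both', expand=True)
#     root.mainloop()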
|
StarcoderdataPython
|