max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
eufy_security_ws_python/model/version.py | bachya/eufy-security-ws-python | 20 | 12799351 | <reponame>bachya/eufy-security-ws-python<gh_stars>10-100
"""Define utilities related to eufy-websocket-ws versions."""
from dataclasses import dataclass
@dataclass
class VersionInfo:
"""Define the server's version info."""
driver_version: str
server_version: str
min_schema_version: int
max_schema_version: int
@classmethod
def from_message(cls, msg: dict) -> "VersionInfo":
"""Create an instance from a version message."""
return cls(
driver_version=msg["driverVersion"],
server_version=msg["serverVersion"],
min_schema_version=msg.get("minSchemaVersion", 0),
max_schema_version=msg.get("maxSchemaVersion", 0),
)
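# Illustrative usage (a minimal sketch; the message values below are made up):
#     msg = {"driverVersion": "1.0.0", "serverVersion": "0.9.1", "minSchemaVersion": 0, "maxSchemaVersion": 9}
#     info = VersionInfo.from_message(msg)
#     info.server_version  # -> "0.9.1"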
| 2.421875 | 2 |
Practica1/Suma binaria/Grafica.py | JosueHernandezR/An-lisis-de-Algoritmos | 1 | 12799352 | <gh_stars>1-10
# Analysis of Algorithms 3CV2
# <NAME>
# <NAME>
# Practical 1: Binary Addition
# This file defines the plotting functions for the algorithm.
import matplotlib.pyplot as plt
import numpy as np


def graph(size, time):
    # Window title.
    plt.figure("Complejidad temporal del algoritmo de suma binaria")
    # Plot title.
    plt.title("Suma binaria:", color=(0.3, 0.4, 0.6), weight="bold")
    # Build the plot parameters.
    t, n = parametros(size, time)
    # Set the plot limits.
    plt.xlim(0, size)
    plt.ylim(0, time)
    # Proposed bound: g(n) = (5/3)n, evaluated here so it can be plotted below.
    _t = [(5 / 3) * x for x in n]
    # Axis labels.
    plt.xlabel("Tamaño ( n )", color=(0.3, 0.4, 0.6), size="large")
    plt.ylabel("Tiempo ( t )", color=(0.3, 0.4, 0.6), size="large")
    # Plot: markers plus dashed lines for both curves.
    plt.plot(n, _t, 'bs', label="g( n ) = ( 5/3 )n")
    plt.plot(n, t, 'g^', linewidth=3, label="E( n ) = n")
    plt.plot(n, _t, 'r--', label="g( n ) = ( 5/3 )n")
    plt.plot(n, t, 'b--', linewidth=3, label="E( n ) = n")
    plt.legend(loc="lower right")
    plt.show()


def parametros(size, time):
    # Time versus plot points.
    t, n = [], [0]
    # div: helper step used to spread the size axis over the time samples.
    div = float("{0:.2f}".format(1 / round(time / size)))
    # Time (t) parameters.
    for i in range(time):
        t.append(i)
    # Size (n) parameters.
    for i in range(time):
        if i != 0:
            n.append(float("{0:.2f}".format(n[i - 1] + div)))
    # Return the values.
    return t, n
| 3.734375 | 4 |
rdfmodule/rdf_fixer.py | DocMinus/chem-rdf-fixer | 1 | 12799353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chemical RDF converter & fixer.
Version 2.3 (Dec 28, 14:25:00 2021)
Added mol sanitization and try/catch
run by calling
rdf_fixer.convert(filename or path)
(optional: returns list of new filenames)
@author: <NAME> (DocMinus)
license: MIT License
Copyright (c) 2021 DocMinus
"""
import os
import re
import pandas as pd
from collections import OrderedDict
import rdkit.Chem as rdc
from rdkit.Chem.MolStandardize import rdMolStandardize
from rdkit import RDLogger
# Important, or else waaaay too many RDkit details in output
RDLogger.logger().setLevel(RDLogger.CRITICAL)
def fix(RDF_IN: str) -> "zipped":
"""Retrieving all .RDF files in a subdirectory recursively.
Then submit to conversion (i.e. fixing)
Parts of os.walk snippet originated on Reddit somewhere, forgot where though.
Args:
RDF_IN = filename, alt. directory and subdirectories to scan
Returns:
zipped List of the new file names
Order: input_file; fixed_file; csv_file
"""
file_list_in = []
file_list_ok = []
file_list_csv = []
if os.path.isfile(RDF_IN):
if RDF_IN.endswith(("rdf", "RDF")):
file_list_in.append(os.path.join(RDF_IN))
file_list_ok.append(os.path.splitext(RDF_IN)[0] + "_fixed.rdf")
file_list_csv.append(os.path.splitext(RDF_IN)[0] + ".csv")
elif os.path.isdir(RDF_IN):
for subdir, dirs, files in os.walk(RDF_IN):
for file in files:
if file.endswith(("rdf", "RDF")):
file_list_in.append(os.path.join(subdir, file))
file_list_ok.append(
os.path.join(subdir, os.path.splitext(file)[0] + "_fixed.rdf")
)
file_list_csv.append(
os.path.join(subdir, os.path.splitext(file)[0] + ".csv")
)
zipped = zip(file_list_in, file_list_ok, file_list_csv)
# note: zip gets unpacked upon usage and disappears
for file_in, file_ok, file_csv in zipped:
print("Converting file: ", file_in)
convert(file_in, file_ok, file_csv)
return zip(file_list_in, file_list_ok, file_list_csv)
def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str):
"""original script with single file usage wrapped into this 'convert' function
Args:
RDF_IN_FILE: original input RDF file including path
RDF_OK_FILE: new RDF file with corrections (if any)
RDF_CSV_FILE: resulting CSV file (incl. path)
Returns:
None - output are the new files.
"""
##############################################################
# Fix erroneous entries (empty mols) by deleting those entries
with open(RDF_IN_FILE) as file_in:
seed_line = file_in.readline()
previous_line = seed_line # get first line as "seed" for upcoming loop
# seed_line is later reused again
with open(RDF_OK_FILE, "w") as file_out:
write_to_file = True
for current_line in open(RDF_IN_FILE):
# prevent first line from being written twice
if current_line.startswith("$RDFILE") and previous_line.startswith(
"$RDFILE"
):
continue
# a correct molecule block ($RFMT followed by $RXN) is kept; an empty molecule
# block ($RFMT followed directly by $DTYPE) is dropped by not writing the
# dangling $RFMT line
write_to_file = not (
current_line.startswith("$DTYPE") and previous_line.startswith("$RFMT")
)
if write_to_file:
file_out.write(previous_line)
previous_line = current_line
file_out.write(previous_line)
# the last line is not caught in the loop, hence written out here.
# end of fix section
####################
def scifi_or_reax(in_file: str) -> str:
"""Determine if Scifinder or Reaxys rdf file
(Scifinder contains 'SCHEME' in the enumeration)
Returned string is multiple string.replace() methods,
to render script independent of source
Args:
in_file (str): filename of the corrected file (in principle,
the original one would work as well;
alt even global variable possible instead)
Returns:
SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys)
"""
f = open(in_file)
NUMBER_OF_LINES = 3
for i in range(NUMBER_OF_LINES):
line_three = f.readline()
return "RXN:" if re.match(".+SCHEME", line_three) else "ROOT:"
def build_empty_table(in_file: str, SCI_REAX: str):
"""Scans file three times to build a pandas df used as main table
Args:
in_file (str): filename of the corrected file: RDF_OK_FILE
SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys) used in replacements
Returns:
da_table (object): the (empty) pandas df working table
max_reagents (int): number for later positioning of reagents smiles in table
max_products (int): <> (products)
"""
# get the IDs and use as row index
list_of_IDs = [] # i.e. rows
for line in open(in_file):
if line.startswith("$RFMT"):
list_of_IDs.append(line.strip().split(" ")[2])
# determine max no of reagents/products
flag = 0
max_reagents = 0
max_products = 0
for line in open(in_file):
if line.startswith("$RXN") | flag == 1:
flag = 1
if re.match("\s\s[0-9]\s\s[0-9]\n", line):
# analyse the " y z" line.
# implies: y reactants, z products.
x = line.strip().split(" ")
number_reagents = int(x[0])
number_products = int(x[1])
if number_reagents > max_reagents:
max_reagents = number_reagents
if number_products > max_products:
max_products = number_products
flag = 0
# build the column headers
fields = []
for i in range(max_reagents):
tmp_name = "Reagent" + str(i)
fields.append(tmp_name)
for i in range(max_products):
tmp_name = "Product" + str(i)
fields.append(tmp_name)
for line in open(in_file):
if line.startswith("$DTYPE"):
fields.append((line.strip().split(" ")[1]).replace(SCI_REAX, ""))
# finally, build the table
da_table = pd.DataFrame(
index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields))
)
return da_table, max_reagents, max_products
##############################################################
# Initialize Table and diverse variables
# get string replacement variable depending on source
SCI_REAX = scifi_or_reax(RDF_OK_FILE)
# build table according to files specs. get max no of reagents & products at the same time.
my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX)
####################################################################
# Here comes the actual data extraction and addition to pandas table
#
############### GET MOLECULES #############
# (structure same for Reaxys and Scifinder)
#
flag = 0
# 0 = generic
# 1 = start of reaction block
# 2 = single MOL (molecules)
# 9 = skip
molecule = []
number_reagents = 0
number_products = 0
number_molecules = 0
iterate_molecules = 0
mol_string = ""
rxn_id = ""
multiple_row_text = ""
# get first line as "seed" for upcoming loop
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# start of a new reaction block
if current_line.startswith("$RXN") | flag == 1:
flag = 1
if re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
# analyse the " y z" line. Not hard-coding this since it might change?
# implies: y reactants, z product.
x = current_line.strip().split(" ")
number_reagents = int(x[0])
number_products = int(x[1])
number_molecules = number_reagents + number_products
# create fresh list of max no of molecules, for use in $MOL block
# yes, always same size within a *given file*, can change from file to file(!)
for i in range(number_molecules):
molecule.append([])
if current_line == "\n" or re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
# checks for empty lines and the number of molecules lines and skips them
continue
# after determining a block, find the molecules within the block
if (current_line == "$MOL\n") | (flag == 2):
flag = 2
if current_line != "$MOL\n" and (iterate_molecules < number_molecules):
molecule[iterate_molecules].append(current_line)
if current_line == "M END\n":
iterate_molecules += 1
# end of the complete reaction block
if current_line.startswith("$D") & (previous_line == "M END\n"):
flag = 9 # could just use flag = 0(?)
# rebuild the string of a molecule
counter_reagents = 0
counter_products = 0
num_mols_this_instance = len(molecule)
# should always be max_mol now, so doesn't matter
for mol in range(num_mols_this_instance):
mol_string = "".join(molecule[mol])
if mol_string == "":
smiles = ""
else:
mol = rdc.MolFromMolBlock(mol_string, sanitize=False)
if mol is None:
continue
try:
rdc.SanitizeMol(mol)
except ValueError as _e:
print("Error: ", _e)
continue
mol.UpdatePropertyCache(strict=False)
rdc.SanitizeMol(
mol,
sanitizeOps=(
rdc.SANITIZE_ALL
^ rdc.SANITIZE_CLEANUP
^ rdc.SANITIZE_PROPERTIES
),
)
mol = rdMolStandardize.Normalize(mol)
smiles = rdc.MolToSmiles(mol)
# some mols might be empty, this if/else positions reagents/products accordingly
if counter_reagents + 1 <= number_reagents:
my_table.loc[
rxn_id, my_table.columns[counter_reagents]
] = smiles
counter_reagents += 1
else:
my_table.loc[
rxn_id, my_table.columns[counter_products + max_reagents]
] = smiles
counter_products += 1
# reset variables
iterate_molecules = 0
molecule = []
mol_string = ""
previous_line = current_line
################################
#
######### GET single line data ##########
#
# Nota bene: this will write first line of multiline columns as well
# but doesn't matter since those will be correctly overwritten later on
rxn_id = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
# flag = 0
continue
if previous_line.startswith("$DTYPE") and current_line.startswith("$DATUM"):
current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
row_text = current_line.replace("\n", " ")
# flag = 1
my_table.loc[rxn_id, current_column] = row_text.replace("$DATUM ", "")
previous_line = current_line
################################
#
### Extract Experimental Procedure ###
# Multiline, both,
# Reaxys and Scifinder
#
flag = 0
# 0 = generic
# 5 = exp procedure text over multiple lines
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# get experimental section
if SCI_REAX == "RXN:":
if re.match(".+EXP_PROC", previous_line) or flag == 5:
# start of the experimental section. spans over multiple line
if re.match(".+EXP_PROC", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if re.match(".+NOTES", current_line) or re.match(
".+REFERENCE.+", current_line
):
# this is the end of experimental block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 5
else: # Reaxys
if re.match(".+TXT", previous_line) or flag == 5:
# start of the experimental section. spans over multiple line
if re.match(".+TXT", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if re.match(".+STP", current_line):
# this is the end of experimental block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 5
previous_line = current_line
################################
#
######## Extract Notes ########
# (only Scifinder)
#
flag = 0
# 0 = generic
# 6 = notes, text potentially over multiple lines
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Notes
if re.match(".+NOTES", previous_line) or flag == 6:
flag = 6
# start of the Notes section. might span over multiple line
if re.match(".+NOTES", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of Notes block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 6
previous_line = current_line
################################
#
######## Extract title ########
# (only Scifinder)
#
flag = 0
# 0 = generic
# 7 = title
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Title
if re.match(".+TITLE", previous_line) or flag == 7:
flag = 7
# start of the Title section. might span over multiple line
if re.match(".+TITLE", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of title block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 7
previous_line = current_line
################################
#
####### Extract authors ########
# (only Scifinder)
#
flag = 0
# 0 = generic
# 8 = authors
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Authors
if re.match(".+AUTHOR", previous_line) or flag == 8:
flag = 8
if re.match(".+AUTHOR", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of author block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 8
previous_line = current_line
################################
#
### Extract citation (i.e. source) ###
#
# This is done last, since for Scifinder
# this is the last entry in a file
# not necessary for reaxys, but it will go through it anyway
# (less ifs and doesn't screw anything up)
#
flag = 0
# 0 = generic
# 9 = skip
# 4 = citation
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Citation
if re.match(".+CITATION", previous_line) or flag == 4:
flag = 4
if re.match(".+CITATION", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of citation block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 4
previous_line = current_line
################################
# End of file scanning #
############################################
# Finish table for export to csv file format
my_table = my_table.fillna("") # need to remove NaN
my_table.drop(
list(my_table.filter(regex="COPYRIGHT")), axis=1, inplace=True
) # skip the copyright (optional)
my_table.to_csv(RDF_CSV_FILE, sep="\t", header=True, index=True)
# end of script
# one could add a return value for better error handling.
return None
| 2.796875 | 3 |
boot.py | DanijelMi/ChuckTesta | 0 | 12799354 | <gh_stars>0
# This file is executed on every boot (including wake-boot from deepsleep)
import esp
import machine
esp.osdebug(None)
SESSION_FILENAME = 'SessionData.txt'
exceptionMessage = None
def reboot():
machine.reset()
def help1():
print("This is a WIP help menu")
print("Some description")
# Reads data about the light state before the restart, used after restart
def readSession():
try:
f = open(SESSION_FILENAME, 'r')
text = f.read()
f.close()
return text
except OSError:
print("Session File not found")
return "00000"
# Saves data about the light state before restarting
def saveSession(bootMode, param1, param2):
f = open(SESSION_FILENAME, 'w')
f.write(str(bootMode))
f.write(str(param1))
f.write(str(param2))
f.close()
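# Illustrative sketch (values are made up): saveSession(1, 0, 0) writes "100" to
# SessionData.txt, so readSession()[0] == "1" makes main() pick work mode after reboot.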
def getCrashReport():
import sys
sys.print_exception(exceptionMessage, sys.stderr)
# Decide whether to go into workMode or editMode
def main():
import gc
import connectionManager as cm
cm.setWifi(True)
gc.collect()
try:
text = readSession()
if int(text[0]) == 0:
print("Entering Edit Mode gracefully")
import editMode
editMode.main()
else:
print("Entering Work Mode")
import workMode
workMode.main()
machine.reset()
except Exception as exc:
global exceptionMessage
exceptionMessage = exc
import triac
triac.activate(0)
import editMode
editMode.main()
if __name__ == "__main__":
main()
| 2.453125 | 2 |
.deploy/run_tests.py | stigok/ruterstop | 8 | 12799355 | #!/usr/bin/env python3
from subprocess import run
from sys import argv, exit
PYVER = argv[1]
IMAGE = f"ruterstop:python{PYVER}"
print("Building", IMAGE)
run(
[
"docker",
"build",
"--network=host",
"--file=.deploy/Dockerfile",
f"--build-arg=PYTHON_VERSION={PYVER}",
f"--build-arg=POETRY_VERSION=1.1.5",
f"--tag=ruterstop:python{PYVER}",
".",
],
check=True,
)
print("Running unit-tests", IMAGE)
run(
[
"docker",
"run",
"--network=host",
"--rm",
IMAGE,
]
+ ["unittest"],
check=True,
)
print("Running livetest", IMAGE)
run(
[
"docker",
"run",
"--network=host",
"--rm",
IMAGE,
]
+ ["ruterstop", "--stop-id=6013"],
check=True,
)
print("Success!")
| 2.171875 | 2 |
machine_replacement.py | dsbrown1331/broil | 1 | 12799356 | import bayesian_irl
import mdp_worlds
import utils
import mdp
import numpy as np
import scipy
import random
import generate_efficient_frontier
import matplotlib.pyplot as plt
def generate_reward_sample():
#rewards for no-op are gamma distributed
r_noop = []
locs = 1/2
scales = [20, 40, 80,190]
for i in range(4):
r_noop.append(-np.random.gamma(locs, scales[i], 1)[0])
r_noop = np.array(r_noop)
#rewards for repair are -N(100,1) for all but last state where it is -N(130,20)
r_repair = -100 + -1 * np.random.randn(4)
return np.concatenate((r_noop, r_repair))
def generate_posterior_samples(num_samples):
print("samples")
all_samples = []
for i in range(num_samples):
r_sample = generate_reward_sample()
all_samples.append(r_sample)
print("mean of posterior from samples")
print(np.mean(all_samples, axis=0))
posterior = np.array(all_samples)
return posterior.transpose() #each column is a reward sample
if __name__=="__main__":
seed = 1234
np.random.seed(seed)
scipy.random.seed(seed)
random.seed(seed)
num_states = 4
num_samples = 2000
gamma = 0.95
alpha = 0.99
lamda = 0.9
posterior = generate_posterior_samples(num_samples)
r_sa = np.mean(posterior, axis=1)
init_distribution = np.ones(num_states)/num_states #uniform distribution
mdp_env = mdp.MachineReplacementMDP(num_states, r_sa, gamma, init_distribution)
print("---MDP solution for expectation---")
print("mean MDP reward", r_sa)
u_sa = mdp.solve_mdp_lp(mdp_env, debug=True)
print("mean policy from posterior")
utils.print_stochastic_policy_action_probs(u_sa, mdp_env)
print("MAP/Mean policy from posterior")
utils.print_policy_from_occupancies(u_sa, mdp_env)
print("rewards")
print(mdp_env.r_sa)
print("expected value = ", np.dot(u_sa, r_sa))
stoch_pi = utils.get_optimal_policy_from_usa(u_sa, mdp_env)
print("expected return", mdp.get_policy_expected_return(stoch_pi, mdp_env))
print("values", mdp.get_state_values(u_sa, mdp_env))
print('q-values', mdp.get_q_values(u_sa, mdp_env))
#run CVaR optimization, maybe just the robust version for now
u_expert = np.zeros(mdp_env.num_actions * mdp_env.num_states)
# print("solving for CVaR optimal policy")
posterior_probs = np.ones(num_samples) / num_samples #uniform dist since samples from MCMC
#generate efficient frontier
lambda_range = [0.0, 0.3, 0.5, 0.75, 0.95,0.99, 1.0]
#generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
alpha = 0.99
print("calculating optimal policy for alpha = {} over lambda = {}".format(alpha, lambda_range))
cvar_rets = generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
cvar_rets_array = np.array(cvar_rets)
plt.figure()
plt.plot(cvar_rets_array[:,0], cvar_rets_array[:,1], '-o')
#go through and label the points in the figure with the corresponding lambda values
unique_pts_lambdas = []
unique_pts = []
for i,pt in enumerate(cvar_rets_array):
unique = True
for upt in unique_pts:
if np.linalg.norm(upt - pt) < 0.00001:
unique = False
break
if unique:
unique_pts_lambdas.append((pt[0], pt[1], lambda_range[i]))
unique_pts.append(np.array(pt))
#calculate offset
offsetx = (np.max(cvar_rets_array[:,0]) - np.min(cvar_rets_array[:,0]))/30
offsety = (np.max(cvar_rets_array[:,1]) - np.min(cvar_rets_array[:,1]))/17
for i,pt in enumerate(unique_pts_lambdas):
if i in [0,1,2,4]:
plt.text(pt[0] - 6.2*offsetx, pt[1] , r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
elif i in [3]:
plt.text(pt[0] - 6.2*offsetx, pt[1] - 1.2*offsety , r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
elif i in [5]:
plt.text(pt[0] - 5.5*offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
else:
plt.text(pt[0]-offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("Robustness (CVaR)", fontsize=20)
plt.ylabel("Expected Return", fontsize=20)
plt.tight_layout()
plt.savefig('./figs/machine_replacement/efficient_frontier_machine_replacement.png')
plt.show()
| 2.625 | 3 |
example_code/python_scripts/get_most_recent_sentiment_by_symbol.py | khmurakami/pystocktwits_data_utils | 4 | 12799357 | <reponame>khmurakami/pystocktwits_data_utils
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pystocktwits_data_utils import PyStockTwitData
from pystocktwits_data_utils.utils import return_json_file
data = PyStockTwitData()
recent_msg = data.get_most_recent_sentiment_by_symbol_id('AAPL')
print(recent_msg)
return_json_file(recent_msg,
"../sample_json_output/get_most_recent_sentiment_by_symbol.json")
| 1.882813 | 2 |
accumulators/decorator.py | icecrime/Accumulators | 1 | 12799358 | <gh_stars>1-10
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator module provides the @accumulator decorator, which offers a
straightforward way of implementing trivial accumulators through a simple
accumulation function.
For example:
>>> @Accumulator.immediate() # Don't forget the parentheses here!
... def count(accumulator_set, value, datum):
... return value + datum
...
The decorator takes care of the boilerplate code required to produce an object
which implements AccumulatorBase.
"""
import functools
from accumulators.accumulator_base import AccumulatorBase
class ImmediateAccuFromFunc(AccumulatorBase):
"""The ImmediateAccumulatorWrapper takes a naked function and turns it into
something which implements AccumulatorBase.
"""
def __init__(self, accumulator_set, starting_value, fn):
super(ImmediateAccuFromFunc, self).__init__(accumulator_set, starting_value)
self.fn = fn
def __call__(self, *args, **kwargs):
# Dispatch stored attributes to the decorated function.
self.accu = self.fn(self.accumulator_set, self.accu, *args, **kwargs)
class LazyAccuFromFunc(AccumulatorBase):
"""The LazyAccumulatorWrapper takes a naked function and turns it into
something which implements AccumulatorBase.
"""
def __init__(self, accumulator_set, starting_value, fn):
super(LazyAccuFromFunc, self).__init__(accumulator_set, starting_value)
self.fn = fn
def __call__(self, *args, **kwargs):
pass # No immediate accumulation
def value(self):
# Dispatch stored attributes to the decorated function.
return self.fn(self.accumulator_set)
class Accumulator(object):
__slots__ = ()
@staticmethod
def immediate(depends_on=[], result_name=None, starting_value=0):
"""Immediate accumulator decorator.
Args:
depends_on: list of accumulators on which this value depends
result_name: name of the shortcut extractor function for the set
starting_value: starting value for the accumulator (defaults to 0)
"""
return Accumulator._make_wrapper(ImmediateAccuFromFunc, depends_on,
result_name, starting_value)
@staticmethod
def lazy(depends_on=[], result_name=None):
"""Lazy accumulator decorator.
Args:
depends_on: list of accumulators on which this value depends
result_name: name of the shortcut extractor function for the set
"""
return Accumulator._make_wrapper(LazyAccuFromFunc, depends_on,
result_name, starting_value=0)
@staticmethod
def _make_wrapper(accu_type, depends_on, result_name, starting_value):
def _wrapper(fn):
# The wrapped function acts as factory: it produces AccumulatorBase
# instances, and forwards decorator parameters to the constructor.
@functools.wraps(fn)
def wrapped(accumulator_set):
return accu_type(accumulator_set, starting_value, fn)
# We need the Accumulating object to declare its dependencies and
# its value identifier (if necessary). In this case, we attach this
# information to the wrapped function.
wrapped.depends_on = depends_on
wrapped.value_identifier = result_name
return wrapped
return _wrapper
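# Illustrative sketch of the lazy variant (the count/total accumulators and the
# accessor calls on accumulator_set are assumptions, not part of this module):
#
#     @Accumulator.lazy(depends_on=[count, total], result_name='mean')
#     def mean(accumulator_set):
#         return accumulator_set.total() / accumulator_set.count()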
| 2.84375 | 3 |
keerthilinepro.py | Alu0331/python | 0 | 12799359 | import time
import math


# `profile` is injected as a builtin by line_profiler's kernprof (e.g. `kernprof -l -v <script>`);
# running this file with plain python would raise a NameError here.
@profile
def primes(n):
    # Starting from 2, collect odd candidates up to sqrt(n) that divide n and share
    # no factor with the entries found so far; time how long the loop takes.
    start = time.time()
    prime1 = [2]
    sn = int(math.sqrt(n))
    for attempt in range(3, sn + 1, 2):
        if all((attempt % prime != 0 and n % attempt == 0) for prime in prime1):
            prime1.append(attempt)
    end = time.time()
    print(end - start)
    return prime1


n = primes(600851475143)
print(max(n))
| 3.640625 | 4 |
data.py | luyug/Condenser | 96 | 12799360 | # Copyright 2021 Condenser Author All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from dataclasses import dataclass
from typing import List, Dict
import torch
from torch.utils.data import Dataset
from transformers import DataCollatorForWholeWordMask
@dataclass
class CondenserCollator(DataCollatorForWholeWordMask):
max_seq_length: int = 512
def __post_init__(self):
super(CondenserCollator, self).__post_init__()
from transformers import BertTokenizer, BertTokenizerFast
from transformers import RobertaTokenizer, RobertaTokenizerFast
if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert
elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)):
self.whole_word_cand_indexes = self._whole_word_cand_indexes_roberta
else:
raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet')
self.specials = self.tokenizer.all_special_tokens
def _whole_word_cand_indexes_bert(self, input_tokens: List[str]):
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token in self.specials:
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
return cand_indexes
def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]):
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token in self.specials:
raise ValueError('We expect only raw input for roberta for current implementation')
if i == 0:
cand_indexes.append([0])
elif not token.startswith('\u0120'):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
return cand_indexes
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
cand_indexes = self.whole_word_cand_indexes(input_tokens)  # tokenizer-specific splitter chosen in __post_init__
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert len(covered_indexes) == len(masked_lms)
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def _truncate(self, example: List[int]):
tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False)
if len(example) <= tgt_len:
return example
trunc = len(example) - tgt_len
trunc_left = random.randint(0, trunc)
trunc_right = trunc - trunc_left
truncated = example[trunc_left:]
if trunc_right > 0:
truncated = truncated[:-trunc_right]
if not len(truncated) == tgt_len:
print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True)
raise ValueError
return truncated
def _pad(self, seq, val=0):
tgt_len = self.max_seq_length
assert len(seq) <= tgt_len
return seq + [val for _ in range(tgt_len - len(seq))]
def __call__(self, examples: List[Dict[str, List[int]]]):
encoded_examples = []
masks = []
mlm_masks = []
for e in examples:
e_trunc = self._truncate(e['text'])
tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc]
mlm_mask = self._whole_word_mask(tokens)
mlm_mask = self._pad([0] + mlm_mask)
mlm_masks.append(mlm_mask)
encoded = self.tokenizer.encode_plus(
self._truncate(e['text']),
add_special_tokens=True,
max_length=self.max_seq_length,
padding="max_length",
truncation=True,
return_token_type_ids=False,
)
masks.append(encoded['attention_mask'])
encoded_examples.append(encoded['input_ids'])
inputs, labels = self.mask_tokens(
torch.tensor(encoded_examples, dtype=torch.long),
torch.tensor(mlm_masks, dtype=torch.long)
)
batch = {
"input_ids": inputs,
"labels": labels,
"attention_mask": torch.tensor(masks),
}
return batch
@dataclass
class CoCondenserCollator(CondenserCollator):
def __call__(self, examples):
examples = sum(examples, [])
examples = [{'text': e} for e in examples]
return super(CoCondenserCollator, self).__call__(examples)
class CoCondenserDataset(Dataset):
def __init__(self, dataset, data_args):
self.dataset = dataset
self.data_args = data_args
def __len__(self):
return len(self.dataset)
def __getitem__(self, item):
spans = self.dataset[item]['spans']
return random.sample(spans, 2)
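# Illustrative usage of CondenserCollator (a sketch; the tokenizer checkpoint and the
# example text are assumptions, and behaviour depends on the installed transformers version):
#
#     from transformers import BertTokenizerFast
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     collator = CondenserCollator(tokenizer=tok, mlm_probability=0.15, max_seq_length=128)
#     batch = collator([{"text": tok.encode("an example sentence", add_special_tokens=False)}])
#     batch["input_ids"].shape  # -> torch.Size([1, 128])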
| 1.976563 | 2 |
{{cookiecutter.project_slug}}/src/tests/core/test_core_exceptions.py | abogoyavlensky/cookiecutter-django-api | 7 | 12799361 | from rest_framework.exceptions import APIException
from core.exceptions import common_exception_handler
def test_common_exception_handler_if_error_without_detail(mocker):
exp = APIException({'data': 'test'})
response = common_exception_handler(exp, mocker.Mock())
assert response.data['service_name'] == 'unittest.mock.Mock:'
assert response.data['error_name'] == 'APIException'
assert response.data['detail'] == {'data': 'test'}
def test_common_exception_handler_if_error_is_string(mocker):
exp = APIException(['testing error'])
response = common_exception_handler(exp, mocker.Mock())
assert response.data['service_name'] == 'unittest.mock.Mock:'
assert response.data['error_name'] == 'APIException'
assert response.data['detail'] == ['testing error']
| 2.671875 | 3 |
tests/service/test_account.py | dyens/sdk-python | 0 | 12799362 | <filename>tests/service/test_account.py
from dynaconf import settings
class TestAccount:
"""Test Account."""
def test_account_list(self, api):
"""Test account list."""
accounts = api.Account.list().all()
assert list(accounts)
def test_account(self, account):
"""Test account list."""
assert account.account.name == settings.TEST_ACCOUNT_NAME
| 2.53125 | 3 |
venv/lib/python3.8/site-packages/typing_extensions.py | GiulianaPola/select_repeats | 1 | 12799363 | /home/runner/.cache/pip/pool/31/7e/c3/6f40f37bb639fd5ec71c56a301b7fc20fbafc36652d3ba3fb1fa41384f | 0.800781 | 1 |
emmet/abbreviation/tokenizer/tokens.py | jingyuexing/py-emmet | 29 | 12799364 | class Token:
__slots__ = ('start', 'end')
def __init__(self, start: int=None, end: int=None):
self.start = start
self.end = end
@property
def type(self):
"Type of current token"
return self.__class__.__name__
def to_json(self):
return dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and k != 'to_json'])
class Repeater(Token):
__slots__ = ('count', 'value', 'implicit')
def __init__(self, count: int, value: int, implicit: bool=False, *args):
super(Repeater, self).__init__(*args)
self.count = count
self.value = value
self.implicit = implicit
class RepeaterNumber(Token):
__slots__ = ('size', 'reverse', 'base', 'parent')
def __init__(self, size: int, reverse: bool, base: int=0, parent: int=0, *args):
super(RepeaterNumber, self).__init__(*args)
self.size = size
self.reverse = reverse
self.base = base
self.parent = parent
class RepeaterPlaceholder(Token):
__slots__ = ('value',)
def __init__(self, value: str=None, *args):
super(RepeaterPlaceholder, self).__init__(*args)
self.value = value
class Field(Token):
__slots__ = ('name', 'index')
def __init__(self, name: str, index: int=None, *args):
super(Field, self).__init__(*args)
self.index = index
self.name = name
class Operator(Token):
__slots__ = ('operator',)
def __init__(self, operator: str, *args):
super(Operator, self).__init__(*args)
self.operator = operator
class Bracket(Token):
__slots__ = ('open', 'context')
def __init__(self, is_open: bool, context: str, *args):
super(Bracket, self).__init__(*args)
self.open = is_open
self.context = context
class Quote(Token):
__slots__ = ('single', )
def __init__(self, single: bool, *args):
super(Quote, self).__init__(*args)
self.single = single
class Literal(Token):
__slots__ = ('value',)
def __init__(self, value: str, *args):
super(Literal, self).__init__(*args)
self.value = value
class WhiteSpace(Token): pass
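# Illustrative sketch (the token text and positions are made up):
#     op = Operator('+', 3, 4)
#     op.to_json()  # -> {'end': 4, 'operator': '+', 'start': 3, 'type': 'Operator'}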
| 2.734375 | 3 |
__init__.py | ScottHull/fEquilibrium | 0 | 12799365 | from radioactivity import *
from thermodynamics import *
from dynamics import *
from box import *
from stats import *
from meta import *
| 1.070313 | 1 |
ENTRY_MODULE/FirstStepsInCoding/LAB/09_Yard_Greening.py | sleepychild/ProgramingBasicsPython | 0 | 12799366 | <gh_stars>0
price = float(input()) * 7.61
discount = price * 0.18
final = price - discount
print(f'The final price is: {final} lv.\nThe discount is: {discount} lv.')
| 3.4375 | 3 |
ui/gui.py | AivGitHub/clutcher | 0 | 12799367 | <filename>ui/gui.py<gh_stars>0
from concurrent.futures import ThreadPoolExecutor
import pathlib
from PyQt5.QtWidgets import QMainWindow, QMessageBox, QFileDialog
import threading
from clutcher import settings
from torrent.structure.torrent import Torrent
from ui.generated import Ui_MainFrame
class MainFrame(QMainWindow, Ui_MainFrame):
def __init__(self, parent=None, **kwargs):
super().__init__(parent)
self.setupUi(self)
# Triggers
self.action_Add_Files.triggered.connect(self.add_files)
self.action_Exit.triggered.connect(self.close)
def retranslateUi(self, MainFrame):
super().retranslateUi(MainFrame)
MainFrame.setWindowTitle(settings.NAME.capitalize())
def closeEvent(self, event) -> None:
super().closeEvent(event)
# TODO: Should be not self.tr, but translate because must be an event!
reply = QMessageBox.question(self, 'Message', self.tr('Are you sure to quit?'), QMessageBox.Yes,
QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def process(self, torrent: Torrent) -> None:
# self.save_to_database(torrent)
print(f'Task Executed {threading.current_thread()}')
def add_files(self) -> None:
files = QFileDialog.getOpenFileNames(self, 'Open a file', '', 'All Files (*.*)')
files = files[0]
broken_files = []
torrents = []
for file in files:
try:
torrents.append(Torrent(file))
except Exception as e:
broken_files.append(pathlib.Path(file).name)
pass
if broken_files:
self.show_message(f'Errors for files: {", ".join(broken_files)} have occurred.', _type='error')
with ThreadPoolExecutor() as executor:
running_tasks = [executor.submit(self.process, torrent) for torrent in torrents]
for running_task in running_tasks:
running_task.result()
def show_message(self, message: str, _type: str = 'message', text_color: str = None) -> None:
if text_color:
self.statusBar().setStyleSheet(f'color : {text_color}')
self.statusbar.showMessage(message)
return None
if _type == 'error':
text_color = 'red'
elif _type == 'message':
text_color = 'black'
else:
text_color = 'black'
self.statusBar().setStyleSheet(f'color : {text_color}')
self.statusbar.showMessage(message)
| 2.28125 | 2 |
h/h_api/bulk_api/model/data_body.py | kevinjalbert/h | 0 | 12799368 | """Models representing the data modifying payloads."""
from h.h_api.enums import DataType
from h.h_api.model.json_api import JSONAPIData
from h.h_api.schema import Schema
class UpsertBody(JSONAPIData):
data_type = None
query_fields = []
@classmethod
def create(cls, attributes, id_reference):
query = {field: attributes.pop(field, None) for field in cls.query_fields}
return super().create(
data_type=cls.data_type,
attributes=attributes,
meta={"query": query},
id_reference=id_reference,
)
@property
def query(self):
"""The query used to select which item to update."""
return self.meta["query"]
class UpsertUser(UpsertBody):
"""The data to upsert a user."""
validator = Schema.get_validator("bulk_api/command/upsert_user.json")
data_type = DataType.USER
query_fields = ["authority", "username"]
class UpsertGroup(UpsertBody):
"""The data to upsert a group."""
validator = Schema.get_validator("bulk_api/command/upsert_group.json")
data_type = DataType.GROUP
query_fields = ["authority", "authority_provided_id"]
class CreateGroupMembership(JSONAPIData):
"""The data to add a user to a group."""
validator = Schema.get_validator("bulk_api/command/create_group_membership.json")
@classmethod
def create(cls, user_ref, group_ref):
"""
Create a create group membership body for adding users to groups.
:param user_ref: Custom user reference
:param group_ref: Custom group reference
:return:
"""
return super().create(
DataType.GROUP_MEMBERSHIP,
relationships={
"member": {
"data": {"type": DataType.USER.value, "id": {"$ref": user_ref}}
},
"group": {
"data": {"type": DataType.GROUP.value, "id": {"$ref": group_ref}}
},
},
)
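# Illustrative usage (a sketch; the reference strings are arbitrary placeholders):
#     body = CreateGroupMembership.create(user_ref="acct.hypatia", group_ref="group.philosophy")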
@property
def member(self):
"""The user which is a member of this group.
:return: A value object with `id` and `ref` properties.
"""
return _IdRef(self.relationships["member"]["data"]["id"])
@property
def group(self):
"""The group which this user is a member of.
:return: A value object with `id` and `ref` properties.
"""
return _IdRef(self.relationships["group"]["data"]["id"])
class _IdRef:
"""A value object which represents an id reference or concrete id."""
def __init__(self, value):
if isinstance(value, dict):
self.id, self.ref = None, value.get("$ref")
else:
self.id, self.ref = value, None
| 2.640625 | 3 |
tests/test_app.py | Olive-Wangui/Pitchy | 0 | 12799369 | <gh_stars>0
import unittest
from app.models import User, Post, Comment
class PitchTest(unittest.TestCase):
def setUp(self):
self.new_user = User(username='Olly', email='<EMAIL>', password='<PASSWORD>')
self.new_post = Post()
self.new_comment = Comment()
def test_user_instance(self):
pass
def test_post_instance(self):
pass
def test_comment_instance(self):
pass
if __name__ == '__main__':
unittest.main()
| 2.65625 | 3 |
GArDen/compose/__init__.py | zaidurrehman/EDeN | 0 | 12799370 | #!/usr/bin/env python
"""Provides ways to join distinct graphs."""
from GArDen.transform.contraction import Minor
from sklearn.base import BaseEstimator, TransformerMixin
import networkx as nx
import logging
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Flatten(BaseEstimator, TransformerMixin):
"""DisjointUnion."""
def __init__(self):
"""Construct."""
pass
def transform(self, graphs_list):
"""transform."""
try:
for graphs in graphs_list:
for graph in graphs:
yield graph
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
# ------------------------------------------------------------------------------
class DisjointUnion(BaseEstimator, TransformerMixin):
"""DisjointUnion."""
def __init__(self):
"""Construct."""
pass
def transform(self, graphs_list):
"""transform."""
try:
for graphs in graphs_list:
transformed_graph = self._disjoint_union(graphs)
yield transformed_graph
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _disjoint_union(self, graphs):
# make the disjoint union of all graphs
graph_global = nx.Graph()
for graph in graphs:
graph_global = nx.disjoint_union(graph_global, graph)
return graph_global
# ------------------------------------------------------------------------------
class Union(BaseEstimator, TransformerMixin):
"""Union."""
def __init__(self, attribute='position'):
"""Construct."""
self.attribute = attribute
def transform(self, graphs_list):
"""transform."""
try:
minor = Minor()
graphs = self._union_list(graphs_list)
return minor.transform(graphs)
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _union_list(self, graphs_list):
for graphs in graphs_list:
transformed_graph = self._union(graphs)
yield transformed_graph
def _union(self, graphs):
graph_global = nx.Graph()
for graph in graphs:
graph_global = nx.disjoint_union(graph_global, graph)
for n in graph_global.nodes():
if self.attribute in graph_global.node[n]:
graph_global.node[n]['part_id'] = \
[graph_global.node[n][self.attribute]]
graph_global.node[n]['part_name'] = \
[graph_global.node[n]['label']]
return graph_global
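# Illustrative usage of DisjointUnion (a minimal sketch; the example graphs are made up):
#     g1, g2 = nx.path_graph(3), nx.path_graph(2)
#     merged = next(DisjointUnion().transform([[g1, g2]]))
#     merged.number_of_nodes()  # -> 5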
| 2.796875 | 3 |
python/showwhy-backend/showwhy_backend/InferenceJoinResultActivity/__init__.py | microsoft/showwhy | 18 | 12799371 | <filename>python/showwhy-backend/showwhy_backend/InferenceJoinResultActivity/__init__.py
#
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project.
#
import uuid
from typing import Dict
from showwhy_inference.inference import join_results
from shared_code.io.storage import get_storage_client
storage = get_storage_client()
def main(body: Dict):
session_id = body["session_id"]
context = storage.read_context(session_id)
result_name = body["result"]
results = body["results"]
results_df = join_results(results)
file_name = storage.write_output(
session_id, str(uuid.uuid4()), results_df, file_type="partial", extension="csv"
)
context[result_name] = results_df
storage.write_context(context)
return {"output": file_name}
| 2.109375 | 2 |
SentimentAnalysis/creat_data/tencent.py | renjunxiang/Sentiment-analysis | 30 | 12799372 | from SentimentAnalysis.creat_data.config import tencent
import pandas as pd
import numpy as np
import requests
import json
import time
import random
import hashlib
from urllib import parse
from collections import OrderedDict
AppID = tencent['account']['id_1']['APP_ID']
AppKey = tencent['account']['id_1']['AppKey']
def cal_sign(params_raw,AppKey=AppKey):
# The official docs give a PHP example; this is the Python equivalent.
# params_raw = {'app_id': '10000',
# 'time_stamp': '1493449657',
# 'nonce_str': '20e3408a79',
# 'key1': '腾讯AI开放平台',
# 'key2': '示例仅供参考',
# 'sign': ''}
# AppKey = '<KEY>'
# cal_sign(params_raw=params_raw,
# AppKey=AppKey)
# 返回:BE918C28827E0783D1E5F8E6D7C37A61
params = OrderedDict()
for i in sorted(params_raw):
if params_raw[i] != '':
params[i] = params_raw[i]
newurl = parse.urlencode(params)
newurl += ('&app_key=' + AppKey)
sign = hashlib.md5(newurl.encode("latin1")).hexdigest().upper()
return sign
def creat_label(texts,
AppID=AppID,
AppKey=AppKey):
'''
:param texts: list of documents to be labelled
:param AppID: Tencent AI account info; defaults to id_1 from the config file
:param AppKey: Tencent AI account info; defaults to id_1 from the config file
:return: list of labelled results, each with the original text, label, confidence level, and success status
'''
url = tencent['api']['nlp_textpolar']['url']
results = []
# call the API for each text, one sentence at a time
count_i = 0
for one_text in texts:
params = {'app_id': AppID,
'time_stamp': int(time.time()),
'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]),
'sign': '',
'text': one_text}
params['sign'] = cal_sign(params_raw=params,
AppKey=AppKey) # 获取sign
r = requests.post(url=url,
params=params) # 获取分析结果
result = json.loads(r.text)
# print(result)
results.append([one_text,
result['data']['polar'],
result['data']['confd'],
result['ret'],
result['msg']
])
r.close()
count_i += 1
if count_i % 50 == 0:
print('tencent finish:%d' % (count_i))
return results
if __name__ == '__main__':
results = creat_label(texts=['价格便宜啦,比原来优惠多了',
'壁挂效果差,果然一分价钱一分货',
'东西一般般,诶呀',
'讨厌你',
'一般'])
results = pd.DataFrame(results, columns=['evaluation',
'label',
'confidence',
'ret',
'msg'])
results['label'] = np.where(results['label'] == 1, '正面',
np.where(results['label'] == 0, '中性', '负面'))
print(results)
| 2.359375 | 2 |
testYOLOv3.py | SuicideMonkey/Object-Detection-API-Tensorflow | 303 | 12799373 | <filename>testYOLOv3.py
import tensorflow as tf
import numpy as np
import os
import utils.tfrecord_voc_utils as voc_utils
import YOLOv3 as yolov3
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from skimage import io, transform
from utils.voc_classname_encoder import classname_to_ids
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
lr = 0.001
batch_size = 12
buffer_size = 256
epochs = 160
reduce_lr_epoch = []
config = {
'mode': 'train', # 'train', 'test'
'data_shape': [448, 448, 3],
'num_classes': 20,
'weight_decay': 5e-4,
'keep_prob': 0.5, # not used
'data_format': 'channels_last', # 'channels_last' 'channels_first'
'batch_size': batch_size,
'coord_scale': 1,
'noobj_scale': 1,
'obj_scale': 5.,
'class_scale': 1.,
'num_priors': 3,
'nms_score_threshold': 0.5,
'nms_max_boxes': 10,
'nms_iou_threshold': 0.5,
'priors': [[[10., 13.], [16, 30.], [33., 23.]],
[[30., 61.], [62., 45.], [59., 119.]],
[[116., 90.], [156., 198.], [373.,326.]]]
}
image_augmentor_config = {
'data_format': 'channels_last',
'output_shape': [448, 448],
# 'zoom_size': [520, 520],
# 'crop_method': 'random',
'flip_prob': [0., 0.5],
'fill_mode': 'BILINEAR',
'keep_aspect_ratios': False,
'constant_values': 0.,
# 'color_jitter_prob': 0.5,
# 'rotate': [0.5, -10., 10.],
'pad_truth_to': 60,
}
data = os.listdir('./voc2007/')
data = [os.path.join('./voc2007/', name) for name in data]
train_gen = voc_utils.get_generator(data,
batch_size, buffer_size, image_augmentor_config)
trainset_provider = {
'data_shape': [448, 448, 3],
'num_train': 5011,
'num_val': 0, # not used
'train_generator': train_gen,
'val_generator': None # not used
}
testnet = yolov3.YOLOv3(config, trainset_provider)
testnet.load_weight('./weight/test-40449')
for i in range(epochs):
print('-'*25, 'epoch', i, '-'*25)
if i in reduce_lr_epoch:
lr = lr/10.
print('reduce lr, lr=', lr, 'now')
mean_loss = testnet.train_one_epoch(lr)
print('>> mean loss', mean_loss)
testnet.save_weight('latest', './weight/test') # 'latest', 'best'
# img = io.imread()
# img = transform.resize(img, [448,448])
# img = np.expand_dims(img, 0)
# result = testnet.test_one_image(img)
# id_to_clasname = {k:v for (v,k) in classname_to_ids.items()}
# scores = result[0]
# bbox = result[1]
# class_id = result[2]
# print(scores, bbox, class_id)
# plt.figure(1)
# plt.imshow(np.squeeze(img))
# axis = plt.gca()
# for i in range(len(scores)):
# rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none')
# axis.add_patch(rect)
# plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red', fontsize=12)
# plt.show()
| 1.882813 | 2 |
shape_recognition/libraries/braile_recognition/braille.py | ys1998/tactile-shape-recognition | 0 | 12799374 | # -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: <NAME>, PhD
# Contact:
#-------------------------------------------------------------------------------
# Description: defines classes for processing tactile data to be used for
# braille recognition.
# The 'Braille' class stores the SVM model used to recognize braille characters.
# this class abstracts the process of data processing, meaning that it only deals
# with the data ready for training and/or classification procedures.
# For handling data, the class 'BrailleHandler' should be used instead
#-------------------------------------------------------------------------------
'''
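# Illustrative end-to-end sketch (the file names and labels are assumptions):
#
#     files = ['NewData_BRC/BRC_A1.txt', 'NewData_BRC/BRC_B1.txt']
#     X = BrailleHandler.createTrainingData(files, nrows=4, ncols=4, filt=True)
#     clf = Braille()
#     clf.train(X, labels=['A', 'B'])
#     clf.save('braille_svm')                 # writes braille_svm.pkl
#     clf.classify(X[0].reshape(1, -1))       # -> e.g. ['A']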
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#LIBRARIES
import os, os.path, sys
sys.path.append('../general')
import numpy as np
import scipy as sp
from sklearn.svm import SVC
from sklearn.externals import joblib
from dataprocessing import * #import the detect_peaks method
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Feature extraction for SVM-based braille classification
class BrailleHandler():
#---------------------------------------------------------------------------
#read a file and return the data
def loadFile(filepath):
if os.path.isfile(filepath):
#return the data contained in the data
return np.loadtxt(filepath)
else:
return False #file not found
def convert2vector(data):
return np.transpose(data)
#convert the data from a file into a vector
def oldconvert2vector(data,nrows,ncols):
#first convert to 3D matrix
datamat = BrailleHandler.oldconvert2frames(data,nrows,ncols)
numsamples = np.size(datamat,2) #number of samples or frames
dataVector = np.zeros((nrows*ncols,numsamples))
taxelCounter = 0
for i in range(nrows):
for j in range(ncols):
dataVector[taxelCounter] = datamat[i,j,:]
taxelCounter+=1
return dataVector #return the dataVector
#convert data from the file that are arranged
#in a 2D array (every line contains reading from all rows for one column)
#into a 3D array (row,col,frame)
def oldconvert2frames(data,nrows,ncols):
datamat = np.zeros((nrows,ncols,np.int(np.floor(np.divide(np.size(data,0),nrows)))),dtype=int)
c = 0
for ii in range(0,(np.size(data,0)-nrows),nrows):
datamat[:,:,c] = data[ii:ii+nrows,:]
c = c+1
return datamat #return the 3D matrix
#---------------------------------------------------------------------------
#find the number of peaks in every single taxel
def countPeaks(inputMatrix,threshold):
if len(inputMatrix.shape) == 3: #3D matrix
nrows = inputMatrix.shape[0] #number of rows
ncols = inputMatrix.shape[1] #number of columns
nsamples = inputMatrix.shape[2] #number of samples
#feature vector containing the number of peaks for
#each taxel of the tactile sensor
featureVector = np.zeros(nrows*ncols)
#matrix M*NxT where each row corresponds to a taxel and the
#columns to the time series signal
tactileSignal = np.zeros((nrows*ncols,nsamples))
#counter for the index of the tactileSignal matrix
counter = 0
#loop through the rows
for k in range(nrows):
#loop through the columns
for w in range(ncols):
#get a single taxel signal
tactileSignal[counter] = inputMatrix[k,w,:]
#count the number of peaks in the signal
#and built the feature vector
#find the peaks
tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False)
#number of peaks is the length of 'tmppeaks'
featureVector[counter] = len(tmppeaks)
#increment the counter
counter+=1
#list of list, every element of the list corresponds to
#the time series of a single taxel
else:
#find the total number of taxels in the tactile array
numberTaxels = len(inputMatrix)
#feature vector containing the number of peaks for
#each taxel of the tactile sensor
featureVector = np.zeros(numberTaxels)
#scan all the taxels
for k in range(numberTaxels):
#find the peaks
tmppeaks = detect_peaks(inputMatrix[k],mph=threshold,mpd=20,show=False)
#number of peaks is the length of 'tmppeaks'
featureVector[k] = len(tmppeaks)
#return the feature vector
return featureVector
#-------------------------------------------------------------------------------
#create the training data based on the list of the text files to be loaded
#and the labels corresponding for each text data
def createTrainingData(dataFiles,nrows,ncols,filt=False):
for k in range(len(dataFiles)):
#get the filename
filename = dataFiles[k]
#load the data
datafile = BrailleHandler.loadFile(filename)
#convert to vector
#datavector = BrailleHandler.oldconvert2vector(datafile,nrows,ncols)
datavector = BrailleHandler.convert2vector(datafile)
#if data should be filtered
if filt == True:
#for every taxel
for i in range(np.size(datavector,0)):
mva = MovingAverage() #window size = 10, sampfreq = 100 Hz
#for every sample, get the moving average response
for z in range(np.size(datavector,1)):
datavector[i,z] = mva.getSample(datavector[i,z])
#find the number of peaks
peakTh = 0.05 #threshold for peak detection
#create the feature vector
featurevector = BrailleHandler.countPeaks(datavector,peakTh)
#if it is the first iteration, create the training data
if k != 0:
trainingData = np.vstack((trainingData,featurevector))
else:
trainingData = featurevector
return trainingData
#-------------------------------------------------------------------------------
#Braille Recognition Class
class Braille():
def __init__(self):
#labels for every class
#dictionary to associate label names and values
self.classes = dict()
#SVM model
self.modelSVM = None
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#load a pre-trained SVM model from a file
def load(self,filepath):
#checks if the file exists
if os.path.isfile(filepath):
self.modelSVM = joblib.load(filepath) #loads the SVM model
return True #load ok
else:
return False #file not found
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#save a new SVM model
def save(self,filename):
#saving
joblib.dump(self.modelSVM,filename+'.pkl')
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#train a SVM model
def train(self,trainingData,labels):
#create a new SVM model
self.modelSVM = SVC()
#pass the training data and the labels for training
self.modelSVM.fit(trainingData,labels)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#classification
#features should be a feature vector following the same pattern
#that was used for training
def classify(self,features):
#check if there is a SVM model to classify the data
if self.modelSVM is not None:
#classify based on the input features
svmResp = self.modelSVM.predict(features)
#return the output of the classifier
return svmResp
else:
return False
#---------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__=='__main__':
#---------------------------------------------------------------------------
import numpy as np #numpy
import matplotlib.pyplot as plt #matplotlib
NROWS = 4 #number of columns in the tactile array
NCOLS = 4 #number of lines in the tactile array
peakTh = 300 #threshold for detecting peaks
#load the braille data from file
#2D matrix
datafile = np.loadtxt('NewData_BRC/BRC_B1.txt')
#convert data to a 3D matrix
tactileData = BrailleHandler.oldconvert2frames(datafile,NROWS,NCOLS)
#feature vector containing the number of peaks for each taxel
features = BrailleHandler.countPeaks(tactileData,peakTh)
#---------------------------------------------------------------------------
#feature extraction with 2D array
#moving average of the 2D matrix
#create a moving average object
#default parameters, windowsize = 10, sampfreq = 100 Hz
mva = MovingAverage()
tactileVector = BrailleHandler.oldconvert2vector(datafile,NROWS,NCOLS)
numsamples = np.size(tactileData,2) #total number of samples
tactileMVA = np.zeros((NROWS*NCOLS,numsamples))
counter = 0 #taxel counter
for k in range(NROWS*NCOLS): #scan all the columns
for z in range(numsamples): #filtering the signal sample by sample
tactileMVA[counter,z] = mva.getSample(tactileVector[k,z])
counter+=1 #increment the taxel counter
#with the filtered data, count peaks again
filtFeatures = BrailleHandler.countPeaks(tactileMVA,peakTh)
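    #---------------------------------------------------------------------------
    #quick visual check (illustrative addition, not part of the original script):
    #compare the raw and moving-average-filtered signal of the first taxel;
    #assumes an interactive matplotlib backend is available
    plt.figure()
    plt.plot(tactileVector[0], label='taxel 0 raw')
    plt.plot(tactileMVA[0], label='taxel 0 filtered')
    plt.legend()
    plt.show()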
#print the filtered feature vector
print(filtFeatures) | 1.773438 | 2 |
plot_property.py | NogaBar/open_lth | 0 | 12799375 | <reponame>NogaBar/open_lth
import argparse
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import os
import json
import numpy as np
from platforms.platform import get_platform
import seaborn as sns
def main(args):
    property = {}
rand_property = {}
sparsity = []
for dir in os.listdir(args.dir): # level directories
if not 'level' in dir:
continue
with open(os.path.join(args.dir, dir, 'main', 'sparsity_report.json'), 'rb') as f_sparse:
sparse_dict = json.load(f_sparse)
sparsity.append((sparse_dict['unpruned'] / sparse_dict['total']) * 100)
for subdir in os.listdir(os.path.join(args.dir, dir)): # find the right directory in level
if 'properties' in subdir:
path = os.path.join(args.dir, dir, subdir, f'properties_{args.property}.log')
if args.property in path and args.sub_dir in path:
with open(path, 'rb') as f:
dict = json.load(f)
property[int(dir.split('_')[1])] = dict['lth']
rand_property[int(dir.split('_')[1])] = dict['random']
sparsity = np.array(sparsity)
keys = np.array(list(rand_property.keys()))
id = np.argsort(np.array(list(rand_property.keys())))
colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime']
generator = rand_property[0].keys() if 'weight' in args.property else range(len(rand_property[0]))
layers = [range(args.layers[0])] if len(args.layers) == 1 else args.layers
for i, layer in enumerate(generator):
# if args.layers > 0 and i >= args.layers:
# break
# elif args.layers < 0 and i < len(generator) + args.layers:
# continue
if (i + 1) not in layers:
continue
prop_l = np.array([v[layer] for k, v in property.items()])
prop_rand_l = np.array([v[layer] for k, v in rand_property.items()])
plt.plot(sparsity[id], prop_l[id], 'o', label=f'lth layer{i+1}', color=colors_list[i])
plt.plot(sparsity[id], prop_rand_l[id], 'x', label=f'random layer{i+1}', color=colors_list[i])
plt.yscale('log')
plt.legend()
# save figure
save_dir = os.path.join(args.dir, 'plots')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(os.path.join(save_dir, f'property_{args.property}_{args.sub_dir}.pdf'))
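# Example invocation (the paths and values below are hypothetical, for illustration only):
#   python plot_property.py --dir <experiment_dir> --property weight --sub_dir properties_0 --layers 1 2 3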
if __name__ == '__main__':
    # Argument Parser
parser = argparse.ArgumentParser()
parser.add_argument("--dir", type=str, help="Directory")
parser.add_argument("--property", type=str, help="property")
parser.add_argument("--sub_dir", type=str, help="property directory number")
parser.add_argument("--layers", nargs='+', type=int, help="number of layers to plot")
args = parser.parse_args()
main(args) | 2.3125 | 2 |
gradient_boosting_classifier.py | rikudoayush/Tkinter-Gui-And-ML- | 1 | 12799376 | from algorithm import Algorithm
from tkinter import *
from tkinter import ttk
class Gradient_Boosting_Classifier(Algorithm):
def __init__(self, frame):
self.frame = frame
self.name = "Gradient Boosting Classifier"
#Options for the loss criteria.
self.Loss_Label = ttk.Label(frame, text="Loss Function:")
self.Loss = StringVar()
self.Loss.set('deviance')
self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance')
self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential')
#Options for the learning rate.
self.LearningRate_Label = ttk.Label(frame, text="Learning Rate:")
self.LearningRate = StringVar()
self.LearningRate.set('0.1')
self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5)
#Options for the number of boosting stages.
self.Estimators_Label = ttk.Label(frame, text='# of Stages:')
self.Estimators = StringVar()
self.Estimators.set('100')
self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7)
#Options for the max depth
self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:')
self.MaxDepth = StringVar()
self.MaxDepth.set('0')
self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7)
#Options for the minimum number of samples before an internal node is split.
self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:')
self.MinSamplesSplit = StringVar()
self.MinSamplesSplit.set('2')
self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7)
#Options for the minimum number of leaf nodes
self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:')
self.MinSamplesLeaf = StringVar()
self.MinSamplesLeaf.set('1')
self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7)
#Options for the minimum fraction of leaf nodes
self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:')
self.MinFractionLeaf = StringVar()
self.MinFractionLeaf.set('0.0')
self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7)
#Options for batch size
self.Subsample_Label = ttk.Label(frame, text='Batch Size:')
self.Subsample = StringVar()
self.Subsample.set('1.0')
self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5)
#Options for max features.
self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:')
self.MaxFeatures = StringVar()
self.MaxFeatures.set('none')
self.MaxFeatures_Integer = StringVar()
self.MaxFeatures_Float = StringVar()
self.MaxFeatures_Float.set('0.1')
self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none')
self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer')
self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7)
self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float')
self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01)
self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto')
self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2')
#Options for the max # of leaf nodes
self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:')
self.MaxLeafNodes = StringVar()
self.MaxLeafNodes.set('none')
self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none')
self.MaxLeafNodes_Integer = StringVar()
self.MaxLeafNodes_Integer.set('0')
self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer')
self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7)
#Options for verbosity
self.Verbose_Label = ttk.Label(frame, text='Verbose Level:')
self.Verbose = StringVar()
self.Verbose.set('0')
self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7)
    def Display_Options(self): #Display options for the Gradient Boosting Classifier.
self.clear_frame(self.frame)
#Insert the options into the frame.
self.Loss_Label.grid(column=0,row=0, sticky=(W))
self.Loss_Deviance.grid(column=1, row=0, sticky=(W))
self.Loss_Exponential.grid(column=2, row=0, sticky=(W))
self.LearningRate_Label.grid(column=0, row=1, sticky=(W))
self.LearningRate_Box.grid(column=1, row=1, sticky=(W))
self.Estimators_Label.grid(column=0, row=2, sticky=(W))
self.Estimators_Box.grid(column=1, row=2, sticky=(W))
self.MaxDepth_Label.grid(column=0, row=3, sticky=(W))
self.MaxDepth_Box.grid(column=1, row=3, sticky=(W))
self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W))
self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W))
self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W))
self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W))
self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W))
self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W))
self.Subsample_Label.grid(column=0, row=7, sticky=(W))
self.Subsample_Box.grid(column=1, row=7, sticky=(W))
self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W))
self.MaxFeatures_None.grid(column=0, row=9, sticky=(W))
self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W))
self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W))
self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W))
self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W))
self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W))
        self.MaxFeatures_Log2.grid(column=1, row=12, sticky=(W)) #placed next to 'Auto' so the two radio buttons do not overlap
self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W))
self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W))
self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W))
self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W))
self.Verbose_Label.grid(column=0, row=16, sticky=(W))
self.Verbose_Box.grid(column=1, row=16, sticky=(W))
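    # Hedged usage sketch (the surrounding Tk objects are assumptions, not defined in this file):
    #   frame = ttk.Frame(root)
    #   gbc = Gradient_Boosting_Classifier(frame)
    #   gbc.Display_Options()                              # render the option widgets
    #   loss, lr = gbc.Loss.get(), float(gbc.LearningRate.get())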
| 3.546875 | 4 |
deepy/layers/word_embed.py | uaca/deepy | 260 | 12799377 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
from deepy.layers import NeuralLayer
class WordEmbedding(NeuralLayer):
"""
Word embedding layer.
    The word embeddings are randomly initialized and are learned over time.
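
    A hedged usage sketch (the sizes below are illustrative, not taken from this module):

        embed = WordEmbedding(size=100, vocab_size=10000, zero_index=0)
        embedded = embed.compute_tensor(word_index_tensor)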
"""
def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None):
from deepy.core.neural_var import NeuralVariable
super(WordEmbedding, self).__init__("word_embed")
self.size = size
self.vocab_size = vocab_size
self.output_dim = size
self.zero_index = zero_index
self._mask = mask.tensor if type(mask) == NeuralVariable else mask
self._init = init
self._load_values = load_values
self.init(1)
def prepare(self):
if self._load_values is not None:
self.embed_matrix = theano.shared(self._load_values, name="embeddings")
else:
self.embed_matrix = self.create_weight(self.vocab_size, self.size, "embeddings", initializer=self._init)
self.register_parameters(self.embed_matrix)
def compute_tensor(self, x, mask=None):
mask = mask if mask else self._mask
if self.zero_index is not None:
mask = T.neq(x, self.zero_index)
# To avoid negative index
x = T.cast(x * mask, "int32")
if x.ndim == 1:
ret_tensor = self.embed_matrix[x]
else:
ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size])
if mask:
if x.ndim == 2:
ret_tensor *= mask[:, :, None]
elif x.ndim == 1:
ret_tensor *= mask[:, None]
return ret_tensor
| 2.609375 | 3 |
Part 5 - Association Rule Learning/Section 28 - Apriori/Apriori_Python/apriori.py | xavialex/Machine-Learning-Templates | 1 | 12799378 | <reponame>xavialex/Machine-Learning-Templates
# Apriori
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Data Preprocessing
dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None) # header = None is specified so the first row is read as data and not lost as column names
# Convert the dataset into a list of lists (one transaction per row)
transactions = []
for i in range(0, 7501):
transactions.append([str(dataset.values[i,j]) for j in range(0, 20)])
# Training Apriori on the dataset
from apyori import apriori
rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)
# min_support: the support of a product bought 3 times a day over a week: 3*7/7501
# min_confidence: a value that pairs well with the chosen min_support
# min_lift: chosen empirically; experiment to find good values for these parameters
# Visualising the results
results = list(rules)
myResults = [list(x) for x in results]
# The list is ordered from highest to lowest relevance (lift)
# The frozenset shows the products that are associated with each other
# Double-click through the nested lists for each value until reaching the innermost one
# The first numeric value is the confidence, 0.29 in the first case, so people who buy light cream have a 29 % chance of also buying chicken
# The second and last value is the lift. A value of 4.84 is high with respect to our lower bound (3), so it makes sense that this is the strongest association in the dataset
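
# Hedged post-processing sketch (assumes apyori's RelationRecord layout: items, support,
# ordered_statistics with items_base/items_add/confidence/lift) to print each rule readably
for rule in results:
    for stat in rule.ordered_statistics:
        print(list(stat.items_base), '->', list(stat.items_add),
              'support = %.4f' % rule.support,
              'confidence = %.2f' % stat.confidence,
              'lift = %.2f' % stat.lift)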
| 3.15625 | 3 |
2016-12-built-in-intents/parse_data.py | Carolyn95/NLU-data | 0 | 12799379 | import pdb
import json
import numpy as np
file = 'benchmark_data.json'
with open(file, 'r') as f:
json_data = json.load(f)
print(json_data.keys()) # ['domains', 'version']
domains = json_data['domains']
print('domain length', len(domains))
corr_data = []
for domain in domains:
temp = {}
temp['long_description'] = domain['description']
temp['short_description'] = domain['name']
intents = domain['intents']
print('intent length', len(intents))
for intent in intents:
temp['intent'] = intent['name']
queries = intent['queries']
print('query length', len(queries))
for query in queries:
temp['query'] = query['text']
      # append a copy: appending `temp` itself would store the same mutable dict
      # every time, so all records would end up equal to the last query processed
      corr_data.append(dict(temp))
print(len(corr_data))
corr_data = np.array(corr_data)
np.save('benchmark_data.npy', corr_data)
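
# Hedged sanity check (illustrative): reload the saved array and inspect the first record;
# allow_pickle=True is required because the array stores Python dicts
reloaded = np.load('benchmark_data.npy', allow_pickle=True)
print(reloaded[0])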
"""
(Pdb) json_data['domains'][3]['intents'][0].keys()
dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name'])
len(json_data['domains'][3]['intents'][0]['description'])
json_data['domains'][3]['intents'][0]['queries']
# length
(Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys()
dict_keys(['text', 'results_per_service'])
json_data['domains'][3]['intents'][0]['queries'][0]['text']
print(domains.keys()) # ['description', '@type', 'intents', 'name']
"Queries that are related to places (restaurants, shops, concert halls, etc), as well as to the user's location."
'Queries that are related to reservation.'
'Queries that are related to transit and navigation.'
'Queries that relate to weather.'
(Pdb) json_data['domains'][3]['name']
'weather'
(Pdb) json_data['domains'][2]['name']
'transit'
(Pdb) json_data['domains'][1]['name']
'reservation'
(Pdb) json_data['domains'][0]['name']
'places'
print(len(domains)) # 4
(Pdb) len(json_data['domains'][0]['intents'])
4
(Pdb) len(json_data['domains'][1]['intents'])
2
(Pdb) len(json_data['domains'][2]['intents'])
3
(Pdb) len(json_data['domains'][3]['intents'])
1
"""
| 2.59375 | 3 |
molmolpy/g_mmpbsa/g_mmpbsa_dask.py | hovo1990/molmolpy | 1 | 12799380 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# @file __init__.py
# @brief G_MMPBSA DASK PROJECT
# @author <NAME>
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------ -->
import itertools
import time
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
import multiprocessing
import mdtraj as md
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import folder_utils
import json
from molmolpy.utils import helper as hlp
# matplotlib.style.use('ggplot')
sns.set(style="darkgrid")
low_seed = 1
high_seed = 999999999
mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24'
class GMMPBSAObject(object):
"""
    Run GROMACS g_mmpbsa binding-energy calculations over a dask cluster.

    The loaded trajectory is sliced into parts, each part is shipped to a dask
    worker together with the tpr/mdp/index payloads, and the g_mmpbsa outputs
    (energy_MM, polar, apolar and the per-residue contribution files) are
    collected back into the run folder while a JSON state file keeps track of
    which parts have finished.

    Parameters
    ----------
    traj : str
        Trajectory file (.xtc) to analyse.
    topol : str
        Topology/structure file used to load the trajectory with MDTraj.
    tpr_file : str
        GROMACS run input file (.tpr).
    mdp_file : str
        Parameter file (.mdp) passed to g_mmpbsa.
    index_file : str
        GROMACS index file (.ndx) containing the receptor and ligand groups.
    first_index, second_index
        Receptor and ligand index groups passed to g_mmpbsa.
    molname, receptor_name : str, optional
        Names used for folder and file naming.
    folder_path : str, optional
        Folder in which the run folder is created.
    load_state_file : str, optional
        Previously saved JSON state file to resume from.

    Examples
    --------
>>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file, first_index, second_index, molname, receptor_name)
>>>
>>>
>>>
>>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
>>>
>>>
>>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
    >>> LasR_MOR_mmpbsa_calc.run_dask_gmmpbsa(client)
"""
def __init__(self,
traj, topol, tpr_file, mdp_file, index_file, first_index, second_index,
molname='Unknown',
receptor_name='Unknown',
folder_path='.',
job_name = 'Unknown',
load_state_file=None):
self.load_state_file = load_state_file
if load_state_file is not None:
self.load_state_data_json(self.load_state_file)
else:
print('G_MMPBSA Object has been created')
self.trajectory_file = traj
self.topology_file = topol
self.tpr_file = tpr_file
self.mdp_file = mdp_file
self.index_file = index_file
self.first_index = first_index
self.second_index = second_index
self.prep_g_mmpbsa_run = False
self.folder_exists = False
# Running vina,whether it's for exhaustiveness or traditional run
self.folder_path = folder_path
self.command_run_list = []
self.command_samples_run_list = []
self.molecule_name = molname
self.ligand_name = molname
self.receptor_name = receptor_name
self.run_type = 'g_mmpbsa'
self.state_data = {}
self.state_data_samples = {}
self.g_mmpbsa_run_finished = False
self.g_mmpbsa_sim_states = {'simStates': {}}
self.objects_loaded = False
self.g_mmpbsa_prepared = False
# This part needs clarification
self.prep_mdtraj_object()
# original data before transformation
# Add receptor name
def set_mgltools_path(self, path):
print('MGLTools path is set to ', path)
self.mgltools_utilities = path
def set_flexaid_path(self, path):
print('FlexAid path is set to ', path)
self.flexaid_path = path
def set_ledock_path(self, path):
print('LeDock path is set to ', path)
self.ledock_path = path
def prep_mdtraj_object(self):
'''
        Load the trajectory (xtc plus topology) with MDTraj and store its topology
        both as an MDTraj object and as a pandas dataframe.
:return:
'''
self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)
self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()
self.objects_loaded = True
def get_uber_g_mmpbsa_run_folder_name(self):
curr_folder = os.getcwd()
return curr_folder + os.sep + self.run_folder_name
def prepare_g_mmpbsa_dask_protocol(self, dask_client=None,
prep_g_mmpbsa=True):
'''
        Slice the loaded trajectory into parts (roughly one per free core), save each slice as an xtc file and record the expected g_mmpbsa output names per part in the JSON state
:return:
'''
self.prepare_g_mmpbsa()
test = 1
curr_client = dask_client
# Testing Phase
total_free_cores = 16
# Production
# worker_status = run_dask_tools.get_dask_worker_status(curr_client)
#
# get_worker_free = run_dask_tools.check_free_resources(worker_status)
#
#
# test = 1
#
# total_free_cores = 0
#
# for worker in get_worker_free:
# preped = get_worker_free[worker]['preped']
# total_free_cores += preped['freeCores']
if prep_g_mmpbsa is False:
print('prep gmmpbsa ', prep_g_mmpbsa)
return 'Do not prepare run files'
if self.g_mmpbsa_prepared is True:
print('Do not prep files')
return 'Do not prep files'
traj_len = len(self.trajectory_mdtraj)
import math
# Free core approach
div_traj = math.ceil(traj_len/total_free_cores)
# select_indexes = list(range(total_free_cores))
# Maximum parallel
#div_traj = math.trunc(traj_len/total_free_cores)
select_frames = list(range(0,traj_len,div_traj))
select_indexes = list(range(len(select_frames)))
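        # e.g. traj_len = 1000 frames and 16 free cores gives div_traj = ceil(1000/16) = 63,
        # so the parts start at frames 0, 63, 126, ... and the last part may be shorter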
folder_to_save = self.g_mmpbsa_folder
temp_mdtraj = []
temp_mdtraj_indexes = []
file_save_list = []
abs_file_save_list = []
simStates = {'simStates':{}}
for i,traj in zip(select_indexes,select_frames):
temp_state = {str(i):{}}
temp_traj = self.trajectory_mdtraj[traj:traj+div_traj]
temp_mdtraj.append(temp_traj)
temp_mdtraj_indexes.append(i)
file_save = 'traj_part{0}.xtc'.format(i)
abs_file_save = folder_to_save + os.sep + file_save
file_save_list.append(file_save)
abs_file_save_list.append(abs_file_save)
temp_state[str(i)].update({'runFinished':False,
'index':i,
'absFolder':folder_to_save,
'fileSave':file_save,
'absFileSave':abs_file_save,
'firstIndex':self.first_index,
'secondIndex':self.second_index,
'indexFile':self.index_file,
'mdpFile':self.mdp_file,
'tprFile':self.tpr_file})
energy_mm = 'energy_MM_{0}.xvg'.format(i)
polar = 'polar_{0}.xvg'.format(i)
apolar = 'apolar_{0}.xvg'.format(i)
contrib_mm = 'contrib_MM_{0}.dat'.format(i)
contrib_pol = 'contrib_pol_{0}.dat'.format(i)
contrib_apol = 'contrib_apol_{0}.dat'.format(i)
temp_state[str(i)].update({'energyMM':energy_mm,
'polar':polar,
'apolar':apolar,
'contrib_MM':contrib_mm,
'contrib_pol':contrib_pol,
'contrib_apol':contrib_apol})
temp_traj.save(abs_file_save)
temp_state[str(i)].update({'fileSaved': True
})
simStates['simStates'].update(temp_state)
self.mdtraj_frames = select_frames
self.mdtraj_sliced = temp_mdtraj
self.mdtraj_parts = temp_mdtraj_indexes
self.file_save_list = file_save_list
self.abs_file_save_list = abs_file_save_list
self.simStates = simStates
test = 1
self.g_mmpbsa_prepared = True
self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})
self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file})
self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})
self.save_state_data_json()
test = 1
#self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates']
#self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list']
# Divide trajectory to number of free cores
# TODO article Pagadala Software for molecular docking: a review
# This will be for leDock
# if prep_g_mmpbsa is True:
# # self.prepare_uber_docker()
# self.prepare_ledock_settings()
# self.prep_LeDock_dock_run_commands()
@hlp.timeit
def prep_LeDock_dock_run_commands(self, num_samples=10):
'''
        Prepare LeDock run commands and save to json
        :param num_samples: number of docking samples (default 10)
:return:
'''
try:
self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
print('No need to generate LeDock commands')
except:
self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples})
self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states)
for sample_num in self.ledock_samples:
self.prep_LeDock_dock_command(sample_num)
print('Now continue for LeDock:D')
self.save_state_data_json()
test = 1
self.prep_LeDock_run = True
@hlp.timeit
def prep_LeDock_dock_command(self, sample_num, pose_gen=20):
'''
        prepare each separate LeDock run command
:param sample_num:
:param pose_gen: default generate 20 poses
:return:
'''
try:
if self.setup_ledock_pameters is not False:
# print("Running Vina")
# TODO need to think about seed
#./ ledock_linux_x86 dock. in
command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
sample_data = self.ledock_input_info[str(sample_num)]
parm_name = sample_data['ledock_parm_name']
test = 1
self.save_run_name = "ledock_{0}_sample_{1}".format(self.run_type, sample_num)
random_seed = np.random.randint(low_seed, high_seed)
command_to_run = "{0} {1}".format(command_receptor, parm_name)
ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'
# -spli MOR_flexaid.dok
command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
print(command_to_run)
self.LeDock_command_run_list.append(command_to_run)
print("Launching new Sim")
temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
'commandRun': command_to_run,
'commandToClean':command_to_clean,
'dokFileName':ligand_clear_dok,
'runFinished': False}}
self.LeDock_sim_states.update(temp_dict)
self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict)
# try:
# os.system(command_to_run)
# except KeyboardInterrupt:
# # quit
# sys.exit()
print("LeDock command generation finished")
else:
print('Please setup LeDock settings')
except Exception as e:
print("error in runSim: ", e)
sys.exit(0)
@hlp.timeit
def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
import copy
# modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask)
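        # for every future that has finished, write the g_mmpbsa outputs returned in
        # memory (energy/polar/apolar xvg files and the contribution dat files) into
        # the run folder and mark that part as finished in the saved JSON state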
for i, job in enumerate(submitted_jobs_dask):
status = job.status
if status == 'finished':
test = 1
# pop_item = modified_submitted_jobs_dask.pop(i)
try:
if finished_jobs_dict[i] is True:
continue
except Exception as error:
pass
finished_jobs.append(job)
finished_jobs_dict.update({i: True})
results = job.result()
test = 1
try:
key = list(results.keys())[0]
prog = results[key]['Program'] # need [0] key
sample_num = results[key]['part_num']
if prog == 'g_mmpbsa':
sample_num = results[key]['part_num']
results_dask = results[key]['dask']
original_data = self.state_data['energySoftware'][prog]
abs_folder = self.g_mmpbsa_folder # original_data['AbsFolder']
out_name = abs_folder + os.sep + results_dask['out_filename']
out_mem = results_dask['out_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['apolar_filename']
out_mem = results_dask['apolar_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['polar_filename']
out_mem = results_dask['polar_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['energyMM_filename']
out_mem = results_dask['energyMM_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['contribMM_filename']
out_mem = results_dask['contribMM_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['contrib_apol_filename']
out_mem = results_dask['contrib_apol_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['contrib_pol_filename']
out_mem = results_dask['contrib_pol_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
# out_pdbqt_filename = out_pdbqt_name
# self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = \
# results[key]
update_results = copy.deepcopy(results)
update_results[key].pop('dask', None)
# self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = results[key]
# self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
# results_dask = results[key]['dask']
# else:
# self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key]
# if filename is None and filedata is None:
# # filename = self.json_state_file
# filename = self.absolute_json_state_file
# filedata = self.state_data
self.save_state_data_json(filedata=self.before_dask, filename=self.absolute_json_state_file)
# allow CPU to cool down
# self.hold_nSec(5)
                print('Job finished successfully ---> ', i)
except Exception as error:
print('error is ', error)
# print('i is ', i)
print('Finished checking dask submissions ---\n')
print('---' * 10)
return finished_jobs, finished_jobs_dict
# @hlp.timeit
def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10):
# from molmolpy.moldock import run_dask_tools
from molmolpy.tools import run_dask_tools
test = 1
curr_client = client
worker_status = run_dask_tools.get_dask_worker_status(curr_client)
get_worker_free = run_dask_tools.check_free_resources(worker_status)
import copy
original_get_worker_free = copy.deepcopy(get_worker_free)
# TEST IT WORKS
# queue_jobs = self.run_mmpbsa_dask
# job_test = queue_jobs[0]
#
# result = run_dask_tools.run_gmmpbsa_using_dask(job_test)
test = 1
# Local upload test
# big_future = self.dask_prep
# run_dask_tools.upload_g_mmpbsa_files_dask(big_future)
#TODO
# Scatter a lot better using scatter for big files for upload G_MMPBSA files
# test = 1
# tasks_upload = []
# big_future = client.scatter(self.dask_prep, broadcast=True)
# for worker in get_worker_free:
# worker_info = get_worker_free[worker]
# worker_address = worker_info['preped']['workerAddress']
#
# retries_num = 2
#
# # Upload files to all clients client.upload_file
# task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask,
# big_future,
# workers=[worker_address],
# key='key_scatter_{0}'.format(worker_address),
# retries=retries_num)
# tasks_upload.append(task)
# print("Starting uploading to ", worker_address)
test = 1
# TODO
# This part runs the main program
submitted_jobs = []
submitted_jobs_dask = []
queue_jobs = self.run_mmpbsa_dask
job_quantity = len(queue_jobs)
finished_jobs = []
finished_jobs_dict = {}
worker_status_free = None
test = 1
# maybe 2 async threads, one checks finished simulations, other submits jobs
###############################################################################################
gmmbpsa_min_mem = 1000
retries_num = 2
curr_index = 0
curr_worker = 0
# prepare worker ids for easier switch
worker_ids = {}
for i, id in enumerate(get_worker_free):
worker_ids.update({i: id})
custom_index_curr = 3
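        # round-robin over queued parts and over workers: a part is only submitted
        # when the current worker reports more than gmmbpsa_min_mem MB of free memory
        # and fewer than max_jobs_to_run jobs are still in flight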
while len(queue_jobs) > 0:
if curr_index == len(queue_jobs):
curr_index = 0
if curr_worker == len(worker_ids):
curr_worker = 0
print('-----------------------------------------------------------------')
worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client, custom_index=custom_index_curr)
get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp)
custom_index_curr += 2
print('----------------TEST------------------')
curr_item = queue_jobs[curr_index]
test = 1
curr_worker_id = worker_ids[curr_worker]
workstation_info_temp = get_worker_free_temp[curr_worker_id]
workstation_preped_temp = workstation_info_temp['preped']
workstation_address = workstation_preped_temp['workerAddress']
# This way folder is buggy
workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir']
workstation_freemem = workstation_preped_temp['freeMemory']
workstation_freecpu = workstation_preped_temp['freeCores']
curr_item_prog = curr_item['Program']
############################################################
# submitted_jobs_dask len less than 16
jobs_running = len(submitted_jobs_dask) - len(finished_jobs)
max_jobus = max_jobs_to_run
# g_mmpbsa part
if curr_item_prog == 'g_mmpbsa':
if workstation_freemem > gmmbpsa_min_mem and jobs_running <max_jobus:
print('Submit MMPBSA job to DASK')
pop_item = queue_jobs.pop(curr_index)
key_name = pop_item['save_run_name']
run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
print('Cur run ', run_name)
if curr_index == 0:
curr_index = 0
else:
curr_index -= 1
pop_item.update({'workingDir':workstation_dir})
submitted_jobs.append(pop_item)
# MAYBE CHECK FOLDER HERE
#
#big_future = client.scatter(pop_item, workers=[workstation_address], hash=False)
big_future = pop_item
task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask,
big_future,
workers=[workstation_address],
key=run_name,
retries=retries_num)
submitted_jobs_dask.append(task_g_mmpbsa)
else:
key_name = curr_item['save_run_name']
run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
print('Passed running ', run_name)
# submitted_jobs_dask_temp, finished_jobs_temp = self.check_dask_jobs(submitted_jobs_dask,finished_jobs)
finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs,
finished_jobs_dict)
test = 1
###################################################3
# update index
# print(curr_item)
# How to save submitted jobs state
print('-------')
if curr_index == 0 and len(submitted_jobs_dask) == 1:
curr_index = 0
else:
curr_index += 1
curr_worker += 1
time.sleep(10)
test = 1
# ###############################################################################################
#
# # work_address = workstation1_preped['workerAddress']
# #
# # # This is to run on dask server
# #
# # # TODO this works need to create a quiiee
# # retries_num = 2
# # task = client.submit(run_dask_tools.run_vina_using_dask,
# # data,
# # workers=[work_address],
# # key='key_test',
# # retries=retries_num)
#
# # TODO This part needs further refinement
#
# # break
#
# test = 1
#
print('Last Check of submitted jobs')
while len(finished_jobs) != job_quantity:
finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs,
finished_jobs_dict)
time.sleep(60)
print('->' * 10)
print('Everything is finished :))))))')
print('---' * 10)
print('\n')
def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
'''
        Build the queue of g_mmpbsa part jobs (trajectory slice plus tpr/mdp/index payloads) for submission to the dask cluster; only parts not yet finished are queued
:return:
'''
current_pid = multiprocessing.current_process().pid
print("Main Process with PID:{}".format(current_pid))
# free_threads_for_Vina = num_threads - LeDock-rDock-FlexAid
run_g_mmpbsa = []
run_mmpbsa_queue = []
# Prepare outputs
import copy
self.before_dask = copy.deepcopy(self.state_data)
################################################################################
if self.g_mmpbsa_prepared is True:
full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']
test = 1
tpr_abs= full_g_mmpbsa_data['tprFile']
tpr_file = open(tpr_abs, 'rb')
tpr_mem = tpr_file.read()
tpr_filename = tpr_abs.split(os.sep)[-1]
#
mdp_abs= full_g_mmpbsa_data['mdpFile']
mdp_file = open(mdp_abs, 'r')
mdp_mem = mdp_file.read()
mdp_filename = mdp_abs.split(os.sep)[-1]
index_abs= full_g_mmpbsa_data['indexFile']
index_file = open(index_abs, 'r')
index_mem = index_file.read()
index_filename = index_abs.split(os.sep)[-1]
# data_pre = self.state_data['energySoftware']['g_mmpbsa']
# data_pre.update({'dask': {}})
data_pre = {}
data_pre.update({'tprName':tpr_filename, 'tprMem':tpr_mem})
data_pre.update({'mdpName':mdp_filename, 'mdpMem':mdp_mem})
data_pre.update({'indexName':index_filename, 'indexMem':index_mem})
self.dask_prep = data_pre
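            # the tpr/mdp/index file contents are embedded in every job dict below,
            # so dask workers do not need a shared filesystem to run their part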
for part_num in full_g_mmpbsa_data['parts']:
# self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime)
data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]
save_run_name = "g_mmpbsa_part_{0}".format(part_num)
data.update({'Program': 'g_mmpbsa'})
data.update({'part_num': part_num})
data.update({'save_run_name': save_run_name})
data.update({'dask': {}})
traj_abs = data['absFileSave']
traj_file = open(traj_abs, 'rb')
traj_mem = traj_file.read()
traj_filename = data['fileSave']
data['dask'].update({'tprName': tpr_filename})
data['dask'].update({'mdpName': mdp_filename})
data['dask'].update({'indexName': index_filename})
data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename})
data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})
test = 1
# data['dask'].update({'cavFile':cav_file_mem })
# self.state_data['dockSoftware']['LeDock']['simStates'][str(LeDock_sample_num)] = data
test = 1
run_g_mmpbsa.append(data)
# # result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data)
# # test = 1
#
# test = 1
###################################################################################################
test = 1
####################################################################################################
self.run_mmpbsa_dask = run_g_mmpbsa
curr_LeDock = 0
# very slow
# while len(run_docking_queue) != 40:
# run_docking_queue += run_docking_LeDock[curr_LeDock:curr_LeDock + LeDock]
# curr_LeDock += LeDock
#
# test = 1
# run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock]
# curr_rDock += rDock
#
# run_docking_queue += run_docking_FlexAid[curr_FlexAid:curr_FlexAid + FlexAid]
#
# curr_FlexAid += FlexAid
#
# run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina]
# curr_Vina += Vina
#
# test = 1
#
test = 1
run_mmpbsa_queue = run_g_mmpbsa
# run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina
final_queue_job = []
# Need to select those that are not finished
for pre_job in run_mmpbsa_queue:
# print(pre_job)
if pre_job['runFinished'] is False:
final_queue_job.append(pre_job)
test = 1
self.run_mmpbsa_dask = final_queue_job
# random.shuffle(self.run_docking_queue)
print('Finished preparing g_mmpbsa jobs')
# TODO should I add json saving of information or not?
def load_state_data_json(self, filename):
'''
:param filename: load json state data
:return:
'''
# self.absolute_path = os.path.abspath(filename)
self.load_state_called = True
print(os.path.abspath(__file__))
self.state_data = json.load(open(filename, "r"))
# os.chdir('HSL_exhaustiveness')
self.trajectory_file = self.state_data['trajectoryFile']
self.mdp_file = self.state_data['mdpFile']
self.tpr_file = self.state_data['tprFile']
self.index_file = self.state_data['indexFile']
self.folder_path = self.state_data['folderPath']
self.run_type = self.state_data['runType']
self.molecule_name = self.state_data['molName']
self.receptor_name = self.state_data['receptorName']
# TODO test
self.sim_folder_run = self.state_data['simRunFolder'] # .split('/')[-1]
self.directories = self.state_data['directory']
self.folder_exists = self.state_data['folderCreated']
self.absolute_json_state_file = self.state_data['absoluteJsonStates']
self.g_mmpbsa_folder = self.state_data['RunFolder']
self.json_state_file = self.state_data['jsonStates']
test = 1
# self.rdock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'rDock'
# self.rdock_absolute_folder_name = self.uber_dock_folder + os.sep + self.rdock_folder_name
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
# self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name)
# print('TADA ', self.directories)
test = 1
# This will hold information about run states
# self.uber_dock_folder = self.get_uber_dock_run_folder_name()
########################################################################################
# # LeDock settings part
#
# self.ledock_data = self.state_data['dockSoftware']['LeDock']
# test = 1
#
# # Try to load initial LeDock
try:
self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
test = 1
self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
# self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
# self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
# self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
# self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
# self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
# self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
except:
print('G_mmpbsa is empty verify yolo')
#
# test = 1
#
# try:
# self.setup_ledock_pameters = self.ledock_data['setup_LeDock']
# self.ledock_num_samples = self.ledock_data['num_samples']
# self.ledock_input_info = self.ledock_data['LeDockInputInfo']
# self.param_ledock_template = self.ledock_data['paramFull']
# except:
# print('LeDock setting part is empty verify yolo')
#
# try:
# self.ledock_param_title = self.ledock_data['LeDock_params']['title']
# self.rdock_title = self.ledock_data['LeDock_params']['title']
#
# self.receptor_file_ledock = self.ledock_data['LeDock_params']['receptorFile']
# self.ledock_rmsd = self.ledock_data['LeDock_params']['LeDockRMSD']
#
# self.ledock_xmin = self.ledock_data['LeDock_params']['xmin']
# self.ledock_xmax = self.ledock_data['LeDock_params']['xmax']
# self.ledock_ymin = self.ledock_data['LeDock_params']['ymin']
# self.ledock_ymax = self.ledock_data['LeDock_params']['ymax']
# self.ledock_zmin = self.ledock_data['LeDock_params']['zmin']
# self.ledock_zmax = self.ledock_data['LeDock_params']['zmax']
#
# except:
# print('LeDock_params is empty verify yolo')
#
# try:
# self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
# self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
# print('No need to generate LeDock commands')
# self.prep_LeDock_run = True
# except:
# print('LeDock_params simStates is empty verify yolo')
#
# test = 1
def prepare_g_mmpbsa(self):
'''
Prepare g_mmpbsa run folder and initial json configuration
:return:
'''
self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name
# Create folder don't forget
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
print('TADA ', self.directories)
self.json_state_file = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'
# This will hold information about run states
self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'
if len(self.directories) == 0:
print('Creating folder for g_mmpbsa run\n')
print(self.sim_folder_run)
folder_utils.create_folder(self.sim_folder_run)
self.folder_exists = True
programs_dict = {'energySoftware': {'g_mmpbsa': {}}}
self.state_data.update({'trajectoryFile': self.trajectory_file,
'mdpFile': self.mdp_file,
'tprFile': self.tpr_file,
'indexFile': self.index_file,
'runFolderName': self.run_folder_name,
'folderPath': self.folder_path,
'jsonStates': self.json_state_file,
'runType': self.run_type,
'molName': self.molecule_name,
'receptorName': self.receptor_name,
'simRunFolder': self.sim_folder_run,
'RunFolder': self.g_mmpbsa_folder,
'absoluteJsonStates': self.absolute_json_state_file,
'directory': self.directories,
'folderCreated': self.folder_exists,
'simStates': {}})
self.state_data.update(programs_dict)
# self.prepVinaSim_exhaust()
self.save_state_data_json()
self.load_state_called = False
else:
self.load_state_file = self.json_state_file
self.load_state_called = True
self.load_state_data_json(self.load_state_file)
def prepare_ledock_settings(self):
'''
        Prepare the LeDock folder and initial json configuration
        >>> EPI_uber_dock.prepare_ledock_settings()
Convert with pybel to mol2 for receptor and sd for ligand
:return:
'''
# self.output_receptor_rdock = Outputfile("mol2", "{0}.mol2".format(self.receptor_name))
# self.output_receptor_rdock.write(self.receptor_pybel)
# self.output_receptor_rdock.close()
#
# self.output_ligand_rdock = Outputfile("sd", "{0}.sd".format(self.ligand_name))
# self.output_ligand_rdock.write(self.ligand_pybel )
# self.output_ligand_rdock.close()
self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock'
self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name
test = 1
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name)
print('TADA ', self.ledock_directories)
test = 1
# This will hold information about run states
# self.uber_dock_folder = self.get_uber_dock_run_folder_name()
if len(self.ledock_directories) == 0:
print('Creating rdock folder in uberDocker folder \n')
print(self.ledock_directories)
folder_utils.create_folder(self.ledock_absolute_folder_name)
test = 1
self.receptor_ledock_pdb = "{0}.pdb".format(self.receptor_name)
self.ligand_ledock_mol2 = "{0}.mol2".format(self.ligand_name)
self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep + self.receptor_ledock_pdb
self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2
self.receptor_pybel.write("pdb", self.absolute_receptor_ledock_pdb, overwrite=True)
self.ligand_pybel.write("mol2", self.absolute_ligand_ledock_mol2, overwrite=True)
self.ledock_folder_exists = True
test = 1
# TODO enter ledock folder and process structure for docking using lepro
# ./lepro_linux_x86 LasR_flexaid.pdb
os.chdir(self.ledock_absolute_folder_name)
command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb)
os.system(command_receptor)
self.lepro_pdb_file = 'pro.pdb'
# Need to check whteter lepro ran fine
print('Updated receptor with LePro\n')
os.chdir(self.uber_dock_folder)
self.state_data['dockSoftware']['LeDock'].update(
{'receptor_pdb': self.receptor_ledock_pdb,
'ligand_mol2': self.ligand_ledock_mol2,
'lepro_pdb': self.lepro_pdb_file,
'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file,
'abs_receptor_pdb': self.absolute_receptor_ledock_pdb,
'abs_ligand_mol2': self.absolute_ligand_ledock_mol2,
'LeDockFolderStatus': self.ledock_folder_exists,
'LeDockAbsFolder': self.ledock_absolute_folder_name,
'LeDockFolderName': self.ledock_folder_name})
self.save_state_data_json()
self.load_state_called = False
self.ledock_title = self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file'
self.ledock_rmsd = 0.5
self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,
receptor_file=self.lepro_pdb_file,
ledock_rmsd=self.ledock_rmsd,
x_center=self.x_center,
y_center=self.y_center,
z_center=self.z_center)
else:
print('state has beeen loaded \n')
##############################################################################
def flexaid_generate_ga_dat_parameters(self):
'''
Generate GA dat parameters for flexaid docking
:return:
'''
self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500
# Number of generations
# Integer in interval [1-N]
NUMGENER 500
# Use Adaptive Genetic-Algorithm
# Value of 0 or 1
ADAPTVGA 1
# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10
# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90
# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10
# Crossover operator
# Intragenic crossovers are possible
INTRAGEN
# Specifies that the initial population is generated randomly
POPINIMT RANDOM
# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE
# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0
# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM
# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0
# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950
# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
self.generate_ga_dat_pameters = True
self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat
self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
self.generate_ga_dat_object_file.close()
self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
{'generateGA_param': self.generate_ga_dat_pameters,
'GA_DataName': self.generate_ga_dat,
'GA_DATA_Abs': self.generate_ga_dat_name_abs,
'GA_ParamFull': self.flexaid_ga_dat_param_template})
# self.state_data_samples = self.state_data.copy()
self.save_state_data_json()
# TODO this part needs to be thought out
####################################################################################################################
def flexaid_generate_ga_dat_parameters_dask(self):
'''
Generate GA dat parameters for flexaid docking
:return:
'''
self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500
# Number of generations
# Integer in interval [1-N]
NUMGENER 500
# Use Adaptive Genetic-Algorithm
# Value of 0 or 1
ADAPTVGA 1
# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10
# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90
# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10
# Crossover operator
# Intragenic crossovers are possible
INTRAGEN
# Specifies that the initial population is generated randomly
POPINIMT RANDOM
# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE
# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0
# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM
# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0
# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950
# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + generate_ga_dat
return [generate_ga_dat, ]
# self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
# self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
# self.generate_ga_dat_object_file.close()
#
# self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
#
# self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
# {'generateGA_param': self.generate_ga_dat_pameters,
# 'GA_DataName': self.generate_ga_dat,
# 'GA_DATA_Abs': self.generate_ga_dat_name_abs,
# 'GA_ParamFull': self.flexaid_ga_dat_param_template})
# self.state_data_samples = self.state_data.copy()
##############################################################################################
def flexaid_generate_config_input_dask(self):
'''
Generate flexaid config input file
Flexaid is very strict about spaces
:return:
'''
flexaid_config_input_template = '''# Optimization method (genetic-algorithms)
METOPT GA
# The variation in degrees for the anchor angle of the ligand
# Float in interval [1.0-30.0]
VARANG 5.0
# The variation in degrees for the anchor dihedral of the ligand
# Float in interval [1.0-30.0]
VARDIH 5.0
# The variation in degrees for flexible dihedrals of the ligand
# Float in interval [1.0-30.0]
VARFLX 10.0
# Use Vcontacts in the calculations of surfaces in contact
COMPLF VCT
# Do not consider intramolecular interactions
NOINTR
# Side-chain rotamer acceptance threshold
# Float in interval [0.0-1.0]
DEECLA 0.8
# Use instances of side-chain conformers rather than using the Penultimate Rotamer Library
#ROTOBS
# Defines the grid spacing of the binding-site
# Float in interval [0.1,1.0]
SPACER 0.375
# Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands)
# To exclude these groups, uncomment the next line
#EXCHET
# Include water molecules in the target (always removed by default)
# Only considered if EXCHET is disabled
# To include water molecules, uncomment the next line
#INCHOH
# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9
# Permeability for side-chain rotamer acceptance
# Float in interval [0.0,1.0] from fully permeable to no permeability
ROTPER 0.8
# Solvent term penalty
# When the value is 0.0 the solvent interactions are derived from the interaction matrix
# Float in interval [-200.0,200.0]
SLVPEN 0.0
# Use Vcontacts indexing
VINDEX
# Vcontacts plane definition
# Value in [B,R,X] for Bissecting, Radical and Extended radical plane
# See McConkey et al. (2002) Bioinformatics. 18(10); 1365-1373
VCTPLA R
# Use normalized surfaces in contacts
NORMAR
# Define the RMSD cutoff between clusters
# Float in interval [0.5,3.0]
CLRMSD 2.0
# Number of results/docking poses to output
MAXRES 20
# Only output scored atoms in the final results
# Comment the next line if you wish to obtain the whole complex
SCOOUT
# Only calculate the CF for ligand atoms despite including flexible side-chains
#SCOLIG
# Ends reading of CONFIG file
ENDINP
'''
final_str = ''''''
# Specify the processed target file to use
pdbnam = 'PDBNAM ' + '{0}\n\n'.format(
self.receptor_flexaid_mol2)
# Specify the processed ligand file to use
# BTN.inp has the unique RESNUMC identifier LIG9999A
inplig = 'INPLIG ' + '{0}.inp\n\n'.format(
self.ligand_flexaid_initials)
# Specify to use one or multiple cleft(s) as binding-site
rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\n\n'
# Specify the degrees of freedom (DOF) of the processed ligand with residue number 9999 and chain A
# Translational DOF of the ligand (-1)
optimz1 = 'OPTIMZ 9999 {0} -1\n\n'.format(self.flexaid_res_chain)
# Rotational DOF of the ligand (0)
optimz2 = 'OPTIMZ 9999 {0} 0\n\n'.format(self.flexaid_res_chain)
# Add one extra line for each flexible bond of the ligand
# The allowable flexible bonds are listed as FLEDIH lines in Processed_files/BTN.inp
# In our example, Biotin has 5 flexible bonds
flexible_bonds_data = open(
self.flexaid_absolute_processed_files_folder + os.sep + '{0}.inp'.format(self.ligand_flexaid_initials), 'r')
flexible_bonds_data_text = flexible_bonds_data.read()
flexible_bonds_data.close()
flexible_bonds_data_text_list = flexible_bonds_data_text.split('\n')
flexible_index_list_phrases = []
flexible_index_list = []
for i in flexible_bonds_data_text_list:
if 'FLEDIH' in i:
print(i)
temp = i.split(' ')
print(temp)
flex_index = temp[-2]
flexible_index_list.append(int(flex_index))
temp_line = 'OPTIMZ {0} {1} {2}\n'.format(self.flexaid_res_number, self.flexaid_res_chain, flex_index)
flexible_index_list_phrases.append(temp_line)
test = 1
final_str += pdbnam
final_str += inplig
final_str += rgnopt_locclf
final_str += optimz1
final_str += optimz2
for y in flexible_index_list_phrases:
final_str += y
final_str += '\n'
rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(
self.ligand_flexaid_initials)
final_str += rmsdst
final_str += flexaid_config_input_template
generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp'
return generate_config_input_file, final_str
# self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
#
# self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
# {'generateGA_param': self.generate_ga_dat_pameters,
# 'GA_DataName': self.generate_ga_dat,
# 'GA_DATA_Abs': self.generate_ga_dat_name_abs,
# 'GA_ParamFull': self.flexaid_ga_dat_param_template})
#
# # self.state_data_samples = self.state_data.copy()
#
# self.save_state_data_json()
# TODO this part needs to be thought out
####################################################################################################################
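    # Illustrative helper (editor sketch, not part of the original pipeline):
    # condenses the FLEDIH -> OPTIMZ mapping performed in
    # flexaid_generate_config_input_dask above into a standalone, side-effect-free
    # function. The .inp text format and the 9999/chain convention follow the
    # comments in that method; the default values here are only for illustration.
    @staticmethod
    def example_fledih_to_optimz(inp_text, res_number=9999, res_chain='A'):
        '''Return the OPTIMZ lines implied by the FLEDIH lines of a FlexAid .inp file.'''
        optimz_lines = []
        for line in inp_text.split('\n'):
            if 'FLEDIH' in line:
                # same extraction as above: the flexible-bond index is the
                # second-to-last whitespace-separated token of the FLEDIH line
                flex_index = line.split(' ')[-2]
                optimz_lines.append('OPTIMZ {0} {1} {2}\n'.format(res_number, res_chain, flex_index))
        return optimz_lines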
def prepare_samples_collection_run(self, standard_exhaust=128,
num_samples_run=100,
run_type='samples_run'):
if self.setup_box is False:
print('Please setup simulation box')
sys.exit(0)
self.run_type_samples = run_type
self.prep_samples_run = True
self.samples_exhaust = standard_exhaust
self.samples_run = list(range(1, num_samples_run + 1))
self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples
self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples
# Create folder don't forget
# Exhaustiveness for all samples
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples)
print('TADA ', self.directories_samples)
self.json_samples_state_file = self.sim_folder_run_samples + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json'
# This will hold information about run states
if len(self.directories_samples) == 0:
print('Creating folder for vina samples run\n')
print('Vina run type: {0}'.format(self.run_type_samples))
print(self.sim_folder_run_samples)
folder_utils.create_folder(self.sim_folder_run_samples)
self.folder_exists_samples = True
self.state_data_samples.update({'receptorFile': self.receptor_file,
'ligandFile': self.ligand_file,
'exhaustivenessList': self.exhaustiveness,
'samples_exhaust': self.samples_exhaust,
'samplesList': self.samples_run,
'folderPath': self.folder_path,
'runType': self.run_type_samples,
'molName': self.molecule_name,
'receptorName': self.receptor_name,
'simRunFolder': self.sim_folder_run_samples,
'directory': self.directories_samples,
'setup': self.setup_box,
'folderCreated': self.folder_exists_samples,
'simStates': {}})
self.prepVinaSim_samples()
self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file)
self.load_state_called_samples = False
self.prep_sample_run = True
else:
self.load_state_file_samples = self.json_samples_state_file
self.load_state_called_samples = True
self.load_samples_state_data_json(self.load_state_file_samples)
self.prep_sample_run = True
def get_exhaust_run_folder_name(self):
curr_folder = os.getcwd()
return curr_folder + os.sep + self.run_folder_name
def get_samples_run_folder_name(self):
curr_folder = os.getcwd()
print("Yippie yi kay", curr_folder)
return curr_folder + os.sep + self.run_folder_name_samples
def save_state_data_json(self, filedata=None, filename=None):
'''
        :param filedata: state dict to serialize (defaults to self.state_data)
        :param filename: path of the JSON state file (defaults to self.absolute_json_state_file)
:return:
'''
# import json
# with open(filename, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
# pickle.dump(self.cluster_models, open(filename, "wb"))
# TODO create folder for run saving state run
# filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '.json'
if filename is None and filedata is None:
# filename = self.json_state_file
filename = self.absolute_json_state_file
filedata = self.state_data
# elif filedata is not None:
# filedata = filedata
# filename = self.absolute_json_state_file
else:
filedata = filedata
filename = filename
json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)
# TODO should I add json saving of information or not?
def load_samples_state_data_json(self, filename):
'''
:param filename: load json state data
:return:
'''
# self.absolute_path = os.path.abspath(filename)
self.load_state_called_samples = True
print(os.path.abspath(__file__))
self.state_data_samples = json.load(open(filename, "r"))
# os.chdir('HSL_exhaustiveness')
self.receptor_file = self.state_data_samples['receptorFile']
self.ligand_file = self.state_data_samples['ligandFile']
self.exhaustiveness = self.state_data_samples['exhaustivenessList']
self.samples_run = self.state_data_samples['samplesList']
self.folder_path = self.state_data_samples['folderPath']
self.run_type = self.state_data_samples['runType']
self.molecule_name = self.state_data_samples['molName']
self.receptor_name = self.state_data_samples['receptorName']
# TODO test
self.samples_exhaust = self.state_data_samples['samples_exhaust']
self.sim_folder_run_samples = self.state_data_samples['simRunFolder'] # .split('/')[-1]
self.directories_samples = self.state_data_samples['directory']
self.setup_box = self.state_data_samples['setup']
self.folder_exists = self.state_data_samples['folderCreated']
self.x_center = self.state_data_samples['boxSettings']['center_x']
self.y_center = self.state_data_samples['boxSettings']['center_y']
self.z_center = self.state_data_samples['boxSettings']['center_z']
self.x_size = self.state_data_samples['boxSettings']['size_x']
self.y_size = self.state_data_samples['boxSettings']['size_y']
self.z_size = self.state_data_samples['boxSettings']['size_z']
self.num_modes = self.state_data_samples['boxSettings']['numModes']
def hold_nSec(self, n):
for i in range(1, n + 1):
print(i)
time.sleep(1) # Delay for 1 sec
print('Ok %s secs have pass' % (n))
@hlp.timeit
def prepVinaSampleCommand(self, sample_num):
# try:
if self.setup_box is not False:
# print("Running Vina")
# TODO need to think about seed
self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num)
command_to_run = "vina --receptor {0} " \
"--ligand {1} " \
"--center_x {2} " \
"--center_y {3} " \
"--center_z {4} " \
"--size_x {5} " \
"--size_y {6} " \
"--size_z {7} " \
"--exhaustiveness {8} " \
"--num_modes {9} " \
"--seed 10 " \
"--log {10}.txt " \
"--out {11}_out.pdbqt".format(self.receptor_file,
self.ligand_file,
self.x_center,
self.y_center,
self.z_center,
self.x_size,
self.y_size,
self.z_size,
self.samples_exhaust,
self.num_modes,
self.save_run_name,
self.save_run_name)
print(command_to_run)
self.command_samples_run_list.append(command_to_run)
print("Launching new Sim")
self.state_data_samples['simStates'].update({str(sample_num): {'save_run_name': self.save_run_name,
'commandRun': command_to_run,
'runFinished': False}})
# try:
# os.system(command_to_run)
# except KeyboardInterrupt:
# # quit
# sys.exit()
print("Vina sample run command prep finished")
else:
print('Please setup vina box settings')
# except Exception as e:
# print("error in Sample runSim: ", e)
# sys.exit(0)
def get_molecule_name(self):
return self.molecule_name
def get_receptor_name(self):
return self.receptor_name
def set_molecule_name(self, mol_name):
self.molecule_name = mol_name
def set_receptor_name(self, receptor_name):
self.receptor_name = receptor_name
# This might need to get modified
def find_sample_files(self, folder):
try:
VIP = []
for dirname, dirnames, filenames in os.walk(folder):
for i in filenames:
# print i
if 'out' in i:
VIP.append(i)
# This is not necessary since info is inside pdbqt file
# elif 'vina_sample_' in i:
# VIP.append(i)
return VIP
except Exception as e:
print("error in find_files: ", e)
sys.exit(0)
def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
try:
dir_names = []
for dirname, dirnames, filenames in os.walk(folder_path):
# print(dirname, '-')
if dir_name in dirname: #
# print(dir_name)
dir_names.append(dirname)
# print sorted(dir_names)
return sorted(dir_names)
except Exception as e:
print("Problem with finding folders : ", e)
sys.exit(0)
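
# --- Illustrative sketch (added example, not part of the original pipeline) ---
# prepVinaSampleCommand above assembles a plain AutoDock Vina command line.
# The helper below only reproduces its shape with placeholder file names and
# box settings so the expected command is easy to inspect without
# instantiating the class; every value here is an assumption for the example,
# not a default taken from the project.
def _example_vina_command(receptor_file='receptor.pdbqt',
                          ligand_file='ligand.pdbqt',
                          exhaustiveness=128,
                          num_modes=9,
                          run_name='vina_samples_run_1'):
    """Return a Vina command string shaped like the one built by prepVinaSampleCommand."""
    return ("vina --receptor {0} --ligand {1} "
            "--center_x 0.0 --center_y 0.0 --center_z 0.0 "
            "--size_x 20.0 --size_y 20.0 --size_z 20.0 "
            "--exhaustiveness {2} --num_modes {3} --seed 10 "
            "--log {4}.txt --out {4}_out.pdbqt").format(
        receptor_file, ligand_file, exhaustiveness, num_modes, run_name)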
| 1.1875 | 1 |
randomized_tsp/Genetic.py | akshatkarani/tsp_solver | 1 | 12799381 | import random
from randomized_tsp.utils import cost, random_neighbour, random_tour
def init_population(population_size, num_of_cities):
"""
Initializes the population
"""
population = set()
while len(population) != population_size:
population.add(tuple(random_tour(num_of_cities)))
return [list(tour) for tour in population]
def calculate_fitness(population, num_of_cities, distance_matrix):
"""
    Returns a fitness list for the population.
    Fitness is 1 / cost(tour), normalized so the values sum to 1.
"""
fitness = [1 / cost(num_of_cities, distance_matrix, tour)
for tour in population]
sum_fitness = sum(fitness)
return [f / sum_fitness for f in fitness]
def order_crossover(num_of_cities, parent1, parent2):
"""
Implements order crossover operator
"""
start = random.randint(0, num_of_cities - 2)
end = random.randint(start, num_of_cities - 1)
child1 = parent1[start:end]
child2 = parent2[start:end]
for city in parent1:
if city not in child2:
child2.append(city)
for city in parent2:
if city not in child1:
child1.append(city)
return [child1, child2]
def cycle_crossover(num_of_cities, parent1, parent2):
"""
Implements cycle crossover operator
"""
child1 = [-1] * num_of_cities
child2 = child1.copy()
i = 0
while child1[i] == -1:
child1[i] = parent1[i]
i = parent1.index(parent2[i])
i = 0
while child2[i] == -1:
child2[i] = parent2[i]
i = parent2.index(parent1[i])
for i in range(num_of_cities):
if child1[i] == -1:
child1[i] = parent2[i]
if child2[i] == -1:
child2[i] = parent1[i]
return [child1, child2]
def mutate(num_of_cities, child):
"""
    Given a child tour, returns a mutated copy.
    Mutation is just a random exchange of two cities.
"""
return random_neighbour(num_of_cities, child)
def _genetic_algorithm(num_of_cities,
distance_matrix,
population_size,
mutation_prob,
crossover):
"""
Implements the genetic algorithm for TSP
Returns the best tour found and cost of that tour
"""
crossover_func = order_crossover
if crossover == 'cycle':
crossover_func = cycle_crossover
population = init_population(population_size, num_of_cities)
num_of_epochs = num_of_cities * 2
# In my experience a good value for `num_of_epochs` is directly
# proportional to `num_of_cities`.
    # You can also experiment with different terminating conditions
for _ in range(num_of_epochs):
# selection
fitness = calculate_fitness(population, num_of_cities, distance_matrix)
selected = random.choices(population, fitness, k=population_size)
random.shuffle(selected)
# offsprings
offsprings = []
for i in range(population_size // 2):
children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2])
offsprings.extend(children)
# mutation
for index in range(population_size):
if random.uniform(0, 1) < mutation_prob:
offsprings[index] = mutate(num_of_cities, offsprings[index])
# replacement
population.extend(offsprings)
fitness = calculate_fitness(population, num_of_cities, distance_matrix)
population = [tour for _, tour in sorted(zip(fitness, population), reverse=True)]
population = population[:population_size]
return population[0], cost(num_of_cities, distance_matrix, population[0])
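
# --- Illustrative usage sketch (added example, not part of the original module) ---
# A minimal way to run the GA on a small symmetric instance. It assumes the
# distance matrix is a plain 2D sequence indexable as matrix[i][j], which is
# how `cost` in randomized_tsp.utils is expected to consume it; adjust if the
# real helper uses a different format.
if __name__ == '__main__':
    example_distances = [
        [0, 2, 9, 10, 7],
        [2, 0, 6, 4, 3],
        [9, 6, 0, 8, 5],
        [10, 4, 8, 0, 6],
        [7, 3, 5, 6, 0],
    ]
    best_tour, best_cost = _genetic_algorithm(num_of_cities=5,
                                              distance_matrix=example_distances,
                                              population_size=30,
                                              mutation_prob=0.1,
                                              crossover='order')
    print(best_tour, best_cost)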
| 3.671875 | 4 |
taggit/__init__.py | vhf/django-taggit | 0 | 12799382 | VERSION = (0, 12, 1)
| 1.15625 | 1 |
src/QDialog.py | liyu13264/PyQt5_Practice | 0 | 12799383 | <reponame>liyu13264/PyQt5_Practice
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QPushButton
class QDialogDemo(QMainWindow):
def __init__(self):
super(QDialogDemo, self).__init__()
self.button = QPushButton(self)
self.init()
def init(self):
self.setWindowTitle('QDialog')
self.resize(300, 300)
self.button.setText('CLICK')
self.button.clicked.connect(self.function)
@staticmethod
def function():
dialog = QDialog()
button = QPushButton('QUIT', dialog)
# button.setText()
button.move(50, 50)
button.clicked.connect(dialog.close)
dialog.setWindowTitle('...')
dialog.setWindowModality(Qt.ApplicationModal)
        # exec_() must come last: it blocks here and runs the dialog's event loop
dialog.exec_()
if __name__ == '__main__':
app = QApplication(sys.argv)
main = QDialogDemo()
main.show()
sys.exit(app.exec_())
| 3.09375 | 3 |
alexa-british-problems.py | Sorsby/alexa-british-problems | 0 | 12799384 | import json
import time
import requests
import unidecode
from flask import Flask
from flask_ask import Ask, question, session, statement
APP = Flask(__name__)
ASK = Ask(APP, "/british_problems")
def get_british_problems():
"""Get the titles of the /r/britishproblems posts"""
user_pass_dict = {'user': 'alexabritishproblems',
'passwd': '<PASSWORD>&',
'api_type': 'json'}
sess = requests.Session()
sess.headers.update(
{'User-Agent': 'alexa:british_problems:0.1 ' +
'(by /u/alexabritishproblems)'})
    sess.post('https://www.reddit.com/api/login', data=user_pass_dict)
time.sleep(1)
url = "https://reddit.com/r/britishproblems/.json?limit=10"
html = sess.get(url)
data = json.loads(html.content.decode('utf-8'))
titles = [unidecode.unidecode(listing['data']['title'])
for listing in data['data']['children']]
return titles
titles = get_british_problems()
print(titles)
@APP.route('/')
def homepage():
"""Flask default routing"""
return 'greetz fren'
@ASK.launch
def start_skill():
"""Entry point for the alexa skill"""
welcome_message = 'Hello there, would you like to hear a very British problem?'
return question(welcome_message)
@ASK.intent("GetNewBritishProblem")
def handle_get_problem_intent():
"""Handles the intent for getting a new british problem and outputting it to Alexa"""
    british_problem = random.choice(get_british_problems())
    return statement(british_problem)
@ASK.intent("NoIntent")
def handle_no_intent():
"""Handles an unmatched intent"""
goodbye_message = 'See you later... bye.'
return statement(goodbye_message)
if __name__ == '__main__':
APP.run(debug=True)
| 2.984375 | 3 |
isic/core/models/base.py | ImageMarkup/isic | 0 | 12799385 | from django.db import models
from django_extensions.db.fields import CreationDateTimeField
from django_extensions.db.models import TimeStampedModel
class CreationSortedTimeStampedModel(TimeStampedModel):
class Meta(TimeStampedModel.Meta):
abstract = True
ordering = ['-created']
get_latest_by = 'created'
created = CreationDateTimeField(db_index=True)
class CopyrightLicense(models.TextChoices):
CC_0 = 'CC-0', 'CC-0'
# These 2 require attribution
CC_BY = 'CC-BY', 'CC-BY'
CC_BY_NC = 'CC-BY-NC', 'CC-BY-NC'
| 2.28125 | 2 |
utils/math/distributions.py | david-zwicker/py-utils | 0 | 12799386 | '''
Created on Feb 24, 2015
@author: <NAME> <<EMAIL>>
This module provides functions and classes for probability distributions, which
build upon the scipy.stats package and extend it.
'''
from __future__ import division
import numpy as np
from scipy import stats, special, linalg, optimize
from ..data_structures.cache import cached_property
def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'):
""" determines the parameters of the log-normal distribution such that the
distribution yields a given mean and variance. The optional parameter
`definition` can be used to choose a definition of the resulting parameters
that is suitable for the given software package. """
mean2 = mean**2
mu = mean2/np.sqrt(mean2 + variance)
sigma = np.sqrt(np.log(1 + variance/mean2))
if definition == 'scipy':
return mu, sigma
elif definition == 'numpy':
return np.log(mu), sigma
else:
raise ValueError('Unknown definition `%s`' % definition)
def lognorm_mean(mean, sigma):
""" returns a lognormal distribution parameterized by its mean and a spread
parameter `sigma` """
if sigma == 0:
return DeterministicDistribution(mean)
else:
mu = mean * np.exp(-0.5 * sigma**2)
return stats.lognorm(scale=mu, s=sigma)
def lognorm_mean_var(mean, variance):
""" returns a lognormal distribution parameterized by its mean and its
variance. """
if variance == 0:
return DeterministicDistribution(mean)
else:
scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy')
return stats.lognorm(scale=scale, s=sigma)
def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64):
""" returns the parameters of a log-normal distribution that estimates the
sum of `count` log-normally distributed random variables with mean 1 and
variance `var_norm`. These parameters are determined by fitting the
probability density function to a histogram obtained by drawing `sim_terms`
random numbers """
sum_mean = count
sum_var = count * var_norm
# get random numbers
dist = lognorm_mean_var(1, var_norm)
vals = dist.rvs((int(sim_terms), count)).sum(axis=1)
# get the histogram
val_max = sum_mean + 3 * np.sqrt(sum_var)
bins = np.linspace(0, val_max, bins + 1)
xs = 0.5*(bins[:-1] + bins[1:])
density, _ = np.histogram(vals, bins=bins, range=[0, val_max],
density=True)
def pdf_diff(params):
""" evaluate the estimated pdf """
scale, sigma = params
return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density
# do the least square fitting
params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var, 'scipy')
params, _ = optimize.leastsq(pdf_diff, params_init)
return params
def lognorm_sum(count, mean, variance, method='fenton'):
""" returns an estimate of the distribution of the sum of `count`
log-normally distributed variables with `mean` and `variance`. The returned
distribution is again log-normal with mean and variance determined from the
given parameters. Here, several methods can be used:
`fenton` - match the first two moments of the distribution
`leastsq` - minimize the error in the interval
"""
if method == 'fenton':
# use the moments directly
return lognorm_mean_var(count * mean, count * variance)
elif method == 'leastsq':
# determine the moments from fitting
var_norm = variance / mean**2
scale, sigma = lognorm_sum_leastsq(count, var_norm)
return stats.lognorm(scale=scale * mean, s=sigma)
else:
raise ValueError('Unknown method `%s` for determining the sum of '
'lognormal distributions. Accepted methods are '
'[`fenton`, `leastsq`].')
def gamma_mean_var(mean, variance):
""" returns a gamma distribution with given mean and variance """
alpha = mean**2 / variance
beta = variance / mean
return stats.gamma(scale=beta, a=alpha)
def loguniform_mean(mean, width):
""" returns a loguniform distribution parameterized by its mean and a spread
parameter `width`. The ratio between the maximal value and the minimal value
is given by width**2 """
if width == 1:
# treat special case separately
return DeterministicDistribution(mean)
else:
scale = mean * (2*width*np.log(width)) / (width**2 - 1)
return LogUniformDistribution(scale=scale, s=width)
def loguniform_mean_var(mean, var):
""" returns a loguniform distribution parameterized by its mean and
variance. Here, we need to solve a non-linear equation numerically, which
might degrade accuracy and performance of the result """
if var < 0:
raise ValueError('Variance must be positive')
elif var == 0:
# treat special case separately
return DeterministicDistribution(mean)
else:
# determine width parameter numerically
cv2 = var / mean**2 # match square coefficient of variation
def _rhs(q):
""" match the coefficient of variation """
return 0.5 * (q + 1) * np.log(q) / (q - 1) - 1 - cv2
width = optimize.newton(_rhs, 1.1)
return loguniform_mean(mean, np.sqrt(width))
def random_log_uniform(v_min, v_max, size):
""" returns random variables that a distributed uniformly in log space """
log_min, log_max = np.log(v_min), np.log(v_max)
res = np.random.uniform(log_min, log_max, size)
return np.exp(res)
def dist_skewness(dist):
""" returns the skewness of the distribution `dist` """
mean = dist.mean()
var = dist.var()
return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2)
class DeterministicDistribution_gen(stats.rv_continuous):
""" deterministic distribution that always returns a given value
Code copied from
https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous
"""
def _cdf(self, x):
return np.where(x < 0, 0., 1.)
def _stats(self):
return 0., 0., 0., 0.
def _rvs(self):
return np.zeros(self._size)
DeterministicDistribution = DeterministicDistribution_gen(
name='DeterministicDistribution'
)
class LogUniformDistribution_gen(stats.rv_continuous):
"""
Log-uniform distribution.
"""
def freeze(self, *args, **kwds):
frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds)
frozen.support = self.support(*args, **kwds)
return frozen
def support(self, *args, **kwds):
""" return the interval in which the PDF of the distribution is
non-zero """
extra_args, _, _, _ = self._parse_args_stats(*args, **kwds)
mean = self.mean(*args, **kwds)
scale = extra_args[0]
width = mean * (2*scale*np.log(scale)) / (scale**2 - 1)
return (width / scale, width * scale)
def _rvs(self, s):
""" random variates """
# choose the receptor response characteristics
return random_log_uniform(1/s, s, self._size)
def _pdf(self, x, s):
""" probability density function """
s = s[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
res[idx] = 1/(x[idx] * np.log(s*s))
return res
def _cdf(self, x, s):
""" cumulative probability function """
s = s[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
log_s = np.log(s)
res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
res[x > s] = 1
return res
def _ppf(self, q, s):
""" percent point function (inverse of cdf) """
s = s[0] # reset broadcasting
res = np.zeros_like(q)
idx = (q > 0)
res[idx] = s**(2*q[idx] - 1)
return res
def _stats(self, s):
""" calculates statistics of the distribution """
mean = (s**2 - 1)/(2*s*np.log(s))
var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2) \
/ (4 * s**2 * np.log(s)**2)
return mean, var, None, None
LogUniformDistribution = LogUniformDistribution_gen(
a=0, name='LogUniformDistribution'
)
class HypoExponentialDistribution(object):
"""
Hypoexponential distribution.
Unfortunately, the framework supplied by scipy.stats.rv_continuous does not
support a variable number of parameters and we thus only mimic its
interface here.
"""
def __init__(self, rates, method='sum'):
""" initializes the hypoexponential distribution.
`rates` are the rates of the underlying exponential processes
`method` determines what method is used for calculating the cdf and can
be either `sum` or `eigen`
"""
        if method in {'sum', 'eigen'}:
            self.method = method
        else:
            raise ValueError("method must be either 'sum' or 'eigen'")
# prepare the rates of the system
self.rates = np.asarray(rates)
self.alpha = 1 / self.rates
        if np.any(self.rates <= 0):  # check the ndarray so plain Python lists are accepted
raise ValueError('All rates must be positive')
if len(np.unique(self.alpha)) != len(self.alpha):
raise ValueError('The current implementation only supports cases '
'where all rates are different from each other.')
# calculate terms that we need later
with np.errstate(divide='ignore'):
mat = self.alpha[:, None] \
/ (self.alpha[:, None] - self.alpha[None, :])
mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1
self._terms = np.prod(mat, 1)
def rvs(self, size):
""" random variates """
# choose the receptor response characteristics
return sum(np.random.exponential(scale=alpha, size=size)
for alpha in self.alpha)
def mean(self):
""" mean of the distribution """
return self.alpha.sum()
def variance(self):
""" variance of the distribution """
return (2 * np.sum(self.alpha**2 * self._terms) -
(self.alpha.sum())**2)
def pdf(self, x):
""" probability density function """
if not np.isscalar(x):
x = np.asarray(x)
res = np.zeros_like(x)
nz = (x > 0)
if np.any(nz):
if self.method == 'sum':
factor = np.exp(-x[nz, None] * self.rates[..., :]) \
/ self.rates[..., :]
res[nz] = np.sum(self._terms[..., :] * factor, axis=1)
else:
Theta = (np.diag(-self.rates, 0) +
np.diag(self.rates[:-1], 1))
for i in np.flatnonzero(nz):
res.flat[i] = \
1 - linalg.expm(x.flat[i]*Theta)[0, :].sum()
elif x == 0:
res = 0
else:
if self.method == 'sum':
                factor = np.exp(-x*self.rates)/self.rates
                res = np.sum(self._terms * factor)
else:
Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)
res = 1 - linalg.expm(x*Theta)[0, :].sum()
return res
def cdf(self, x):
""" cumulative density function """
if not np.isscalar(x):
x = np.asarray(x)
res = np.zeros_like(x)
nz = (x > 0)
if np.any(nz):
factor = np.exp(-x[nz, None]*self.rates[..., :])
res = 1 - np.sum(self._terms[..., :] * factor, axis=1)
elif x == 0:
res = 0
else:
factor = np.exp(-x*self.rates)
res = 1 - np.sum(self._terms * factor)
return res
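
# Illustrative usage sketch (added example): exercises the scipy-like interface
# described in the class docstring above. The rates are arbitrary example
# values; with method='sum' the cdf uses the explicit sum of exponentials,
# with method='eigen' it goes through the matrix exponential.
def _example_hypoexponential_usage():
    """Small, self-contained demo of HypoExponentialDistribution."""
    dist = HypoExponentialDistribution(np.array([0.5, 1.0, 2.0]), method='sum')
    samples = dist.rvs(size=1000)                      # draw random variates
    moments = (dist.mean(), dist.variance())           # first two moments
    curve = dist.cdf(np.linspace(0.5, 10.0, 5))        # cdf on a small grid
    return samples, moments, curve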
# ==============================================================================
# OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE
# ==============================================================================
class PartialLogNormDistribution_gen(stats.rv_continuous):
"""
partial log-normal distribution.
a fraction `frac` of the distribution follows a log-normal distribution,
while the remaining fraction `1 - frac` is zero
Similar to the lognorm distribution, this does not support any location
parameter
"""
def _rvs(self, s, frac):
""" random variates """
# choose the items response characteristics
res = np.exp(s * np.random.standard_normal(self._size))
if frac != 1:
# switch off items randomly
res[np.random.random(self._size) > frac] = 0
return res
def _pdf(self, x, s, frac):
""" probability density function """
s, frac = s[0], frac[0] # reset broadcasting
return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2)
def _cdf(self, x, s, frac):
""" cumulative probability function """
s, frac = s[0], frac[0] # reset broadcasting
return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2))))
def _ppf(self, q, s, frac):
""" percent point function (inverse of cdf) """
s, frac = s[0], frac[0] # reset broadcasting
q_scale = (q - (1 - frac)) / frac
res = np.zeros_like(q)
idx = (q_scale > 0)
res[idx] = np.exp(s * special.ndtri(q_scale[idx]))
return res
PartialLogNormDistribution = PartialLogNormDistribution_gen(
a=0, name='PartialLogNormDistribution'
)
class PartialLogUniformDistribution_gen(stats.rv_continuous):
"""
partial log-uniform distribution.
a fraction `frac` of the distribution follows a log-uniform distribution,
while the remaining fraction `1 - frac` is zero
"""
def _rvs(self, s, frac):
""" random variates """
# choose the receptor response characteristics
res = random_log_uniform(1/s, s, self._size)
# switch off receptors randomly
if frac != 1:
res[np.random.random(self._size) > frac] = 0
return res
def _pdf(self, x, s, frac):
""" probability density function """
s, frac = s[0], frac[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
res[idx] = frac/(x[idx] * np.log(s*s))
return res
def _cdf(self, x, s, frac):
""" cumulative probability function """
s, frac = s[0], frac[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
log_s = np.log(s)
res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
res[x > s] = 1
return (1 - frac) + frac*res
def _ppf(self, q, s, frac):
""" percent point function (inverse of cdf) """
s, frac = s[0], frac[0] # reset broadcasting
q_scale = (q - (1 - frac)) / frac
res = np.zeros_like(q)
idx = (q_scale > 0)
res[idx] = s**(2*q_scale[idx] - 1)
return res
PartialLogUniformDistribution = PartialLogUniformDistribution_gen(
a=0, name='PartialLogUniformDistribution'
)
NORMAL_DISTRIBUTION_NORMALIZATION = 1/np.sqrt(2*np.pi)
class NormalDistribution(object):
""" class representing normal distributions """
def __init__(self, mean, var, count=None):
""" normal distributions are described by their mean and variance.
Additionally, count denotes how many observations were used to
estimate the parameters. All values can also be numpy arrays to
represent many distributions efficiently """
self.mean = mean
self.var = var
self.count = count
def copy(self):
return self.__class__(self.mean, self.var, self.count)
@cached_property()
def std(self):
""" return standard deviation """
return np.sqrt(self.var)
def pdf(self, value, mask=None):
""" return probability density function at value """
if mask is None:
mean = self.mean
var = self.var
std = self.std
else:
mean = self.mean[mask]
var = self.var[mask]
std = self.std[mask]
return NORMAL_DISTRIBUTION_NORMALIZATION/std \
* np.exp(-0.5*(value - mean)**2 / var)
def add_observation(self, value):
""" add an observed value and adjust mean and variance of the
distribution. This returns a new distribution and only works if
count was set """
if self.count is None:
return self.copy()
else:
M2 = self.var*(self.count - 1)
count = self.count + 1
delta = value - self.mean
mean = self.mean + delta/count
M2 = M2 + delta*(value - mean)
return NormalDistribution(mean, M2/(count - 1), count)
def distance(self, other, kind='kullback-leibler'):
""" return the distance between two normal distributions """
if kind == 'kullback-leibler':
            dist = 0.5*(np.log(other.var/self.var) +
                        (self.var + (self.mean - other.mean)**2)/other.var - 1)
elif kind == 'bhattacharyya':
var_ratio = self.var/other.var
term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2))
term2 = (self.mean - other.mean)**2/(self.var + other.var)
dist = 0.25*(term1 + term2)
elif kind == 'hellinger':
dist_b = self.distance(other, kind='bhattacharyya')
dist = np.sqrt(1 - np.exp(-dist_b))
else:
raise ValueError('Unknown distance `%s`' % kind)
return dist
def welch_test(self, other):
""" performs Welch's t-test of two normal distributions """
# calculate the degrees of freedom
s1, s2 = self.var/self.count, other.var/other.count
nu1, nu2 = self.count - 1, other.count - 1
dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2)
# calculate the Welch t-value
t = (self.mean - other.mean)/np.sqrt(s1 + s2)
# calculate the probability using the Student's T distribution
prob = stats.t.sf(np.abs(t), dof) * 2
return prob
def overlap(self, other, common_variance=True):
""" estimates the amount of overlap between two distributions """
if common_variance:
if self.count is None:
if other.count is None: # neither is sampled
S = np.sqrt(0.5*(self.var + other.var))
else: # other is sampled
S = self.std
else:
if other.count is None: # self is sampled
S = other.std
else: # both are sampled
expr = ((self.count - 1)*self.var +
(other.count - 1)*other.var)
S = np.sqrt(expr/(self.count + other.count - 2))
delta = np.abs(self.mean - other.mean)/S
return 2*stats.norm.cdf(-0.5*delta)
else:
# here, we would have to integrate numerically
raise NotImplementedError
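
# --- Illustrative sanity checks (added example, not part of the original API) ---
# The helpers above are parameterized by mean/variance rather than scipy's
# native (s, scale) parameters; the guarded block below shows the intended
# round trip. All numbers are arbitrary example values.
if __name__ == '__main__':
    ln = lognorm_mean_var(2.0, 0.5)
    print('lognorm mean/var:', ln.mean(), ln.var())        # ~2.0, ~0.5

    lu = loguniform_mean_var(2.0, 0.5)
    print('loguniform mean/var:', lu.mean(), lu.var())      # ~2.0, ~0.5

    nd = NormalDistribution(mean=0.0, var=1.0, count=10)
    nd2 = nd.add_observation(0.5)
    print('KL distance:', nd.distance(nd2, kind='kullback-leibler'))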
| 3.09375 | 3 |
2020/03/ape.py | notxenonbox/adventofcode | 0 | 12799387 | <gh_stars>0
lines = []
with open('input.txt') as f:
lines = f.readlines()
lines = list(map(lambda x: x.strip(), lines))
linelen = len(lines[0])
linec = len(lines)
def check(x, y):
posX = 0
posY = 0
trees = 0
while posY < linec:
if lines[posY][posX % linelen] == '#':
trees += 1
posX+=x
posY+=y
return trees
print(f'part 1: {check(3, 1)}')
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
result = 1
for i in slopes:
result *= check(i[0], i[1])
print(f'part 2: {result}')
| 3.34375 | 3 |
shoppingcart/shop/context_processors.py | bsurajbh/shopping-cart | 0 | 12799388 | <filename>shoppingcart/shop/context_processors.py
from .models import Category
def menu_links(request):
"""get menu links"""
links = Category.objects.all()
return dict(links=links)
| 1.90625 | 2 |
explicalib/calibration/evaluation/metrics/classwise/__init__.py | euranova/estimating_eces | 2 | 12799389 | <reponame>euranova/estimating_eces
# -*- coding: utf-8 -*-
"""
@author: nicolas.posocco
"""
from .classwise_ece import classwise_ece
from .classwise_ece_c import classwise_ece_c
from .classwise_ece_a import classwise_ece_a
from .classwise_ece_ac import classwise_ece_ac
| 0.765625 | 1 |
models.py | RahulRagesh/GAT | 1 | 12799390 | from layers import *
from metrics import *
class Model(object):
def __init__(self, **kwargs):
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
self._LAYER_UIDS = {}
self.vars = {}
self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
def _build(self):
raise NotImplementedError
def build(self):
""" Wrapper for _build() """
with tf.variable_scope(self.name):
self._build()
# Build sequential layer model
self.activations.append(self.inputs)
for layer in self.layers:
hidden = layer(self.activations[-1])
self.activations.append(hidden)
self.outputs = self.activations[-1]
# Store model variables for easy access
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.vars = {var.name: var for var in variables}
# Build metrics
self._loss()
self._accuracy()
        # Joint Optimizer for Attention and Model Parameters
self.opt_op = self.optimizer.minimize(self.loss)
#####################################################
# Uncomment these optimizers for Alternate Optimization
# of Attention and Model Parameters
#####################################################
'''
attention_variables = tf.get_collection('ATTENTION_WEIGHTS', scope=self.name)
self.attention_op = self.optimizer.minimize(self.loss, var_list=attention_variables)
model_variables = tf.get_collection('MODEL_WEIGHTS', scope=self.name)
self.model_op = self.optimizer.minimize(self.loss, var_list=model_variables)
'''
#####################################################
def get_layer_uid(self,layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in self._LAYER_UIDS:
self._LAYER_UIDS[layer_name] = 1
return 1
else:
self._LAYER_UIDS[layer_name] += 1
return self._LAYER_UIDS[layer_name]
def predict(self):
pass
def _loss(self):
raise NotImplementedError
def _accuracy(self):
raise NotImplementedError
def save(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
saver = tf.train.Saver(self.vars)
save_path = saver.save(sess, "tmp/%s.ckpt" % self.name)
print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
saver = tf.train.Saver(self.vars)
save_path = "tmp/%s.ckpt" % self.name
saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
class GAT(Model):
def __init__(self, configs, placeholders, input_dim, **kwargs):
super(GAT, self).__init__(**kwargs)
self.configs = configs
self.inputs = placeholders['features']
self.input_dim = input_dim
self.output_dim = placeholders['labels'].get_shape().as_list()[1]
self.placeholders = placeholders
self.seed = configs['seed']
self.optimizer = tf.train.AdamOptimizer(learning_rate=configs['learning_rate'])
self.num_heads = configs['num_heads']
self.build()
def _loss(self):
# Weight decay loss
for layer in self.layers:
for var in layer.vars.values():
if 'weight' in var.name:
self.loss += self.configs['weight_decay'] * tf.nn.l2_loss(var)
# Cross entropy error
self.pred_error = masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
self.loss += self.pred_error
def _accuracy(self):
self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
self.placeholders['labels_mask'])
def _build(self):
self.layers.append(GraphAttention(input_dim=self.input_dim,
output_dim=self.configs['hidden_dims'],
num_heads = self.num_heads,
average_heads=False,
act=tf.nn.elu,
placeholders=self.placeholders,
model_dropout=True,
attention_dropout=True,
bias=True,
attention_bias=True,
parent_model=self))
self.layers.append(GraphAttention(input_dim=self.configs['hidden_dims']*self.num_heads,
output_dim=self.output_dim,
num_heads=1,
average_heads=True,
act=lambda x: x,
placeholders=self.placeholders,
model_dropout=True,
attention_dropout=True,
bias=True,
attention_bias=True,
parent_model=self))
def predict(self):
return tf.nn.softmax(self.outputs)
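
# --- Illustrative training-loop sketch (added example, not from the original repo) ---
# build() above wires a joint optimizer (opt_op) and, if the commented block is
# enabled, separate attention_op / model_op optimizers. The helper below only
# sketches how either scheme would be driven from a TF1 session; constructing
# the feed_dict (features, labels, masks, dropout values) is assumed to happen
# elsewhere and is passed in by the caller.
def example_train_steps(sess, model, feed_dict, alternate=False):
    """Run one optimization step, jointly or alternating attention/model weights."""
    if alternate:
        # Requires the commented-out attention_op / model_op in Model.build()
        sess.run(model.attention_op, feed_dict=feed_dict)
        _, loss, acc = sess.run([model.model_op, model.loss, model.accuracy],
                                feed_dict=feed_dict)
    else:
        _, loss, acc = sess.run([model.opt_op, model.loss, model.accuracy],
                                feed_dict=feed_dict)
    return loss, acc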
| 2.453125 | 2 |
build/manage.py | abrahamrhoffman/archangel | 0 | 12799391 | <filename>build/manage.py
import configparser
import subprocess
import argparse
import shutil
import base64
import glob
import sys
import os
import promote
class Manage(object):
def __init__(self, image, verbose=False):
self.init_feedback()
self.verbose = verbose
self.devnull = open(os.devnull, "w")
self.image = image
parser = configparser.ConfigParser()
parser.read("authentication.ini")
self.dockerhub_username = base64.b64decode(
parser.get("dockerhub", "username")).decode("utf-8")
        self.dockerhub_password = base64.b64decode(
parser.get("dockerhub", "password")).decode("utf-8")
self.dockerhub_priv_username = base64.b64decode(
parser.get("dockerhub_priv", "username")).decode("utf-8")
        self.dockerhub_priv_password = base64.b64decode(
parser.get("dockerhub_priv", "password")).decode("utf-8")
def init_feedback(self):
print("#########################")
print("# Void Build Pipeline #")
print("#########################")
def docker_login_pub(self):
cmd = ("docker login " +
"--username {} ".format(self.dockerhub_username) +
"--password {}".format(self.dockerhub_password))
subprocess.call(cmd, shell=True)
def docker_login_priv(self):
cmd = ("docker login dockerhub.paypalcorp.com:443 " +
"--username {} ".format(self.dockerhub_priv_username) +
"--password {}".format(self.dockerhub_priv_password))
subprocess.call(cmd, shell=True)
def build(self):
sys.stdout.write("Building Container... ")
sys.stdout.flush()
cmd = ("docker build -t {} ../"
.format(self.image))
if self.verbose:
subprocess.call(cmd, shell=True)
else:
subprocess.call(cmd,
stdout=self.devnull,
stderr=self.devnull,
shell=True)
sys.stdout.write("Done")
sys.stdout.flush()
print("")
def push(self):
sys.stdout.write("Pushing Container... ")
sys.stdout.flush()
cmd = ("docker push {}"
.format(self.image))
if self.verbose:
subprocess.call(cmd, shell=True)
else:
subprocess.call(cmd,
stdout=self.devnull,
stderr=self.devnull,
shell=True)
sys.stdout.write("Done")
sys.stdout.flush()
print("")
def promote(self):
sys.stdout.write("Promoting Container... ")
sys.stdout.flush()
prom = promote.Promote(self.image)
prom.run()
sys.stdout.write("Done")
sys.stdout.flush()
def clean(self):
files_to_remove = glob.glob("./*.pyc")
for aFile in files_to_remove:
os.remove(aFile)
shutil.rmtree("__pycache__")
def run(self):
self.docker_login_pub()
self.build()
self.push()
self.promote()
self.clean()
self.docker_login_priv()
def main():
parser = argparse.ArgumentParser()
required = parser.add_argument_group('Required arguments')
required.add_argument("-i",
"--image",
action="store",
help="Docker image and tag",
required=True)
required.add_argument("-v",
"--verbose",
action="store_true",
help="Show output (default False)")
args = parser.parse_args()
if args.verbose:
manage = Manage(args.image, args.verbose)
else:
manage = Manage(args.image)
manage.run()
if __name__ == "__main__":
main()
| 2.296875 | 2 |
voltaire/website/models/users.py | voltaire/website | 1 | 12799392 | from sqlalchemy.dialects.postgresql import UUID
from .. import db
from . import Base
roles = db.Table(
'roles',
db.Column('role_id', UUID, db.ForeignKey('role.id')),
db.Column('user_id', UUID, db.ForeignKey('user.id'))
)
socialmedianetworks = db.Table(
'socialmedianetworks',
db.Column('socialmedianetwork_id', UUID, db.ForeignKey('socialmedianetwork.id')), # noqa
db.Column('profile_id', UUID, db.ForeignKey('profile.id'))
)
class User(Base, db.Model):
__tablename__ = 'user'
openid = db.Column(db.String(200))
profile = db.relationship('Profile', backref='user', lazy='dynamic',
uselist=False)
roles = db.relationship('Role', backref=db.backref('user', lazy='dynamic'),
lazy='dynamic', secondary=roles)
def __init__(self, openid):
self.openid = openid
class Role(Base, db.Model):
__tablename__ = 'role'
name = db.Column(db.String(80), unique=True)
def __init__(self, name):
self.name = name
class SocialMediaNetwork(Base, db.Model):
__tablename__ = 'socialmedianetwork'
network = db.Column(db.String(10))
username = db.Column(db.String(255))
def __init__(self, network, username):
self.network = network
self.username = username
class Profile(Base, db.Model):
__tablename__ = 'profile'
username = db.Column(db.String(80), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
display_name = db.Column(db.String(80), index=True, unique=True)
first_name = db.Column(db.String(80), index=True)
last_name = db.Column(db.String(80), index=True)
user_id = db.Column(UUID, db.ForeignKey('user.id'))
social_media_networks = db.relationship(
'SocialMediaNetwork', backref=db.backref('user', lazy='dynamic'),
lazy='dynamic', secondary=socialmedianetworks)
def __init__(self, username, email, first_name=None, last_name=None,
display_name=None, user_id=None, social_media_networks=None):
self.username = username
self.email = email
if not display_name:
self.display_name = username
else:
self.display_name = display_name
self.first_name = first_name
self.last_name = last_name
self.social_media_networks = social_media_networks
self.user_id = user_id
| 2.5 | 2 |
tests/test_mappedPayload.py | gertschreuder/kinesis-consumer | 0 | 12799393 | #!/usr/bin/env python3
# coding=utf-8
import json
import os
import sys
import unittest
from src.utils.payloadHelper import PayloadHelper
class MappedPayloadTests(unittest.TestCase):
def setUp(self):
pass
def test_hartbeat(self):
script_dir = os.path.dirname(__file__)
rel_path = 'data/source/heartbeatPayload.json'
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path) as hartbeatData:
self.hartbeatJson = json.load(hartbeatData)
self.helper = PayloadHelper()
for item in self.hartbeatJson:
payload = self.helper.map(item)
self.assertIsNotNone(payload.heartbeat)
def tearDown(self):
self.helper = None
self.assertIsNone(self.helper)
pass
if __name__ == '__main__':
unittest.main(exit=False)
| 2.671875 | 3 |
api/letterjam/word.py | arnehuang/letterjam | 0 | 12799394 | import random
import string
from .player import Player
# from flask import current_app
# from flask_socketio import SocketIO, emit
# socketio = SocketIO(current_app)
# logger = current_app.logger
class Word:
def __init__(self, word: str, player: Player, guesser: Player = None):
self.word = word.upper()
self.scrambled = Word.scramble(word) # Scrambled also contains bonus letters at endgame
self.creator = player
self.guesser = guesser
self.revealed_idx = 0
def __repr__(self):
return "Word" + ','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]])
def advance(self, state):
if self.revealed_idx >= len(self.word) - 1:
self.scrambled += random.choice(string.ascii_lowercase)
self.revealed_idx += 1
state.update_history_log(f"{self.guesser} advanced")
# socketio.emit('word advanced', {}, namespace='/word')
def assign_guesser(self, players):
# TODO: Assign a random guesser instead of a fixed one
if self.guesser is None:
self_idx = players.index(self.creator)
if self_idx < len(
players) - 1: # Array [a, b, c] has len 3 and idx 0, 1, 2. If it's 0 or 1, move right otherwise overflow
guesser_idx = self_idx + 1
else:
guesser_idx = 0
self.guesser = players[guesser_idx]
# def assign_guesser(self, player: Player):
# self.guesser = player
@staticmethod
def scramble(word):
l = list(word)
random.shuffle(l)
return ''.join(l)
@staticmethod
def word_for_guesser(guesser: Player, list_of_words):
for word_in_list in list_of_words:
if guesser == word_in_list.guesser:
return word_in_list
return None
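
# --- Illustrative usage sketch (added example, not part of the game logic) ---
# Word only stores and compares the player objects it is given, so plain
# strings are used below as stand-ins for Player instances purely to show the
# flow; in the real game these would be letterjam Player objects.
def _example_word_flow():
    players = ['alice', 'bob', 'carol']
    word = Word('jam', players[0])
    word.assign_guesser(players)            # next player in the list becomes the guesser
    found = Word.word_for_guesser(players[1], [word])
    return word.scrambled, word.guesser, found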
| 2.796875 | 3 |
scripts/tests/test_migrate_mailing_lists_to_mailchimp_field.py | DanielSBrown/osf.io | 1 | 12799395 | from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import UserFactory
from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists
class TestMigrateMailingLists(OsfTestCase):
def setUp(self):
super(TestMigrateMailingLists, self).setUp()
self.user1 = UserFactory(mailing_lists={'mail': True})
self.user2 = UserFactory(mailing_lists={'mail': False})
self.user3 = UserFactory()
self.user1.save()
self.user2.save()
def test_get_users_with_mailing_lists(self):
users_with_mailing_list_ids = [user._id for user in get_users_with_no_mailchimp_mailing_lists()]
assert_equal(len(users_with_mailing_list_ids), 2)
assert_true(self.user1._id in users_with_mailing_list_ids)
assert_true(self.user2._id in users_with_mailing_list_ids)
assert_false(self.user3._id in users_with_mailing_list_ids)
def test_migration_of_mailing_lists(self):
assert_equal(self.user1.mailchimp_mailing_lists, {})
assert_equal(self.user2.mailchimp_mailing_lists, {})
main()
self.user1.reload()
self.user2.reload()
assert_true(self.user1.mailchimp_mailing_lists.get(u'mail'))
assert_false(self.user2.mailchimp_mailing_lists.get(u'mail'))
| 2.296875 | 2 |
tests/test_transverse_deformation.py | aripekka/tbcalc | 1 | 12799396 | <reponame>aripekka/tbcalc
# -*- coding: utf-8 -*-
"""
Tests for the transverse deformation functions. Run with pytest.
Created on Sat May 9 00:09:00 2020
@author: aripekka
"""
import sys
import os.path
import numpy as np
sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))
from tbcalc.transverse_deformation import *
from pyTTE import TTcrystal, Quantity
def test_isotropic_circular():
#Calculate the reference stresses and strains as implemented in the
#deprecated sbcalc package
E = 165
nu = 0.22
thickness = 0.1
Rx = 1000.0
Ry = 500.0
R = np.sqrt(Rx*Ry)
L = 100.0
x=np.linspace(-L/2,L/2,150)
X,Y=np.meshgrid(x,x)
stress = {}
strain = {}
stress['xx'] = -E/(16*R**2)*(X**2 + 3*Y**2 -L**2/4)
stress['yy'] = -E/(16*R**2)*(3*X**2 + Y**2 -L**2/4)
stress['xy'] = E/(8*R**2)*X*Y
stress['yx'] = stress['xy']
strain['xx'] = ((1-nu)*L**2/4-(1-3*nu)*X**2-(3-nu)*Y**2)/(16*R**2)
strain['yy'] = ((1-nu)*L**2/4-(1-3*nu)*Y**2-(3-nu)*X**2)/(16*R**2)
strain['xy'] = (1+nu)/(8*R**2)*X*Y
strain['yx'] = strain['xy']
strain['zz'] = nu/(4*R**2)*(X**2+Y**2-L**2/8)
#missing zero strains from sbcalc
strain['xz'] = X*0
strain['zx'] = X*0
strain['yz'] = X*0
strain['zy'] = X*0
for k in stress:
stress[k][X**2+Y**2 > L**2/4] = np.nan
for k in strain:
strain[k][X**2+Y**2 > L**2/4] = np.nan
#add int indexing
int2char_ind = ['','x','y','z']
for i in range(1,3):
for j in range(1,3):
stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]]
for i in range(1,4):
for j in range(1,4):
strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]]
#COMPARE THE REFERENCE TO THE IMPLEMENTATION
stress_imp, strain_imp, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)
meps = np.finfo(np.float).eps #machine epsilon
for i in range(1,3):
for j in range(1,3):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))
for i in range(1,4):
for j in range(1,4):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y)))))
#check the contact force
P = -thickness*(stress['xx']/Rx+stress['yy']/Ry)
assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps,
np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y)))))
def test_anisotropic_circular_vs_sbcalc():
#For compliance matrix
ttx = TTcrystal(crystal='Si',hkl=[9,5,1], thickness=Quantity(0.1,'mm'))
S = ttx.S.in_units('GPa^-1')
thickness = 0.1
Rx = 1000.0
Ry = 500.0
R = np.sqrt(Rx*Ry)
L = 100.0
#Calculate the stresses and strains as in the now deprecated sbcalc
x=np.linspace(-L/2,L/2,150)
X,Y=np.meshgrid(x,x)
r_squared = X**2+Y**2
phi = np.arctan2(Y,X)
stress = {}
strain = {}
D = 1/(2*R**2*(3*(S[0,0]+S[1,1])+2*S[0,1]+S[5,5]))
stress['xx'] = D*(L**2/4-X**2-3*Y**2)
stress['yy'] = D*(L**2/4-3*X**2-Y**2)
stress['xy'] = 2*D*X*Y
stress['yx'] = stress['xy']
#shorthand notation
uzzaux1 = (S[2,0]+S[2,1])*L**2/4
uzzaux2 = 2*(S[2,0]+S[2,1])
uzzaux3 = np.sqrt((S[2,1]-S[2,0])**2+S[2,5]**2)
beta = np.arctan2(S[2,5],(S[2,1]-S[2,0]))
strain['zz'] = D*(uzzaux1 - (uzzaux2+uzzaux3*np.cos(2*phi+beta))*r_squared) #In sbcalc, there's incorrectly sin instead of cos
for k in stress:
stress[k][X**2+Y**2 > L**2/4] = np.nan
for k in strain:
strain[k][X**2+Y**2 > L**2/4] = np.nan
stress_imp, strain_imp, P_imp = anisotropic_circular(Rx, Ry, L, thickness, S)
meps = np.finfo(np.float).eps #machine epsilon
#add int indexing
int2char_ind = ['','x','y','z']
for i in range(1,3):
for j in range(1,3):
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(strain['zz'] - strain_imp['zz'](X,Y)) < meps,
np.logical_and(np.isnan(strain['zz']), np.isnan(strain_imp['zz'](X,Y)))))
#check the contact force
P = -thickness*(stress['xx']/Rx+stress['yy']/Ry)
assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps,
np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y)))))
def test_anisotropic_circular_vs_isotropic_circular():
E = 165
nu = 0.22
thickness = 0.1
Rx = 1000.0
Ry = 500.0
L = 100.0
S = np.zeros((6,6))
#The elastic matrix for isotropic crystal
S[0,0] = 1
S[1,1] = 1
S[2,2] = 1
S[0,1] = -nu
S[0,2] = -nu
S[1,2] = -nu
S[1,0] = -nu
S[2,0] = -nu
S[2,1] = -nu
S[3,3] = 2*(1+nu)
S[4,4] = 2*(1+nu)
S[5,5] = 2*(1+nu)
S = S/E
stress_iso, strain_iso, P_iso = isotropic_circular(Rx, Ry, L, thickness, nu, E)
stress_aniso, strain_aniso, P_aniso = anisotropic_circular(Rx, Ry, L, thickness, S)
x=np.linspace(-L/2,L/2,150)
X,Y=np.meshgrid(x,x)
meps = np.finfo(np.float).eps #machine epsilon
int2char_ind = ['','x','y','z']
meps = np.finfo(np.float).eps #machine epsilon
#Check stresses
for i in range(1,3):
for j in range(1,3):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y)))))
#Check strains
for i in range(1,4):
for j in range(1,4):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y)))))
#Check contact forces
assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps,
np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y)))))
def test_isotropic_rectangular():
#Calculate the reference stresses and strains as implemented in the
#deprecated sbcalc package
E = 165
nu = 0.22
thickness = 0.1
Rx = 1000.0
Ry = 500.0
R = np.sqrt(Rx*Ry)
a = 100.0
b = 50.0
x=np.linspace(-a/2,a/2,150)
X,Y=np.meshgrid(x,x)
stress = {}
strain = {}
g = 8 + 10*((a/b)**2+(b/a)**2) + (1-nu)*((a/b)**2-(b/a)**2)**2
stress['xx'] = E/(g*R**2) * (a**2/12-X**2 + ((1+nu)/2 + 5*(a/b)**2 + (1-nu)/2*(a/b)**4)*(b**2/12-Y**2))
stress['yy'] = E/(g*R**2) * (b**2/12-Y**2 + ((1+nu)/2 + 5*(b/a)**2 + (1-nu)/2*(b/a)**4)*(a**2/12-X**2)) #sbcalc has a typo on this line (corrected here)
stress['xy'] = 2*E/(g*R**2)*X*Y
stress['yx'] = stress['xy']
strain['zz'] = nu/(g*R**2) * (((3+nu)/2+5*(b/a)**2+(1-nu)/2*(b/a)**4)*(X**2 - a**2/12)+\
((3+nu)/2+5*(a/b)**2+(1-nu)/2*(a/b)**4)*(Y**2 - b**2/12))
for k in stress:
stress[k][np.abs(X) > a/2] = np.nan
stress[k][np.abs(Y) > b/2] = np.nan
for k in strain:
strain[k][np.abs(X) > a/2] = np.nan
strain[k][np.abs(Y) > b/2] = np.nan
#add int indexing
int2char_ind = ['','x','y','z']
for i in range(1,3):
for j in range(1,3):
stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]]
for i in range(3,4):
for j in range(3,4):
strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]]
#COMPARE THE REFERENCE TO THE IMPLEMENTATION
stress_imp, strain_imp, P_imp = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E)
    meps = np.finfo(float).eps #machine epsilon
for i in range(1,3):
for j in range(1,3):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))
for i in range(3,4):
for j in range(3,4):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y)))))
#check the contact force
P = -thickness*(stress['xx']/Rx+stress['yy']/Ry)
assert np.all(np.logical_or(np.abs(P - P_imp(X,Y)) < meps,
np.logical_and(np.isnan(P), np.isnan(P_imp(X,Y)))))
def test_anisotropic_rectangular_vs_isotropic_rectangular():
E = 165
nu = 0.22
thickness = 0.1
Rx = 1000.0
Ry = 500.0
a = 100.0
b = 100.0
S = np.zeros((6,6))
#The elastic matrix for isotropic crystal
S[0,0] = 1
S[1,1] = 1
S[2,2] = 1
S[0,1] = -nu
S[0,2] = -nu
S[1,2] = -nu
S[1,0] = -nu
S[2,0] = -nu
S[2,1] = -nu
S[3,3] = 2*(1+nu)
S[4,4] = 2*(1+nu)
S[5,5] = 2*(1+nu)
S = S/E
stress_iso, strain_iso, P_iso = isotropic_rectangular(Rx, Ry, a, b, thickness, nu, E)
stress_aniso, strain_aniso, P_aniso = anisotropic_rectangular(Rx, Ry, a, b, thickness, S)
x=np.linspace(-a/2,a/2,150)
X,Y=np.meshgrid(x,x)
    meps = np.finfo(float).eps #machine epsilon
    int2char_ind = ['','x','y','z']
#Check stresses
for i in range(1,3):
for j in range(1,3):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(stress_iso[num_ind](X,Y) - stress_aniso[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress_iso[num_ind](X,Y)), np.isnan(stress_aniso[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(stress_iso[str_ind](X,Y) - stress_aniso[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress_iso[str_ind](X,Y)), np.isnan(stress_aniso[str_ind](X,Y)))))
#Check strains
for i in range(1,4):
for j in range(1,4):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(strain_iso[num_ind](X,Y) - strain_aniso[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain_iso[num_ind](X,Y)), np.isnan(strain_aniso[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(strain_iso[str_ind](X,Y) - strain_aniso[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain_iso[str_ind](X,Y)), np.isnan(strain_aniso[str_ind](X,Y)))))
#Check contact forces
assert np.all(np.logical_or(np.abs(P_iso(X,Y) - P_aniso(X,Y)) < meps,
np.logical_and(np.isnan(P_iso(X,Y)), np.isnan(P_aniso(X,Y)))))
def test_anisotropic_rectangular_vs_old_Version():
#For compliance matrix
ttx = TTcrystal(crystal='Si',hkl=[9,5,3], thickness=Quantity(0.1,'mm'))
a = 100.0
b = 100.0
x=np.linspace(-a/2,a/2,150)
X,Y=np.meshgrid(x,x)
thickness = 0.1
Rx = 1000.0
Ry = 500.0
R = np.sqrt(Rx*Ry)
S = ttx.S.in_units('GPa^-1')
stress = {}
strain = {}
#Numerical solution through solving a linear system A*x = b
B = np.array([0,0,0,0,0,0,0,0,-0.5])
#Construction of A matrix rows
#[C11, C20, C02, C22, C31, C13, C40, C04, lambda]
A = []
A.append([12*S[5,5], -12*S[1,5], -12*S[0,5], -(S[0,5]*a**2 + S[1,5]*b**2), S[5,5]*a**2, S[5,5]*b**2, -S[1,5]*a**2, -S[0,5]*b**2, 0]) #dL/C11
A.append([-12*S[0,5], 12*S[0,1], 12*S[0,0], (S[0,0]*a**2 + S[0,1]*b**2), -S[0,5]*a**2, -S[0,5]*b**2, S[0,1]*a**2, S[0,0]*b**2, 0]) #dL/C02
A.append([-12*S[1,5], 12*S[1,1], 12*S[1,0], (S[1,0]*a**2 + S[1,1]*b**2), -S[1,5]*a**2, -S[1,5]*b**2, S[1,1]*a**2, S[0,1]*b**2, 0]) #dL/C20
A.append([-(S[0,5]*a**2 + S[1,5]*b**2), S[0,1]*a**2 + S[1,1]*b**2, S[0,0]*a**2 + S[0,1]*b**2, (S[0,1]+2*S[5,5])*a**2*b**2/6 + 3*(S[0,0]*a**4 + S[1,1]*b**4)/20,
-(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), 3*S[0,1]*a**4/20 + S[1,1]*a**2*b**2/12, 3*S[0,1]*b**4/20 + S[0,0]*a**2*b**2/12, 2*S[0,1] + S[5,5]]) #dL/C22
A.append([S[5,5]*a**2, -S[1,5]*a**2, -S[0,5]*a**2, -(3*S[0,5]*a**4/20 + 5*S[1,5]*a**2*b**2/12), S[1,1]*a**2*b**2/3 + 3*S[5,5]*a**4/20, (S[0,1]/3 + S[5,5]/12)*a**2*b**2, -3*S[1,5]*a**4/20, - S[0,5]*a**2*b**2/12, -2*S[1,5]]) #dL/C31
A.append([S[5,5]*b**2, -S[1,5]*b**2, -S[0,5]*b**2, -(3*S[1,5]*b**4/20 + 5*S[0,5]*a**2*b**2/12), (S[0,1]/3 + S[5,5]/12)*a**2*b**2, S[0,0]*a**2*b**2/3 + 3*S[5,5]*b**4/20, -S[1,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, -2*S[0,5]]) #dL/C13
A.append([-S[1,5]*a**2, S[1,1]*a**2, S[0,1]*a**2, S[1,1]*a**2*b**2/12 + 3*S[0,1]*a**4/20, -3*S[1,5]*a**4/20, -S[1,5]*a**2*b**2/12, 3*S[1,1]*a**4/20, S[0,1]*a**2*b**2/12, S[1,1]]) #dL/C40
A.append([-S[0,5]*b**2, S[0,1]*b**2, S[0,0]*b**2, S[0,0]*a**2*b**2/12 + 3*S[0,1]*b**4/20, -S[0,5]*a**2*b**2/12, -3*S[0,5]*b**4/20, S[0,1]*a**2*b**2/12, 3*S[0,0]*b**4/20, S[0,0]]) #dL/C04
A.append([0, 0, 0, 2*S[0,1] + S[5,5], -2*S[1,5], -2*S[0,5], S[1,1], S[0,0], 0])
A = np.array(A)
C11, C20, C02, C22, C31, C13, C40, C04, L = np.linalg.solve(A,B)
stress['xx'] = (C02 + C22*X**2 + 2*C13*X*Y + C04*Y**2)/R**2
stress['yy'] = (C20 + C22*Y**2 + 2*C31*X*Y + C40*X**2)/R**2
stress['xy'] = -(C11 + 2*C22*X*Y + C31*X**2 + C13*Y**2)/R**2
stress['yx'] = stress['xy'].copy()
strain['zz'] = S[2,0]*stress['xx'] + S[2,1]*stress['yy'] + S[2,5]*stress['xy']
strain['xz'] = 0.5*(S[3,0]*stress['xx'] + S[3,1]*stress['yy'] + S[3,5]*stress['xy'] )
strain['zx'] = strain['xz'].copy()
#Apply mask
mask = np.ones(X.shape)
mask[np.abs(X)>a/2] = 0
mask[np.abs(Y)>b/2] = 0
for key in stress:
stress[key][mask < 0.5] = np.nan
for key in strain:
strain[key][mask < 0.5] = np.nan
stress_imp, strain_imp, P_imp = anisotropic_rectangular(Rx, Ry, a, b, thickness, S)
#add int indexing
int2char_ind = ['','x','y','z']
for i in range(1,3):
for j in range(1,3):
stress[i*10+j] = stress[int2char_ind[i]+int2char_ind[j]]
for i in [1,3]:
for j in [1,3]:
if i==1 and j == 1:
continue
strain[i*10+j] = strain[int2char_ind[i]+int2char_ind[j]]
    meps = np.finfo(float).eps #machine epsilon
for i in range(1,3):
for j in range(1,3):
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(stress[num_ind] - stress_imp[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[num_ind]), np.isnan(stress_imp[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(stress[str_ind] - stress_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(stress[str_ind]), np.isnan(stress_imp[str_ind](X,Y)))))
for i in [1,3]:
for j in [1,3]:
if i==1 and j == 1:
continue
num_ind = i*10+j
str_ind = int2char_ind[i]+int2char_ind[j]
assert np.all(np.logical_or(np.abs(strain[num_ind] - strain_imp[num_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain[num_ind]), np.isnan(strain_imp[num_ind](X,Y)))))
assert np.all(np.logical_or(np.abs(strain[str_ind] - strain_imp[str_ind](X,Y)) < meps,
np.logical_and(np.isnan(strain[str_ind]), np.isnan(strain_imp[str_ind](X,Y)))))
| 2.03125 | 2 |
hotpot/data_handling/squad/squad_data.py | faezezps/SiMQC | 36 | 12799397 | import pickle
from typing import List, Set
from os.path import join, exists, isfile, isdir
from os import makedirs, listdir
from hotpot.config import CORPUS_DIR
from hotpot.configurable import Configurable
from hotpot.data_handling.data import RelevanceQuestion
from hotpot.data_handling.word_vectors import load_word_vectors
from hotpot.utils import ResourceLoader
""" Squad data. For now, leaving out answer spans. When we want to predict answers, we will deal with it."""
class SquadParagraph(object):
def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True):
self.doc_title = doc_title
self.par_id = par_id
self.par_text = par_text
self.pickle_text = pickle_text
@property
def num_tokens(self):
return len(self.par_text)
def get_paragraph_without_text_pickling(self):
return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False)
def __repr__(self) -> str:
return f"Title: {self.doc_title}, Id: {self.par_id}\n" \
f"Paragraph:\n" + ' '.join(self.par_text)
def __getstate__(self):
if not self.pickle_text:
state = self.__dict__.copy()
state['par_text'] = None
return state
return self.__dict__
class SquadDocument(object):
def __init__(self, title: str, paragraphs: List[SquadParagraph]):
self.title = title
self.paragraphs = paragraphs
self.id_to_par = self._build_id_paragraph_dict()
def _build_id_paragraph_dict(self):
return {x.par_id: x for x in self.paragraphs}
def get_par(self, par_id) -> SquadParagraph:
return self.id_to_par[par_id]
def add_par(self, par: SquadParagraph):
if par.par_id in self.id_to_par:
raise ValueError("This paragraph id already exists in this document!")
if par.doc_title != self.title:
raise ValueError("Paragraph title not matching document title!")
self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True))
self.id_to_par[par.par_id] = self.paragraphs[-1]
def __repr__(self) -> str:
return f"Title: {self.title}. Number of paragraphs: {len(self.paragraphs)}"
class SquadQuestion(object):
""" Squad Question and paragraphs."""
def __init__(self, question_id: str, question: List[str],
answers: Set[str], paragraph: SquadParagraph):
self.question_id = question_id
self.question = question
self.answers = answers
self.paragraph = paragraph # .get_paragraph_without_text_pickling()
def __repr__(self) -> str:
return f"{self.question_id}: {' '.join(self.question)}\nAnswer(s): {self.answers}\n" \
f"Paragraph:\n" + ' '.join(self.paragraph.par_text)
class SquadQuestionWithDistractors(SquadQuestion):
def __init__(self, question_id: str, question: List[str],
answers: Set[str], paragraph: SquadParagraph,
distractors: List[SquadParagraph]):
super().__init__(question_id, question, answers, paragraph)
# self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors]
self.distractors = distractors
def add_distractors(self, paragraphs: List[SquadParagraph]):
""" Doesn't add duplicates """
for paragraph in paragraphs:
if any((x.par_id == paragraph.par_id and x.doc_title == paragraph.doc_title) for x in self.distractors):
continue
# self.distractors.append(paragraph.get_paragraph_without_text_pickling())
self.distractors.append(paragraph)
def squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion:
return RelevanceQuestion(dataset_name='squad',
question_id=squad_question.question_id,
question_tokens=squad_question.question,
supporting_facts=[squad_question.paragraph.par_text],
distractors=[x.par_text for x in squad_question.distractors])
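# Minimal usage sketch of the conversion above (the title, tokens and answers
# below are made-up illustrative values, not taken from the real dataset):
#   par = SquadParagraph(doc_title='Example', par_id=0,
#                        par_text=['An', 'example', 'paragraph', '.'])
#   q = SquadQuestionWithDistractors(question_id='q0',
#                                    question=['What', 'is', 'this', '?'],
#                                    answers={'an example'},
#                                    paragraph=par, distractors=[])
#   rel_q = squad_question_to_relevance_question(q)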
class SquadRelevanceCorpus(Configurable):
TRAIN_DOC_FILE = "train_documents.pkl"
TRAIN_FILE = "train_questions.pkl"
DEV_DOC_FILE = "dev_documents.pkl"
DEV_FILE = "dev_questions.pkl"
NAME = "squad"
VOCAB_FILE = "squad_vocab.txt"
WORD_VEC_SUFFIX = "_pruned"
@staticmethod
def make_corpus(train_documents: List[SquadDocument],
train: List[SquadQuestionWithDistractors],
dev_documents: List[SquadDocument],
dev: List[SquadQuestionWithDistractors]):
dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME)
# if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0:
# raise ValueError("Directory %s already exists and is non-empty" % dir)
if not exists(dir):
makedirs(dir)
train_document_dict = {doc.title: doc for doc in train_documents}
if len(train_document_dict) != len(train_documents):
raise ValueError("different train documents have the same title!")
dev_document_dict = {doc.title: doc for doc in dev_documents}
if len(dev_document_dict) != len(dev_documents):
raise ValueError("different dev documents have the same title!")
for name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev),
(SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict),
(SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]:
if data is not None:
with open(join(dir, name), 'wb') as f:
pickle.dump(data, f)
def __init__(self):
dir = join(CORPUS_DIR, self.NAME)
if not exists(dir) or not isdir(dir):
raise ValueError("No directory %s, corpus not built yet?" % dir)
self.dir = dir
self.train_title_to_document = None
self.dev_title_to_document = None
@property
def evidence(self):
return None
def get_vocab_file(self):
self.get_vocab()
return join(self.dir, self.VOCAB_FILE)
def get_vocab(self):
""" get all-lower cased unique words for this corpus, includes train/dev/test files """
voc_file = join(self.dir, self.VOCAB_FILE)
if exists(voc_file):
with open(voc_file, "r") as f:
return [x.rstrip() for x in f]
else:
voc = set()
for fn in [self.get_train, self.get_dev, self.get_test]:
for question in fn():
voc.update(x.lower() for x in question.question)
for para in (question.distractors + [question.paragraph]):
voc.update(x.lower() for x in para.par_text)
voc_list = sorted(list(voc))
with open(voc_file, "w") as f:
for word in voc_list:
f.write(word)
f.write("\n")
return voc_list
def get_pruned_word_vecs(self, word_vec_name, voc=None):
"""
Loads word vectors that have been pruned to the case-insensitive vocab of this corpus.
WARNING: this includes dev words
This exists since loading word-vecs each time we startup can be a big pain, so
we cache the pruned vecs on-disk as a .npy file we can re-load quickly.
"""
vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + ".npy")
if isfile(vec_file):
print("Loading word vec %s for %s from cache" % (word_vec_name, self.name))
with open(vec_file, "rb") as f:
return pickle.load(f)
else:
print("Building pruned word vec %s for %s" % (self.name, word_vec_name))
voc = self.get_vocab()
vecs = load_word_vectors(word_vec_name, voc)
with open(vec_file, "wb") as f:
pickle.dump(vecs, f)
return vecs
def get_resource_loader(self):
return ResourceLoader(self.get_pruned_word_vecs)
def _load_document_dict(self, train: bool):
if train:
if self.train_title_to_document is None:
self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE))
else:
if self.dev_title_to_document is None:
self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE))
def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool):
title_to_doc = self.train_title_to_document if train else self.dev_title_to_document
paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text
paragraph.pickle_text = True # So that there will be no problems later
def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool):
for par in [question.paragraph] + question.distractors:
self._insert_text_to_paragraph(par, train)
def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool):
self._load_document_dict(train)
for q in questions:
self._insert_text_to_question(q, train)
def get_train(self) -> List[SquadQuestionWithDistractors]:
questions = self._load(join(self.dir, self.TRAIN_FILE))
self._populate_questions(questions, train=True)
return questions
def get_dev(self) -> List[SquadQuestionWithDistractors]:
questions = self._load(join(self.dir, self.DEV_FILE))
self._populate_questions(questions, train=False)
return questions
def get_test(self) -> List[SquadQuestionWithDistractors]:
return []
def _load(self, file):
if not exists(file):
return []
with open(file, "rb") as f:
return pickle.load(f)
def __getstate__(self):
state = self.__dict__.copy()
state['train_title_to_document'] = None
state['dev_title_to_document'] = None
return state
def __setstate__(self, state):
self.__dict__ = state
| 2.421875 | 2 |
src/saturnv_ui/saturnv/ui/presenters/__init__.py | epkaz93/saturnv | 1 | 12799398 | from .basepresenter import BasePresenter, PresenterWidgetMixin
from .mainpresenter import MainPresenter
| 1.03125 | 1 |
image_processing/src/image_processing/image_processing.py | Fricodelco/image_matching | 0 | 12799399 | <reponame>Fricodelco/image_matching
#!/usr/bin/env python3
from cv2 import resize
from numpy import reshape
import rospy
import cv2
import numpy as np
from PIL import Image
import rospkg
import gdal
from dataclasses import dataclass
from geodetic_conv import GeodeticConvert
from decimal import Decimal
import os
from time import time
@dataclass
class img_point:
pixel_y: int = 0
pixel_x: int = 0
lat: float = 0.0
lon: float = 0.0
class image_processing():
def __init__(self, filename = None, img = None):
self.cuda = self.is_cuda_cv()
self.main_points = []
self.g_c = GeodeticConvert()
self.img = None
self.pixel_size = 0
self.kp = None
self.dp = None
self.cadr_scale = 0.0
time_start = time()
if filename is not None:
home = os.getenv("HOME")
data_path = home+'/copa5/map'
file_exists = os.path.exists(data_path+'/'+filename+'.tif')
# print("start job")
try:
if file_exists is True:
# raster = gdal.Open(data_path+'/'+filename+'.tif')
self.img = cv2.imread(data_path+'/'+filename+'.tif')
else:
# raster = gdal.Open(data_path+'/'+filename+'.TIF')
self.img = cv2.imread(data_path+'/'+filename+'.TIF')
except:
# print("NO MAP FILE")
return None
# print("map loaded", time() - time_start)
time_start = time()
# self.img = raster.ReadAsArray()
# self.img = np.dstack((self.img[0],self.img[1],self.img[2]))
# self.img = self.img[0]
self.img = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY)
# print("to gray complete", time() - time_start)
time_start = time()
with open(data_path+'/'+filename+'.@@@') as f:
lines = f.readlines()
for i in range(2, len(lines)):
sub_str = lines[i].split(' ')
sub_str = [j for j in sub_str if j]
try:
sub_str.remove('\n')
except:
e = 0
sub_str = [float(k) for k in sub_str]
point = img_point(sub_str[0], sub_str[1],
sub_str[2], sub_str[3])
self.main_points.append(point)
else:
self.img = img
# self.img = self.img[:,:,2]
def find_pixel_size(self):
self.g_c.initialiseReference(self.main_points[0].lat, self.main_points[0].lon, 0)
x_1, y_1, z_1 = self.g_c.geodetic2Ned(self.main_points[1].lat, self.main_points[1].lon, 0)
x_2, y_2, z_2 = self.g_c.geodetic2Ned(self.main_points[3].lat, self.main_points[3].lon, 0)
if abs(x_1) > abs(x_2):
x = x_1
else:
x = x_2
if abs(y_1) > abs(y_2):
y = y_1
else:
y = y_2
pixel_size_1 = Decimal((abs(x)))/Decimal(self.img.shape[0])
pixel_size_2 = Decimal((abs(y)))/Decimal(self.img.shape[1])
pixel_size = (Decimal(pixel_size_1) + Decimal(pixel_size_2))/Decimal(2)
self.pixel_size = pixel_size
return pixel_size
def find_pixel_size_by_height(self, height, poi):
x = Decimal(np.tanh(poi/2)*2*height)
self.pixel_size = x/Decimal(self.img.shape[1])
def is_cuda_cv(self):
try:
count = cv2.cuda.getCudaEnabledDeviceCount()
if count > 0:
# print("CUDA IS ENABLED")
return True
else:
# print("CUDA IS DISABLED")
return False
except:
# print("CUDA IS DISABLED")
return False
def find_kp_dp_scale(self, match_finder):
self.img, self.cadr_scale, self.pixel_size = match_finder.rescale_cadr(self.img, self.pixel_size)
self.kp, self.dp, self.img = match_finder.find_kp_dp(self.img)
# def main():
# map_ = image_processing(filename = '26_12_2021_nn')
# map_.find_pixel_size()
# if __name__ == '__main__':
# main()
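# Usage sketch for the in-memory path (illustrative values only; the frame file,
# flight height and 'poi' value below are assumptions, with 'poi' presumably being
# the camera opening angle in radians):
# cadr = image_processing(img=cv2.imread('frame.png'))
# cadr.find_pixel_size_by_height(height=100.0, poi=1.04)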
| 2.390625 | 2 |
mass_flask_webui/context_processors.py | mass-project/mass_server | 8 | 12799400 | from flask import current_app, render_template, url_for
from markupsafe import Markup
from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel
from mass_flask_webui.config import webui_blueprint
@webui_blueprint.context_processor
def sample_processors():
def sample_icon(sample):
if isinstance(sample, FileSample):
return Markup('<i class="fa fa-file"></i>')
elif isinstance(sample, IPSample):
return Markup('<i class="fa fa-desktop"></i>')
elif isinstance(sample, DomainSample):
return Markup('<i class="fa fa-globe"></i>')
elif isinstance(sample, URISample):
return Markup('<i class="fa fa-at"></i>')
else:
return Markup('<i class="fa fa-question"></i>')
def is_file_sample(sample):
return isinstance(sample, FileSample)
def is_executable_binary_sample(sample):
return isinstance(sample, ExecutableBinarySample)
def tag_search_link(tag):
kwargs = {
'common-tags': tag,
'submit': 'Submit'
}
return url_for('.sample_search', **kwargs)
return dict(
sample_icon=sample_icon,
is_file_sample=is_file_sample,
is_executable_binary_sample=is_executable_binary_sample,
tag_search_link=tag_search_link
)
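# The helpers returned above are injected into the context of templates rendered
# by this blueprint; a hypothetical template line (not taken from the actual
# templates) could use them as:
#   {{ sample_icon(sample) }} <a href="{{ tag_search_link(tag) }}">{{ tag }}</a>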
@webui_blueprint.context_processor
def user_processors():
def user_level(user):
if user.user_level == UserLevel.USER_LEVEL_ADMIN:
return 'Administrator'
elif user.user_level == UserLevel.USER_LEVEL_MANAGER:
return 'Manager'
elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED:
return 'Privileged user'
elif user.user_level == UserLevel.USER_LEVEL_USER:
return 'Normal user'
elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS:
return 'Guest user'
else:
return 'Unknown user level'
return dict(
user_level=user_level
)
@webui_blueprint.context_processor
def generic_processors():
def mass_version():
return current_app.version
def pagination(paginator):
return Markup(render_template('pagination.html', paginator=paginator))
return dict(
mass_version=mass_version,
pagination=pagination
)
| 2.234375 | 2 |
tests/test_cases.py | cariad/differently | 0 | 12799401 | <filename>tests/test_cases.py
from os import scandir
from pathlib import Path
from differently import JsonDifferently
def test() -> None:
for directory in scandir(Path() / "tests" / "cases"):
a = JsonDifferently.load(Path(directory) / "a.json")
b = JsonDifferently.load(Path(directory) / "b.json")
actual_json = str(JsonDifferently(a, b, color=True))
with open(Path(directory) / "actual-json.txt", "w") as f:
f.write(actual_json)
with open(Path(directory) / "expect-json.txt", "r") as f:
expect = f.read()
if actual_json != expect:
print("ACTUAL:")
print(actual_json)
print()
print("EXPECTED:")
print(expect)
assert False
| 2.765625 | 3 |
ad_fcemu/nes_cpu_test.py | sisyphus1993/fc-emulator | 0 | 12799402 | <filename>ad_fcemu/nes_cpu_test.py
# -*- coding: utf-8 -*-
import log_differ as ld
import nes_cpu as nc
import nes_file_test as nft
def test_load_nes():
nes = nft.prepared_nes()
mem = nc.Memory(None, None)
mem.load_nes(nes)
if len(nes.prg_rom) == 32 * 1024:
expected = nes.prg_rom
else:
expected = nes.prg_rom * 2
result = mem.prg_rom
assert expected == result, result
def test_status():
for i in range(255):
s = nc._Status(i)
        # bit 5 is always set to 1
assert s._ignore == 1
assert s.value == (i | 0b00100000), s.value
def test_status2():
s = nc._Status(0x24)
expected = dict(
carry=0,
zero=0,
interrupt=1,
decimal=0,
overflow=0,
negative=0,
)
for k, v in expected.items():
r = getattr(s, k)
assert r == v, r
def address_for_log_info(addr):
if addr is None:
return -1
else:
return addr
def test_by_log_differ():
differ = ld.LogDiffer.from_json('misc/nestest_log.json')
nes = nft.prepared_nes()
mem = nc.Memory(None, None)
mem.load_nes(nes)
cpu = nc.NesCPU(mem)
    # special initialization required by nestest.nes
cpu.pc = 0xc000
cpu.status = nc._Status(0x24)
while True:
info = cpu.dump_registers()
op, addr, mode = cpu._prepare()
info['op'] = op
info['address'] = address_for_log_info(addr)
try:
differ.diff(info)
except ld.AllTestsPassed:
break
cpu._execute(op, addr, mode)
def test_push_pop1():
mem = nc.Memory(None, None)
cpu = nc.NesCPU(mem)
cpu.push(1)
expected = 1
result = cpu.pop()
assert expected == result, result
def test_push_pop2():
mem = nc.Memory(None, None)
cpu = nc.NesCPU(mem)
cpu.push(1)
cpu.push(2)
cpu.push(3)
cpu.push(4)
expected = [4, 3, 2, 1]
result = [cpu.pop() for _ in range(4)]
assert expected == result, result
def test_push():
""" addr = s + 0x0100 """
mem = nc.Memory(None, None)
cpu = nc.NesCPU(mem)
sp = cpu.sp
cpu.push(1)
expected = 1
addr = sp + 0x0100
result = cpu.memory[addr]
assert expected == result, result
def _test_ppu():
nes = nft.prepared_nes()
cpu = nc.NesCPU()
cpu.load_nes(nes)
cpu.interrupt('reset')
for _ in range(20000):
cpu.execute()
expected = [
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 45, 45, 32, 82, 117, 110, 32, 97, 108, 108, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 66, 114, 97, 110, 99, 104, 32, 116, 101, 115, 116, 115,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 70, 108, 97, 103, 32, 116,
101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32,
73, 109, 109, 101, 100, 105, 97, 116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 45, 45, 32, 73, 109, 112, 108, 105, 101, 100, 32, 116, 101, 115, 116, 115, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 83, 116, 97, 99, 107, 32, 116, 101, 115,
116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 99, 99,
117, 109, 117, 108, 97, 116, 111, 114, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 45, 45, 32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 44, 88, 41, 32, 116, 101, 115, 116, 115, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 101, 32, 116, 101, 115,
116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 98, 115, 111, 108, 117,
116, 101, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45,
32, 40, 73, 110, 100, 105, 114, 101, 99, 116, 41, 44, 89, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 45, 45, 32, 65, 98, 115, 111, 108, 117, 116, 101, 44, 89, 32, 116, 101, 115, 116,
115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 90, 101, 114, 111, 112, 97, 103, 101,
44, 88, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 45, 32, 65,
98, 115, 111, 108, 117, 116, 101, 44, 88, 32, 116, 101, 115, 116, 115, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 85, 112, 47, 68, 111, 119, 110, 58, 32, 115, 101,
108, 101, 99, 116, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 83, 116,
97, 114, 116, 58, 32, 114, 117, 110, 32, 116, 101, 115, 116, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 83, 101, 108, 101, 99, 116, 58, 32, 73, 110, 118, 97, 108, 105, 100, 32, 111, 112, 115, 33,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
result = cpu.ppu.memory[0x2000:0x2400]
assert expected == result, result
| 2.328125 | 2 |
04-FaceRecognition-II/thetensorclan-backend-heroku/models/__init__.py | amitkml/TSAI-DeepVision-EVA4.0-Phase-2 | 1 | 12799403 | from .utils import get_classifier, MODEL_REGISTER | 1.0625 | 1 |
Code_Challenges/bubble_sort.py | fuse999/Python_Sandbox | 0 | 12799404 | import math
import os
import random
import re
import sys
a = [6, 4, 1]
# Complete the countSwaps function below.
def countSwaps(a):
n = len(a)
swapcount = 0
for i in range(n):
for j in range(0, n-i-1):
if a[j] > a[j+1]:
swapcount += 1
a[j], a[j+1] = a[j+1], a[j]
print(f"Array is sorted in {swapcount} swaps.")
print(f"First Element: {a[0]}")
print(f"Last Element: {a[-1]}")
countSwaps(a)
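# Worked example: for a = [6, 4, 1] the passes swap (6,4), (6,1) and then (4,1),
# so the expected output is:
#   Array is sorted in 3 swaps.
#   First Element: 1
#   Last Element: 6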
| 3.484375 | 3 |
pets/models.py | guipeeix7/website | 6 | 12799405 | from users.models import User
from django.db import models
PET_SIZES = [('P', 'Pequeno'), ('M', 'Médio'), ('G', 'Grande')]
PET_SEX = [('M', 'Macho'), ('F', 'Fêmea')]
# PET TYPE
GATO = 'Gato'
CACHORRO = 'Cachorro'
PASSARO = 'Pássaro'
ROEDOR = 'Roedor'
OUTRO = 'Outro'
# DEFAULT
DE00 = 'Sem raça definida'
DE01 = 'Outra'
# CAT BREED
CB00 = 'Abssínios'
CB01 = 'Alemão de pelo comprido'
CB02 = 'American Curl'
CB03 = 'American Shorthair'
CB04 = 'American Wirehair'
CB05 = 'Azul Russo'
CB06 = 'Balineses'
CB07 = 'Bengalês'
CB08 = 'Bobtail'
CB09 = 'Bobtail Japonês'
CB10 = 'Bombay'
CB11 = 'British Shorthair'
CB12 = 'Burmês'
CB13 = 'Burmilla'
CB14 = 'Califórinia Spangled'
CB15 = 'Chartreux'
CB16 = 'Cornish Rex'
CB17 = 'Cymric'
CB18 = 'Devon Rex'
CB19 = 'Exóticos'
CB20 = 'Foldex'
CB21 = 'German Rex'
CB22 = 'Habana'
CB23 = 'High Land Fold'
CB24 = 'Himalaios'
CB25 = 'Javaneses'
CB26 = 'Khao Manee'
CB27 = 'Korat'
CB28 = 'Maine Coon'
CB29 = 'Manx'
CB30 = '<NAME>'
CB31 = '<NAME>'
CB32 = 'Ragdoll'
CB33 = '<NAME>'
CB34 = 'Ragamuffin'
CB35 = 'Ragdoll'
# DOG BREED
DB00 = 'Akita'
DB01 = 'Basset hound'
DB02 = 'Beagle'
DB03 = 'Boiadeiro australiano'
DB04 = 'Border collie'
DB05 = 'Boston terrier'
DB06 = 'Boxer'
DB07 = 'Buldogue'
DB08 = 'Bull terrier'
DB09 = 'Chihuahua'
DB10 = 'Chow chow'
DB11 = 'Dálmata'
DB12 = 'Doberman'
DB13 = 'Dogo argentino'
DB14 = 'Dogue alemão'
DB15 = 'Fila brasileiro'
DB16 = 'Golden retriever'
DB17 = 'Husky siberiano'
DB18 = '<NAME>'
DB19 = 'Labrador'
DB20 = 'Lhasa apso'
DB21 = 'Lulu da pomerânia'
DB22 = 'Maltês'
DB23 = 'Pastor alemão'
DB24 = 'Pastor australianoPastor de Shetland'
DB25 = 'Pequinês'
DB26 = 'Pinscher'
DB27 = 'Pit bull'
DB28 = 'Poodle'
DB29 = 'Pug'
DB30 = 'Rottweiler'
DB31 = 'Shar-pei'
DB32 = 'Shiba'
DB33 = 'Shih tzu'
DB34 = 'Weimaraner'
DB35 = 'Yorkshire'
# BIRD BREED
BB00 = 'Agapornis'
BB01 = 'Araponga'
BB02 = 'Arara'
BB03 = 'Azulão'
BB04 = 'Bavete'
BB05 = 'Bicudo'
BB06 = 'Cabloquinho'
BB07 = 'Cacatua'
BB08 = 'Calafete'
BB09 = 'Calopsita'
BB10 = 'Canário'
BB11 = 'Cardeal'
BB12 = 'Coleiro'
BB13 = 'Cordonbleu'
BB14 = 'Coruja'
BB15 = 'Curió'
BB16 = 'Diamante Mandarin'
BB17 = 'Dominó'
BB18 = 'Explêndido'
BB19 = 'Granatina'
BB20 = 'Jandaia'
BB21 = 'Lóris'
BB22 = 'Mainá'
BB23 = 'Modesto'
BB24 = 'Papagaio'
BB25 = 'Pássaro Preto'
BB26 = 'Patativa'
BB27 = 'Perequito Autraliano'
BB28 = 'Pica-pau'
BB29 = 'Pintassilgo'
BB30 = 'Pombo'
BB31 = 'Rolinha'
BB32 = 'Rouxinol'
BB33 = 'S<NAME>'
BB34 = 'Tangará'
BB35 = 'Tico-tico'
BB36 = 'Tucano'
# RODENT BREED
RB00 = 'Camundongo'
RB01 = 'Chinchila'
RB02 = 'Gerbil - Esquilo da MOngólia'
RB03 = 'Hamster Anão Russo'
RB04 = 'Hamster Sírio'
RB05 = 'Mecol - Twister'
RB06 = 'Porquinho da índia'
RB07 = 'Topolino'
TYPE_CHOICES = [(GATO, GATO), (CACHORRO, CACHORRO), (PASSARO, PASSARO), (ROEDOR, ROEDOR), (OUTRO, OUTRO),]
BREED_CHOICES = [
(DE00, DE00), (DE01, DE01),
(CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04), (CB05, CB05),
(CB06, CB06), (CB07, CB07), (CB08, CB08), (CB09, CB09), (CB10, CB10), (CB11, CB11),
(CB12, CB12), (CB13, CB13), (CB14, CB14), (CB15, CB15), (CB16, CB16), (CB17, CB17),
(CB18, CB18), (CB19, CB19), (CB20, CB20), (CB21, CB21), (CB22, CB22), (CB23, CB23),
(CB24, CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28, CB28), (CB29, CB29),
(CB30, CB30), (CB31, CB31), (CB32, CB32), (CB33, CB33), (CB34, CB34), (CB35, CB35),
(DB00, DB00), (DB01, DB01), (DB02, DB02), (DB03, DB03), (DB04, DB04), (DB05, DB05),
(DB06, DB06), (DB07, DB07), (DB08, DB08), (DB09, DB09), (DB10, DB10), (DB11, DB11),
(DB12, DB12), (DB13, DB13), (DB14, DB14), (DB15, DB15), (DB16, DB16), (DB17, DB17),
(DB18, DB18), (DB19, DB19), (DB20, DB20), (DB21, DB21), (DB22, DB22), (DB23, DB23),
(DB24, DB24), (DB25, DB25), (DB26, DB26), (DB27, DB27), (DB28, DB28), (DB29, DB29),
(DB30, DB30), (DB31, DB31), (DB32, DB32), (DB33, DB33), (DB34, DB34), (DB35, DB35),
(BB00, BB00), (BB01, BB01), (BB02, BB02), (BB03, BB03), (BB04, BB04), (BB05, BB05),
(BB06, BB06), (BB07, BB07), (BB08, BB08), (BB09, BB09), (BB10, BB10), (BB11, BB11),
(BB12, BB12), (BB13, BB13), (BB14, BB14), (BB15, BB15), (BB16, BB16), (BB17, BB17),
(BB18, BB18), (BB19, BB19), (BB20, BB20), (BB21, BB21), (BB22, BB22), (BB23, BB23),
(BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28, BB28), (BB29, BB29),
(BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33, BB33), (BB34, BB34), (BB35, BB35),
(RB00, RB00), (RB01, RB01), (RB02, RB02), (RB03, RB03), (RB04, RB04), (RB05, RB05),
(RB06, RB06), (RB07, RB07),
]
def get_cat_breeds():
catBreeds = [
DE00, DE01,
CB00, CB01, CB02, CB03, CB04, CB05, CB06, CB07, CB08, CB09, CB10, CB11,
CB12, CB13, CB14, CB15, CB16, CB17, CB18, CB19, CB20, CB21, CB22, CB23,
CB24, CB25, CB26, CB27, CB28, CB29, CB30, CB31, CB32, CB33, CB34, CB35,
]
return catBreeds
def get_dog_breeds():
dogBreeds = [
DE00, DE01,
DB00, DB01, DB02, DB03, DB04, DB05, DB06, DB07, DB08, DB09, DB10, DB11,
DB12, DB13, DB14, DB15, DB16, DB17, DB18, DB19, DB20, DB21, DB22, DB23,
DB24, DB25, DB26, DB27, DB28, DB29, DB30, DB31, DB32, DB33, DB34, DB35,
]
return dogBreeds
def get_bird_breeds():
birdBreeds = [
DE00, DE01,
BB00, BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08, BB09, BB10, BB11,
BB12, BB13, BB14, BB15, BB16, BB17, BB18, BB19, BB20, BB21, BB22, BB23,
BB24, BB25, BB26, BB27, BB28, BB29, BB30, BB31, BB32, BB33, BB34, BB35,
]
return birdBreeds
def get_rodent_breeds():
rodentBreeds = [
DE00, DE01,
RB00, RB01, RB02, RB03, RB04, RB05, RB06, RB07,
]
return rodentBreeds
def get_other_breeds():
otherBreed = [DE01,]
return otherBreed
class Pet(models.Model):
user = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
image = models.ImageField(upload_to='pet_image', blank=False, null=False)
name = models.CharField(max_length=30, blank=False, null=False)
description = models.CharField(max_length=500, blank=False, null=False)
age = models.PositiveSmallIntegerField(null=True)
size = models.CharField(max_length=1, choices=PET_SIZES, blank=False, null=False)
sex = models.CharField(max_length=1, choices=PET_SEX, blank=False, null=False)
vaccinated = models.BooleanField(default=False)
castrated = models.BooleanField(default=False)
dewormed = models.BooleanField(default=False)
vulnerable = models.BooleanField(default=False)
isAdopted = models.BooleanField(default=False)
pet_type = models.CharField(max_length=50, choices=TYPE_CHOICES)
breed = models.CharField(max_length=50, choices=BREED_CHOICES)
| 1.914063 | 2 |
2018/03/intact.py | jhnesk/advent-of-code | 0 | 12799406 | <filename>2018/03/intact.py
#!/usr/bin/env python3.7
import sys
lines = [claim.rstrip('\n') for claim in open(sys.argv[1])]
canvas = [[0 for x in range(1000)] for y in range(1000)]
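# Each claim line is expected to look like "#1 @ 1,3: 4x4"
# (id, left offset, top offset, width x height), which is what the
# split-based parsing below relies on.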
for claim in lines:
x = int(claim.split(' ')[2].split(',')[0])
y = int(claim.split(' ')[2].split(',')[1][:-1])
a = int(claim.split(' ')[3].split('x')[0])
b = int(claim.split(' ')[3].split('x')[1])
for i in range(x, x + a):
for j in range(y, y + b):
canvas[i][j] += 1
def checkIntact(canvas, claim):
x = int(claim.split(' ')[2].split(',')[0])
y = int(claim.split(' ')[2].split(',')[1][:-1])
a = int(claim.split(' ')[3].split('x')[0])
b = int(claim.split(' ')[3].split('x')[1])
for i in range(x, x + a):
for j in range(y, y + b):
if canvas[i][j] > 1:
return False
# If we get here the claim is intact!
return True
for claim in lines:
if checkIntact(canvas, claim):
print(claim.split(' ')[0][1:])
break
| 3.03125 | 3 |
ripiu/djangocms_aoxomoxoa/admin/options/navigation.py | ripiu/djangocms_aoxomoxoa | 0 | 12799407 | <reponame>ripiu/djangocms_aoxomoxoa
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class TilesGridNavigationUniteOptionsAdmin(admin.ModelAdmin):
'''
Tiles - Grid
'''
fieldsets = (
(_('Navigation options'), {
'classes': ('collapse',),
'fields': (
'grid_num_rows',
'theme_navigation_type',
'theme_bullets_margin_top',
'theme_bullets_color',
'bullets_space_between',
'theme_arrows_margin_top',
'theme_space_between_arrows',
'theme_navigation_align',
'theme_navigation_offset_hor',
)
}),
)
class CarouselNavigationUniteOptionsAdmin(admin.ModelAdmin):
'''
Carousel
'''
fieldsets = (
(_('Navigation options'), {
'classes': ('collapse',),
'fields': (
'theme_enable_navigation',
'theme_navigation_position',
'theme_navigation_enable_play',
'theme_navigation_align',
'theme_navigation_offset_hor',
'theme_navigation_margin',
'theme_space_between_arrows',
)
}),
)
| 1.75 | 2 |
engine/modularity.py | tchimih/NSD_project | 1 | 12799408 | <reponame>tchimih/NSD_project
import networkx as nx
# All rights are reserved to the original authors; only a portion of their code is used here.
__author__ = """<NAME> (<EMAIL>)"""
def modularity(partition, graph, weight='weight'):
"""Compute the modularity of a partition of a graph
Parameters
----------
partition : dict
the partition of the nodes, i.e a dictionary where keys are their nodes
and values the communities
graph : networkx.Graph
the networkx graph which is decomposed
weight : str, optional
the key in graph to use as weight. Default to 'weight'
Returns
-------
modularity : float
The modularity
Raises
------
KeyError
If the partition is not a partition of all graph nodes
ValueError
If the graph has no link
TypeError
If graph is not a networkx.Graph
References
----------
.. 1. Newman, M.E.J. & <NAME>. Finding and evaluating community
structure in networks. Physical Review E 69, 26113(2004).
Examples
--------
>>> G=nx.erdos_renyi_graph(100, 0.01)
>>> part = best_partition(G)
>>> modularity(part, G)
if type(graph) != nx.Graph:
raise TypeError("Bad graph type, use only non directed graph")
"""
inc = dict([])
deg = dict([])
links = graph.size(weight=weight)
if links == 0:
raise ValueError("A graph without link has an undefined modularity")
for node in graph:
com = partition[node]
deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)
for neighbor, datas in graph[node].items():
edge_weight = datas.get(weight, 1)
if partition[neighbor] == com:
if neighbor == node:
inc[com] = inc.get(com, 0.) + float(edge_weight)
else:
inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.
res = 0.
for com in set(partition.values()):
res += inc.get(com, 0.) - \
((deg.get(com, 0.) ** 2) / (4. * links))
return (1.0 / links) * res
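# Minimal usage sketch (the graph and the two-community partition below are
# illustrative, not part of the original module):
#   G = nx.barbell_graph(5, 0)
#   part = {node: 0 if node < 5 else 1 for node in G.nodes()}
#   modularity(part, G)  # approximately 0.45 for this split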
| 2.890625 | 3 |
ea_sim/utils.py | lis-epfl/Tensoft-G21 | 1 | 12799409 | <filename>ea_sim/utils.py<gh_stars>1-10
import os
import json
import pickle
import sqlite3
import numpy as np
import matplotlib
# select matplotlib backend
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from params_conf import N_MODULES, STIFF_TABLE
# ================================== #
# Utils functions #
# ================================== #
def print_dict(d, level=0, list_on_levels=False):
for k, v in sorted(d.items()):
if isinstance(v, dict):
print('\t'*level + k+':')
print_dict(v, level+1, list_on_levels)
elif isinstance(v, list) and list_on_levels and k == 'modules_conf':
print('\t' * level + '{}: ['.format(k))
for element in v:
print('\t' * (level+1) + '{}'.format(element))
print('\t' * level + ']')
else:
print('\t'*level + '{}: {}'.format(k, v))
def print_header(config, bar_num=50):
print('# ' + '='*bar_num + ' #')
print('\t\tEVOLUTIONARY SIMULATION\n\t' + ' '*7 + 'OF TENSEGRITY SOFT ROBOTS')
print('# ' + '='*bar_num + ' #\n')
print('# ' + '='*bar_num + ' #')
print('\t' + ' '*7 + 'EXPERIMENT CONFIGURATION')
print('# ' + '='*bar_num + ' #')
print_dict(config)
print('# ' + '='*bar_num + ' #')
def print_header_contr_evo(config, bar_num=50):
print('# ' + '=' * bar_num + ' #')
print('\t\tEVOLUTIONARY SIMULATION\n\t' +
' OF TENSEGRITY SOFT ROBOTS CONTROLLERS \n\t' +
('FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
else ' ' * 8 + 'FOR GOAL REACHING TASK')
)
print('# ' + '=' * bar_num + ' #\n')
print('# ' + '=' * bar_num + ' #')
print('\t' + ' ' * 7 + 'EXPERIMENT CONFIGURATION')
print('# ' + '=' * bar_num + ' #')
print_dict(config, list_on_levels=True)
print('# ' + '=' * bar_num + ' #')
def print_header_coev(config, bar_num=55):
print('# ' + '=' * bar_num + ' #')
print('\t\tCO-EVOLUTIONARY SIMULATION\n\t' +
' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
(' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
else ' ' * 10 + 'FOR GOAL REACHING TASK')
)
print('# ' + '=' * bar_num + ' #\n')
print('# ' + '=' * bar_num + ' #')
print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
print('# ' + '=' * bar_num + ' #')
print_dict(config, list_on_levels=False)
print('# ' + '=' * bar_num + ' #')
def print_header_double_map(config, bar_num=55):
print('# ' + '=' * bar_num + ' #')
print('\t\t EVOLUTIONARY SIMULATION\n\t' +
' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
(' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
else ' ' * 10 + 'FOR GOAL REACHING TASK') + '\n\t' +
' ' * 7 + '(DOUBLE MAP-ELITES VARIANT)'
)
print('# ' + '=' * bar_num + ' #\n')
print('# ' + '=' * bar_num + ' #')
print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
print('# ' + '=' * bar_num + ' #')
print_dict(config, list_on_levels=False)
print('# ' + '=' * bar_num + ' #')
def print_header_single_map(config, bar_num=55):
print('# ' + '=' * bar_num + ' #')
print('\t\t EVOLUTIONARY SIMULATION\n\t' +
' ' * 7 + 'OF MORPHOLOGY AND CONTROLLER\n\t' +
' ' * 8 + 'OF TENSEGRITY SOFT ROBOTS\n\t' +
(' ' * 2 + 'FOR GOAL REACHING AFTER SQUEEZING TASK' if 'SGR' in config['simulation_path']
else ' ' * 10 + 'FOR GOAL REACHING TASK') + '\n\t' +
' ' * 7 + '(SINGLE MAP-ELITES VARIANT)'
)
print('# ' + '=' * bar_num + ' #\n')
print('# ' + '=' * bar_num + ' #')
print('\t' + ' ' * 8 + 'EXPERIMENT CONFIGURATION')
print('# ' + '=' * bar_num + ' #')
print_dict(config, list_on_levels=False)
print('# ' + '=' * bar_num + ' #')
def store_checkpoint(checkpoint, filename):
with open(filename, 'wb') as cp_file:
pickle.dump(checkpoint, cp_file)
def record_info(logbook, stats, gen, pop, inv_ind):
if stats is not None:
record = stats.compile(pop) if stats is not None else {}
logbook.record(gen=gen, nevals=len(inv_ind), **record)
def record_population(num_sims, population, file, skips, pbar=None, verbose=False, coev=False):
fitness_values = np.asarray(list(map(lambda i: i.fitness.values, population)))
if coev:
fitness_values = [1.0 / f for f in fitness_values]
pop_stats = {
'num_sims': num_sims,
'avg_fitness': np.mean(fitness_values),
'std_dev': np.std(fitness_values),
'min': np.min(fitness_values),
'max': np.max(fitness_values)
}
# store the current population values
file.write(json.dumps({
**pop_stats,
'population': [individual.info(coev=coev) for individual in population]
}))
if verbose:
if pbar is not None:
pbar.set_postfix({
'avg': pop_stats['avg_fitness'],
'std': pop_stats['std_dev'],
'min': pop_stats['min'],
'max': pop_stats['max'],
'skip': skips
})
else:
print('num_sims: {} | Fitness -> avg: {} std: {} min: {} max: {}'.format(
pop_stats['num_sims'], pop_stats['avg_fitness'],
pop_stats['std_dev'], pop_stats['min'], pop_stats['max']
))
def evaluate_ind(toolbox, individuals, glob_history, local_history=None, coev=False, eval_all=False):
"""
:param toolbox:
:param individuals:
:param glob_history: a dictionary that maps simulator
input strings into corresponding computed fitness.
:param local_history:
:param coev:
:param eval_all:
:return:
"""
# consider only new individuals (offsprings)
invalid_ind = [ind for ind in individuals if (not ind.fitness.valid or (coev and eval_all))]
# select for evaluation only solutions
# that have not been already evaluated
skip = []
to_evaluate = []
for ind in invalid_ind:
ind_string = ind.string_input()
if not coev and ind_string in glob_history:
# assign fitness previously computed for the same configuration
ind.fitness.values = glob_history[ind_string]
# store also record into a local history to support MAP creation functionality
if local_history is not None:
local_history[ind_string] = glob_history[ind_string]
skip.append(ind)
else:
to_evaluate.append(ind)
n_evaluations = 0
if len(to_evaluate) > 0:
fitnesses = toolbox.map(toolbox.evaluate, to_evaluate)
n_evaluations = len(fitnesses)
for ind, fit in zip(to_evaluate, fitnesses):
ind.fitness.values = fit
ind_string = ind.string_input()
# update history records with latest fitness
glob_history[ind_string] = fit
# store also record into a local history to support MAP creation functionality
if local_history is not None:
local_history[ind_string] = fit
if not coev:
n_evaluations = len(individuals)
return to_evaluate + skip, len(skip), n_evaluations
def plot_population_stats(pop, results_folder, seed, num_sim, normalize=False):
""" Plot the fitness/num_modules/stiffness distribution across given robot population
:param pop: population of robot individuals
:param results_folder: folder where to store the plots
:param seed: simulation seed
:param num_sim: simulation id
:param normalize: whether to reports the plots in a normalized manner (default: False)
:return:
"""
# split robots interested properties into three lists
fits, n_mods, stiffs = list(zip(*[(ind.get_fitness(), ind.num_modules, ind.stiffness)
for ind in pop]))
# plot the properties distribution of last generation
configs = [
{
'data': fits,
'title': 'Fitness',
'h_range': (0, np.max(fits)),
'out_file': os.path.join(results_folder, 'fitness',
'fit_dist_sim_{}_{}.pdf'.format(seed, num_sim)),
'bins': max(len(fits)//4, 1),
'discrete_hist': False,
'norm': normalize
},
{
'data': n_mods,
'title': '# modules',
'h_range': (2, 11),
'out_file': os.path.join(results_folder, 'n_modules',
'num_mods_dist_sim_{}_{}.pdf'.format(seed, num_sim)),
'bins': N_MODULES,
'discrete_hist': True,
'norm': normalize
},
{
'data': stiffs,
'title': 'Stiffness',
'out_file': os.path.join(results_folder, 'stiffness',
'stiff_dist_sim_{}_{}.pdf'.format(seed, num_sim)),
'bins': STIFF_TABLE,
'discrete_hist': True,
'norm': normalize
}
]
for conf in configs:
plot_population_dist(conf)
def plot_population_dist(conf):
""" Plot the distribution of the values of given property across
:param conf:
:return:
"""
font = {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'}
matplotlib.rc('font', **font)
matplotlib.rcParams["axes.titlepad"] = 15
matplotlib.rcParams['figure.dpi'] = 300
# create potential needed directory where to store the graph
os.makedirs(os.path.dirname(conf['out_file']), exist_ok=True)
num_colors = len(conf['bins']) if isinstance(conf['bins'], list) else conf['bins']
colors = plt.cm.viridis(np.linspace(0, 1, num_colors))
fig = plt.figure(figsize=(12, 5))
ax = fig.gca()
if conf['discrete_hist']:
inds_vals = {str(sv): 0 for sv in conf['bins']}
for ind_stiff in conf['data']:
inds_vals[str(ind_stiff)] += 1
x, y = list(zip(*[(k, v) for k, v in sorted(inds_vals.items(), key=lambda r: float(r[0]))]))
if conf['norm']:
y = np.asarray(y)/np.sum(y)
ax.bar(x, y, color=colors)
else:
_, bins, patches = ax.hist(conf['data'], bins=conf['bins'], density=conf['norm'],
range=conf['h_range'])
ax.set_xticks(bins)
for i, (c, p) in enumerate(zip(bins, patches)):
plt.setp(p, 'facecolor', colors[i])
ax.set_title('{} distribution across last generation'.format(conf['title']),
fontweight='normal')
ax.set_xlabel(conf['title'])
ax.set_ylabel('# Individuals')
if conf['norm']:
ax.set_ylim(0, 1.1)
plt.savefig(conf['out_file'], bbox_inches='tight')
plt.close()
def parse_robot_string(rb_string):
robot = []
for module_str in rb_string.split('--')[:-1]:
params = module_str.split('-')
module = {
'order': int(params[0].strip()),
'connectedModules': int(params[1].strip()),
'connectedFaces': int(params[2].strip()),
'freq': float(params[3].strip()),
'amplitude': float(params[4].strip()),
'phase': float(params[5].strip()),
'rot': float(params[6].strip()),
'stiff': float(params[7].strip())
}
robot.append(module)
return robot
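# Expected layout of rb_string, as inferred from the parsing above: one block per
# module with the eight fields separated by '-' and each module terminated by '--',
# e.g. (made-up values):
#   '0 - 1 - 2 - 0.5 - 1.0 - 0.0 - 90.0 - 0.1 -- 1 - 0 - 3 - 0.5 - 1.0 - 0.0 - 0.0 - 0.1 --'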
# ================================== #
# SIM HISTORY MANAGEMENT #
# ================================== #
def load_history(history_file):
""" Load simulation history from provided file.
In case no file is given, returns an empty history
:param history_file:
:return:
"""
history = {}
if history_file is not None and os.path.exists(history_file):
with open(history_file) as history_in:
# skip header
history_in.readline()
for line in history_in:
robot_string, fitness = line.strip().split(',')
history[robot_string.strip()] = (float(fitness),)
return history
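# Expected history file layout (the same one written by store_history below);
# the robot string shown is only a placeholder:
#   rob_string,fitness
#   <robot string>,12.34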
def store_history(history, history_file):
""" Store simulation history in provided file
:param history:
:param history_file:
:return:
"""
with open(history_file, 'w') as out_file:
out_file.write('rob_string,fitness\n')
for rob_string, fit in history.items():
out_file.write('{},{}\n'.format(rob_string.strip(), fit[0]))
# NOTE: these functions below are currently not used.
def load_history_db(history_db):
""" Load simulation history from provided db.
In case no db is given, returns an empty history
:param history_db:
:return:
"""
history = {}
if history_db is not None:
conn = sqlite3.connect(history_db)
cursor = conn.cursor()
for robot_string, fitness in cursor.execute('SELECT * FROM history'):
history[robot_string] = (float(fitness),)
cursor.close()
conn.close()
return history
def store_history_db(history, history_db):
""" Store simulation history in provided db
:param history:
:param history_db:
:return:
"""
def history_gen():
for record in history.items():
yield record
to_init = not os.path.exists(history_db)
conn = sqlite3.connect(history_db)
cursor = conn.cursor()
    # create the history table if the database did not exist yet
if to_init:
cursor.execute('''CREATE TABLE history(robot_string VARCHAR PRIMARY KEY, fitness REAL NOT NULL)''')
    cursor.executemany('''REPLACE INTO history(robot_string, fitness) VALUES (?, ?)''', history_gen())
    conn.commit()
    cursor.close()
conn.close() | 2.3125 | 2 |
Card.py | Ilphrin/TuxleTriad | 1 | 12799410 | # coding: utf-8
import pygame
import os
from functions import *
from color import *
from pygame.locals import *
from listOfCards import allCards, values
from About import About
from Text import *
from color import *
class Card(pygame.sprite.Sprite):
"""Manages the cards in the game"""
def __init__(self, number, owner):
        super(Card, self).__init__()
self.owner = owner
self.number = number
self.name = allCards[self.number]
self.image = None
#self.verso = carteVerso
self.About = About(self.name, self)
        # We put the numbers of the card according to listOfCards.py
self.top = values[number][0]
self.right = values[number][1]
self.bottom = values[number][2]
self.left = values[number][3]
self.values = []
self.values.append(self.top)
self.values.append(self.right)
self.values.append(self.bottom)
self.values.append(self.left)
self.parseToInt()
# Which element
self.elementName = values[number][4]
# Offensive or defensive. Unused for now
self.type = values[number][5]
self.modifierValue = 0
self.inHand = 1
getCard(self)
self.rect = self.image.get_rect()
if self.elementName != None:
self.element, self.elementRect = loadElement(self.elementName)
self.elementRect.topright = self.rect.topright
self.elementRect.move_ip(-2, 2)
self.image.blit(self.element, self.elementRect)
def changeOwner(self):
getCard(self)
self.image.set_alpha()
def addModifier(self, value):
"""Add bonus or malus to the card and draw the bonus on the card"""
self.modifierValue = value
if value > 0:
value = "+" + str(value)
else:
value = str(value)
self.modifier = Text(value, "rimouski sb.ttf", white, 60)
self.modifierBack = Text(value, "rimouski sb.ttf", black, 60)
#self.modifier.rect.topleft = self.rect.topleft
self.modifier.rect.move_ip(35, 15)
self.modifierBack.rect.move_ip(38, 18)
self.image.blit(self.modifierBack.surface, self.modifierBack.rect)
self.image.blit(self.modifier.surface, self.modifier.rect)
for i in range(0, 4):
self.values[i] += self.modifierValue
def addCursor(self):
"""Add a colored border to the focused card"""
self.border, self.borderRect = loadImage("images/border.png")
def parseToInt(self):
for i in range(0, 4):
if (self.values[i] == 'A'):
self.values[i] = 10
else:
self.values[i] = int(self.values[i])
def __repr__(self):
return "<Card at %s >" % (self.rect)
| 3.453125 | 3 |
tests/unit/stream_alert_alert_processor/test_outputs.py | ashmere/streamalert | 1 | 12799411 | <filename>tests/unit/stream_alert_alert_processor/test_outputs.py<gh_stars>1-10
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access
from collections import Counter, OrderedDict
import json
import boto3
from mock import call, patch
from moto import mock_s3, mock_kms, mock_lambda
from nose.tools import (
assert_equal,
assert_is_none,
assert_is_not_none,
assert_set_equal
)
from stream_alert.alert_processor import outputs
from stream_alert.alert_processor.output_base import OutputProperty
from stream_alert_cli.helpers import create_lambda_function, put_mock_creds
from tests.unit.stream_alert_alert_processor import CONFIG, FUNCTION_NAME, KMS_ALIAS, REGION
from tests.unit.stream_alert_alert_processor.helpers import (
get_random_alert,
get_alert,
remove_temp_secrets
)
def test_existing_get_output_dispatcher():
"""Get output dispatcher - existing"""
service = 'aws-s3'
dispatcher = outputs.get_output_dispatcher(
service, REGION, FUNCTION_NAME, CONFIG)
assert_is_not_none(dispatcher)
def test_nonexistent_get_output_dispatcher():
"""Get output dispatcher - nonexistent"""
nonexistent_service = 'aws-s4'
dispatcher = outputs.get_output_dispatcher(nonexistent_service,
REGION,
FUNCTION_NAME,
CONFIG)
assert_is_none(dispatcher)
@patch('logging.Logger.error')
def test_get_output_dispatcher_logging(log_mock):
"""Get output dispatcher - log error"""
bad_service = 'bad-output'
outputs.get_output_dispatcher(bad_service, REGION, FUNCTION_NAME, CONFIG)
log_mock.assert_called_with(
'designated output service [%s] does not exist',
bad_service)
def test_user_defined_properties():
"""Get user defined properties"""
for output in outputs.STREAM_OUTPUTS.values():
props = output(REGION, FUNCTION_NAME, CONFIG).get_user_defined_properties()
# The user defined properties should at a minimum contain a descriptor
assert_is_not_none(props.get('descriptor'))
class TestPagerDutyOutput(object):
"""Test class for PagerDutyOutput"""
@classmethod
def setup_class(cls):
"""Setup the class before any methods"""
cls.__service = 'pagerduty'
cls.__descriptor = 'unit_test_pagerduty'
cls.__backup_method = None
cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,
REGION,
FUNCTION_NAME,
CONFIG)
@classmethod
def teardown_class(cls):
"""Teardown the class after all methods"""
cls.__dispatcher = None
def test_get_default_properties(self):
"""Get Default Properties - PagerDuty"""
props = self.__dispatcher._get_default_properties()
assert_equal(len(props), 1)
assert_equal(props['url'],
'https://events.pagerduty.com/generic/2010-04-15/create_event.json')
def _setup_dispatch(self):
"""Helper for setting up PagerDutyOutput dispatch"""
remove_temp_secrets()
# Cache the _get_default_properties and set it to return None
self.__backup_method = self.__dispatcher._get_default_properties
self.__dispatcher._get_default_properties = lambda: None
output_name = self.__dispatcher.output_cred_name(self.__descriptor)
creds = {'url': 'http://pagerduty.foo.bar/create_event.json',
'service_key': 'mocked_service_key'}
put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS)
return get_alert()
def _teardown_dispatch(self):
"""Replace method with cached method"""
self.__dispatcher._get_default_properties = self.__backup_method
@patch('logging.Logger.info')
@patch('urllib2.urlopen')
@mock_s3
@mock_kms
def test_dispatch_success(self, url_mock, log_info_mock):
"""PagerDutyOutput dispatch success"""
alert = self._setup_dispatch()
url_mock.return_value.getcode.return_value = 200
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
self._teardown_dispatch()
log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)
@patch('logging.Logger.error')
@patch('urllib2.urlopen')
@mock_s3
@mock_kms
def test_dispatch_failure(self, url_mock, log_error_mock):
"""PagerDutyOutput dispatch failure"""
alert = self._setup_dispatch()
bad_message = '{"error": {"message": "failed", "errors": ["err1", "err2"]}}'
url_mock.return_value.read.return_value = bad_message
url_mock.return_value.getcode.return_value = 400
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
self._teardown_dispatch()
log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)
@patch('logging.Logger.error')
@mock_s3
@mock_kms
def test_dispatch_bad_descriptor(self, log_error_mock):
"""PagerDutyOutput dispatch bad descriptor"""
alert = self._setup_dispatch()
self.__dispatcher.dispatch(descriptor='bad_descriptor',
rule_name='rule_name',
alert=alert)
self._teardown_dispatch()
log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)
@mock_s3
@mock_kms
class TestPhantomOutput(object):
"""Test class for PhantomOutput"""
@classmethod
def setup_class(cls):
"""Setup the class before any methods"""
cls.__service = 'phantom'
cls.__descriptor = 'unit_test_phantom'
cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,
REGION,
FUNCTION_NAME,
CONFIG)
@classmethod
def teardown_class(cls):
"""Teardown the class after all methods"""
cls.__dispatcher = None
def _setup_dispatch(self, url):
"""Helper for setting up PhantomOutput dispatch"""
remove_temp_secrets()
output_name = self.__dispatcher.output_cred_name(self.__descriptor)
creds = {'url': url,
                 'ph_auth_token': 'mocked_auth_token'}
put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket, REGION, KMS_ALIAS)
return get_alert()
@patch('logging.Logger.info')
@patch('urllib2.urlopen')
def test_dispatch_existing_container(self, url_mock, log_mock):
"""PhantomOutput dispatch success, existing container"""
alert = self._setup_dispatch('phantom.foo.bar')
url_mock.return_value.getcode.return_value = 200
url_mock.return_value.read.side_effect = ['{"count": 1, "data": [{"id": 1948}]}']
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Successfully sent alert to %s', self.__service)
@patch('logging.Logger.info')
@patch('urllib2.urlopen')
def test_dispatch_new_container(self, url_mock, log_mock):
"""PhantomOutput dispatch success, new container"""
alert = self._setup_dispatch('phantom.foo.bar')
url_mock.return_value.getcode.return_value = 200
url_mock.return_value.read.side_effect = ['{"count": 0, "data": []}', '{"id": 1948}']
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Successfully sent alert to %s', self.__service)
@patch('logging.Logger.error')
@patch('urllib2.urlopen')
def test_dispatch_container_failure(self, url_mock, log_mock):
"""PhantomOutput dispatch failure (setup container)"""
alert = self._setup_dispatch('phantom.foo.bar')
url_mock.return_value.getcode.return_value = 400
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Failed to send alert to %s', self.__service)
@patch('logging.Logger.error')
@patch('urllib2.urlopen')
def test_dispatch_container_error(self, url_mock, log_mock):
"""PhantomOutput dispatch decode error (setup container)"""
alert = self._setup_dispatch('phantom.foo.bar')
url_mock.return_value.getcode.return_value = 200
url_mock.return_value.read.return_value = 'this\nis\nnot\njson'
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
response = str(
call('An error occurred while decoding '
'Phantom container query response to JSON: %s', ValueError(
'No JSON object could be decoded',)))
assert_equal(str(log_mock.call_args_list[0]), response)
@patch('logging.Logger.error')
@patch('urllib2.urlopen')
def test_dispatch_failure(self, url_mock, log_mock):
"""PhantomOutput dispatch failure (artifact)"""
alert = self._setup_dispatch('phantom.foo.bar')
url_mock.return_value.read.side_effect = ['', '{"id": 1902}']
# Use side_effect to change the getcode return value the second time
# it is called. This allows testing issues down the chain somewhere
url_mock.return_value.getcode.side_effect = [200, 400]
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Failed to send alert to %s', self.__service)
@patch('logging.Logger.error')
def test_dispatch_bad_descriptor(self, log_error_mock):
"""PhantomOutput dispatch bad descriptor"""
alert = self._setup_dispatch('phantom.foo.bar')
self.__dispatcher.dispatch(descriptor='bad_descriptor',
rule_name='rule_name',
alert=alert)
log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)
@patch('stream_alert.alert_processor.output_base.StreamOutputBase._request_helper')
def test_dispatch_container_query(self, request_mock):
"""PhantomOutput - Container Query URL"""
alert = self._setup_dispatch('phantom.foo.bar')
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
full_url = 'phantom.foo.bar/rest/container?_filter_name="rule_name"&page_size=1'
headers = {'ph-auth-token': 'mocked_auth_token'}
request_mock.assert_has_calls([call(full_url, None, headers, False)])
class TestSlackOutput(object):
"""Test class for PagerDutyOutput"""
@classmethod
def setup_class(cls):
"""Setup the class before any methods"""
cls.__service = 'slack'
cls.__descriptor = 'unit_test_channel'
cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,
REGION,
FUNCTION_NAME,
CONFIG)
@classmethod
def teardown_class(cls):
"""Teardown the class after all methods"""
cls.__dispatcher = None
def test_format_message_single(self):
"""Format Single Message - Slack"""
rule_name = 'test_rule_single'
alert = get_random_alert(25, rule_name)
loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))
# tests
assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})
assert_equal(
loaded_message['text'],
'*StreamAlert Rule Triggered: test_rule_single*')
assert_equal(len(loaded_message['attachments']), 1)
    def test_format_message_multiple(self):
"""Format Multi-Message - Slack"""
rule_name = 'test_rule_multi-part'
alert = get_random_alert(30, rule_name)
loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))
# tests
assert_set_equal(set(loaded_message.keys()), {'text', 'mrkdwn', 'attachments'})
assert_equal(
loaded_message['text'],
'*StreamAlert Rule Triggered: test_rule_multi-part*')
assert_equal(len(loaded_message['attachments']), 2)
assert_equal(loaded_message['attachments'][1]
['text'].split('\n')[3][1:7], '000028')
def test_format_message_default_rule_description(self):
"""Format Message Default Rule Description - Slack"""
rule_name = 'test_empty_rule_description'
alert = get_random_alert(10, rule_name, True)
loaded_message = json.loads(self.__dispatcher._format_message(rule_name, alert))
# tests
default_rule_description = '*Rule Description:*\nNo rule description provided\n'
assert_equal(
loaded_message['attachments'][0]['pretext'],
default_rule_description)
def test_json_to_slack_mrkdwn_str(self):
"""JSON to Slack mrkdwn - simple str"""
simple_str = 'value to format'
result = self.__dispatcher._json_to_slack_mrkdwn(simple_str, 0)
assert_equal(len(result), 1)
assert_equal(result[0], simple_str)
def test_json_to_slack_mrkdwn_dict(self):
"""JSON to Slack mrkdwn - simple dict"""
simple_dict = OrderedDict([('test_key_01', 'test_value_01'),
('test_key_02', 'test_value_02')])
result = self.__dispatcher._json_to_slack_mrkdwn(simple_dict, 0)
assert_equal(len(result), 2)
assert_equal(result[1], '*test_key_02:* test_value_02')
def test_json_to_slack_mrkdwn_nested_dict(self):
"""JSON to Slack mrkdwn - nested dict"""
nested_dict = OrderedDict([
('root_key_01', 'root_value_01'),
('root_02', 'root_value_02'),
('root_nested_01', OrderedDict([
('nested_key_01', 100),
('nested_key_02', 200),
('nested_nested_01', OrderedDict([
('nested_nested_key_01', 300)
]))
]))
])
result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)
assert_equal(len(result), 7)
assert_equal(result[2], '*root_nested_01:*')
assert_equal(Counter(result[4])['\t'], 1)
assert_equal(Counter(result[6])['\t'], 2)
def test_json_to_slack_mrkdwn_list(self):
"""JSON to Slack mrkdwn - simple list"""
simple_list = ['test_value_01', 'test_value_02']
result = self.__dispatcher._json_to_slack_mrkdwn(simple_list, 0)
assert_equal(len(result), 2)
assert_equal(result[0], '*[1]* test_value_01')
assert_equal(result[1], '*[2]* test_value_02')
def test_json_to_slack_mrkdwn_multi_nested(self):
"""JSON to Slack mrkdwn - multi type nested"""
nested_dict = OrderedDict([
('root_key_01', 'root_value_01'),
('root_02', 'root_value_02'),
('root_nested_01', OrderedDict([
('nested_key_01', 100),
('nested_key_02', 200),
('nested_nested_01', OrderedDict([
('nested_nested_key_01', [
6161,
1051,
51919
])
]))
]))
])
result = self.__dispatcher._json_to_slack_mrkdwn(nested_dict, 0)
assert_equal(len(result), 10)
assert_equal(result[2], '*root_nested_01:*')
assert_equal(Counter(result[4])['\t'], 1)
assert_equal(result[-1], '\t\t\t*[3]* 51919')
def test_json_list_to_text(self):
"""JSON list to text"""
simple_list = ['test_value_01', 'test_value_02']
result = self.__dispatcher._json_list_to_text(simple_list, '\t', 0)
assert_equal(len(result), 2)
assert_equal(result[0], '*[1]* test_value_01')
assert_equal(result[1], '*[2]* test_value_02')
def test_json_map_to_text(self):
"""JSON map to text"""
simple_dict = OrderedDict([('test_key_01', 'test_value_01'),
('test_key_02', 'test_value_02')])
result = self.__dispatcher._json_map_to_text(simple_dict, '\t', 0)
assert_equal(len(result), 2)
assert_equal(result[1], '*test_key_02:* test_value_02')
def _setup_dispatch(self):
"""Helper for setting up SlackOutput dispatch"""
remove_temp_secrets()
output_name = self.__dispatcher.output_cred_name(self.__descriptor)
creds = {'url': 'https://api.slack.com/web-hook-key'}
put_mock_creds(output_name, creds, self.__dispatcher.secrets_bucket,
REGION, KMS_ALIAS)
return get_alert()
@patch('logging.Logger.info')
@patch('urllib2.urlopen')
@mock_s3
@mock_kms
def test_dispatch_success(self, url_mock, log_info_mock):
"""SlackOutput dispatch success"""
alert = self._setup_dispatch()
url_mock.return_value.getcode.return_value = 200
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_info_mock.assert_called_with('Successfully sent alert to %s', self.__service)
@patch('logging.Logger.error')
@patch('urllib2.urlopen')
@mock_s3
@mock_kms
def test_dispatch_failure(self, url_mock, log_error_mock):
"""SlackOutput dispatch failure"""
alert = self._setup_dispatch()
error_message = 'a helpful error message'
url_mock.return_value.read.return_value = error_message
url_mock.return_value.getcode.return_value = 400
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_error_mock.assert_any_call('Encountered an error while sending to Slack: %s',
error_message)
log_error_mock.assert_any_call('Failed to send alert to %s', self.__service)
@patch('logging.Logger.error')
@mock_s3
@mock_kms
def test_dispatch_bad_descriptor(self, log_error_mock):
"""SlackOutput dispatch bad descriptor"""
alert = self._setup_dispatch()
self.__dispatcher.dispatch(descriptor='bad_descriptor',
rule_name='rule_name',
alert=alert)
log_error_mock.assert_called_with('Failed to send alert to %s', self.__service)
class TestAWSOutput(object):
"""Test class for AWSOutput Base"""
@classmethod
def setup_class(cls):
"""Setup the class before any methods"""
# pylint: disable=abstract-class-instantiated
cls.__abstractmethods_cache = outputs.AWSOutput.__abstractmethods__
outputs.AWSOutput.__abstractmethods__ = frozenset()
cls.__dispatcher = outputs.AWSOutput(REGION, FUNCTION_NAME, CONFIG)
cls.__dispatcher.__service__ = 'aws-s3'
@classmethod
def teardown_class(cls):
"""Teardown the class after all methods"""
outputs.AWSOutput.__abstractmethods__ = cls.__abstractmethods_cache
cls.__dispatcher = None
def test_aws_format_output_config(self):
"""AWSOutput format output config"""
props = {
'descriptor': OutputProperty(
'short_descriptor',
'descriptor_value'),
'aws_value': OutputProperty(
'unique arn value, bucket, etc',
'bucket.value')}
formatted_config = self.__dispatcher.format_output_config(CONFIG, props)
assert_equal(len(formatted_config), 2)
assert_is_not_none(formatted_config.get('descriptor_value'))
assert_is_not_none(formatted_config.get('unit_test_bucket'))
def test_dispatch(self):
"""AWSOutput dispatch pass"""
passed = self.__dispatcher.dispatch()
assert_is_none(passed)
class TestS3Output(object):
"""Test class for S3Output"""
@classmethod
def setup_class(cls):
"""Setup the class before any methods"""
cls.__service = 'aws-s3'
cls.__descriptor = 'unit_test_bucket'
cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,
REGION,
FUNCTION_NAME,
CONFIG)
@classmethod
def teardown_class(cls):
"""Teardown the class after all methods"""
        cls.__dispatcher = None
def test_locals(self):
"""S3Output local variables"""
assert_equal(self.__dispatcher.__class__.__name__, 'S3Output')
assert_equal(self.__dispatcher.__service__, self.__service)
def _setup_dispatch(self):
"""Helper for setting up S3Output dispatch"""
bucket = CONFIG[self.__service][self.__descriptor]
boto3.client('s3', region_name=REGION).create_bucket(Bucket=bucket)
return get_alert()
@patch('logging.Logger.info')
@mock_s3
def test_dispatch(self, log_mock):
"""S3Output dispatch"""
alert = self._setup_dispatch()
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Successfully sent alert to %s', self.__service)
class TestLambdaOutput(object):
"""Test class for LambdaOutput"""
@classmethod
def setup_class(cls):
"""Setup the class before any methods"""
cls.__service = 'aws-lambda'
cls.__descriptor = 'unit_test_lambda'
cls.__dispatcher = outputs.get_output_dispatcher(cls.__service,
REGION,
FUNCTION_NAME,
CONFIG)
@classmethod
def teardown_class(cls):
"""Teardown the class after all methods"""
        cls.__dispatcher = None
def test_locals(self):
"""LambdaOutput local variables"""
assert_equal(self.__dispatcher.__class__.__name__, 'LambdaOutput')
assert_equal(self.__dispatcher.__service__, self.__service)
def _setup_dispatch(self, alt_descriptor=''):
"""Helper for setting up LambdaOutput dispatch"""
function_name = CONFIG[self.__service][alt_descriptor or self.__descriptor]
create_lambda_function(function_name, REGION)
return get_alert()
@mock_lambda
@patch('logging.Logger.info')
def test_dispatch(self, log_mock):
"""LambdaOutput dispatch"""
alert = self._setup_dispatch()
self.__dispatcher.dispatch(descriptor=self.__descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Successfully sent alert to %s', self.__service)
@mock_lambda
@patch('logging.Logger.info')
def test_dispatch_with_qualifier(self, log_mock):
"""LambdaOutput dispatch with qualifier"""
alt_descriptor = '{}_qual'.format(self.__descriptor)
alert = self._setup_dispatch(alt_descriptor)
self.__dispatcher.dispatch(descriptor=alt_descriptor,
rule_name='rule_name',
alert=alert)
log_mock.assert_called_with('Successfully sent alert to %s', self.__service)
| 1.789063 | 2 |
code/lstm_without_peephole.py | sanketkalwar/LSTM | 1 | 12799412 | <filename>code/lstm_without_peephole.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
plt.ion()
dataset = open('../data/input.txt','r').read()
#dataset = open('../data/code.txt','r').read()
len_of_dataset = len(dataset)
print('len of dataset:',len_of_dataset)
vocab = set(dataset)
len_of_vocab = len(vocab)
print('len of vocab:',len_of_vocab)
char_to_idx = {char:idx for idx,char in enumerate(vocab)}
idx_to_char = {idx:char for idx,char in enumerate(vocab)}
print('char_to_idx:',char_to_idx)
print('idx_to_char:',idx_to_char)
start_ptr = 0
lr = 1e-1
time_step = 25
mean =0.0
std =0.01
epoches = 10000
def sigmoid(x):
return 1/(1+np.exp(-x))
def softmax(x):
return np.exp(x)/np.sum(np.exp(x))
Wi,Ri,bi = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))
Wo,Ro,bo = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))
Wf,Rf,bf = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))
Wz,Rz,bz = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.random.normal(mean,std,(len_of_vocab,1))
Wy,by = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),np.zeros((len_of_vocab,1))
mWi,mRi,mbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi)
mWo,mRo,mbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo)
mWf,mRf,mbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf)
mWz,mRz,mbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz)
mWy,mby = np.zeros_like(Wy),np.zeros_like(by)
def sample(y_p,c_p):
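    # Generate 200 characters from the current model, starting from the previous
    # hidden state y_p and cell state c_p, and print the resulting text.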
idx = []
x = np.zeros((len_of_vocab,1))
    # Seed generation with a random one-hot encoded character.
    x[np.random.randint(0, len_of_vocab), 0] = 1
for t in range(200):
I = np.dot(Wi,x)+np.dot(Ri,y_p)+bi
i_g = sigmoid(I)
O = np.dot(Wo,x)+np.dot(Ro,y_p)+bo
o_g = sigmoid(O)
F = np.dot(Wf,x) + np.dot(Rf,y_p)+bf
f_g = sigmoid(F)
Z = np.dot(Wz,x) + np.dot(Rz,y_p)+bz
z_g = np.tanh(Z)
c_p = i_g*z_g + f_g *c_p
y_p = o_g * np.tanh(c_p)
os = np.dot(Wy,y_p)+by
p = softmax(os)
id = np.random.choice(len_of_vocab,1,p=p.ravel())[0]
idx.append(id)
x = np.zeros((len_of_vocab,1))
x[id,0]=1
print(''.join([idx_to_char[c] for c in idx]))
def forward_backward_pass(i,o,y_p,c_p):
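    # One forward pass over `time_step` characters followed by backpropagation
    # through time. i/o hold the input/target character indices, y_p/c_p are the
    # hidden and cell states carried over from the previous chunk; returns the
    # loss, the (clipped) gradients and the final hidden/cell states.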
cs = {}
ys = {}
i_g = {}
o_g = {}
f_g = {}
z_g = {}
os = {}
cs[-1] = np.copy(c_p)
ys[-1] = np.copy(y_p)
p = {}
loss = 0
for t in range(time_step):
x = np.zeros((len_of_vocab,1))
x[i[t],0] = 1
I = np.dot(Wi,x)+np.dot(Ri,ys[t-1])+bi
i_g[t] = sigmoid(I)
O = np.dot(Wo,x)+np.dot(Ro,ys[t-1])+bo
o_g[t] = sigmoid(O)
F = np.dot(Wf,x) + np.dot(Rf,ys[t-1])+bf
f_g[t] = sigmoid(F)
Z = np.dot(Wz,x) + np.dot(Rz,ys[t-1])+bz
z_g[t] = np.tanh(Z)
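        # Cell state and hidden state updates (no peephole connections).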
cs[t] = i_g[t]*z_g[t] + f_g[t] *cs[t-1]
ys[t] = o_g[t] * np.tanh(cs[t])
os[t] = np.dot(Wy,ys[t])+by
p[t] = softmax(os[t])
loss += -np.log(p[t][o[t],0])
dWi,dRi,dbi = np.zeros_like(Wi),np.zeros_like(Ri),np.zeros_like(bi)
dWo,dRo,dbo = np.zeros_like(Wo),np.zeros_like(Ro),np.zeros_like(bo)
dWf,dRf,dbf = np.zeros_like(Wf),np.zeros_like(Rf),np.zeros_like(bf)
dWz,dRz,dbz = np.zeros_like(Wz),np.zeros_like(Rz),np.zeros_like(bz)
dWy,dby = np.zeros_like(Wy),np.zeros_like(by)
dy_z,dy_f,dy_o,dy_i = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1))
dcs_c = np.zeros((len_of_vocab,1))
for t in reversed(range(time_step)):
x = np.zeros((len_of_vocab,1))
x[i[t],0] = 1
do = np.copy(p[t])
do[o[t],0] -= 1
dWy += np.outer(do,ys[t])
dby += do
dy = np.dot(Wy,do)
dy = dy + dy_z + dy_f + dy_i + dy_o
dcs = o_g[t] * (1-np.tanh(cs[t])*np.tanh(cs[t]))*dy + dcs_c
dcs_c = f_g[t]*dcs
dig = z_g[t]*dcs
dog = np.tanh(cs[t])*dy
dzg = i_g[t]*dcs
dfg = cs[t-1]*dcs
dzg_ = (1-z_g[t]*z_g[t])*dzg
dWz += np.outer(dzg_,x)
dRz += np.outer(dzg_,ys[t-1])
dbz += dzg_
dy_z = np.dot(Rz.T,dzg_)
dfg_ = f_g[t] * (1-f_g[t])*dfg
dWf += np.outer(dfg_,x)
dRf += np.outer(dfg_,ys[t-1])
dy_f = np.dot(Rf.T,dfg_)
dbf += dfg_
dog_ = o_g[t]*(1-o_g[t])*dog
dWo += np.outer(dog_,x)
dRo += np.outer(dog_,ys[t-1])
dy_o = np.dot(Ro.T,dog_)
dbo += dog_
dig_ = i_g[t]*(1-i_g[t])*dig
dWi += np.outer(dig_,x)
dRi += np.outer(dig_,ys[t-1])
dy_i = np.dot(Ri.T,dig_)
dbi += dig_
for param in [dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz]:
np.clip(param,-1,1,out=param)
return loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,ys[time_step-1],cs[time_step-1]
y_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1))
n = 0
x=[]
y=[]
smooth_loss = -np.log(1/len_of_vocab)*time_step
while n<=epoches:
if start_ptr+time_step>len_of_dataset:
start_ptr = 0
        y_prev = np.zeros((len_of_vocab,1))
        c_prev = np.zeros((len_of_vocab,1))
else:
input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_step]]
output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_step+1]]
loss,dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby,y_prev,c_prev=forward_backward_pass(i=input,o=output,y_p=y_prev,c_p=c_prev)
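        # Adagrad update for every weight matrix and bias.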
for params,dparams,mparams in zip([Wi,Ri,bi,Wo,Ro,bo,Wf,Rf,bf,Wz,Rz,bz,Wy,by],\
[dWi,dRi,dbi,dWo,dRo,dbo,dWf,dRf,dbf,dWz,dRz,dbz,dWy,dby],[mWi,mRi,mbi,mWo,mRo,mbo,mWf,mRf,mbf,mWz,mRz,mbz,mWy,mby]):
mparams += dparams*dparams
params += -lr*dparams/np.sqrt(mparams+1e-8)
smooth_loss = (0.999*smooth_loss)+(0.001*loss)
x.append(n)
y.append(smooth_loss)
if n%1000 == 0:
print('smooth_loss:',loss)
sample(y_p=y_prev,c_p=c_prev)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.plot(x,y,color='r')
plt.pause(1e-9)
n+=1
start_ptr += time_step
plt.savefig('../Performance/lstm_without_peephole.png') | 3.234375 | 3 |
dataStructures/tree.py | auxsophia/Spellbook | 0 | 12799413 | '''
Terminology used in trees
Root
The top node in a tree.
Child
A node directly connected to another node when moving away from the root.
Parent
The converse notion of a child.
Siblings
A group of nodes with the same parent.
Descendant
A node reachable by repeated proceeding from parent to child. Also known as subchild.
Ancestor
A node reachable by repeated proceeding from child to parent.
Leaf
External node (not common)
A node with no children.
Branch node
Internal node
A node with at least one child.
Degree
For a given node, its number of children. A leaf is necessarily degree zero.
Edge
The connection between one node and another.
Path
A sequence of nodes and edges connecting a node with a descendant.
Level
The level of a node is defined as: 1 + the number of edges between the node and the root.
Height of node
The height of a node is the number of edges on the longest path between that node and a leaf.
Height of tree
The height of a tree is the height of its root node.
Depth
The depth of a node is the number of edges from the tree's root node to the node.
Forest
A forest is a set of n ≥ 0 disjoint trees.
'''
class Node(object):
def __init__(self, data, num_children = 2):
self.data = data
self.num_children = num_children
self.children = []
class Tree(object):
def __init__(self, data, num_children = 2, notation = "prefix"):
self.root = Node(data, num_children)
self.notation = notation
# Common operations:
# Incomplete: insert with queue.
    def insert(self, data, node=None):
        # `self.root` cannot be used as a default argument, so default to None.
        if node is None:
            node = self.root
        if len(node.children) < node.num_children:
            node.children.append(Node(data, node.num_children))
            return
        for child in node.children:
            if len(child.children) < child.num_children:
                self.insert(data, node=child)
                return
# Searching for an item
# Adding a new item at a certain position on the tree
# Deleting an item
# Pruning: Removing a whole section of a tree
# Grafting: Adding a whole section to a tree
# Finding the root for any node
# Finding the lowest common ancestor of two nodes
# Enumerating all the items
# Enumerating a section of a tree
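# Minimal usage sketch of the classes above (illustrative; insert() still uses the
# incomplete placement strategy noted in the comments):
if __name__ == "__main__":
    t = Tree("root", num_children=2)
    for value in ["a", "b", "c"]:
        t.insert(value)
    print(t.root.data, [child.data for child in t.root.children])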
| 3.5 | 4 |
get_random_quote/migrations/0017_auto_20200525_0235.py | helloprash/birthday | 0 | 12799414 | <gh_stars>0
# Generated by Django 3.0.3 on 2020-05-24 21:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('get_random_quote', '0016_auto_20200525_0146'),
]
operations = [
migrations.RenameField(
model_name='quote',
old_name='photo',
new_name='image_object',
),
]
| 1.625 | 2 |
selenium_pipeline/screenshot_with_sel.py | Praneethvvs/CircleCi_FastApi | 0 | 12799415 | <reponame>Praneethvvs/CircleCi_FastApi<gh_stars>0
from time import sleep
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium import webdriver
sleep(5)
def screenshot_main():
    # Connect to the remote Selenium hub (hostname omitted in the URL), load
    # python.org, save a screenshot and close the remote session.
    driver = webdriver.Remote("http://:4444/wd/hub", desired_capabilities=DesiredCapabilities.CHROME)
    driver.get("https://python.org")
    driver.save_screenshot("screenshot.png")
    driver.quit()
sss/sss.py | TAMU-CSE/mitre-ectf2021 | 1 | 12799416 | <filename>sss/sss.py
#!/usr/bin/python3
# 2021 Collegiate eCTF
# SCEWL Security Server
# <NAME>
#
# (c) 2021 The MITRE Corporation
#
# This source file is part of an example system for MITRE's 2021 Embedded System CTF (eCTF).
# This code is being provided only for educational purposes for the 2021 MITRE eCTF competition,
# and may not meet MITRE standards for quality. Use this code at your own risk!
#
# This is the Secure SCEWL Server that handles SED registration and key distribution for any given
# deployment. Minimal changes have been made to the provided source files to allow for a these
# features. It should be noted that any key generation is done within respective dockerfiles and
# this script primarily focuses on verifying an SED as valid and distributing deployment wide keys.
#
# Registration:
# 1) Given any SED with valid dev_id, establish path to SSS registration secret and scewl_secret
# 2) Validate the scewl_secret that resides on the registering SED by comparing to the SSS's
# registration secret
# 3) Distribute AES key (16B), HMAC key (64B) and Random seed (32B), given a match
# 4) Send some error given a discrepancy
#
# Succesful execution of this procedure means a given SED is valid and may communicate with other
# deployed SEDs while through use of the aformentioned keys. If an SED doesn't receive these keys
# its messages will be thrown out by any receiving SED which is part of the deployment.
#
# Deregistration is handled by sending deregistration message and removing registration secret from
# the SED (see dockerfiles/3_remove_sed.Dockerfile)
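# Wire-format sketch, derived from handle_transaction() below (the four leading
# header fields are ignored by the SSS, so their names here are illustrative):
#   request (76 bytes) = struct.pack('<HHHHHH64s', hdr0, hdr1, hdr2, hdr3, dev_id, op, scewl_secret)
#   REG response body  = struct.pack('<Hh16s32s64s', dev_id, REG, aes_key, seed, hmac_key)
#   full response      = struct.pack('<2sHHH', b'SC', dev_id, SSS_ID, len(body)) + body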
import socket
import select
import struct
import argparse
import logging
import os
import secrets
from typing import NamedTuple
SSS_IP = 'localhost'
SSS_ID = 1
# mirroring scewl enum at scewl.c:4
ALREADY, REG, DEREG = -1, 0, 1
logging.basicConfig(level=logging.INFO)
Device = NamedTuple('Device', [('id', int), ('status', int), ('csock', socket.socket)])
class SSS:
def __init__(self, sockf):
# Make sure the socket does not already exist
try:
os.unlink(sockf)
except OSError:
if os.path.exists(sockf):
raise
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.bind(sockf)
self.sock.listen(10)
self.devs = {}
@staticmethod
def sock_ready(sock, op='r'):
rready, wready, _ = select.select([sock], [sock], [], 0)
return rready if op == 'r' else wready
def handle_transaction(self, csock: socket.SocketType):
logging.debug('handling transaction')
data = b''
while len(data) < 76:
recvd = csock.recv(76 - len(data))
data += recvd
# check for closed connection
if not recvd:
raise ConnectionResetError
logging.debug(f'Received buffer: {repr(data)}')
# Unpack message received from a given SED
_, _, _, _, dev_id, op, scewl_secret = struct.unpack('<HHHHHH64s', data)
'''Message responses are constructed below'''
# Read in corresponding scewl secret
secret_path = f'/secrets/{dev_id}_secret'
if os.path.exists(secret_path):
with open(secret_path, "rb") as secret_file:
# Read in the registration secret for verification
checked_secret = secret_file.read(64)
# Scewl_secret mismatch, registration key provided by SED is invalid. Log this event
            # and pack the ALREADY resp_op into the response. Without deployment keys, this SED is
# considered invalid for registration.
if checked_secret != scewl_secret:
logging.info(f'{dev_id}:expected: {checked_secret}, found: {scewl_secret}')
resp_op = ALREADY
logging.info(f'{dev_id}:key mismatch')
body = struct.pack('<Hh', dev_id, resp_op)
# Requesting repeat transaction in the case that an SED state already reflects the
# received op. Log this event.
elif dev_id in self.devs and self.devs[dev_id].status == op:
resp_op = ALREADY
logging.info(f'{dev_id}:already {"Registered" if op == REG else "Deregistered"}')
body = struct.pack('<Hh', dev_id, resp_op)
# Record registration transaction and read in keys, then pack into response. This is
# a valid SED which can communicate in the deployment.
# AES key: 16 bytes
# HMAC key: 64 bytes
# Random seed: 32bytes
elif op == REG:
self.devs[dev_id] = Device(dev_id, REG, csock)
resp_op = REG
with open("/secrets/aes_key", "rb") as aes_file:
aes_key = aes_file.read(16)
with open("/secrets/hmac_key", "rb") as hmac_file:
hmac_key = hmac_file.read(64)
logging.info(f'{dev_id}:Registered')
seed = secrets.token_bytes(32)
body = struct.pack('<Hh16s32s64s', dev_id, resp_op, aes_key, seed, hmac_key)
# Record deregistration for an SED which was verified previously to register and
# hasn't already been deregistered.
else:
self.devs[dev_id] = Device(dev_id, DEREG, csock)
resp_op = DEREG
logging.info(f'{dev_id}:Deregistered')
body = struct.pack('<Hh', dev_id, resp_op)
        # Record an error from reading the SED's {dev_id}_secret file. This may happen if
        # an SED that is not part of the deployment (as specified by the {dev_id}_secrets
        # folders generated in dockerfiles/2b_create_sed_secrets.Dockerfile) attempts to register.
else:
resp_op = ALREADY
logging.info(f'{dev_id}:bad ID')
body = struct.pack('<Hh', dev_id, resp_op)
# Send response to SED constructed in the previous section
resp = struct.pack('<2sHHH', b'SC', dev_id, SSS_ID, len(body)) + body
logging.debug(f'Sending response {repr(data)}')
csock.send(resp)
# The following methods reflect the provided insecure implementation and keep the SSS active
# to received registration and deregistration messages before responding
def start(self):
unattributed_socks = set()
# serve forever
while True:
# check for new client
if self.sock_ready(self.sock):
csock, _ = self.sock.accept()
logging.info(f':New connection')
unattributed_socks.add(csock)
continue
# check pool of unattributed sockets first
for csock in unattributed_socks:
try:
if self.sock_ready(csock):
self.handle_transaction(csock)
unattributed_socks.remove(csock)
break
except (ConnectionResetError, BrokenPipeError):
logging.info(':Connection closed')
unattributed_socks.remove(csock)
csock.close()
break
# check pool of attributed sockets first
old_ids = []
for dev in self.devs.values():
if dev.csock and self.sock_ready(dev.csock):
try:
self.handle_transaction(dev.csock)
except (ConnectionResetError, BrokenPipeError):
logging.info(f'{dev.id}:Connection closed')
dev.csock.close()
old_ids.append(dev.id)
for dev_id in old_ids:
del self.devs[dev_id]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('sockf', help='Path to socket to bind the SSS to')
return parser.parse_args()
def main():
args = parse_args()
# map of SCEWL IDs to statuses
sss = SSS(args.sockf)
sss.start()
if __name__ == '__main__':
main()
| 2.0625 | 2 |
data_gen.py | amahendra98/ga-pytorch | 2 | 12799417 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
# Define the class for the Meta-material dataset
class MetaMaterialDataSet(Dataset):
""" The Meta Material Dataset Class """
def __init__(self, ftr, lbl, bool_train):
"""
Instantiate the Dataset Object
:param ftr: the features which is always the Geometry !!
:param lbl: the labels, which is always the Spectra !!
        :param bool_train: True for the training split, False for the test split
"""
self.ftr = ftr
self.lbl = lbl
self.bool_train = bool_train
self.len = len(ftr)
def __len__(self):
return self.len
def __getitem__(self, ind):
return self.ftr[ind, :], self.lbl[ind, :]
## Copied from Omar's code
# Make geometry samples
def MM_Geom(n):
# Parameter bounds for metamaterial radius and height
r_min = 20
r_max = 200
h_min = 20
h_max = 100
# Defines hypergeometric space of parameters to choose from
space = 10
r_space = np.linspace(r_min, r_max, space + 1)
h_space = np.linspace(h_min, h_max, space + 1)
# Shuffles r,h arrays each iteration and then selects 0th element to generate random n x n parameter set
r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float)
for i in range(n):
np.random.shuffle(r_space)
np.random.shuffle(h_space)
r[i] = r_space[0]
h[i] = h_space[0]
return r, h
# Make geometry and spectra
def Make_MM_Model(n):
r, h = MM_Geom(n)
spectra = np.zeros(300)
geom = np.concatenate((r, h), axis=0)
for i in range(n):
w0 = 100 / h[i]
wp = (1 / 100) * np.sqrt(np.pi) * r[i]
g = (1 / 1000) * np.sqrt(np.pi) * r[i]
w, e2 = Lorentzian(w0, wp, g)
spectra += e2
return geom, spectra
# Calculate Lorentzian function to get spectra
def Lorentzian(w0, wp, g):
freq_low = 0
freq_high = 5
num_freq = 300
w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq)
# e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))),
# np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
# np.multiply(np.power(w, 2), np.power(g, 2))))
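    # Imaginary part of the Lorentz oscillator permittivity implemented below:
    #   e2(w) = wp^2 * g * w / ((w0^2 - w^2)^2 + g^2 * w^2)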
e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)),
np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
np.multiply(np.power(w, 2), np.power(g, 2))))
return w, e2
# Generates randomized dataset of simulated spectra for training and testing
def Prepare_Data(osc, sets, batch_size):
features = []
labels = []
for i in range(sets):
geom, spectra = Make_MM_Model(osc)
features.append(geom)
labels.append(spectra)
features = np.array(features, dtype='float32')
labels = np.array(labels, dtype='float32')
ftrsize = features.size / sets
lblsize = labels.size / sets
print('Size of Features is %i, Size of Labels is %i' % (ftrsize, lblsize))
print('There are %i datasets:' % sets)
ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234)
train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True)
test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
print('Number of Training samples is {}'.format(len(ftrTrain)))
print('Number of Test samples is {}'.format(len(ftrTest)))
return train_loader, test_loader
def gen_data(name):
train_loader, test_loader = Prepare_Data(1, 10000, 1000)
with open(name, 'a') as datafile:
for j, (geometry, spectra) in enumerate(train_loader):
concate = np.concatenate([geometry, spectra], axis=1)
# print(np.shape(concate))
np.savetxt(datafile, concate, delimiter=',')
if __name__ == "__main__":
train_loader, test_loader = Prepare_Data(1, 10000, 1000)
with open('toy_data/mm1d_6.csv', 'a') as datafile:
for j, (geometry, spectra) in enumerate(train_loader):
concate = np.concatenate([geometry, spectra], axis=1)
#print(np.shape(concate))
np.savetxt(datafile, concate, delimiter=',') | 2.953125 | 3 |
test/core_tests/test_params_object.py | RobinDePauw/talos | 0 | 12799418 | <filename>test/core_tests/test_params_object.py
import talos as ta
def test_params_object():
'''Tests the object from Params()'''
print('Start testing Params object...')
p = ta.Params()
# without arguments
p.activations()
p.batch_size()
p.dropout()
p.epochs()
p.kernel_initializers()
p.layers()
p.neurons()
p.lr()
p.optimizers()
p.shapes()
p.shapes_slope()
p.automated()
p = ta.Params(replace=False)
# with arguments
p.activations()
p.batch_size(10, 100, 5)
p.dropout()
p.epochs(10, 100, 5)
p.kernel_initializers()
p.layers(12)
p.neurons(10, 100, 5)
p.lr()
p.optimizers('multi_label')
p.shapes()
p.shapes_slope()
p.automated('sloped')
return "Finished testing Params object!"
| 2.46875 | 2 |
FBG_ReStrain_Python/Load_Sync.py | GilmarPereira/ReStrain | 0 | 12799419 | <reponame>GilmarPereira/ReStrain
""" Python Class To Load and Sync two files: Thermocouple and FBG
Copyright (C) <NAME>
2015 DTU Wind Energy
Author: <NAME>
Email: <EMAIL>; <EMAIL>
Last revision: 02-08-2016
***License***:
This file is part of FBG_ReStrain.
FBG_ReStrain is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FBG_ReStrain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Foobar. If not, see <http://www.gnu.org/licenses/>
"""
#Packages
import pandas as pd
import datetime as dt
class FBG_Temp_Loading(object):
def __init__(self,TempPaths,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames,TimeCorrect=0):
""" Initialized the FBG and Temp Loading Class file
Input_Parameters:
----------
"""
#Temp Input
self.TempPaths=TempPaths
self.TempFileNum=TempFileNum
self.TempSkipRows=TempSkipRows #Skip to the begin of the Data (skip head)
self.TempSep=TempSep # Data separador
self.TempColNumb=TempColNumb
self.TempColNames=TempColNames
self.TimeCorrect=TimeCorrect
#FBG Input
self.FBGPaths=FBGPaths
self.FBGFileNum=FBGFileNum
self.FBGSkipRows=FBGSkipRows#Skip to the begin of the Data (skip head)
self.FBGSep=FBGSep # Data separador
self.FBGColNumb=FBGColNumb
self.FBGColNames=FBGColNames
#Load File
#Temp
self.TempData=pd.DataFrame()
for i in range (0,self.TempFileNum):
TempPathtemp=self.TempPaths[i].replace("\\","/",99)
TempDataTemp=pd.read_csv(TempPathtemp,sep=self.TempSep,names=self.TempColNames,skiprows=self.TempSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True)
#Merge Date and Hour
self.TempData=pd.concat([self.TempData,TempDataTemp],ignore_index=True)
#Correct Time
self.TempData['Date_Time']=self.TempData['Date_Time']+dt.timedelta(hours=self.TimeCorrect)
#Date_Time timestamp
self.TempData=self.TempData.set_index('Date_Time')
#FBG
self.FBGData=pd.DataFrame()
for i in range(0,self.FBGFileNum):
FBGPathtemp=self.FBGPaths[i].replace("\\","/",99)
FBGDataTemp=pd.read_csv(FBGPathtemp,sep=self.FBGSep,names=self.FBGColNames,skiprows=self.FBGSkipRows,parse_dates=[['Date', 'Time']],dayfirst=True)
#Merge Date and Hour
self.FBGData= pd.concat([self.FBGData,FBGDataTemp],ignore_index=True)
#Delete collum Sample
self.FBGData=self.FBGData.drop('Sample',1)
#Organixe index
self.FBGData=self.FBGData.set_index('Date_Time')
def Syncron(self):
""" Syncronize the FBG files and Temp files
----------
"""
self.SyncData= pd.concat([self.FBGData,self.TempData],axis=1, join='inner')
#Create collum with increment/sample serie
#self.SyncData['Increment']=pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index)
self.SyncData.insert(0,'Increment/Sample',pd.Series(range(0,len(self.SyncData)), index=self.SyncData.index))
#Commands used to run this file without the GUI
"""
TempPath=['C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\sync\\temp.csv']
TempFileNum=1
TempColNumb=5
TempColNames=['Date','Time','Temp1','Temp2','Temp3']
TempSkipRows=1
TempSep=';'
FBGPaths=['C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\Sync\\BM Data [2015.07.16.09.31.02 ; 2015.07.16.10.31.01].txt','C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\Sync\\BM Data [2015.07.16.10.31.02 ; 2015.07.16.11.31.01].txt','C:\\Users\\gfpe\\Desktop\\Example_Temp_File\\Sync\\BM Data [2015.07.16.11.31.02 ; 2015.07.16.12.31.01].txt']
FBGFileNum=3
FBGColNumb=5
FBGColNames=['Date','Time','Sample','FBG1','FBG2']
FBGSkipRows=2
FBGSep='\t'
test=FBG_Temp_Loading(TempPath,TempFileNum,TempSkipRows,TempSep,TempColNumb,TempColNames,FBGPaths,FBGFileNum,FBGSkipRows,FBGSep,FBGColNumb,FBGColNames)
#test.FBGData['FBG1'].plot()
test.Syncron()
#To save
test.FBGData.to_csv('C:\\Users\\gfpe\\Desktop\\FBG.csv', sep=';')
test.TempData.to_csv('C:\\Users\\gfpe\\Desktop\\temp.csv', sep=';')
test.SyncData.to_csv('C:\\Users\\gfpe\\Desktop\\res.csv', sep=';')
"""
| 2.125 | 2 |
PiGPIO/migrations/0007_programlog.py | girisandeep/Django-PiGPIO | 8 | 12799420 | <reponame>girisandeep/Django-PiGPIO<filename>PiGPIO/migrations/0007_programlog.py
# Generated by Django 2.1.7 on 2019-03-25 23:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('PiGPIO', '0006_program_logging'),
]
operations = [
migrations.CreateModel(
name='ProgramLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('info', models.CharField(default='', max_length=256, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('step', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PiGPIO.ProgramStep')),
],
),
]
| 1.96875 | 2 |
tests/scripts/unicode💩.py | benfred/py-spy | 8,112 | 12799421 | <filename>tests/scripts/unicode💩.py<gh_stars>1000+
#!/env/bin/python
# -*- coding: utf-8 -*-
import time
def function1(seconds):
time.sleep(seconds)
if __name__ == "__main__":
function1(100)
| 1.640625 | 2 |
chart/repl/src/python/sseq_display.py | JoeyBF/sseq | 7 | 12799422 | <reponame>JoeyBF/sseq
from js import (
location,
console
)
from js_wrappers.async_js import Fetcher
from js_wrappers.filesystem import FileHandle
import json
import pathlib
from spectralsequence_chart import SseqChart
from spectralsequence_chart.serialization import JSON
from working_directory import get_working_directory_a, set_working_directory_a
from functools import wraps
from repl.handler_decorator import collect_handlers, handle
fetcher = Fetcher("api/")
def create_display(name):
disp = SseqDisplay(name)
print(f"Creating display at {disp.url}")
return disp.chart
async def load_display_a(name):
disp = SseqDisplay(name)
await disp.load_a()
return disp.chart
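# Typical REPL usage (illustrative): `chart = create_display("my_chart")` starts a new
# display, `chart = await load_display_a("my_chart")` attaches to a saved one; the
# returned SseqChart can then be mutated and pushed to the browser with
# `await chart.update_a()`.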
@collect_handlers("message_handlers")
class SseqDisplay:
""" A Spectral Sequence display. This contains the logic to communicate between the SseqChart and the browser.
All of the data is contained in the field SseqDisplay.chart which is the SseqChart object that is being displayed.
You may want to store the chart into a variable and use it directly.
"""
#
displays = {}
def __init__(self, name, chart=None):
self.name = name
self.chart = None
self.save_file_handle = FileHandle()
self.autosave = False
chart = chart or SseqChart(name)
self.set_sseq(chart)
self.subscribers = {}
SseqDisplay.displays[name] = self
from repl.executor import Executor
self.executor = Executor.executor
self._started = False
self.executor.loop.call_soon(self.start_a())
def __repr__(self):
if self._started:
return f'{type(self).__name__}(name="{self.name}", url="{self.url}", chart={self.chart})'
return f"""{type(self).__name__}(name="{self.name}", state="Not started, run 'await display.start_a()' to start.")"""
# def __dir__(self):
# """ getattr and dir have to be set up carefully to allow jedi to provide good docs for the SseqChart functions. """
# result = self.chart.__dir__()
# result.extend(self.__dict__.keys())
# return sorted(set(result))
# def __getattr__(self, name):
# """ getattr and dir have to be set up carefully to allow jedi to provide good docs for the SseqChart functions. """
# if not hasattr(self.chart, name):
# raise AttributeError(f'Instance of {self.__class__.__name__} has no attribute {name}')
# return getattr(self.chart, name)
def load_json(self, json_obj):
if type(json_obj) is str:
json_obj = json.loads(json_obj)
self.set_sseq(SseqChart.from_json(json_obj))
def set_sseq(self, chart):
if self.chart is not None:
self.chart._agent = None
self.chart = chart
self.chart._agent = self
@property
def url(self):
directory = str(pathlib.Path(location.pathname).parent)
return f"{location.protocol}//{location.host}{directory}/charts/{self.name}"
async def start_a(self):
if self._started:
return
self._started = True
response = await fetcher.put(f"charts/{self.name}", {})
if response.status >= 400:
raise Exception(f"Failed to create chart: {response.status_text}")
body = await response.json()
print(f'Display started. Visit "{self.url}" to view.')
async def reset_state_a(self):
with self.chart._batched_messages_lock:
self.chart._clear_batched_messages()
await self.send_message_a("chart.state.reset", state = self.chart.to_json())
await self.maybe_autosave_a()
def update(self):
self.executor.loop.call_soon(self.update_a())
async def update_a(self):
await self.chart.update_a()
async def send_batched_messages_a(self, messages):
console.log("Sending batched messages:", messages)
await self.send_message_a("chart.update", messages = messages)
await self.maybe_autosave_a()
async def maybe_autosave_a(self):
if self.autosave and self.save_file_handle.is_open():
await self.save_a()
async def save_a(self):
await self.save_file_handle.ensure_open_a(modify=True)
await self.save_file_handle.write_text_a(JSON.stringify(self.chart))
async def save_as_a(self, path = None):
if path:
working_directory = await get_working_directory_a()
if not working_directory:
raise RuntimeError("...")
self.save_file_handle = await working_directory.path(path).resolve_file_handle_a(create=True)
else:
self.save_file_handle = FileHandle()
await self.save_a()
async def load_a(self, path = None):
if path:
working_directory = await get_working_directory_a()
if not working_directory:
raise RuntimeError("...")
self.save_file_handle = await working_directory.path(path).resolve_file_handle_a()
else:
self.save_file_handle = FileHandle()
await self.save_file_handle.open_a()
self.set_sseq(JSON.parse(await self.save_file_handle.read_text_a()))
await self.reset_state_a()
@staticmethod
def dispatch_message(obj):
message = json.loads(obj["message"])
del obj["message"]
message.update(obj)
chart_name = message["chart_name"]
del message["chart_name"]
display = SseqDisplay.displays[chart_name]
display.handle_message(**message)
def handle_message(self, cmd, args, port, client_id, uuid, kwargs):
kwargs = dict(kwargs)
console.log(f"SseqDisplay.handle_message({cmd}, {JSON.stringify(kwargs)})")
self.executor.loop.call_soon(self.message_handlers[cmd](
self, uuid=uuid, port=port, client_id=client_id, **kwargs
))
@staticmethod
def _create_message(cmd, **kwargs):
return JSON.stringify(dict(cmd=cmd, args=[], kwargs=kwargs))
async def send_message_a(self, cmd, **kwargs):
message = SseqDisplay._create_message(cmd, **kwargs)
for port in self.subscribers.values():
port.postMessage(message)
async def send_message_to_target_client_a(self, port, cmd, uuid, **kwargs):
port.postMessage(JSON.stringify(dict(
cmd=cmd, uuid=uuid,
args=[], kwargs=kwargs
)))
@handle("new_user")
async def new_user__a(self, uuid, port, client_id):
print("Handling new user...")
# Might as well make sure that we don't have other charts that are out of date.
# So let's send an update to the existing charts first.
await self.update_a()
self.subscribers[client_id] = port
# "initialize" command sets chart range and page in addition to setting the chart.
# "initialize" does a superset of what "reset" does.
port.postMessage(SseqDisplay._create_message("chart.state.initialize", state = self.chart.to_json()))
@handle("initialize.complete")
async def initialize__complete__a(self, uuid, port, client_id):
print("initialize.complete")
def _wrap_chart_func(func):
@wraps(func)
def wrap(self, *args, **kwargs):
return func(self.chart, *args, **kwargs)
return wrap
def _bind_chart_attribute(name):
func = getattr(SseqChart, name)
func_type_name = type(func).__name__
if func_type_name == "function":
wrapped = _wrap_chart_func(func)
elif func_type_name == "property":
wrapped_fget = None
wrapped_fset = None
wrapped_fdel = None
if func.fget:
wrapped_fget = _wrap_chart_func(func.fget)
if func.fset:
wrapped_fset = _wrap_chart_func(func.fset)
if func.fdel:
wrapped_fdel = _wrap_chart_func(func.fdel)
wrapped = property(wrapped_fget, wrapped_fset, wrapped_fdel)
else:
raise AssertionError()
setattr(SseqDisplay, name, wrapped)
# for a in dir(SseqChart):
# if a.startswith("_") or a in dir(SseqDisplay):
# continue
# # The __getattr__ and __dir__ methods above aren't enough to get docs for properties.
# # For properties, we copy a wrapper from SseqChart to SseqDisplay.
# # Note that if we do this for methods too, it screws up jedi get_signatures.
# # So __dir__ / __getattr__ work only for methods and this works only for properties...
# if type(getattr(SseqChart, a)) is property:
# _bind_chart_attribute(a)
| 2.578125 | 3 |
aiocloudflare/api/zones/spectrum/analytics/events/summary/summary.py | Stewart86/aioCloudflare | 2 | 12799423 | from aiocloudflare.commons.auth import Auth
class Summary(Auth):
_endpoint1 = "zones"
_endpoint2 = "spectrum/analytics/events/summary"
_endpoint3 = None
| 1.179688 | 1 |
2021/22b.py | msullivan/advent-of-code | 8 | 12799424 | <filename>2021/22b.py
#!/usr/bin/env python3
from __future__ import annotations
import sys
import re
from dataclasses import dataclass
from typing import List, NamedTuple, Tuple, Set
def extract(s):
return [int(x) for x in re.findall(r'(-?\d+).?', s)]
class Pos(NamedTuple):
x: int
y: int
z: int
def dist(self, y: Pos) -> int:
x = self
return abs(x[0] - y[0]) + abs(x[1] - y[1]) + abs(x[2] - y[2])
@dataclass(frozen=True, order=True)
class Box:
bot: Pos
top: Pos
def split_at(self, axis, val):
atop = self.top[axis]
abot = self.bot[axis]
mid = val - 1
assert mid >= abot
assert atop != abot
axname = "xyz"[axis]
return [
Box(self.bot, self.top._replace(**{axname: mid})),
Box(self.bot._replace(**{axname: mid+1}), self.top),
]
def overlap(self, other):
return (
self.top.x >= other.bot.x and other.top.x >= self.bot.x
and self.top.y >= other.bot.y and other.top.y >= self.bot.y
and self.top.z >= other.bot.z and other.top.z >= self.bot.z
)
def volume(self):
assert self.top.x-self.bot.x >= 0
assert self.top.y-self.bot.y >= 0
assert self.top.z-self.bot.z >= 0
return (self.top.x-self.bot.x+1)*(self.top.y-self.bot.y+1)*(self.top.z-self.bot.z+1)
def splitoff(n, m):
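    # Split box n along m's faces: returns (the pieces of n that lie outside m,
    # the piece of n overlapping m, or None if the boxes do not overlap).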
if not n.overlap(m):
return [n], None
on = n
fns = []
for axis in [0,1,2]:
if n.bot[axis] < m.bot[axis] <= n.top[axis]:
l, n = n.split_at(axis, m.bot[axis])
fns.append(l)
if n.bot[axis] <= m.top[axis] < n.top[axis]:
n, l = n.split_at(axis, m.top[axis]+1)
fns.append(l)
assert sum(x.volume() for x in fns) + n.volume() == on.volume()
check(fns, {m})
return fns, n
def check(lhs, rhs):
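    # Debug helper that would assert no two boxes in lhs/rhs overlap; disabled
    # via the early return below.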
return
for x in lhs:
for y in rhs:
assert x == y or not x.overlap(y), (x,y)
def main(args):
data = [s.strip() for s in sys.stdin if s.strip() and not s.startswith('#')]
cmds = [(s.split(" ")[0], extract(s)) for s in data]
cmds = [(s, ((x[0], x[2], x[4]), (x[1], x[3], x[5]))) for s, x in cmds]
on = set()
for i, (cmd, (lo, hi)) in enumerate(cmds):
box = Box(Pos(*lo), Pos(*hi))
print(i, cmd, box)
new_on = set()
for x in on:
splits, _ = splitoff(x, box)
new_on.update(splits)
check(new_on, {box})
if cmd == "on":
new_on |= {box}
on = new_on
check(on, on)
print(i, sum(x.volume() for x in on), len(on))
# just for fun, also compute part 1
part1 = {
z
for x in on
if (z := splitoff(x, Box(Pos(-50,-50,-50), Pos(50,50,50)))[1])
}
print(sum(x.volume() for x in part1))
print(sum(x.volume() for x in on))
if __name__ == '__main__':
main(sys.argv)
| 2.796875 | 3 |
Knowledge test/11 - Decomposition.py | davwheat/btec-python-challenges | 0 | 12799425 | # Read the problem below and then implement it in code. You do not need to submit your
# written decomposition of how you’ve worked it out but make sure to comment your code
# to explain what you’ve done.
#
# A computer generates a random number from 0 – 10. It then asks the user to make a
# guess. They have 5 attempts to get it right. If they get it correct, the program says
# they’ve won and ends. If they’re wrong, they’re asked to guess again and told how many
# attempts they have remaining.
from random import randint
# Inclusive
random_num = randint(0, 10)
turns = 5
# 5 turns
for turn in range(turns, 0, -1):
    guess = int(input("Make a guess: "))
    if guess == random_num:
        print("You're correct!")
        break
    else:
        print(f"Incorrect guess. You have {turn - 1} guesses remaining.")
| 4.25 | 4 |
msldap/external/asciitree/asciitree/traversal.py | zhuby1973/msldap | 79 | 12799426 | <gh_stars>10-100
from .util import KeyArgsConstructor
class Traversal(KeyArgsConstructor):
"""Traversal method.
Used by the tree rendering functions like :class:`~asciitree.LeftAligned`.
"""
def get_children(self, node):
"""Return a list of children of a node."""
raise NotImplementedError
def get_root(self, tree):
"""Return a node representing the tree root from the tree."""
return tree
def get_text(self, node):
"""Return the text associated with a node."""
return str(node)
class DictTraversal(Traversal):
"""Traversal suitable for a dictionary. Keys are tree labels, all values
must be dictionaries as well."""
def get_children(self, node):
return list(node[1].items())
def get_root(self, tree):
return list(tree.items())[0]
def get_text(self, node):
return node[0]
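# Illustrative pairing with asciitree's LeftAligned renderer (DictTraversal is its
# default traversal): LeftAligned(traverse=DictTraversal())({'root': {'a': {}, 'b': {}}})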
class AttributeTraversal(Traversal):
"""Attribute traversal.
Uses an attribute of a node as its list of children.
"""
attribute = 'children' #: Attribute to use.
def get_children(self, node):
return getattr(node, self.attribute)
| 3.671875 | 4 |
weboauth2/apps/oauth2/views/app.py | askar-alty/oauth2 | 1 | 12799427 | from django.urls import reverse_lazy
from oauth2_provider import views
from .. import mixins
class ApplicationList(mixins.TwoFactorMixin, mixins.ApplicationViewMixin, views.ApplicationList):
template_name = 'oauth2/applications/list.html'
class ApplicationRegistration(mixins.ApplicationCreationMixin, mixins.TwoFactorMixin, views.ApplicationRegistration):
template_name = 'oauth2/applications/register.html'
def get_success_url(self):
return reverse_lazy('application_detail', kwargs={'pk': self.object.pk})
class ApplicationDetail(mixins.ApplicationViewMixin, mixins.TwoFactorMixin, views.ApplicationDetail):
template_name = 'oauth2/applications/detail.html'
class ApplicationDelete(mixins.ApplicationDeleteMixin, mixins.TwoFactorMixin, views.ApplicationDelete):
template_name = 'oauth2/applications/delete.html'
success_url = reverse_lazy('application_list')
class ApplicationUpdate(mixins.ApplicationChangeMixin, mixins.TwoFactorMixin, views.ApplicationUpdate):
template_name = 'oauth2/applications/update.html'
def get_success_url(self):
return reverse_lazy('application_detail', kwargs={'pk': self.object.pk})
| 2.0625 | 2 |
modules/clan.py | mrhappyasthma/HoN-Trivia-Bot | 9 | 12799428 | # -*- coding: utf8 -*-
from hon.packets import ID
def setup(bot):
bot.config.module_config('welcome_members',[1,'Will welcome members in /c m if set to non-zero value'])
bot.config.module_config('officers', [[], 'Officers alts'])
bot.config.module_config('allowdnd', [[], 'Allowed to use DND command'])
bot.dnd = []
def change_member(bot,origin,data):
who,status,whodid = data[0],data[1],data[2]
if status == 0:
del(bot.clan_roster[who])
elif status == 1:
if who in bot.clan_roster:
bot.clan_roster[who]['rank'] = 'Member'
else:
bot.clan_roster[who] = {"rank":"Member"}
elif status == 2:
bot.clan_roster[who]['rank'] = 'Officer'
elif status == 3:#not sure about this one
bot.clan_roster[who]['rank'] = 'Leader'
change_member.event = [ID.HON_SC_CLAN_MEMBER_CHANGE]
def add_member(bot,origin,data):
id = data[0]
bot.clan_roster[id] = {"rank":"Member"}
if bot.config.welcome_members > 0 and id in bot.id2nick:
nick = bot.id2nick[id]
bot.write_packet(ID.HON_CS_CLAN_MESSAGE,'Welcome, {0}!'.format(nick))
add_member.event = [ID.HON_SC_CLAN_MEMBER_ADDED]
def member_changestatus(bot,origin,data):
id = data[0]
if id in bot.clan_roster:
bot.clan_status[id] = data[1]
        if data[1] in [ID.HON_STATUS_OFFLINE]:
for key, nick in enumerate(bot.dnd):
if id in bot.id2nick and bot.id2nick[id] == nick:
del(bot.dnd[key])
break
if data[1] in [ID.HON_STATUS_ONLINE]:
""" """
#nick = bot.id2nick[id]
#bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick)
elif id in bot.id2nick and bot.id2nick[id] == bot.config.owner:
bot.clan_status[id] = data[1]
member_changestatus.event = [ID.HON_SC_UPDATE_STATUS]
def member_initstatus(bot,origin,data):
for u in data[1]:
id = u[0]
if id in bot.clan_roster:
if u[1] in [ID.HON_STATUS_ONLINE, ID.HON_STATUS_INGAME]:
nick = bot.id2nick[id]
#bot.clan_roster[id]['upgrades'] = user_upgrades(bot, nick)
bot.clan_status[id] = u[1]
elif bot.id2nick[id] == bot.config.owner:
bot.clan_status[id] = u[1]
member_initstatus.event = [ID.HON_SC_INITIAL_STATUS]
member_initstatus.thread = False
def invite(bot,input):
"""invites to clan, admins only"""
if not input.admin: return False
bot.write_packet(ID.HON_CS_CLAN_ADD_MEMBER,input.group(2))
bot.reply("Invited {0}".format(input.group(2)))
invite.commands = ['invite']
def remove(bot,input):
"""remove from clan, admins only"""
if not input.admin: return False
nick = input.group(2).lower()
if nick not in bot.nick2id:
        bot.reply("Sorry, I don't know " + nick)
else:
id = bot.nick2id[nick]
bot.write_packet(ID.HON_CS_CLAN_REMOVE_MEMBER,id)
query = { 'f' : 'set_rank', 'target_id' : id, 'member_ck': bot.cookie, 'rank' : 'Remove', 'clan_id' : bot.clan_info['clan_id'] }
bot.masterserver_request(query)
bot.reply(nick + " was removed from the clan")
remove.commands = ['remove']
status = {
ID.HON_STATUS_OFFLINE: "Offline",
ID.HON_STATUS_ONLINE: "Online",
ID.HON_STATUS_INLOBBY: "In Lobby",
ID.HON_STATUS_INGAME: "In Game"
}
def sublist(alist, value):
return [dictio for dictio in alist if alist[dictio] == value]
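# Illustrative use of sublist() with hypothetical account ids (not from this module):
#   sublist({111: ID.HON_STATUS_ONLINE, 222: ID.HON_STATUS_INGAME}, ID.HON_STATUS_ONLINE) == [111]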
def info(bot,input):
"""Get clan member info"""
if not input.group(2):
bot.reply(
"{0} - Members: {1}, Online: {2}, In-Game: {3}"
.format(
bot.clan_info['name'],
len(bot.clan_roster),
len(sublist(bot.clan_status, ID.HON_STATUS_ONLINE)),
len(sublist(bot.clan_status, ID.HON_STATUS_INGAME))
)
)
else:
nick = input.group(2).lower()
if nick not in bot.nick2id:
bot.reply("Unknown Player")
else:
id = bot.nick2id[nick]
if id in bot.clan_roster:
player = bot.clan_roster[id]
rank = player['rank']
query = {'nickname' : nick}
query['f'] = 'show_stats'
query['table'] = 'player'
data = bot.masterserver_request(query,cookie=True)
bot.reply("{0} - Rank: {1}, Last Online: {2}, Status: {3}".format(nick, rank, data['last_activity'], status[bot.clan_status[id]]))
else:
bot.reply("Not in clan")
info.commands = ['info']
def officers(bot, input):
"""Find available officers"""
avail_officers = []
for ply in bot.id2nick:
if ply == bot.account_id:
continue
if bot.id2nick[ply] in bot.dnd:
continue
if ply in bot.clan_status and ply in bot.clan_roster:
if bot.clan_status[ply] is ID.HON_STATUS_ONLINE and (bot.clan_roster[ply]['rank'] in ['Officer', 'Leader'] or bot.id2nick[ply] in bot.config.officers):
avail_officers.append(bot.id2nick[ply])
elif bot.id2nick[ply] in bot.config.officers:
if ply in bot.user_status and bot.user_status[ply] is ID.HON_STATUS_ONLINE:
avail_officers.append(bot.id2nick[ply])
if len(avail_officers) > 0:
outstr = ', '.join(avail_officers)
else:
outstr = 'None'
bot.reply( "Available officers: {0}".format( outstr ) )
officers.commands = ['officers']
def officer(bot, input):
"""Add Officer Alt"""
if not input.admin:
return officers(bot, input)
if not input.group(2):
return
nick = input.group(2).lower()
if not nick in bot.config.officers:
bot.config.set_add('officers', nick)
bot.reply("Added {0} to officer list".format(nick))
else:
bot.reply(nick + " is already an officer")
officer.commands = ['officer']
def unofficer(bot, input):
"""Remove Officer Alt"""
if not input.admin:
return False
if not input.group(2):
return
nick = input.group(2).lower()
if nick in bot.config.officers:
bot.config.set_del('officers', nick)
bot.reply("Removed {0} from officer list".format(nick))
else:
bot.reply(nick + " isn't an officer")
unofficer.commands = ['unofficer']
def announce(bot, input):
if not input.admin:
return False
if not input.group(2):
return
bot.write_packet(ID.HON_CS_CLAN_MESSAGE, input.group(2))
announce.commands = ['announce']
def dnd(bot, input):
"""Users can set themselves to not appear in player listing commands"""
if input.nick not in bot.nick2id:
bot.reply("Error occurred")
return
id = bot.nick2id[input.nick]
if not id in bot.clan_roster or (id in bot.clan_roster and not bot.clan_roster[id]['rank'] in ['Officer', 'Leader']):
if not input.nick in bot.config.officers and not input.nick in bot.config.allowdnd:
return
for key, nick in enumerate(bot.dnd):
if input.nick == nick:
bot.reply("You are now available.")
del(bot.dnd[key])
return
bot.reply("You are now unavailable.")
bot.dnd.append(input.nick)
dnd.commands = ['dnd']
| 2.265625 | 2 |
demo/doctype/task.py | Nikhil9168/demo | 0 | 12799429 | import frappe
def execute():
a=frappe.new_doc("Task")
a.subject='axy'
a.save()
print(a.name)
# #bench execute demo.doctype.task.execute
    # print('***************')
| 1.71875 | 2
tests/messages/data/project/_hidden_by_default/hidden_file.py | kolonialno/babel | 1 | 12799430 | from gettext import gettext
def foo():
print(gettext('ssshhh....'))
| 1.773438 | 2 |
assets/nurses_2/widgets/_root.py | Reverend-Toady/Duck-Builder | 1 | 12799431 | import numpy as np
from ..colors import Color
from .widget import Widget, overlapping_region
from .widget_data_structures import Point, Size, Rect
class _Root(Widget):
"""
Root widget. Meant to be instantiated by the `App` class. Renders to terminal.
"""
def __init__(self, app, env_out, default_char, default_color: Color):
self._app = app
self.env_out = env_out
self.default_char = default_char
self.default_color = default_color
self.children = [ ]
self.resize(env_out.get_size())
def resize(self, dim: Size):
"""
Resize canvas. Last render is erased.
"""
self.env_out.erase_screen()
self.env_out.flush()
self._dim = dim
self._last_canvas = np.full(dim, self.default_char, dtype=object)
self._last_colors = np.full((*dim, 6), self.default_color, dtype=np.uint8)
self.canvas = np.full_like(self._last_canvas, "><") # "><" will guarantee an entire screen redraw.
self.colors = self._last_colors.copy()
# Buffer arrays to re-use in the `render` method:
self._char_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
self._color_diffs = np.zeros_like(self.colors, dtype=np.bool8)
self._reduced_color_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
for child in self.children:
child.update_geometry()
@property
def top(self):
return 0
@property
def left(self):
return 0
@property
def pos(self):
return Point(0, 0)
@property
def absolute_pos(self):
return Point(0, 0)
@property
def is_transparent(self):
return False
@property
def is_visible(self):
return True
@property
def parent(self):
return None
@property
def root(self):
return self
@property
def app(self):
return self._app
def absolute_to_relative_coords(self, coord):
return coord
def render(self):
"""
Paint canvas. Render to terminal.
"""
# Swap canvas with last render:
self.canvas, self._last_canvas = self._last_canvas, self.canvas
self.colors, self._last_colors = self._last_colors, self.colors
# Bring arrays into locals:
canvas = self.canvas
colors = self.colors
char_diffs = self._char_diffs
color_diffs = self._color_diffs
reduced_color_diffs = self._reduced_color_diffs
env_out = self.env_out
write = env_out._buffer.append
# Erase canvas:
canvas[:] = self.default_char
colors[:, :] = self.default_color
overlap = overlapping_region
height, width = canvas.shape
rect = Rect(
0,
0,
height,
width,
height,
width,
)
for child in self.children:
if region := overlap(rect, child):
dest_slice, child_rect = region
child.render(canvas[dest_slice], colors[dest_slice], child_rect)
# Find differences between current render and last render:
# (This is optimized version of `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)`
# that re-uses buffers instead of creating new arrays.)
np.not_equal(self._last_canvas, canvas, out=char_diffs)
np.not_equal(self._last_colors, colors, out=color_diffs)
np.any(color_diffs, axis=-1, out=reduced_color_diffs)
np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs)
write("\x1b[?25l") # Hide cursor
ys, xs = np.nonzero(char_diffs)
for y, x, color, char in zip(ys, xs, colors[ys, xs], canvas[ys, xs]):
# The escape codes for moving the cursor and setting the color concatenated:
write("\x1b[{};{}H\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}".format(y + 1, x + 1, *color, char))
write("\x1b[0m") # Reset attributes
env_out.flush()
def dispatch_press(self, key_press):
"""
Dispatch key press to descendants until handled.
"""
any(widget.dispatch_press(key_press) for widget in reversed(self.children))
def dispatch_click(self, mouse_event):
"""
        Dispatch mouse event to descendants until handled.
"""
any(widget.dispatch_click(mouse_event) for widget in reversed(self.children))
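# Self-contained sketch (not part of the widget API) of the diff-then-redraw idea
# used in `render` above: only cells whose character or color changed since the
# last frame are rewritten to the terminal.
#
#     import numpy as np
#     last = np.array([["a", "b"], ["c", "d"]], dtype=object)
#     new = np.array([["a", "x"], ["c", "d"]], dtype=object)
#     ys, xs = np.nonzero(last != new)   # -> only cell (0, 1) differs
#     # a real frame would emit one cursor-move + color escape per (y, x) pair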
| 2.546875 | 3 |
slashdot_news_aggregate.py | tmzwane/Slashdot_News_Aggregate | 1 | 12799432 | import re
import time
from datetime import datetime
import urllib
import mechanicalsoup
import getpass
from bs4 import BeautifulSoup
# Convert a Slashdot timestamp string to seconds since the Unix epoch
def convertTime(time):
try:
time = time.replace(",","").replace("@","").replace("."," ").replace(":"," ")
t2 = datetime.strptime(time[3:], "%A %B %d %Y %I %M%p")
return (t2-datetime(1970,1,1)).total_seconds()
except Exception:
print ("Error while converting time to seconds")
return 1
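# Illustrative call with a made-up Slashdot-style timestamp (the string must fit the
# strptime pattern above after the replace() calls strip ",", "@", "." and ":"):
#   convertTime("on Sunday August 26, 2018 @08:30AM")  ->  seconds since 1970-01-01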
url = 'https://slashdot.org/'
# List Variables
outputList = []
article_headline_list = []
writer_list = []
time_posted_list = []
response = []
# Count Variables
totalRecords=0
totalRecordsOut=0
page=-1
timestamp=0
browser = mechanicalsoup.StatefulBrowser()
On_This_Page = False
logged_in = False
# Loop until logged in
browser.open(url)
while not logged_in:
nick = input("Enter your nickname for slashdot.org: ") #Chazzio1
passw = getpass.getpass("Enter your password: ") #<PASSWORD>
while(timestamp<1):
try:
timestamp = int(input("Enter timestamp in seconds since 1970: ")) # 1535241600
except Exception:
"Not a valid number"
browser.select_form(nr=1)
browser['unickname'] = nick
browser['upasswd'] = passw
result = browser.submit_selected()
response = result.content
soup_0 = BeautifulSoup(response, "lxml")
user = str(soup_0.find_all(class_="user-access"))
if user.find(nick)>0:
logged_in=True
print ("Logged in")
else:
print ("Try Again\n")
time.sleep(5)
# Loop until date found
while not(On_This_Page):
page+=1
try:
        result = browser.open(url)  # keep the latest response so the new page is parsed below
except Exception:
print ("Error cannot open next page ")
print ("Page " + url + " may not exist")
browser.close()
break
#release resources
# HTML to BeautifulSoup
response = ""
response=result.content
soup = ""
soup = BeautifulSoup(response, "lxml")
# Find all Headlines
article_headline = soup.find_all('span',class_="story-title")
poster = soup.find_all('span', class_="story-byline")
time_posted = soup.find_all('time')
# Store all required info
for headline in article_headline:
title = '\"'+headline.a.get_text()+'\"'
article_headline_list.append(title) #Get Text headline
totalRecords+=1
for t in time_posted:
time_posted_list.append(convertTime(t.get("datetime")))
for val in poster:
writer = val.find(text=True)
writer = " ".join(re.split("\s+", writer, flags=re.UNICODE))
writer = writer.replace(' ', '')
writer = writer.replace('Posted','')
writer = writer.replace('by','')
writer_list.append(writer)
# Make output List as per format required
for j in range(totalRecords):
if (int(time_posted_list[j]) < timestamp):
On_This_Page = True
break
else:
outputList.append(str(
"{" "\n" "\"headline\": ") + str(article_headline_list[j]) +
"\n\"author\": \"" + str(writer_list[j]) +
"\"\n\"date\": " + str(int(time_posted_list[j])) + "\n},\n"
)
totalRecordsOut+=1;
# All records on page within timeframe, open next page
if totalRecordsOut%totalRecords == 0:
totalRecordsOut=0
url = str('https://slashdot.org/?page=') + str(page+1)
# Display this message while loading other pages
print ("Opening next page " + url)
for headline in outputList:
print (headline)
print ("Total headlines returned: " + str(totalRecordsOut))
browser.close()
| 3.046875 | 3 |
tests/test_environ.py | cheremnov/bddcli | 5 | 12799433 | import os
from bddcli import Given, stdout, Application, when, given
def foos(): # pragma: no cover
e = os.environ.copy()
# For Linux and Windows
discarded_variables = ['LC_CTYPE', 'PWD',
'COMSPEC', 'PATHEXT', 'PROMPT', 'SYSTEMROOT']
# Windows environment variables are case-insensitive, lowercase them
print(' '.join(
f'{k}: {v}' for k, v in e.items() if k not in discarded_variables
).lower())
app = Application('foo', 'tests.test_environ:foos')
def test_environ():
with Given(app, environ={'bar': 'baz'}):
assert stdout == 'bar: baz\n'
when(environ=given - 'bar')
assert stdout == '\n'
when(environ=given + {'qux': 'quux'})
assert stdout == 'bar: baz qux: quux\n'
when(environ=given | {'bar': 'quux'})
assert stdout == 'bar: quux\n'
| 2.515625 | 3 |
VEnCode_Django/settings/production.py | AndreMacedo88/VEnCode-Web | 0 | 12799434 | from .base import *
import dj_database_url
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# If DEBUG is False, send the errors to the email:
ADMINS = [
('Andre', '<EMAIL>'),
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'VEnCode_Django.urls'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config("DB_NAME"),
'USER': config("DB_USER"),
'PASSWORD': '',
'HOST': config("DB_HOST"),
'PORT': '',
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Configure Django App for Heroku.
django_heroku.settings(locals())
# Production set up for heroku:
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Allauth configurations, backend to send sign-in e-mail verification e-mail:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# REDIS related settings
CELERY_BROKER_URL = config('REDIS_URL', default="redis://")
CELERY_RESULT_BACKEND = config('REDIS_URL', default="redis://")
BROKER_URL = config('REDIS_URL', default="redis://")
# Allauth related settings
EMAIL_HOST = config("MAILGUN_SMTP_SERVER")
EMAIL_PORT = config("MAILGUN_SMTP_PORT")
EMAIL_HOST_USER = DEFAULT_FROM_EMAIL = config("MAILGUN_SMTP_LOGIN")
EMAIL_HOST_PASSWORD = config("MAILGUN_SMTP_PASSWORD")
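# Example environment (hypothetical values) that the config() lookups above expect:
#   DB_NAME=vencode DB_USER=vencode DB_HOST=localhost
#   REDIS_URL=redis://localhost:6379/0
#   MAILGUN_SMTP_SERVER=smtp.mailgun.org MAILGUN_SMTP_PORT=587
#   MAILGUN_SMTP_LOGIN=postmaster@example.org MAILGUN_SMTP_PASSWORD=<secret>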
| 1.96875 | 2 |
openpnm/contrib/_transient_multiphysics.py | xu-kai-xu/OpenPNM | 2 | 12799435 | import logging
import numpy as np
from openpnm.utils import SettingsAttr, Docorator
from openpnm.integrators import ScipyRK45
from openpnm.algorithms import GenericAlgorithm
from openpnm.algorithms._solution import SolutionContainer, TransientSolution
logger = logging.getLogger(__name__)
docstr = Docorator()
@docstr.dedent
class TransientMultiPhysicsSettings:
r"""
Parameters
----------
%(GenericAlgorithmSettings.parameters)s
algorithms: list
List of transient algorithm objects to be solved in a coupled manner
"""
algorithms = []
@docstr.dedent
class TransientMultiPhysics(GenericAlgorithm):
r"""
A subclass for transient multiphysics simulations.
"""
def __init__(self, algorithms, settings=None, **kwargs):
self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings)
self.settings.algorithms = [alg.name for alg in algorithms]
self._algs = algorithms
super().__init__(settings=self.settings, **kwargs)
def run(self, x0, tspan, saveat=None, integrator=None):
"""
        Runs all of the transient algorithms simultaneously and returns the
solution.
        Parameters (adapted from TransientReactiveTransport)
----------
x0 : ndarray or float
Array (or scalar) containing initial condition values.
tspan : array_like
Tuple (or array) containing the integration time span.
saveat : array_like or float, optional
If an array is passed, it signifies the time points at which
the solution is to be stored, and if a scalar is passed, it
refers to the interval at which the solution is to be stored.
integrator : Integrator, optional
Integrator object which will be used to to the time stepping.
Can be instantiated using openpnm.integrators module.
Returns
-------
TransientSolution
The solution object, which is basically a numpy array with
the added functionality that it can be called to return the
solution at intermediate times (i.e., those not stored in the
solution object). In the case of multiphysics, the solution object
is a combined array of solutions for each physics. The solution
for each physics is available on each algorithm object
independently.
"""
logger.info('Running TransientMultiphysics')
if np.isscalar(saveat):
saveat = np.arange(*tspan, saveat)
if (saveat is not None) and (tspan[1] not in saveat):
saveat = np.hstack((saveat, [tspan[1]]))
integrator = ScipyRK45() if integrator is None else integrator
for i, alg in enumerate(self._algs):
# Perform pre-solve validations
alg._validate_settings()
alg._validate_data_health()
            # Write x0 to the algorithm object (needed by _update_iterative_props)
x0_i = self._get_x0(x0, i)
alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i
alg._merge_inital_and_boundary_values()
# Build RHS (dx/dt = RHS), then integrate the system of ODEs
rhs = self._build_rhs()
# Integrate RHS using the given solver
soln = integrator.solve(rhs, x0, tspan, saveat)
# Return dictionary containing solution
self.soln = SolutionContainer()
for i, alg in enumerate(self._algs):
# Slice soln and attach as TransientSolution object to each alg
t = soln.t
x = soln[i*alg.Np:(i+1)*alg.Np, :]
alg.soln = TransientSolution(t, x)
# Add solution of each alg to solution dictionary
self.soln[alg.settings['quantity']] = alg.soln
return self.soln
def _run_special(self, x0): ...
def _build_rhs(self):
"""
Returns a function handle, which calculates dy/dt = rhs(y, t).
Notes
-----
``y`` is a composite array that contains ALL the variables that
the multiphysics algorithm solves for, e.g., if the constituent
algorithms are ``TransientFickianDiffusion``, and
``TransientFourierConduction``, ``y[0:Np-1]`` refers to the
concentration, and ``[Np:2*Np-1]`` refers to the temperature
values.
"""
def ode_func(t, y):
# Initialize RHS
rhs = []
for i, alg in enumerate(self._algs):
# Get x from y, assume alg.Np is same for all algs
x = self._get_x0(y, i) # again use helper function
# Store x onto algorithm,
alg.x = x
# Build A and b
alg._update_A_and_b()
A = alg.A.tocsc()
b = alg.b
# Retrieve volume
V = alg.network[alg.settings["pore_volume"]]
                # Calculate the RHS
rhs_alg = np.hstack(-A.dot(x) + b)/V
rhs = np.hstack((rhs, rhs_alg))
return rhs
return ode_func
def _get_x0(self, x0, i):
tmp = [alg.Np for alg in self._algs]
idx_end = np.cumsum(tmp)
idx_start = np.hstack((0, idx_end[:-1]))
x0 = x0[idx_start[i]:idx_end[i]]
return x0
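# Minimal usage sketch (hypothetical network and algorithm objects; their setup is
# assumed, not shown in this file):
#
#     tmp = TransientMultiPhysics(algorithms=[heat_alg, mass_alg], network=pn)
#     x0 = np.zeros(2 * pn.Np)                     # stacked ICs, one block per algorithm
#     soln = tmp.run(x0, tspan=(0, 10), saveat=1)
#     temperature_at_t5 = soln[heat_alg.settings['quantity']](5)   # interpolate at t = 5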
| 2.46875 | 2 |
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/05_formatierte_ausgabe.py | Apop85/Scripts | 0 | 12799436 | # Formatted string output
dic={'Käse' : 5, 'Brot' : 3, 'Wein' : 2,
'Eier' : 6, 'Nuss' : 12, 'Tee' : 14,
'Müsli' : 1}
print(('Inventar'.center(16, '#')).center(60))
for namen, anzahl in dic.items():
print((namen.ljust(13, '.') + str(anzahl).rjust(3, '.')).center(60))
print(('#'.center(16, '#')).center(60))
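# Illustrative output (the outer .center(60) padding is omitted here):
#   ####Inventar####
#   Käse...........5
#   Brot...........3
#   ...
#   ################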
| 3.515625 | 4 |
shorty/config/parser.py | dennisxtria/shorty | 0 | 12799437 | import json
import sys
from pathlib import Path
def create_config():
"""
    Merges `config.json` with the respective environment's config file
    into a single dictionary.
Sidenote: the implementation for the generic access token
was made specific so that the request headers are easily generated
with just changing the access token per environment.
"""
with open("./config.json", "r") as config_file:
default_config = json.load(config_file)
    # sys.argv[0] is always the script name, so an explicit env must be argv[1]
    if len(sys.argv) >= 2:
env = sys.argv[1]
else:
env = "dev"
if env not in {"dev", "prod", "staging", "test"}:
raise ValueError("Invalid env name")
env_config_path = Path("./config.{}.json".format(env))
if env_config_path.exists():
with open(env_config_path) as env_config_file:
env_config = json.load(env_config_file)
else:
raise FileExistsError("The env config file does not exist")
return dict(_merge_configs(default_config, env_config))
def _merge_configs(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
yield (k, dict(_merge_configs(dict1[k], dict2[k])))
else:
yield (k, dict2[k])
elif k in dict1:
yield (k, dict1[k])
else:
if k == "generic_access_token":
dict1["headers"]["authorization"] = (
dict1["headers"]["authorization"] + dict2[k]
)
yield (k, dict1["headers"]["authorization"])
else:
yield (k, dict2[k])
config = create_config()
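# Illustrative config layout (hypothetical contents, not shipped with the module):
#
#   config.json:      {"headers": {"authorization": "Bearer "}, "timeout": 5}
#   config.dev.json:  {"generic_access_token": "dev-token", "timeout": 10}
#
# create_config() would then return a merged dict in which
# headers.authorization ends with the environment-specific token.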
| 3.15625 | 3 |
mmdeploy/codebase/mmocr/models/text_recognition/sar_encoder.py | zhiqwang/mmdeploy | 746 | 12799438 | # Copyright (c) OpenMMLab. All rights reserved.
import mmocr.utils as utils
import torch
import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
func_name='mmocr.models.textrecog.encoders.SAREncoder.forward',
backend='default')
def sar_encoder__forward(ctx, self, feat, img_metas=None):
"""Rewrite `forward` of SAREncoder for default backend.
Rewrite this function to:
1. convert tuple value of feat.size to int, making model exportable.
2. use torch.ceil to replace original math.ceil and if else in mmocr.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class SAREncoder.
feat (Tensor): Encoded feature map of shape (N, C, H, W).
img_metas (Optional[list[dict]]): A list of image info dict where each
dict has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
Returns:
holistic_feat (Tensor): A feature map output from SAREncoder. The shape
[N, M].
"""
if img_metas is not None:
assert utils.is_type_list(img_metas, dict)
assert len(img_metas) == feat.size(0)
valid_ratios = None
if img_metas is not None:
valid_ratios = [
img_meta.get('valid_ratio', 1.0) for img_meta in img_metas
] if self.mask else None
h_feat = int(feat.size(2))
feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0)
feat_v = feat_v.squeeze(2) # bsz * C * W
feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C
holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C
if valid_ratios is not None:
valid_hf = []
T = holistic_feat.size(1)
for i, valid_ratio in enumerate(valid_ratios):
# use torch.ceil to replace original math.ceil and if else in mmocr
valid_step = torch.ceil(T * valid_ratio).long() - 1
valid_hf.append(holistic_feat[i, valid_step, :])
valid_hf = torch.stack(valid_hf, dim=0)
else:
valid_hf = holistic_feat[:, -1, :] # bsz * C
holistic_feat = self.linear(valid_hf) # bsz * C
return holistic_feat
| 2.265625 | 2 |
tests/test_node_expand.py | spyysalo/wikitextprocessor | 38 | 12799439 | # Tests for WikiText parsing
#
# Copyright (c) 2020-2021 <NAME>. See file LICENSE and https://ylonen.org
import unittest
from wikitextprocessor import Wtp
from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode)
def parse_with_ctx(title, text, **kwargs):
assert isinstance(title, str)
assert isinstance(text, str)
ctx = Wtp()
ctx.analyze_templates()
ctx.start_page(title)
root = ctx.parse(text, **kwargs)
print("parse_with_ctx: root", type(root), root)
return root, ctx
def parse(title, text, **kwargs):
root, ctx = parse_with_ctx(title, text, **kwargs)
assert isinstance(root, WikiNode)
assert isinstance(ctx, Wtp)
return root
class NodeExpTests(unittest.TestCase):
def backcvt(self, text, expected):
root, ctx = parse_with_ctx("test", text)
self.assertEqual(ctx.errors, [])
self.assertEqual(ctx.warnings, [])
t = ctx.node_to_wikitext(root)
self.assertEqual(t, expected)
def tohtml(self, text, expected):
root, ctx = parse_with_ctx("test", text)
self.assertEqual(ctx.errors, [])
self.assertEqual(ctx.warnings, [])
t = ctx.node_to_html(root)
self.assertEqual(t, expected)
def totext(self, text, expected):
root, ctx = parse_with_ctx("test", text)
self.assertEqual(ctx.errors, [])
self.assertEqual(ctx.warnings, [])
t = ctx.node_to_text(root)
self.assertEqual(t, expected)
def test_basic1(self):
self.backcvt("", "")
def test_basic2(self):
self.backcvt("foo bar\nxyz\n", "foo bar\nxyz\n")
def test_basic3(self):
self.backcvt("&amp;", "&amp;")
def test_basic4(self):
self.backcvt("{{", "{{")
def test_title1(self):
self.backcvt("== T1 ==\nxyz\n", "\n== T1 ==\n\nxyz\n")
def test_title2(self):
self.backcvt("=== T1 ===\nxyz\n", "\n=== T1 ===\n\nxyz\n")
def test_title3(self):
self.backcvt("==== T1 ====\nxyz\n", "\n==== T1 ====\n\nxyz\n")
def test_title4(self):
self.backcvt("===== T1 =====\nxyz\n", "\n===== T1 =====\n\nxyz\n")
def test_title5(self):
self.backcvt("====== T1 ======\nxyz\n", "\n====== T1 ======\n\nxyz\n")
def test_hline1(self):
self.backcvt("aaa\n----\nbbbb", "aaa\n\n----\n\nbbbb")
def test_list1(self):
self.backcvt("*a\n* b\n", "*a\n* b\n")
def test_list2(self):
self.backcvt("abc\n*a\n* b\ndef", "abc\n*a\n* b\ndef")
def test_list3(self):
self.backcvt("abc\n*a\n*# c\n*# d\n* b\ndef",
"abc\n*a\n*# c\n*# d\n* b\ndef")
def test_list4(self):
self.backcvt("abc\n*a\n**b\n*:c\n",
"abc\n*a\n**b\n*:c\n")
def test_pre1(self):
self.backcvt("a<pre>foo\n bar</pre>b",
"a<pre>foo\n bar</pre>b")
def test_preformatted1(self):
self.backcvt(" a\n b", " a\n b")
def test_link1(self):
self.backcvt("[[foo bar]]", "[[foo bar]]")
def test_link2(self):
self.backcvt("[[foo|bar]]", "[[foo|bar]]")
def test_link3(self):
self.backcvt("a [[foo]]s bar", "a [[foo]]s bar")
def test_template1(self):
self.backcvt("{{foo|a|b|c=4|{{{arg}}}}}", "{{foo|a|b|c=4|{{{arg}}}}}")
def test_template2(self):
self.backcvt("{{foo}}", "{{foo}}")
def test_template3(self):
self.backcvt("{{!}}", "{{!}}")
def test_templatearg1(self):
self.backcvt("{{{1}}}", "{{{1}}}")
    def test_templatearg1b(self):
self.backcvt("{{{{{templ}}}}}", "{{{{{templ}}}}}")
def test_templatearg2(self):
self.backcvt("{{{a|def}}}", "{{{a|def}}}")
def test_templatearg3(self):
self.backcvt("{{{a|}}}", "{{{a|}}}")
def test_parserfn1(self):
self.backcvt("{{#expr: 1 + 2}}", "{{#expr: 1 + 2}}")
def test_parserfn2(self):
self.backcvt("{{#expr:1+{{v}}}}", "{{#expr:1+{{v}}}}")
def test_parserfn3(self):
self.backcvt("{{ROOTPAGENAME}}", "{{ROOTPAGENAME:}}")
def test_url1(self):
self.backcvt("[https://wikipedia.org]", "[https://wikipedia.org]")
def test_url2(self):
self.backcvt("https://wikipedia.org/", "[https://wikipedia.org/]")
def test_url3(self):
self.backcvt("https://wikipedia.org/x/y?a=7%255",
"[https://wikipedia.org/x/y?a=7%255]")
def test_table1(self):
self.backcvt("{| |}", "\n{| \n\n|}\n")
def test_table2(self):
self.backcvt('{| class="x"\n|}', '\n{| class="x"\n\n|}\n')
def test_tablecaption1(self):
self.backcvt("{|\n|+\ncapt\n|}", "\n{| \n\n|+ \n\ncapt\n\n|}\n")
def test_tablerowcell1(self):
self.backcvt("{|\n|- a=1\n| cell\n|}",
'\n{| \n\n|- a="1"\n\n| cell\n\n\n|}\n')
def test_tablerowhdr1(self):
self.backcvt("{|\n|- a=1\n! cell\n|}",
'\n{| \n\n|- a="1"\n\n! cell\n\n\n|}\n')
def test_magicword1(self):
self.backcvt("a\n__TOC__\nb", "a\n\n__TOC__\n\nb")
def test_html1(self):
self.backcvt("a<b>foo</b>b", "a<b>foo</b>b")
    def test_html2(self):
self.backcvt('a<span class="bar">foo</span>b',
'a<span class="bar">foo</span>b')
def test_italic1(self):
self.backcvt("''i''", "''i''")
    def test_bold1(self):
        self.backcvt("'''b'''", "'''b'''")
def test_text1(self):
self.totext("", "")
def test_text2(self):
self.totext("\nfoo bar ", "foo bar")
def test_text3(self):
self.totext("<b>foo</b>", "foo")
def test_text4(self):
self.totext("<h1>foo</h1><p>bar</p>", "foo\n\nbar")
def test_text5(self):
self.totext("foo<ref x=1>bar</ref> z", "foo z")
| 2.84375 | 3 |
algorithm/leetcode/Python_2.7.10/00004.py | leonard-sxy/algorithm-practice | 1 | 12799440 | #
## https://leetcode.com/problems/median-of-two-sorted-arrays/
#
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if nums1 is None or nums2 is None:
raise ValueError('Inputs should be arrays.')
total_len = len(nums1) + len(nums2)
if total_len == 0:
raise ValueError('Two arrays cannot both be empty.')
is_even = True if total_len % 2 == 0 else False
median = [None, None]
if is_even:
median[1] = int(total_len / 2)
median[0] = median[1] - 1
else:
median[0] = int((total_len - 1) / 2)
if len(nums1) == 0:
if is_even:
return (nums2[median[0]] + nums2[median[1]]) / 2.0
else:
return nums2[median[0]]
if len(nums2) == 0:
if is_even:
return (nums1[median[0]] + nums1[median[1]]) / 2.0
else:
return nums1[median[0]]
median_sum = idx_1 = idx_2 = 0
while True:
if median[0] is None and median[1] is None:
break
if idx_1 == len(nums1):
if median[0] is not None and idx_1 + idx_2 == median[0]:
median_sum += nums2[idx_2]
if median[1] is not None:
median_sum += nums2[idx_2 + 1]
break
if median[1] is not None and idx_1 + idx_2 == median[1]:
median_sum += nums2[idx_2]
break
idx_2 += 1
continue
if idx_2 == len(nums2):
if median[0] is not None and idx_1 + idx_2 == median[0]:
median_sum += nums1[idx_1]
if median[1] is not None:
median_sum += nums1[idx_1 + 1]
break
if median[1] is not None and idx_1 + idx_2 == median[1]:
median_sum += nums1[idx_1]
break
idx_1 += 1
continue
if nums1[idx_1] == nums2[idx_2]:
if median[0] is not None and idx_1 + idx_2 == median[0] - 1:
median_sum += nums1[idx_1]
median[0] = None
if idx_1 + idx_2 == median[0]:
median_sum += nums1[idx_1]
median[0] = None
if median[1] is not None:
median_sum += nums2[idx_2]
break
if idx_1 + idx_2 == median[1] and median[1] is not None:
median_sum += nums1[idx_1]
break
idx_1 += 1
idx_2 += 1
elif nums1[idx_1] > nums2[idx_2]:
if median[0] is not None and idx_1 + idx_2 == median[0]:
median_sum += nums2[idx_2]
idx_2 += 1
median[0] = None
continue
if median[1] is not None and idx_1 + idx_2 == median[1]:
median_sum += nums2[idx_2]
break
idx_2 += 1
else:
if median[0] is not None and idx_1 + idx_2 == median[0]:
median_sum += nums1[idx_1]
idx_1 += 1
median[0] = None
continue
if median[1] is not None and idx_1 + idx_2 == median[1]:
median_sum += nums1[idx_1]
break
idx_1 += 1
return median_sum / 2.0 if is_even else median_sum
s = Solution()
s.findMedianSortedArrays([1, 2], [1, 2])
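# Sanity check: merging [1, 2] and [1, 2] gives [1, 1, 2, 2], whose median is (1 + 2) / 2.
print(s.findMedianSortedArrays([1, 2], [1, 2]))  # expected: 1.5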
| 3.578125 | 4 |
cogs/utility.py | ldgregory/bishbot | 0 | 12799441 | #! /usr/bin/env python3
"""
Bishbot - https://github.com/ldgregory/bishbot
<NAME> <<EMAIL>>
utility.py v0.1
Tested to Python v3.7.3
Description:
Utility and moderation commands for the server
Changelog:
20210606 - Fixed members command for intents
20200522 - Initial code
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import discord
import json
import os
import requests
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX')
GUILD = os.getenv('DISCORD_GUILD')
AIRVISUAL_KEY = os.getenv('AIRVISUAL_KEY')
class Utility(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print('- Utility Cog loaded')
@commands.command(name='ban',
description='Ban member from server',
help='Ban member from server',
ignore_extra=True,
hidden=True,
enabled=False)
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        # require the invoker to have ban permission
        if ctx.author.guild_permissions.ban_members:
            await member.ban(reason=reason)
            await ctx.send(f"Banned {member.mention}")
@commands.command(name='clear',
description='Clear x messages, defaults to 3',
help='Clear x messages, defaults to 3',
ignore_extra=True,
hidden=False,
enabled=True)
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount=3):
await ctx.channel.purge(limit=amount)
@commands.command(name='ip_abuse',
description='Get abuse score for IP',
help='Get abuse score for IP',
ignore_extra=True,
hidden=False,
enabled=True)
async def ip_abuse(self, ctx, ipAddress):
ABUSEIPDB_KEY = os.getenv('ABUSEIPDB_KEY')
url = 'https://api.abuseipdb.com/api/v2/check'
querystring = {
'ipAddress': ipAddress,
'maxAgeInDays': '90'
}
headers = {
'Accept': 'application/json',
'Key': ABUSEIPDB_KEY
}
response = requests.request(method='GET', url=url, headers=headers, params=querystring)
ip_info = json.loads(response.text)
data = '**Data provided by abuseipdb.com**\n\n'
for k, v in ip_info['data'].items():
data += f"{k}: {v}\n"
await ctx.channel.send(data)
@commands.command(name='kick',
description='Kick member off server',
help='Kick member off server',
ignore_extra=True,
hidden=True,
enabled=False)
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        # require the invoker to have kick permission
        if ctx.author.guild_permissions.kick_members:
            await member.kick(reason=reason)
            await ctx.send(f"Kicked {member.mention}")
@commands.command(name='member',
description='Member information',
help='Member information',
ignore_extra=True,
hidden=False,
enabled=True)
async def member(self, ctx, mention):
guild = discord.utils.get(self.bot.guilds, name=GUILD)
for member in guild.members:
if str(member.id) == str(mention.lstrip('<@!').rstrip('>')):
await ctx.channel.send(f"Nickname: {member.nick}\n"
f"Discord Name: {member.name}#{member.discriminator}\n"
f"Discord ID: {member.id}\n"
f"Joined: {member.joined_at}\n"
f"Status: {member.status}\n"
# f"Is on Mobile: {member.is_on_mobile}\n"
# f"Activity: {str(member.activity.type).lstrip('ActivityType.')} {member.activity.name}\n"
f"Guild: {member.guild}\n"
f"Guild Permissions: {member.guild_permissions}\n"
f"Top Role: {member.top_role}\n"
f"Roles: {str(', '.join([role.name for role in member.roles]).replace('@', ''))}\n")
@commands.command(name='members',
description='Current Members',
help='Current Members',
ignore_extra=True,
hidden=False,
enabled=True)
async def members(self, ctx, showall=None):
guild = discord.utils.get(self.bot.guilds, name=GUILD)
members = ''
for member in guild.members:
if showall == 'showall':
members += f"- {member.display_name} : "
members += f"{member.name} ("
members += ', '.join([role.name for role in member.roles]) + ")\n"
else:
members += f"- {member.display_name}\n"
await ctx.channel.send(f"**Server Members: {guild.member_count}**\n{members.replace('@', '')}")
@commands.command(name='nickname',
description='Change nickname',
help='Change nickname',
ignore_extra=True,
hidden=False,
enabled=True)
async def nickname(self, ctx, *, nickname):
await ctx.author.edit(nick=f"{nickname}")
# role = get(ctx.message.server.roles, name='ROLE_NAME')
# if role: # If get could find the role
# await client.add_role(ctx.message.author, role)
@commands.command(name='ping',
description='Ping latency',
help='Ping latency',
ignore_extra=True,
hidden=False,
enabled=True)
async def ping(self, ctx):
await ctx.channel.send(f"Pong... {round(self.bot.latency * 1000)} ms")
@commands.command(name='server',
description='Server information',
help='Server information',
ignore_extra=True,
hidden=False,
enabled=True)
@commands.has_role('admins')
async def server(self, ctx):
guild = discord.utils.get(self.bot.guilds, name=GUILD)
text_channels = '\n - '.join([channel.name for channel in guild.text_channels])
voice_channels = '\n - '.join([channel.name for channel in guild.voice_channels])
members = '\n - '.join([member.name for member in guild.members])
await ctx.channel.send(f"Server Name: {guild.name} (ID: {guild.id})\n"
f"Server Owner: {guild.owner} (ID: {guild.owner_id})\n"
f"Server Description: {guild.description}\n"
f"Region: {guild.region}\n"
f"File Size Limit: {guild.filesize_limit} bytes\n\n"
f"**Text Channels:**\n - {text_channels}\n\n"
f"**Voice Channels:**\n - {voice_channels}\n\n"
f"**Server Members: {guild.member_count}**\n - {members}")
@commands.command(name='unban',
description='Unban member from server',
help='Unban member from server',
ignore_extra=True,
hidden=True,
enabled=False)
async def unban(self, ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.send(f"Unbanned {user.mention}")
return
@commands.command(name='weather',
description='Weather as !weather CITY STATE',
help='Weather as !weather Santa_Fe New_Mexico',
ignore_extra=True,
hidden=True,
enabled=True)
async def weather(self, ctx, city, state):
# This intended as a 'good enough' tool. There are some accuracy issues
# such as conversion of C to F temps and wind_mapping where NNE is
# actually 11.25 - 33.75 degrees vs the ints required by range().
url = f"https://api.airvisual.com/v2/city?city={city.replace('_', '%20')}&state={state.replace('_', '%20')}&country=USA&key={AIRVISUAL_KEY}"
response = requests.request(method='GET', url=url)
jsonResponse = json.loads(response.text)
if jsonResponse['status'] == 'success':
weather_mapping = {'01': 'Clear Sky',
'02': 'Few Clouds',
'03': 'Scattered Clouds',
'04': 'Broken Clouds',
'09': 'Shower Rain',
'10': 'Rain',
'11': 'Thunderstorm',
'13': 'Snow',
'50': 'Mist'}
aqius_mapping = {'Good': range(0, 50),
'Moderate': range(51, 100),
'Unhealthy for Sensitive Groups': range(101, 150),
'Unhealthy': range(151, 200),
'Very Unhealthy': range(201, 300),
'Hazardous': range(301, 500)}
wind_mapping = {'North': range(349, 360),
'N': range(0, 11),
'NNE': range(12, 34),
'NE': range(35, 56),
'ENE': range(57, 79),
'E': range(80, 101),
'ESE': range(102, 124),
'SE': range(125, 146),
'SSE': range(147, 169),
'S': range(170, 191),
'SSW': range(192, 214),
'SW': range(215, 236),
'WSW': range(237, 259),
'W': range(260, 281),
'WNW': range(282, 304),
'NW': range(305, 326),
'NNW': range(327, 348)}
data = f"{jsonResponse['data']['city']}, {jsonResponse['data']['state']}, {jsonResponse['data']['country']}\n"
data += f"{jsonResponse['data']['location']['coordinates']}\n"
# ic comes through like 01d or 01n to differentiate day or night, we don't care
# so we're just mapping the numerical part to the human friendly text version.
data += f"{weather_mapping[jsonResponse['data']['current']['weather']['ic'][:-1]]}\n"
# Print out the AQIUS then do a map to ranges and print out a human friendly
# equivilant.
data += f"Air Quality Index: {jsonResponse['data']['current']['pollution']['aqius']} - "
for key, val in aqius_mapping.items():
if int(jsonResponse['data']['current']['pollution']['aqius']) in val:
data += f"{key}\n"
data += f"Temperature: {int(((jsonResponse['data']['current']['weather']['tp']) * 9) / 5) + 32}°F\n"
data += f"Pressure: {jsonResponse['data']['current']['weather']['pr']} hPa\n"
data += f"Humidity: {jsonResponse['data']['current']['weather']['hu']}%\n"
data += f"Wind: {int(jsonResponse['data']['current']['weather']['ws'] * 2.236936)} m/h from "
for key, val in wind_mapping.items():
if int(jsonResponse['data']['current']['weather']['wd']) in val:
data += f"{jsonResponse['data']['current']['weather']['wd']}° ({key})\n"
await ctx.channel.send(data)
else:
await ctx.channel.send(f"No data found. Make sure to use underscores instead of spaces in city or state, i.e. Sante_Fe New_Mexico")
@commands.command(name='whoami',
description='Info about you',
help='Info about you',
ignore_extra=True,
hidden=True,
enabled=True)
async def whoami(self, ctx):
await ctx.channel.send(f"User Name: {ctx.author.name}\nUser ID: {ctx.author.id}")
def setup(bot):
bot.add_cog(Utility(bot))
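# Loading sketch (hypothetical bot entry point, not part of this cog):
#
#     bot = commands.Bot(command_prefix=BOT_PREFIX, intents=discord.Intents.all())
#     bot.load_extension('cogs.utility')   # runs setup() above
#     bot.run(TOKEN)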
| 1.929688 | 2 |
MessengerCounter.py | KMChris/messenger-counter | 2 | 12799442 | import collections
import io
import json
import math
import zipfile
import logging
from urllib.error import URLError
from urllib.request import urlopen
import pandas as pd
from matplotlib import pyplot as plt
# Getting data
def set_source(filename):
"""
Sets source global variable to the path of .zip file.
:param filename: path to the downloaded .zip file
:return: None
You can provide relative path to file
>>> set_source('facebook-YourName.zip')
Absolute path (works only on Windows)
>>> set_source('C:/Users/Admin/Downloads/facebook-YourName.zip')
"""
filename = f'file:///{filename}' if filename[1] == ':' \
else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
try:
global source
source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
except URLError:
logging.error('File not found, try again.')
def get_data(conversation=None, chars=False, user=False):
"""
Reads data from messages.json or messages_chars.json
and finds key based on the beginning of the string.
:param conversation: beginning of the conversation id
or None for overall statistics (default None)
:param chars: True for counting chars in messages_chars.json,
False for counting messages in messages.json (default False)
:param user: True for user name instead of conversation id,
False otherwise (default False)
:return: dictionary containing the data and if applicable
a key pointing to a specific conversation, otherwise None
"""
try:
data = json.loads(open('messages_chars.json' if chars else 'messages.json', 'r', encoding='utf-8').read())
if user:
data = pd.DataFrame(data).fillna(0).astype('int')
for key in data.index:
if key.lower().startswith(conversation.lower()):
return data, key
else:
logging.error('Conversation not found.')
return None, None
if conversation is not None:
for key in data.keys():
if key.lower().startswith(conversation.lower()):
return data, key
else:
logging.error('Conversation not found.')
return None, None
else:
return data, None
except FileNotFoundError:
logging.error('Characters not counted.' if chars else 'Messages not counted.')
# Counting messages and characters
def count_messages():
"""
Counts messages and saves output to messages.json.
:return: None
"""
namelist = source.namelist()
total, senders = {}, {x.split('/')[2] for x in namelist
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
for sender in senders:
messages, i = collections.Counter(), 0
while True:
try:
i += 1
messages += collections.Counter(pd.DataFrame(json.loads(
source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())[
'messages']).iloc[:, 0])
except KeyError:
break
total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v for k, v in messages.items()}
total[sender]['total'] = sum(messages.values())
with open('messages.json', 'w', encoding='utf-8') as output:
json.dump(total, output, ensure_ascii=False)
def count_characters():
"""
Counts characters from messages and saves output to messages_chars.json.
:return: None
"""
namelist = source.namelist()
total, senders = {}, {x.split('/')[2] for x in namelist
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
for sender in senders:
counted_all, i = collections.Counter(), 0
while True:
try:
i += 1
frame = pd.DataFrame(json.loads(
source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())['messages'])
frame['counted'] = frame.apply(
lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')), axis=1)
counted_all += sum(frame['counted'], collections.Counter())
except KeyError:
break
total[sender] = dict(counted_all)
with open('messages_chars.json', 'w', encoding='utf-8') as output:
json.dump(total, output, ensure_ascii=False)
def count(chars=False):
"""
Counts messages or characters from messages
and saves output to the file.
:param chars: True for counting characters,
False for counting messages (default False)
:return: None
"""
if chars:
count_characters()
else:
count_messages()
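# Typical flow using only functions defined in this module (archive name is hypothetical):
#
#     set_source('facebook-YourName.zip')
#     count()                 # writes messages.json
#     data, _ = get_data()
#     statistics(data)        # overall message statistics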
# Statistics
def statistics(data_source, conversation=None, chars=False):
"""
Prints statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id or None for overall statistics
(default None)
:param chars: True for character statistics instead of messages,
False otherwise (default False)
:return: None
"""
if conversation is None:
if chars:
characters_statistics(data_source)
else:
messages_statistics(data_source)
else:
if chars:
raise NotImplementedError()
else:
print(conversation)
conversation_statistics(data_source, conversation)
def messages_statistics(data_source):
"""
Prints messages overall statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source).fillna(0).astype('int')
pd.set_option('display.max_rows', None)
total_values = data_source.loc['total'].sort_values(ascending=False)
print(total_values)
print(total_values.describe())
total_values = total_values.sort_values()
plt.rcdefaults()
plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
plt.show()
def conversation_statistics(data_source, conversation):
"""
Prints messages statistics for specific conversation of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id, or key from get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source)
data_source = data_source.loc[:, conversation]
data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
pd.set_option('display.max_rows', None)
print(data_source)
def characters_statistics(data_source):
"""
Prints characters statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source)
data_source['total'] = data_source.sum(axis=1)
data_source = data_source.iloc[:, -1]
data_source = data_source.sort_values(ascending=False).astype('int')
pd.set_option('display.max_rows', None)
print(data_source)
print(f'Total characters: {data_source.sum()}')
# TODO characters conversation statistics
def characters_conversation_statistics(data_source, conversation):
"""
Prints characters statistics for specific conversation of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id, or key from get_data() function
:return: None
"""
pass
# User statistics
def user_statistics(data_source, user_name):
"""
Prints detailed statistics for specific person of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param user_name: person name, or key from get_data() function
:return: None
"""
data_source = data_source.loc[user_name]
data_source = data_source[data_source > 0].sort_values(ascending=False)
data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30])
pd.set_option('display.max_rows', None)
print(user_name, 'statistics:')
print(data_source)
# Intervals
def interval_count(inbox_name, function, delta=0.0):
"""
Counts number of messages based on given timeframe function
:param inbox_name: directory name that contains requested messages
(usually conversation id)
:param function: pandas function that returns requested time part
:param delta: number of hours to time shift by
and count messages differently (default 0.0)
:return: dictionary of number of messages grouped by timeframe
"""
messages, i = collections.Counter(), 0
while True:
try:
i += 1
# iterates over all .json files in requested directory
messages += collections.Counter(function(pd.to_datetime(pd.DataFrame(json.loads(
source.open('messages/inbox/' + inbox_name + '/message_' + str(i) + '.json').read())[
'messages']).iloc[:, 1], unit='ms').dt.tz_localize('UTC').dt.tz_convert(
'Europe/Warsaw').add(pd.Timedelta(hours=-delta))))
except KeyError:
break
return messages
def interval_plot(messages):
"""
Shows chart based on previously defined timeframe
:param messages: dictionary of number of messages
grouped by timeframe
:return: None
"""
messages = pd.Series(messages).sort_index()
print(messages.describe())
plt.bar(messages.index, messages)
plt.savefig('messages.pdf')
plt.show()
# Hours
def hours(difference, conversation=None):
"""
Shows chart of average number of messages
send by hour throughout the day.
:param difference: number of hours to time shift by
and show statistics differently
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
if conversation is None:
hours_chats(difference)
else:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.lower().startswith(conversation.lower()):
hours_conversation(key, difference)
break
else:
print('Conversation not found.')
def hours_conversation(conversation, delta=0.0):
"""
Shows chart of average number of messages send
in specific conversation by hour throughout the day.
:param conversation: conversation id, or key from get_data() function
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
hours_plot(interval_count(conversation, lambda x: x.dt.hour, delta), delta)
def hours_chats(delta=0.0):
"""
Shows chart of average number of messages send
across all conversations by hour throughout the day.
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist()
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.hour, delta)
hours_plot(messages, delta)
def hours_plot(messages, delta):
"""
Shows chart of average number of messages
grouped by hour throughout the day.
:param messages: dictionary of number of messages
grouped by timeframe
:param delta: number of hours to time shift by
and show statistics differently
:return: None
"""
messages = pd.DataFrame(messages, index=[0])
print(messages.iloc[0].describe())
plt.bar(messages.columns, messages.iloc[0])
plt.xticks(list(range(24)), [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}'
for x in range(-(-math.floor(delta) % 24),
math.floor(delta) % 24 if math.floor(delta) % 24 != 0 else 24)], rotation=90)
plt.xlim(-1, 24)
plt.savefig('messages.pdf')
plt.show()
# Daily
def daily(difference, conversation=None):
"""
Shows chart of number of messages per day.
:param difference: number of hours to time shift by
and show statistics differently
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
if conversation is None:
daily_chats(difference)
else:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.lower().startswith(conversation.lower()):
daily_conversation(key, difference)
break
else:
print('Conversation not found.')
def daily_conversation(conversation, delta=0.0):
"""
Shows chart of number of messages per day
from the beginning of the conversation.
:param conversation: conversation id, or key from get_data() function
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
interval_plot(interval_count(conversation, lambda x: x.dt.date, delta))
def daily_chats(delta=0.0):
"""
Shows chart of number of messages per day
across all conversation.
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist() if
(x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.date, delta)
interval_plot(messages)
# Monthly (not working)
def monthly_conversation(conversation): # TODO not working charts for monthly
"""
Shows chart of number of messages per month.
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
interval_plot(interval_count(conversation, lambda x: x.dt.to_period("M").astype('datetime64[ns]')))
def monthly_chats():
"""
Shows chart of number of messages per month
across all conversation.
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist() if
(x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.to_period("M").astype('datetime64[ns]'))
interval_plot(messages)
# Yearly
def yearly(conversation=None):
"""
Shows chart of number of messages per year.
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
if conversation is None:
yearly_chats()
else:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.lower().startswith(conversation.lower()):
yearly_conversation(key)
break
else:
print('Conversation not found.')
def yearly_conversation(conversation):
"""
Shows chart of number of messages per year
from the beginning of the conversation.
:param conversation: conversation id, or key from get_data() function
:return: None
"""
interval_plot(interval_count(conversation, lambda x: x.dt.year))
def yearly_chats():
"""
Shows chart of number of messages per year
across all conversation.
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist()
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.year)
messages = pd.DataFrame(messages, index=[0])
print(messages.iloc[0].describe())
plt.bar(messages.columns, messages.iloc[0])
plt.savefig('messages.pdf')
plt.show()
if __name__=='__main__':
while True:
filename = input('Enter filename: ')
filename = f'file:///{filename}' if filename[1] == ':'\
else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
try:
source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
break
except URLError:
print('File not found, try again.')
while True:
user_input = input('>').split(' ')
if user_input[0] == 'exit':
break
if user_input[0] == '' or user_input[0] == 'count':
count_messages()
if user_input[0] == 'chars':
count_characters()
if user_input[0] == 'help' or user_input[0] == '?':
print('Messenger Counter available commands:')
print(' count - counts all messages and saves to messages.json')
print(' chars - counts all characters and saves to messages_chars.json')
print(' stats [conversation, -c] - displays statistics for counted messages')
print(' [detailed statistics for specific conversation, character statistics]')
print(' user [name] - detailed statistics for specific user')
print(' yearly [name] - yearly messages')
print(' [specific user]')
# print(' monthly [name, -d] - monthly messages (available soon)')
# print(' [specific user, day difference]')
print(' daily [name, -h] - daily messages')
print(' [specific user, hours difference]')
print(' hours [name, -h] - hour distribution of messages')
print(' [specific user, hours difference]')
print(' help - displays this help prompt')
print(' exit - exits the program')
if user_input[0] == 'stats':
if len(user_input) > 2 and user_input[2] == '-c':
try:
data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.startswith(user_input[1]):
characters_conversation_statistics(data, key)
break
else:
print('Conversation not found.')
except FileNotFoundError:
if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':
count_characters()
elif len(user_input) > 1 and not user_input[1] == '-c':
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.startswith(user_input[1]):
conversation_statistics(data, key)
break
else:
print('Conversation not found.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
elif len(user_input) > 1 and user_input[1] == '-c':
try:
data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read())
characters_statistics(data)
except FileNotFoundError:
if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':
count_characters()
else:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
messages_statistics(data)
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
if user_input[0] == 'user':
if len(user_input) > 1:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
data = pd.DataFrame(data).fillna(0).astype('int')
for key in data.index:
if key.startswith(' '.join(user_input[1:])):
user_statistics(data, key)
break
else:
print('Conversation not found.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
else:
print('Please specify user name.')
if user_input[0] == 'daily':
if len(user_input) > 1 and not user_input[1] == '-h':
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
if len(user_input) < 3:
daily_conversation(key)
else:
daily_conversation(key, float(user_input[2]))
break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
elif len(user_input) > 1 and user_input[1] == '-h':
daily_chats(float(user_input[2]))
else:
daily_chats()
if user_input[0] == 'monthly':
if len(user_input) > 1:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
                                monthly_conversation(key)
                                break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
else:
monthly_chats()
if user_input[0] == 'yearly':
if len(user_input) > 1:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
yearly_conversation(key)
break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
else:
yearly_chats()
if user_input[0] == 'hours':
if len(user_input) > 1 and not user_input[1] == '-h':
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
if len(user_input) < 3:
hours_conversation(key)
else:
hours_conversation(key, float(user_input[2]))
break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
elif len(user_input) > 1 and user_input[1] == '-h':
hours_chats(float(user_input[2]))
else:
hours_chats()
| 3.109375 | 3 |
tests/unit/selection/factories/test_expand_path_cfg.py | shane-breeze/AlphaTwirl | 0 | 12799443 | <filename>tests/unit/selection/factories/test_expand_path_cfg.py<gh_stars>0
# <NAME> <<EMAIL>>
import os
import sys
import pytest
from alphatwirl.selection.factories.expand import expand_path_cfg
from alphatwirl.selection.factories.factory import FactoryDispatcher
from alphatwirl.selection.modules.LambdaStr import LambdaStr
from alphatwirl.selection.modules import All, Any, Not
##__________________________________________________________________||
# path_cfg, expanded, obj
params = [
pytest.param(
'ev : ev.nJets[0] >= 2',
dict(
components=(),
factory='LambdaStrFactory',
lambda_str='ev : ev.nJets[0] >= 2',
),
LambdaStr(
name='ev : ev.nJets[0] >= 2',
lambda_str='ev : ev.nJets[0] >= 2',
),
id='string:lambda_str'
),
pytest.param(
'ev : ev.nJets[0] >= {n}',
dict(
components=(),
factory='LambdaStrFactory',
lambda_str='ev : ev.nJets[0] >= {n}',
),
LambdaStr(
name='ev : ev.nJets[0] >= 5242',
lambda_str='ev : ev.nJets[0] >= 5242',
),
id='string:lambda_str-not-formatted'
),
pytest.param(
dict(All=()),
{'factory': 'AllFactory', 'components': ()},
All(name='All', selections=[]),
id='dict-all-empty'
),
pytest.param(
dict(Any=()),
{'factory': 'AnyFactory', 'components': ()},
Any(name='Any', selections=[]),
id='dict-any-empty'
),
pytest.param(
dict(Any=(
'ev : ev.x[0] == 0',
dict(All=(
'ev : ev.x[0] >= 1',
'ev : ev.y[0] >= 100',
)),
dict(Not=dict(
Any=(
'ev : ev.z[0] == 0',
'ev : ev.w[0] >= 300',
),
)),
)),
dict(
factory='AnyFactory',
components=(
dict(
factory='LambdaStrFactory',
components=(),
lambda_str='ev : ev.x[0] == 0',
),
dict(
factory='AllFactory',
components=(
dict(
factory='LambdaStrFactory',
components=(),
lambda_str='ev : ev.x[0] >= 1',
),
dict(
factory='LambdaStrFactory',
components=(),
lambda_str='ev : ev.y[0] >= 100',
)
)
),
dict(
factory='NotFactory',
components=(
dict(
factory='AnyFactory',
components=(
dict(
factory='LambdaStrFactory',
components=(),
lambda_str='ev : ev.z[0] == 0'
),
dict(
factory='LambdaStrFactory',
components=(),
lambda_str='ev : ev.w[0] >= 300',
),
),
),
),
)
)
),
Any(
name='Any',
selections=[
LambdaStr(
name='ev : ev.x[0] == 0',
lambda_str='ev : ev.x[0] == 0'
),
All(
name='All',
selections=[
LambdaStr(
name='ev : ev.x[0] >= 1',
lambda_str='ev : ev.x[0] >= 1'),
LambdaStr(
name='ev : ev.y[0] >= 100',
lambda_str='ev : ev.y[0] >= 100')
]
),
Not(
name='Not',
selection=Any(
name='Any',
selections=[
LambdaStr(
name='ev : ev.z[0] == 0',
lambda_str='ev : ev.z[0] == 0'
),
LambdaStr(
name='ev : ev.w[0] >= 300',
lambda_str='ev : ev.w[0] >= 300'
)
]
)
)
]
),
id='example',
## marks=pytest.mark.skip(reason='not fully expanded')
),
]
@pytest.mark.parametrize('path_cfg, expected, _', params)
def test_expand_path_cfg(path_cfg, expected, _):
actual = expand_path_cfg(path_cfg=path_cfg)
assert expected == actual
# give expanded one
actual = expand_path_cfg(path_cfg=actual)
assert expected == actual
@pytest.mark.parametrize('path_cfg, _, expected', params)
def test_factory(path_cfg, _, expected):
kargs = dict(
AllClass=All, AnyClass=Any, NotClass=Not,
LambdaStrClass=LambdaStr,
n=5242,
)
obj = FactoryDispatcher(path_cfg=path_cfg, **kargs)
assert repr(expected) == repr(obj)
assert str(expected) == str(obj)
##__________________________________________________________________||
@pytest.mark.parametrize('path_cfg, error', [
pytest.param(
dict(All=(), Any=()), ValueError, id='multiple-vertices-All-Any'
),
pytest.param(
dict(All=(), Not=()), ValueError, id='multiple-vertices-All-Not'
),
pytest.param(
dict(Any=(), Not=()), ValueError, id='multiple-vertices-Any-Not'
),
pytest.param(
dict(), ValueError, id='empty-dict'
),
])
def test_expand_path_cfg_raise(path_cfg, error):
with pytest.raises(error):
expand_path_cfg(path_cfg=path_cfg)
##__________________________________________________________________||
| 2.15625 | 2 |
terbilang.py | Ellenn01/Tugas-pertemuan-12 | 0 | 12799444 | <reponame>Ellenn01/Tugas-pertemuan-12
import os
kata = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
def terbilang (n) :
if n < 10 :
return kata [n]
elif n >= 1_000_000_000 :
return terbilang (n // 1_000_000_000) + ' billion ' + (terbilang(n % 1_000_000_000) if n % 1_000_000_000 != 0 else '')
elif n >= 1_000_000 :
return terbilang (n // 1_000_000) + ' million ' + (terbilang(n % 1_000_000) if n % 1_000_000 != 0 else '')
elif n >= 1_000 :
if n // 1_000 == 1 :
            return 'one thousand' + ((' ' + terbilang(n % 1_000)) if n % 1_000 != 0 else '')
else :
            return terbilang (n // 1_000) + ' thousand' + ((' ' + terbilang(n % 1_000)) if n % 1_000 != 0 else '')
elif n >= 100 :
if n // 100 == 1 :
            return 'one hundred' + ((' ' + terbilang(n % 100)) if n % 100 != 0 else '')
else :
            return terbilang (n // 100) + ' hundred' + ((' ' + terbilang(n % 100)) if n % 100 != 0 else '')
    elif n >= 20 :
        # tens word plus the ones digit, if any
        puluhan = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
        return puluhan [n // 10] + ((' ' + terbilang(n % 10)) if n % 10 != 0 else '')
else :
if n == 10 :
return 'ten'
elif n == 11 :
return 'eleven'
elif n == 12 :
return 'twelve'
elif n == 13 :
return 'thirteen'
elif n == 14 :
return 'fourteen'
elif n == 15 :
return 'fifteen'
        elif n == 18 :
            return 'eighteen'
        else :
            return terbilang (n % 10) + 'teen'
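# Quick sanity examples (illustrative only, not from the original file):
#   terbilang(18)   -> 'eighteen'
#   terbilang(42)   -> 'forty two'
#   terbilang(1234) -> 'one thousand two hundred thirty four'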
while True :
os.system('cls')
try :
n = int(input('Number ? '))
print (f'{n:,} = {terbilang(n)}')
except :
        print('try again ...')
os.system('pause') | 3.75 | 4 |
graph_builder/parse_osm_xml.py | rkalz/MyBhamMap | 2 | 12799445 | import xml.etree.ElementTree as et
from objects.node import Node
from objects.way import Way
def extract_road(item, roads):
way_id = int(item.attrib['id'])
way = Way(way_id)
is_highway = False
for child in item:
if child.tag == "nd":
way.add_node(int(child.attrib['ref']))
elif child.tag == "tag":
key = child.attrib['k']
val = child.attrib['v']
if key == "name" or (key == "ref" and way.name is None):
way.name = val
elif key == "oneway":
way.is_one_way = val == "yes"
elif key == "highway":
is_highway = True
if way.name is not None and is_highway:
roads.append(way)
def extract_node(item, nodes):
node_id = int(item.attrib['id'])
node_lat = float(item.attrib['lat'])
node_lon = float(item.attrib['lon'])
node = Node(node_id, node_lat, node_lon)
    for child in item:
        # only <tag k="..." v="..."/> children carry the k/v attributes
        if child.tag == "tag":
            node.add_tag(child.attrib['k'], child.attrib['v'])
nodes[node_id] = node
def parse_osm_file(filename):
tree = et.parse(filename)
roads = []
nodes = dict()
for item in tree.iter():
if item.tag == "node":
extract_node(item, nodes)
elif item.tag == "way":
extract_road(item, roads)
return roads, nodes
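# Rough shape of the OSM XML this parser expects (illustrative only; a real
# export carries many more attributes and tags):
#   <node id="1" lat="33.50" lon="-86.80"/>
#   <way id="2">
#     <nd ref="1"/>
#     <tag k="highway" v="residential"/>
#     <tag k="name" v="Main Street"/>
#   </way>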
if __name__ == "__main__":
roads, nodes = parse_osm_file("../osm_birmingham.xml")
print(str(len(nodes)) + " nodes in dataset")
print(str(len(roads)) + " roads in dataset")
pass
| 2.75 | 3 |
code/sample_2-1-20.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 1 | 12799446 | <filename>code/sample_2-1-20.py
print(7/3)
| 0.980469 | 1 |
linearRegression/linearReg.py | zzw0929/deeplearning | 4 | 12799447 | <filename>linearRegression/linearReg.py
# coding: utf-8
import numpy as np
from numpy import *
import math
import sys
def loadDataSet(filename):
f = open(filename)
lines = f.readlines()
dataSet = []
labels = []
for i in lines:
i = i.strip()
cols = i.split("\t")
dataSet.append(list(map(lambda a:float(a), cols[0:-1])))
labels.append(float(cols[-1]))
f.close()
return dataSet, labels
def sigmoid(num):
return 1.0 / (1+math.exp(-num))
# loss function (binary cross-entropy); forecast => y_hat, real => label y
def loss(forecast, real):
    return -(real * math.log(forecast) + (1 - real) * math.log(1 - forecast))
def sigmoidOnEle(matrix):
m,n = shape(matrix)
arr = []
for i in range(m):
rows = []
for j in range(n):
sig = sigmoid(matrix[i,j])
rows.append(sig)
arr.append(rows)
return mat(arr)
"""
alpha: learning rate is 0.001
maxCycle: loop times is 500
matrix rule: martix_A's columns equals matrix_B's rows
"""
def linearReg(dataSet, labels):
dataSetMat = mat(dataSet)
labelMat = mat(labels).transpose()
alpha = 0.001
maxCycles = 1000
m,n = shape(dataSet)
weights = zeros((n,1))
b = 0
    for i in range(maxCycles):
        # forward pass: z = X.w + b for all m samples at once
        z = np.dot(dataSetMat, weights) + b
        # activation: y_hat = sigmoid(z), applied element-wise
        y_hat = sigmoidOnEle(z)
        error_matrix = y_hat - labelMat
        # gradients averaged over the m samples
        dw = (1.0 / m) * np.dot(dataSetMat.transpose(), error_matrix)
        weights = weights - alpha * dw
        db = (1.0 / m) * np.sum(error_matrix)
        b = b - alpha * db
    return weights, b
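# Minimal shape sketch (illustrative only, not part of the original script):
# linearReg() expects an m x n list of feature rows plus m labels, and returns
# an (n, 1) weight matrix and a scalar bias.
def _demo_linear_reg():
    toy_data = [[1.0, 2.1], [0.5, 0.3], [2.2, 1.9], [0.1, 0.4]]   # m = 4 samples, n = 2 features
    toy_labels = [1.0, 0.0, 1.0, 0.0]
    w, b = linearReg(toy_data, toy_labels)
    print(shape(w), b)   # -> (2, 1) and a float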
if __name__ == '__main__':
filename = sys.argv[1]
dataSet, labels = loadDataSet(filename)
w,b= linearReg(dataSet, labels)
print(w)
print(b)
# some test
#test_data = mat(dataSet[0:10])
#test_labels = mat(labels[0:10])
#y_hat = np.dot(test_data,w) + b
| 3.1875 | 3 |
cadishi/dict_util.py | bio-phys/cadishi | 14 | 12799448 | <filename>cadishi/dict_util.py
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Cadishi --- CAlculation of DIStance HIstograms
#
# Copyright (c) <NAME>, <NAME>
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the MIT License, see the file LICENSE.txt.
"""Various NumPy- and dictionary-related utilities.
Implements add, append, and scale operations for numerical data (ie. NumPy
arrays) stored in dictionaries. In addition, an ASCII output routine is
provided.
"""
import copy
import numpy as np
import json
from . import util
def sum_values(X, Y, skip_keys=['radii', 'frame']):
"""Implement X += Y where X and Y are Python dictionaries (with string keys)
that contain summable data types.
The operation is applied to X for any value in Y, excluding keys that are in
the list skip_keys.
Typically, the values of X, Y are NumPy arrays (e.g. histograms) that are summed.
Parameters
----------
X : dict
X is a dictionary with string keys that contains NumPy arrays.
Y : dict
Y is a dictionary with string keys that contains NumPy arrays.
skip_keys : list of strings
skip_keys is a list of strings for which the sum operation is skipped.
Returns
-------
None
The function sum_values operates on X directly
and does not return anything.
"""
assert isinstance(X, dict)
assert isinstance(Y, dict)
for key in list(Y.keys()):
if key in skip_keys:
continue
if key not in X:
X[key] = copy.deepcopy(Y[key])
else:
X[key] += Y[key]
def scale_values(X, C, skip_keys=['radii', 'frame']):
"""Implement X = X times C where X is a Python dictionary that contains supported
data types.
The operation is applied to any value in X, excluding keys that are in the
list skip_keys.
Typically, the values of X are NumPy arrays (histograms) that are rescaled
after summation using a scalar C (e.g. to implement averaging operation).
Parameters
----------
X : dict
X is a dictionary with string keys that contains NumPy arrays.
C : scalar, NumPy array
C is a multiplier, either a scalar of a NumPy array of size compatible
with the contents of X.
skip_keys : list of strings
skip_keys is a list of strings for which the sum operation is skipped.
Returns
-------
None
The function scale_values operates on X directly
and does not return anything.
"""
assert isinstance(X, dict)
for key in list(X.keys()):
if key in skip_keys:
continue
X[key] *= C
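# Illustrative sketch (not part of the original module): accumulate two per-frame
# histogram dictionaries and average them with sum_values()/scale_values().
# The key name 'X.X' is only a placeholder for a species-pair label.
def _example_accumulate_and_average():
    acc = {}
    frame_a = {'X.X': np.array([1.0, 2.0, 0.0]), 'radii': np.array([0.1, 0.2, 0.3])}
    frame_b = {'X.X': np.array([0.0, 1.0, 3.0]), 'radii': np.array([0.1, 0.2, 0.3])}
    sum_values(acc, frame_a)
    sum_values(acc, frame_b)   # 'radii' is in skip_keys and therefore not summed
    scale_values(acc, 0.5)     # average over the two frames
    return acc                 # {'X.X': array([0.5, 1.5, 1.5])}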
def append_values(X, Y, skip_keys=['radii']):
"""Implement X.append(Y) where X and Y are Python dictionaries that contain
NumPy data types. The operation is applied to X for any value in Y,
excluding keys that are in the list skip_keys. Typically, the values of X,
Y are NumPy arrays (e.g. particle numbers) that are appended.
Parameters
----------
X : dict
X is a dictionary with string keys that contains NumPy arrays.
Y : dict
Y is a dictionary with string keys that contains NumPy arrays.
skip_keys : list of strings
skip_keys is a list of strings for which the append operation is skipped.
Returns
-------
None
The function scale_values operates on X directly
and does not return anything.
"""
assert isinstance(X, dict)
assert isinstance(Y, dict)
for key in list(Y.keys()):
if key in skip_keys:
continue
if key not in X:
X[key] = copy.deepcopy(Y[key])
else:
X[key] = np.append(X[key], Y[key])
def write_dict(dic, path, level=0):
"""Write a dictionary containing NumPy arrays or other Python data
structures to text files. In case the dictionary contains other
dictionaries, the function is called recursively. The keys should
be strings to guarantee successful operation.
Parameters
----------
dic : dictionary
A dictionary containing NumPy arrays or other Python data structures.
path : string
Path where the dictionary and its data shall be written to.
level : int, optional
Level in the nested-dictionary hierarchy during recursive operation.
This parameter was added for debugging purposes and does not have any
practical relevance.
Returns
-------
None
The function write_dict does not return anything.
"""
np_keys = []
py_keys = []
for key in list(dic.keys()):
val = dic[key]
if isinstance(val, dict):
_path = path + '/' + key
_level = level + 1
write_dict(val, _path, _level)
else:
if isinstance(val, np.ndarray):
np_keys.append(key)
else:
py_keys.append(key)
# ---
np_keys.sort()
py_keys.sort()
# --- (1) save NumPy arrays to text files
rad = 'radii'
if rad in np_keys:
np_keys.remove(rad)
np_keys.insert(0, rad)
# ---
np_all_1d = True
for key in np_keys:
val = dic[key]
if (len(val.shape) > 1):
np_all_1d = False
break
if (len(np_keys) > 0):
if np_all_1d:
# --- concatenate arrays into a 2d array
val = dic[np_keys[0]]
n_row = val.shape[0]
n_col = len(np_keys)
arr = np.zeros([n_row, n_col])
for idx, key in enumerate(np_keys):
arr[:, idx] = (dic[key])[:]
# --- build header
if rad in np_keys:
np_keys.remove(rad)
header = '#'
for key in np_keys:
header = header + ' ' + key
# --- dump data
util.savetxtHeader(path + '.dat', header, arr)
else:
# --- we save arrays with more than one dimension separately
for key in np_keys:
arr = dic[key]
# --- dump data
util.savetxtHeader(path + '/' + key + '.dat', '# ' + key, arr)
# --- (2) for robustness, save any other Python data to JSON text files
if (len(py_keys) > 0):
for key in py_keys:
file_name = path + '/' + key + '.json'
util.md(file_name)
with open(file_name, "w") as fp:
json.dump(dic[key], fp, indent=4, sort_keys=True)
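# Illustrative layout (not from the original module): for a nested dictionary like
#     {'histograms': {'radii': np.zeros(8), 'X.X': np.zeros(8)}, 'meta': {'n_frames': 100}}
# a call write_dict(data, 'output/run0') would produce roughly
#     output/run0/histograms.dat     (radii and X.X as columns of one text file)
#     output/run0/meta/n_frames.json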
| 3.3125 | 3 |
tableview/__init__.py | ryansturmer/tableview | 1 | 12799449 | <reponame>ryansturmer/tableview<gh_stars>1-10
from core import (TableView, load, loads, __version__)
| 0.832031 | 1 |
grocercheck/scripts/lazy_coding_scripts/generateCodeForModel.py | andy0liang/GrocerCheck | 0 | 12799450 | <filename>grocercheck/scripts/lazy_coding_scripts/generateCodeForModel.py
days = ["mon",'tue','wed','thu','fri','sat','sun']
for day in days:
for i in range(0,24):
if(i<10):
i = '0'+str(i)
else:
i = str(i)
print(day+i+' = models.IntegerField(null=True)')
| 2.796875 | 3 |