id (string, length 1–7) | text (string, length 6–1.03M) | dataset_id (1 class)
---|---|---|
1601937
|
<filename>sumu/gadget.py<gh_stars>0
"""The module implements the algorithm Gadget as first detailed in
:footcite:`viinikka:2020a`.
Limitations:
The computations rely heavily on bitwise operations, which for
reasons of efficiency have been implemented using primitive data
types (i.e., uint64_t). In the current version this sets a hard
limit on the maximum number of variables in the data at 256.
"""
import sys
import os
import time
import pprint
import numpy as np
from .weight_sum import weight_sum
from .mcmc import PartitionMCMC, MC3
from .utils.bitmap import bm, bm_to_ints, bm_to_pyint_chunks, bms_to_np64, np64_to_bm
from .utils.io import read_candidates, get_n, pretty_dict, pretty_title
from .utils.math_utils import log_minus_exp, close, comb, subsets
from .scorer import BDeu, BGe
from .CandidateRestrictedScore import CandidateRestrictedScore
from .DAGR import DAGR as DAGR_precompute
from .candidates import candidate_parent_algorithm
from .stats import stats
# default parameter values used by multiple classes
default = {
"score": lambda discrete:
{"name": "bdeu", "ess": 10} if discrete else {"name": "bge"},
"prior": {"name": "fair"},
"max_id": -1,
"K": lambda n: min(n-1, 16),
"d": lambda n: min(n-1, 3),
"cp_algo": "greedy-lite",
"mc3": 16,
"cc_tolerance": 2**-32,
"cc_cache_size": 10**7,
"pruning_eps": 0.001,
"logfile": sys.stdout,
"silent": False,
"stats_period": 15
}
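# Example of how the callable defaults resolve (illustrative): for discrete
# data with n = 30 variables,
#   default["score"](True) -> {"name": "bdeu", "ess": 10}
#   default["K"](30)       -> 16   (min(30 - 1, 16))
#   default["d"](30)       -> 3    (min(30 - 1, 3))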
from glmnet import ElasticNet
from copy import deepcopy
class Data:
"""Class for holding data.
Assumes the input data is either discrete or continuous. In the
discrete case the first row of the data can optionally hold the
arities of the variables, or they can be inferred as the number of
distinct values in the data columns. In the continuous case all
rows represent data. All variable name handling should be managed
separately, somewhere else.
The data can be input as either a path to a space delimited csv
file, a numpy array or a object of type Data (in which case a new
object is created pointing to same data).
"""
def __init__(self, data_or_path, arities=False):
self.arities = arities
if type(data_or_path) == Data:
self.data = data_or_path.data
self.discrete = data_or_path.discrete
self.arities = data_or_path.arities
return
self.data = data_or_path
path = type(self.data) == str
if path:
with open(self.data) as f:
discrete = '.' not in f.read()
else: # numpy data
discrete = self.data.dtype != np.float64
self.discrete = discrete
if discrete:
if path:
self.data = np.loadtxt(data_or_path, dtype=np.int32, delimiter=' ')
if arities:
self.arities = self.data[0]
self.data = self.data[1:]
else: # Continuous
if path:
self.data = np.loadtxt(data_or_path, dtype=np.float64, delimiter=' ')
@property
def n(self):
return self.data.shape[1]
@property
def N(self):
return self.data.shape[0]
def all(self):
# This is to simplify passing data to R
data = self.data
if self.arities is not False:
arities = np.reshape(self.arities, (-1, self.n))
data = np.append(arities, data, axis=0)
return data
class Gadget():
def __init__(self, *, data, array, score=None, prior=default["prior"],
max_id=default["max_id"], K=None, d=None,
cp_algo=default["cp_algo"], cp_path=None,
mc3=default["mc3"],
burn_in, iterations, thinning,
cc_tolerance=default["cc_tolerance"],
cc_cache_size=default["cc_cache_size"],
pruning_eps=default["pruning_eps"],
logfile=default["logfile"],
stats_period=default["stats_period"]):
self.data = Data(data)
self.array = array
self.iterations = iterations
if score is None:
score = default["score"](self.data.discrete)
if K is None:
K = default["K"](self.data.n)
if d is None:
d = default["d"](self.data.n)
self.params = {
"score": score,
"prior": prior,
"maxid": max_id,
"K": K,
"d": d,
"cp_algo": cp_algo,
"cp_path": cp_path,
"mc3": mc3,
"burn_in": burn_in,
"iterations": iterations,
"thinning": thinning,
"cc_tolerance": cc_tolerance,
"cc_cache_size": cc_cache_size,
"pruning_eps": pruning_eps,
"stats_period": stats_period
}
self._silent = default["silent"]
# No output.
if logfile is None:
self._silent = True
self._logfile = open(os.devnull, "w")
self._logfilename = ""
# Output to file.
elif type(logfile) == str:
self._logfile = open(logfile, "a")
self._logfilename = self._logfile.name
# Output to stdout.
else:
self._logfile = logfile
self._logfilename = ""
self._outputwidth = max(80, 6+12+6*mc3-1)
def _param(self, *params):
# Utility to simplify passing parameters
return {k: self.params[k] for k in params}
def sample(self):
if self._logfile:
print(pretty_title("1. RUN PARAMETERS", 0,
self._outputwidth), file=self._logfile)
print(pretty_dict(self.params), file=self._logfile)
print(pretty_title("2. FINDING CANDIDATE PARENTS", 2,
self._outputwidth), file=self._logfile)
self._logfile.flush()
stats["t"]["C"] = time.time()
self._find_candidate_parents()
print("Step 0")
stats["t"]["C"] = time.time() - stats["t"]["C"]
if self._logfile:
#print(pretty_dict(self.C), file=self._logfile)
print(pprint.pformat(self.C) + "\n", file=self._logfile)
print("time used: {}s\n".format(round(stats["t"]["C"])), file=self._logfile)
print(pretty_title("3. PRECOMPUTING SCORING STRUCTURES FOR CANDIDATE PARENT SETS", 2,
self._outputwidth), file=self._logfile)
self._logfile.flush()
stats["t"]["crscore"] = time.time()
self._precompute_scores_for_all_candidate_psets()
print("Step 1")
self._precompute_candidate_restricted_scoring()
print("Step 2")
self._precompute_candidate_complement_scoring()
print("Step 3")
stats["t"]["crscore"] = time.time() - stats["t"]["crscore"]
if self._logfile:
print("time used: {}s\n".format(round(stats["t"]["crscore"])),
file=self._logfile)
print(pretty_title("4. PRECOMPUTING SCORING STRUCTURES FOR COMPLEMENTARY PARENT SETS", 2,
self._outputwidth), file=self._logfile)
self._logfile.flush()
stats["t"]["ccscore"] = time.time()
self._precompute_candidate_complement_scoring()
stats["t"]["ccscore"] = time.time() - stats["t"]["ccscore"]
if self._logfile:
print("time used: {}s\n".format(round(stats["t"]["ccscore"])),
file=self._logfile)
print(pretty_title("5. RUNNING MCMC", 2, self._outputwidth),
file=self._logfile)
self._logfile.flush()
stats["t"]["mcmc"] = time.time()
self._init_mcmc()
print("Step 4")
self._run_mcmc()
print("Step 5")
return self # .generate_final_dag()
def TL(self,x,y,pen_bic,pen_gic):
m = ElasticNet()
if len(x.shape)<2:
x = x.reshape((x.shape[0], 1))
m = m.fit(x, y)
betas = m.coef_path_
intercepts = m.intercept_path_
BIC = np.inf
for i in range(betas.shape[1]):
RSS = np.sum((y-np.matmul(x,betas[:,i])-intercepts[i])**2)
k = np.sum(betas[:,i]!=0)
BIC_new = RSS+pen_bic*k
if BIC_new < BIC :
BIC = BIC_new
beta_bic = betas[:,i]
intercept = intercepts[i]
thresholds = beta_bic[beta_bic>0]
thresholds.sort()
beta_gic = np.zeros_like(beta_bic)
GIC = np.inf
for delta in thresholds:
beta_thres = deepcopy(beta_bic)
beta_thres[beta_thres< delta] = 0
RSS = np.sum((y-np.matmul(x,beta_thres)-intercept)**2)
k = np.sum(beta_thres!=0)
GIC_new = RSS+pen_gic*k
if GIC_new < GIC :
GIC = GIC_new
beta_gic = beta_thres
return beta_gic,intercept
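# TL is a two-stage thresholded-lasso fit: BIC selects a point on the
# ElasticNet path, then GIC thresholds small coefficients away. A hedged
# usage sketch (the penalty choices below are illustrative assumptions, not
# values prescribed by this module):
#   N = x.shape[0]
#   beta, b0 = self.TL(x, y, pen_bic=np.log(N), pen_gic=np.log(np.log(N)) * np.log(x.shape[1]))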
def _find_candidate_parents(self):
self.l_score = LocalScore(data=self.data,
**self._param("score", "maxid"))
if self.params["cp_path"] is None:
self.C = candidate_parent_algorithm[self.params["cp_algo"]](self.params["K"],
n=self.data.n,
scores=self.l_score,
data=self.data)
else:
self.C = read_candidates(self.params["cp_path"])
# TODO: Use this everywhere instead of the dict
self.C_array = np.empty((self.data.n, self.params["K"]), dtype=np.int32)
for v in self.C:
self.C_array[v] = np.array(self.C[v])
def _precompute_scores_for_all_candidate_psets(self):
self.score_array = self.l_score.all_candidate_restricted_scores(self.C_array)
def _precompute_candidate_restricted_scoring(self):
self.c_r_score = CandidateRestrictedScore(score_array=self.score_array,
C=self.C_array,
**self._param("K",
"cc_tolerance",
"cc_cache_size",
"pruning_eps"),
logfile=self._logfilename,
silent=self._silent)
def _precompute_candidate_complement_scoring(self):
self.c_c_score = None
if self.params["K"] < self.data.n - 1:
# NOTE: CandidateComplementScore gives error if K >= n-1.
self.l_score = LocalScore(data=self.data,
score=self.params["score"],
maxid=self.params["d"])
self.c_c_score = CandidateComplementScore(self.C, self.l_score, self.params["d"])
del self.l_score
def _init_mcmc(self):
self.score = Score(C=self.C,
score_array=self.score_array,
c_r_score=self.c_r_score,
c_c_score=self.c_c_score)
if self.params["mc3"] > 1:
self.mcmc = MC3([PartitionMCMC(self.C, self.score, self.params["d"],
temperature=i/(self.params["mc3"]-1))
for i in range(self.params["mc3"])])
else:
self.mcmc = PartitionMCMC(self.C, self.score, self.params["d"])
def _run_mcmc(self):
msg_tmpl = "{:<5.5} {:<12.12}" + " {:<5.5}"*self.params["mc3"]
temps = list(stats["mcmc"].keys())[::-1]
temps_labels = [round(t, 2) for t in temps]
moves = stats["mcmc"][1.0].keys()
def print_stats_title():
msg = "Cumulative acceptance probability by move and inverse temperature.\n\n"
msg += msg_tmpl.format("%", "move", *temps_labels)
msg += "\n"+"-"*self._outputwidth
print(msg, file=self._logfile)
self._logfile.flush()
def print_stats(i, header=False):
if header:
print_stats_title()
p = round(100*i/(self.params["burn_in"] + self.params["iterations"]))
p = str(p)
for m in moves:
ar = [stats["mcmc"][t][m]["accep_ratio"] for t in temps]
ar = [round(r,2) if type(r) == float else "" for r in ar]
msg = msg_tmpl.format(p, m, *ar)
print(msg, file=self._logfile)
if self.params["mc3"] > 1:
ar = stats["mc3"]["accepted"] / stats["mc3"]["proposed"]
ar = [round(r, 2) for r in ar] + [0.0]
msg = msg_tmpl.format(p, "MC^3", *ar)
print(msg, file=self._logfile)
print(file=self._logfile)
self._logfile.flush()
timer = time.time()
header = True
for i in range(self.params["burn_in"]):
self.mcmc.sample()
self.Rs = self.mcmc.sample()
for i in range(self.iterations):
temp_mcmc = self.mcmc.sample()
if temp_mcmc[1] > self.Rs[1]:
self.Rs = temp_mcmc
self.Rs = [self.Rs]
def generate_final_dag(self,pen_bic,pen_gic):
dag = self.Rs[0][0]
arr= self.array
previous_parent = list(dag[0])
final_dag = dict()
intercept = dict()
for i in range(1, len(dag)):
for j in dag[i]:
# code for regression
y = arr[:, j]
x = np.zeros((arr.shape[0], len(previous_parent)+1))
for col in range(len(previous_parent)):
x[:, col] = arr[:, previous_parent[col]]
#x[:,len(previous_parent)] = 1
# m = ElasticNet()
# m = m.fit(x, y)
### Tresholded LASSO
# print(m.coef_path_)
beta,inter = self.TL(x,y,pen_bic,pen_gic)
final_dag[j] = list()
intercept[j] = inter
for v in range(len(previous_parent)):
if beta[v] != 0:
final_dag[j].append((previous_parent[v], beta[v])) ## If intercept is 0 and changes order
# print(previous_parent, j)
previous_parent = list(set(previous_parent).union(dag[i]))
return final_dag,intercept
class LocalScore:
"""Class for computing local scores given input data.
Implemented scores are BDeu and BGe. The scores by default use the "fair"
modular structure prior :cite:`eggeling:2019`.
"""
def __init__(self, *, data, score=None, prior=default["prior"], maxid=default["max_id"]):
self.data = Data(data)
self.score = score
if score is None:
self.score = default["score"](self.data.discrete)
self.prior = prior
self.priorf = {"fair": self._prior_fair,
"unif": self._prior_unif}
self.maxid = maxid
self._precompute_prior()
if self.score["name"] == "bdeu":
self.scorer = BDeu(data=self.data.data,
maxid=self.maxid,
ess=self.score["ess"])
elif self.score["name"] == "bge":
self.scorer = BGe(data=self.data,
maxid=self.maxid)
def _prior_fair(self, indegree):
return self._prior[indegree]
def _prior_unif(self, indegree):
return 0
def _precompute_prior(self):
if self.prior["name"] == "fair":
self._prior = np.zeros(self.data.n)
self._prior = -np.array(list(map(np.log, [float(comb(self.data.n - 1, k))
for k in range(self.data.n)])))
def local(self, v, pset):
"""Local score for input node v and pset, with score function self.scoref.
This is the "safe" version, raising error if queried with invalid input.
The unsafe self._local will just segfault.
"""
if v in pset:
raise IndexError("Attempting to query score for (v, pset) where v \in pset")
# Because min() will raise error with empty pset
if v in range(self.data.n) and len(pset) == 0:
return self._local(v, pset)
if min(v, min(pset)) < 0 or max(v, max(pset)) >= self.data.n:
raise IndexError("Attempting to query score for (v, pset) where some variables don't exist in data")
return self._local(v, pset)
def _local(self, v, pset):
# NOTE: How expensive are nested function calls?
return self.scorer.local(v, pset) + self.priorf[self.prior["name"]](len(pset))
def clear_cache(self):
self.scorer.clear_cache()
def complementary_scores_dict(self, C, d):
"""C candidates, d indegree for complement psets"""
cscores = dict()
for v in C:
cscores[v] = dict()
for pset in subsets([u for u in C if u != v], 1, d):
if not (set(pset)).issubset(C[v]):
cscores[v][pset] = self._local(v, np.array(pset))
return cscores
def all_candidate_restricted_scores(self, C=None):
if C is None:
C = np.array([np.array([j for j in range(self.data.n) if j != i])
for i in range(self.data.n)], dtype=np.int32)
prior = np.array([bin(i).count("1") for i in range(2**len(C[0]))])
prior = np.array(list(map(lambda k: self.priorf[self.prior["name"]](k), prior)))
return self.scorer.all_candidate_restricted_scores(C) + prior
def all_scores_dict(self, C=None):
# NOTE: Not used in Gadget pipeline, but useful for example
# when computing input data for aps.
scores = dict()
if C is None:
C = {v: tuple(sorted(set(range(self.data.n)).difference({v}))) for v in range(self.data.n)}
for v in C:
tmp = dict()
for pset in subsets(C[v], 0, len(C[v]) if self.maxid == -1 else self.maxid):
tmp[pset] = self._local(v, np.array(pset))
scores[v] = tmp
return scores
class Score: # should be renamed to e.g. ScoreHandler
def __init__(self, *, C, score_array,
c_r_score, c_c_score):
self.C = C
self.n = len(self.C)
self.score_array = score_array
self.c_r_score = c_r_score
self.c_c_score = c_c_score
def sum(self, v, U, T=set()):
"""Returns the sum of scores for node v over the parent sets that
1. are subsets of U;
2. and, if T is not empty, have at least one member in T.
The sum is computed over first the scores restricted to candidate
parents (self.C), and then the result is augmented by scores
complementary to those restricted to the candidate parents, until
some predefined level of error.
Args:
v (int): Label of the node whose local scores are summed.
U (set): Parent sets of scores to be summed are the subsets of U.
T (set): Parent sets must have at least one member in T (if T is not empty).
Returns:
Sum of scores (float).
"""
U_bm = bm(U.intersection(self.C[v]), ix=self.C[v])
# T_bm can be 0 if T is empty or does not intersect C[v]
T_bm = bm(T.intersection(self.C[v]), ix=self.C[v])
if len(T) > 0:
if T_bm == 0:
W_prime = -float("inf")
else:
W_prime = self.c_r_score.sum(v, U_bm, T_bm)
else:
W_prime = self.c_r_score.sum(v, U_bm)
if self.c_c_score is None or U.issubset(self.C[v]):
return W_prime
if len(T) > 0:
return self.c_c_score.sum(v, U, T, W_prime)#[0]
else:
# empty pset handled in c_r_score
return self.c_c_score.sum(v, U, U, W_prime)#[0]
def sample_pset(self, v, U, T=set()):
U_bm = bm(U.intersection(self.C[v]), ix=self.C[v])
T_bm = bm(T.intersection(self.C[v]), ix=self.C[v])
if len(T) > 0:
if T_bm == 0:
w_crs = -float("inf")
else:
w_crs = self.c_r_score.sum(v, U_bm, T_bm)
else:
w_crs = self.c_r_score.sum(v, U_bm)
w_ccs = -float("inf")
if self.c_c_score is not None and not U.issubset(self.C[v]):
if len(T) > 0:
w_ccs, contribs = self.c_c_score.sum(v, U, T, -float("inf"), contribs=True)
else:
# Empty pset is handled in c_r_score
w_ccs, contribs = self.c_c_score.sum(v, U, U, -float("inf"), contribs=True)
if -np.random.exponential() < w_crs - np.logaddexp(w_ccs, w_crs):
# Sampling from candidate psets.
pset = self.c_r_score.sample_pset(v, U_bm, T_bm, -np.random.exponential())
family_score = self.score_array[v][pset]
family = (v, set(self.C[v][i] for i in bm_to_ints(pset)))
else:
# Sampling from complement psets.
p = np.array([self.c_c_score.ordered_scores[v][j]
for j in contribs])
p = np.exp(p - w_ccs)
j = np.random.choice(contribs, p=p)
pset = np64_to_bm(self.c_c_score.ordered_psets[v][j])
family_score = self.c_c_score.ordered_scores[v][j]
family = (v, set(bm_to_ints(pset)))
return family, family_score
def sample_DAG(self, R):
DAG = list()
DAG_score = 0
for v in range(self.n):
for i in range(len(R)):
if v in R[i]:
break
if i == 0:
family = (v, set())
family_score = self.score_array[v][0]
else:
U = set().union(*R[:i])
T = R[i-1]
family, family_score = self.sample_pset(v, U, T)
DAG.append(family)
DAG_score += family_score
return DAG, DAG_score
class CandidateComplementScore:
"""For computing the local score sum complementary to those obtained from :py:class:`.old_CandidateRestrictedScore` and constrained by maximum indegree.
"""
def __init__(self, C, scores, d):
self.C = C
self.n = len(C)
self.d = d
self.minwidth = (self.n - 1) // 64 + 1
# object of class LocalScore
scores = scores.complementary_scores_dict(C, d)
ordered_psets = dict()
ordered_scores = dict()
for v in scores:
ordered_scores[v] = sorted(scores[v].items(), key=lambda item: item[1], reverse=True)
ordered_psets[v] = bms_to_np64([bm(item[0]) for item in ordered_scores[v]], minwidth=self.minwidth)
ordered_scores[v] = np.array([item[1] for item in ordered_scores[v]], dtype=np.float64)
self.ordered_psets = ordered_psets
self.ordered_scores = ordered_scores
if self.d == 1:
self.pset_to_idx = dict()
for v in scores:
# wrong if over 64 variables?
# ordered_psets[v] = ordered_psets[v].flatten()
ordered_psets[v] = [np64_to_bm(pset) for pset in ordered_psets[v]]
self.pset_to_idx[v] = dict()
for i, pset in enumerate(ordered_psets[v]):
self.pset_to_idx[v][pset] = i
self.t_ub = np.zeros(shape=(self.n, self.n), dtype=np.int32)
for u in range(1, self.n+1):
for t in range(1, u+1):
self.t_ub[u-1][t-1] = self.n_valids_ub(u, t)
def n_valids(self, v, U, T):
n = 0
for k in range(1, self.d+1):
n += comb(len(U), k) - comb(len(U.intersection(self.C[v])), k)
n -= comb(len(U.difference(T)), k) - comb(len(U.difference(T).intersection(self.C[v])), k)
return n
def n_valids_ub(self, u, t):
n = 0
for k in range(self.d+1):
n += comb(u, k) - comb(u - t, k)
return n
def sum(self, v, U, T, W_prime, debug=False, contribs=False):
# NOTE: This is the final score calculation.
if self.d == 1: # special case
pset_idxs = list()
w_contribs = list()
for u in T:
if u not in self.C[v]:
pset_idx = self.pset_to_idx[v][bm(u)]
pset_idxs.append(pset_idx)
w_contribs.append(self.ordered_scores[v][pset_idx])
w_contribs.append(W_prime)
W_sum = np.logaddexp.reduce(w_contribs)
else:
if self.n <= 64:
U_bm = bm(U)
T_bm = bm(T)
else:
U_bm = bm_to_pyint_chunks(bm(U), self.minwidth)
T_bm = bm_to_pyint_chunks(bm(T), self.minwidth)
W_sum = weight_sum(w=W_prime,
psets=self.ordered_psets[v],
weights=self.ordered_scores[v],
n=self.n,
U=U_bm,
T=T_bm,
t_ub=int(self.t_ub[len(U)][len(T)]),
contribs=contribs)
if contribs is True:
W_sum, pset_idxs = W_sum
if contribs is True:
return W_sum, pset_idxs
return W_sum
|
StarcoderdataPython
|
3257741
|
from .pdf_reports import pug_to_html, write_report, EGF_LOGO_URL
|
StarcoderdataPython
|
1684871
|
<filename>K64F Python Interfacing Testing/V2_Serial_Read.py
import numpy as np
import serial.tools.list_ports as port_list
import serial
def List_All_Mbed_USB_Devices(Baudrate = 115200):
ports = list(port_list.comports())
Num_Serial_Devices = len(ports)
Num_Mbed_Devices = 0
COM_PORTS = []
connectionType = [] # Create a unique value for USB K64F devices which trigger new functions
# Say 11 = mbed USB, 10 = mbed ANY, 12 = mbed TCP, 14 = mbed WIFI
VID_PID = [] # USB VID:PID are the Vendor/Product ID respectively - same for each K64F Board? - You can determine HIC ID from last 8 digits of Serial Number?
# Note that the 0240 at the start of Serial Number refers to the K64F Family
ID_USB = [] # ID_USB will be the USB serial number - should be unique
Baud_Rate = [] # For now assume all operate at 9600 - this may change later, so it might need updating
# IP = [] # Don't think we need this for USB Serial(Mbed) devices
if Num_Serial_Devices > 0:
for i in range(Num_Serial_Devices):
COM_Port = ports[i].usb_description() # ports[i].device outputs COM_PORT (Note port[i][0][0:16] is a particular device - port[i][0] is the COM Port of the device)
if(ports[i][1].startswith("mbed Serial Port") or ports[i][1].startswith("USB Serial Device")): # port[i] is a particular device - port[i][1] is the description of the device - port[i][1][0:16] are the characters containing the mbed Serial Port description
try:
Serial_device = serial.Serial(port=COM_Port, baudrate=Baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
except:
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
# How can we/Do we need to check we have actually connected to device - and that it is meant to be used for what we are using it for
if(not Serial_device.readable()):
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
Num_Mbed_Devices += 1
COM_PORTS.append(COM_Port)
USB_INFO = ports[i].usb_info().split('=') # USB-PID should be Unique
USB_VIDPID = USB_INFO[1].split(' ')[0]
VID_PID.append(USB_VIDPID)
USB_Serial_Number = USB_INFO[2].split(' ')[0]
ID_USB.append(USB_Serial_Number)
connectionType.append(11) # Added 10 onto definitions used by LJM library to avoid mixing up - however can change if confusing
Serial_device.close() # Close COM Port communication once info obtained
return(Num_Mbed_Devices, COM_PORTS, connectionType, ID_USB, VID_PID)
def _Serial_Read_Raw_Bytes(Serial_Device, Expected_Bytes):
serialBytes = "" # Used to hold data coming over UART
serialBytes = Serial_Device.read(size=Expected_Bytes)
if(len(serialBytes) != Expected_Bytes):
print("Error", serialBytes)
else:
return serialBytes
def _Decode_Raw_ADC(input, Num_Channels = 8):
data = []
if(len(input) != Num_Channels * 2):
print("Erorr with provided Raw ADC Data", len(input), (Num_Channels * 2))
else:
for x in range(Num_Channels):
data.append(np.int16(int.from_bytes(input[2*x : 2*x + 2], 'little')))
return data
def _Convert_ADC_Raw(Raw_Reading, ADC_Resolution, Max_Min_Voltage):
quant_step = (2 * Max_Min_Voltage) / (2**ADC_Resolution)
return Raw_Reading * quant_step
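# Worked example of the conversion above (values are illustrative): with
# ADC_Resolution = 16 and Max_Min_Voltage = 5, quant_step = 10 / 65536
# (about 152.6 uV), so a raw two's-complement reading of 16384 maps to
# roughly 2.5 V.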
def Read_ADC_Voltage(input, Num_Channels = 8, ADC_Resolution = 16, Max_Min_Voltage = 5):
Voltages = []
Raw_2s_comp_values = _Decode_Raw_ADC(input, Num_Channels)
# If invalid input: Raw_2s_comp_values will be an empty list, so return value will also be empty
for x in Raw_2s_comp_values:
Voltages.append(_Convert_ADC_Raw(x, ADC_Resolution, Max_Min_Voltage))
return Voltages
def Read_Time_In_Secs(input, Num_Bytes = 2):
if(len(input) != Num_Bytes):
print("Erorr with provided RTC Data")
else:
return np.uint32(int.from_bytes(input, 'little'))
def Read_Compressed_PWM_Duty(input, Num_Channels = 6):
Duty_Cycles = []
if(len(input) != Num_Channels):
print("Error with PWM data")
else:
for x in input: # iterating over a bytes object yields ints in Python 3
Duty_Cycles.append(x)
return Duty_Cycles
def Read_Sample(Serial_Device, Expected_Bytes):
Raw_Data = _Serial_Read_Raw_Bytes(Serial_Device, Expected_Bytes)
Time = Read_Time_In_Secs(Raw_Data[0:4], 4)
Voltages = Read_ADC_Voltage(Raw_Data[4:20], 8)
PWM_Duties = Read_Compressed_PWM_Duty(Raw_Data[20:26], 6)
return Time, Voltages, PWM_Duties
# Testing
if __name__ == "__main__":
Serial_Baudrate = 9600
mbed_USB_info = List_All_Mbed_USB_Devices(Serial_Baudrate)
for i in range(5):
print(mbed_USB_info[i])
Bytes_Per_Sample = 32
Number_Samples = 9
serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=Serial_Baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
for x in range(Number_Samples):
Time, Voltages, PWM = Read_Sample(serial_port, Bytes_Per_Sample)
print(Time, Voltages, PWM)
|
StarcoderdataPython
|
1654360
|
<reponame>hnc01/online-judge<gh_stars>0
'''
https://leetcode.com/problems/course-schedule-ii/
210. Course Schedule II
There are a total of numCourses courses you have to take, labeled from 0 to numCourses - 1.
You are given an array prerequisites where prerequisites[i] = [ai, bi] indicates that you must take course bi first if you want to take course ai.
For example, the pair [0, 1], indicates that to take course 0 you have to first take course 1.
Return the ordering of courses you should take to finish all courses. If there are many valid answers, return any of them. If it is impossible to finish all courses, return an empty array.
'''
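# Example from the problem statement: numCourses = 4,
# prerequisites = [[1, 0], [2, 0], [3, 1], [3, 2]] -> a valid order is
# [0, 1, 2, 3] (or [0, 2, 1, 3]). The solution below builds a directed graph
# and returns a DFS-based topological order, or [] if a cycle makes the
# schedule impossible.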
class Solution:
class Graph:
adjacency = None
vertices = None
def __init__(self):
self.adjacency = {} # mapping each vertex to its adjacent vertices
self.vertices = set()
def add_vertex(self, val):
self.vertices.add(val)
def add_edge(self, source, destination):
if source in self.adjacency:
self.adjacency[source].add(destination)
else:
temp = set()
temp.add(destination)
self.adjacency[source] = temp
def dfs_visit_for_topological(self, u, topological_sort_order, visited, ancestry):
# we are exploring u now so we need to mark it as grey
visited.add(u)
ancestry.add(u)
adjacent_vertices = []
if u in self.adjacency:
adjacent_vertices = self.adjacency[u]
# now discover every edge reachable from u
for v in adjacent_vertices:
if v not in visited:
if self.dfs_visit_for_topological(v, topological_sort_order, visited, ancestry) == True:
return True
elif v in ancestry:
# we've seen this vertex AND its an ancestor
# if the edge is a backedge then we have a cycle
# i.e. the current node is linked back to an ancestor
return True
# we're done exploring u
ancestry.remove(u)
topological_sort_order.insert(0, u)
return False
def dfs_for_topological(self):
# collect the vertices in topological order in topological_sort_order;
# return [] if a cycle is detected
topological_sort_order = []
visited = set()
ancestry = set()
# we dfs visit every undiscovered vertex in G
for u in self.vertices:
if u not in visited:
# dfs_visit_for_topological returns true if there are cycles
contains_cycles = self.dfs_visit_for_topological(u, topological_sort_order, visited, ancestry)
if contains_cycles:
return []
# else continue
return topological_sort_order
def findOrder(self, numCourses: int, prerequisites: [[int]]) -> [int]:
g = self.Graph()
for i in range(0, numCourses):
g.add_vertex(i)
for prerequisite in prerequisites:
# we create an edge from prerequisite[1] to prerequisite[0]
source = prerequisite[1]
destination = prerequisite[0]
# we need to take source course before we take destination course
g.add_edge(source, destination)
return g.dfs_for_topological()
numCourses = 2
prerequisites = [[1,0]]
numCourses = 4
prerequisites = [[1,0],[2,0],[3,1],[3,2]]
numCourses = 1
prerequisites = []
numCourses = 2
prerequisites = [[1,0], [0,1]]
print(Solution().findOrder(numCourses, prerequisites))
|
StarcoderdataPython
|
1601510
|
__author__ = 'royrusso'
import pytest
pytest_plugins = ["docker_compose"]
@pytest.mark.hq_ops
def test_get_clusters(fixture):
fixture.clear_all_clusters()
response = fixture.app.get('/api/clusters')
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert res['data'] == []
@pytest.mark.hq_ops
def test_connect_to_clusters(session_scoped_container_getter, fixture):
fixture.clear_all_clusters()
container = session_scoped_container_getter.get('elasticsearch').network_info[0]
es_cluster_connect = '{"ip": "%s", "port": "%s"}' % (container.hostname, container.host_port)
response = fixture.app.post('/api/clusters/_connect', data=es_cluster_connect,
content_type='application/json')
assert 201 == response.status_code
res = fixture.get_response_data(response)
assert res['data'][0]['cluster_version'].startswith("2")
response = fixture.app.get('/api/clusters')
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert len(res['data']) == 1
@pytest.mark.hq_ops
def test_delete_connection(session_scoped_container_getter, fixture):
fixture.add_all_clusters(session_scoped_container_getter, clear_first=True)
response = fixture.app.get('/api/clusters')
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert len(res['data']) == 1
# let's delete a specific version
for c in res['data']:
if c['cluster_version'].startswith("2"):
response = fixture.app.delete('/api/clusters/' + c['cluster_name'] + '/_connect')
assert 200 == response.status_code
break
response = fixture.app.get('/api/clusters')
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert len(res['data']) == 0
@pytest.mark.hq_ops
def test_delete_all_connections(session_scoped_container_getter, fixture):
fixture.clear_all_clusters()
container = session_scoped_container_getter.get('elasticsearch').network_info[0]
es_cluster_connect = '{"ip": "%s", "port": "%s"}' % (container.hostname, container.host_port)
fixture.app.post('/api/clusters/_connect', data=es_cluster_connect,
content_type='application/json')
response = fixture.app.delete('/api/clusters/_all/_connect')
assert 200 == response.status_code
response = fixture.app.get('/api/clusters')
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert len(res['data']) == 0
|
StarcoderdataPython
|
3309815
|
# (c) 2015 <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import jinja2
from units.compat import unittest
from ansible.template import AnsibleUndefined, _escape_backslashes, _count_newlines_from_end
# These are internal utility functions only needed for templating. They're
# algorithmic so good candidates for unittesting by themselves
class TestBackslashEscape(unittest.TestCase):
test_data = (
# Test backslashes in a filter arg are double escaped
dict(
template=u"{{ 'test2 %s' | format('\\1') }}",
intermediate=u"{{ 'test2 %s' | format('\\\\1') }}",
expectation=u"test2 \\1",
args=dict()
),
# Test backslashes inside the jinja2 var itself are double
# escaped
dict(
template=u"Test 2\\3: {{ '\\1 %s' | format('\\2') }}",
intermediate=u"Test 2\\3: {{ '\\\\1 %s' | format('\\\\2') }}",
expectation=u"Test 2\\3: \\1 \\2",
args=dict()
),
# Test backslashes outside of the jinja2 var are not double
# escaped
dict(
template=u"Test 2\\3: {{ 'test2 %s' | format('\\1') }}; \\done",
intermediate=u"Test 2\\3: {{ 'test2 %s' | format('\\\\1') }}; \\done",
expectation=u"Test 2\\3: test2 \\1; \\done",
args=dict()
),
# Test backslashes in a variable sent to a filter are handled
dict(
template=u"{{ 'test2 %s' | format(var1) }}",
intermediate=u"{{ 'test2 %s' | format(var1) }}",
expectation=u"test2 \\1",
args=dict(var1=u'\\1')
),
# Test backslashes in a variable expanded by jinja2 are double
# escaped
dict(
template=u"Test 2\\3: {{ var1 | format('\\2') }}",
intermediate=u"Test 2\\3: {{ var1 | format('\\\\2') }}",
expectation=u"Test 2\\3: \\1 \\2",
args=dict(var1=u'\\1 %s')
),
)
def setUp(self):
self.env = jinja2.Environment()
def test_backslash_escaping(self):
for test in self.test_data:
intermediate = _escape_backslashes(test['template'], self.env)
self.assertEqual(intermediate, test['intermediate'])
template = jinja2.Template(intermediate)
args = test['args']
self.assertEqual(template.render(**args), test['expectation'])
class TestCountNewlines(unittest.TestCase):
def test_zero_length_string(self):
self.assertEqual(_count_newlines_from_end(u''), 0)
def test_short_string(self):
self.assertEqual(_count_newlines_from_end(u'The quick\n'), 1)
def test_one_newline(self):
self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n'), 1)
def test_multiple_newlines(self):
self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n\n\n'), 3)
def test_zero_newlines(self):
self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000), 0)
def test_all_newlines(self):
self.assertEqual(_count_newlines_from_end(u'\n' * 10), 10)
def test_mostly_newlines(self):
self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' + u'\n' * 1000), 1000)
class TestAnsibleUndefined(unittest.TestCase):
def test_getattr(self):
val = AnsibleUndefined()
self.assertIs(getattr(val, 'foo'), val)
self.assertRaises(AttributeError, getattr, val, '__UNSAFE__')
|
StarcoderdataPython
|
3225835
|
def get_config():
return {
'aws': {
'profile_name': 'mgap'
},
'clarifai': {
'api_key': ''
},
'elucidate': {
'host': 'http://localhost',
'port': 8080,
'base_path': '/annotation',
'annotation_model': 'w3c',
'request_headers_seed': {
'Accept': 'application/ld+json;profile="http://www.w3.org/ns/anno.jsonld"',
'Content-Type': 'application/ld+json'
}
},
'google_vision': {
'api_key': ''
},
'iiif': {
'image_api_default_params': {
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'default',
'format': 'jpg'
}
},
'rabbitmq': {
'username': 'guest',
'password': '',
'host': 'localhost',
'port': 5672,
'vhost': ''
},
'redis': {
'host': 'redis',
'port': 6379,
'db': {
'computer_vision_results': '0',
'celery_task_results': '1'
}
},
'solr': {
'indexes': {
'amazon_rekognition': 'http://localhost:8983/solr/amazon_rekognition',
'clarifai': 'http://localhost:8983/solr/clarifai',
'google_vision': 'http://localhost:8983/solr/google_vision',
'combined': 'http://localhost:8983/solr/combined'
},
'tags_field': 'tags_ssim',
'copy_fields': [
{
'src': 'subject_tesim',
'dst': 'subject_sim'
},
{
'src': 'human_readable_type_tesim',
'dst': 'human_readable_type_sim'
},
{
'src': 'human_readable_resource_type_tesim',
'dst': 'human_readable_resource_type_sim'
},
{
'src': 'genre_tesim',
'dst': 'genre_sim'
},
{
'src': 'named_subject_tesim',
'dst': 'named_subject_sim'
},
{
'src': 'location_tesim',
'dst': 'location_sim'
},
{
'src': 'language_tesim',
'dst': 'language_sim'
}
]
},
'web_annotation': {
'annotation_seed': {
'@context': 'http://www.w3.org/ns/anno.jsonld',
'type': 'Annotation',
'motivation': 'tagging',
'target': {
'type': 'SpecificResource',
'selector': {
'type': 'ImageApiSelector'
}
},
'creator': {
'type': 'Organization',
'name': 'UCLA Library',
'homepage': 'https://library.ucla.edu'
},
'generator': {
'type': 'Software',
'name': 'Machine Generated Annotations Pipeline',
'homepage': 'https://github.com/UCLALibrary/mgap'
}
},
'annotation_body_seed': {
'type': 'TextualBody',
'format': 'text/json',
'language': 'en',
'purpose': 'tagging',
'creator': {
'type': 'Software'
},
'generator': {
'type': 'Software'
}
},
'annotation_container_seed': {
'@context': 'http://www.w3.org/ns/anno.jsonld',
'type': 'AnnotationCollection'
}
}
}
|
StarcoderdataPython
|
3371289
|
import os
from tqdm import tqdm
import numpy as np
import cv2
data_type = "SCUT-EnsText"
path = "SCUT-EnsText/train"
assert data_type=="SCUT-EnsText" or data_type=="SCUT-Syn"
if data_type=="SCUT-EnsText":
os.makedirs(os.path.join(path, "mask"), exist_ok=True)
file_names = list(map(lambda x: x.split(".")[0], os.listdir(os.path.join(path, "all_images"))))
for file_name in tqdm(file_names):
f_gt = open(os.path.join(path, "all_gts", file_name+".txt"), "r", encoding="utf-8")
boxes = list(map(lambda x: np.array(list(map(int, x.strip().split(",")))).reshape(-1, 2), f_gt.readlines()))
f_gt.close()
img = cv2.imread(os.path.join(path, "all_images", file_name+".jpg"))
mask = np.ones(img.shape)
cv2.fillPoly(mask, boxes, (0., 0., 0.))
cv2.imwrite(os.path.join(path, "mask", file_name+".jpg"), (mask*255.).astype(np.uint8))
|
StarcoderdataPython
|
8528
|
# @AUTHOR : lonsty
# @DATE : 2020/3/28 18:01
class CookiesExpiredException(Exception):
pass
class NoImagesException(Exception):
pass
class ContentParserError(Exception):
pass
class UserNotFound(Exception):
pass
|
StarcoderdataPython
|
45671
|
<reponame>hillyuan/Panzer
#! /usr/bin/env python
"""
Script for analyzing Panzer kernel performance on next-generation
architectures. Runs hierarchic parallelism and generates plots from
data.
"""
__version__ = "1.0"
__author__ = "<NAME>"
__date__ = "Dec 2018"
# Import python modules for command-line options, the operating system, regular
# expressions, and system functions
import commands
import argparse
import os
import re
import sys
import datetime
#############################################################################
def main():
"""Script for analyzing Phalanx performance on next-generation architectures."""
# Initialization
print '****************************************'
print '* Starting Panzer Analysis'
print '****************************************'
parser = argparse.ArgumentParser(description='Panzer hierarchic parallelism analysis script')
parser.add_argument('-r', '--run', action='store_true', help='Run the executable to generate data and output to files.')
parser.add_argument('-a', '--analyze', action='store_true', help='Analyze the data from files generated with --run.')
parser.add_argument('-p', '--prefix', help='Add a prefix string to all output filenames.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print more data to screen.')
parser.add_argument('-o', '--basis-order', type=int, required=True, help='FE basis order.')
parser.add_argument('-ts', '--team-size', type=int, required=True, help='Team size for hierarchic parallelism.')
parser.add_argument('-vs', '--vector-size', type=int, required=True, help='Vector size for hierarchic parallelism.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--base-exe', action='store_true', default=False, help="Use the Base executable")
group.add_argument('-m', '--mixed-exe', action='store_true', default=False, help="Use the Mixed Field Type executable")
group.add_argument('-d', '--device-dag-exe', action='store_true', default=True, help="Use the Device DAG executable")
args = parser.parse_args()
nx = 20
ny = 20
nz = 20
order = args.basis_order
ts = args.team_size
vs = args.vector_size
print "basis order = %d, team size = %d, vector size = %d\n" % (order, ts, vs)
executable = "./PanzerAdaptersSTK_MixedPoissonExample.exe"
print "Starting Workset Analysis"
ws_step_size = 100
workset_range = range(100,2000+ws_step_size,ws_step_size)
print "workset range = "+str(workset_range)
timings = {}
if args.analyze:
import numpy as np
timings["panzer::AssemblyEngine::evaluate_volume(panzer::Traits::Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] GatherSolution (Tpetra): GRADPHI_FIELD (panzer::Traits::Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] DOFDiv: DIV_GRADPHI_FIELD (panzer::Traits::Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] Integrator_DivBasisTimesScalar (EVALUATES): RESIDUAL_GRADPHI_FIELD"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] Sine Source"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] Integrator_DivBasisTimesScalar (CONTRIBUTES): RESIDUAL_GRADPHI_FIELD"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] SCATTER_GRADPHI_FIELD Scatter Residual (Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] DOF: GRADPHI_FIELD accel_jac (panzer::Traits::Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] Integrator_GradBasisDotVector (EVALUATES): RESIDUAL_PHI_MASS_OP"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] GatherSolution (Tpetra): PHI (panzer::Traits::Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] DOFGradient: GRAD_PHI (panzer::Traits::Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] Integrator_GradBasisDotVector (EVALUATES): RESIDUAL_PHI_DIFFUSION_OP"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] SumStatic Rank 2 Evaluator"] = np.zeros(len(workset_range),dtype=np.float64)
timings["[panzer::Traits::Jacobian] SCATTER_PHI Scatter Residual (Jacobian)"] = np.zeros(len(workset_range),dtype=np.float64)
#print dir(np)
for i in range(len(workset_range)):
ws = workset_range[i]
filename = "fea_nx_%i_ny_%i_nz_%i_order_%i_ws_%i_ts_%i_vs_%i.dat" % (nx, ny, nz , order, ws, ts, vs)
if args.prefix:
filename = args.prefix+filename
command = executable+" --x-elements=%i --y-elements=%i --z-elements=%i --hgrad-basis-order=%i --hdiv-basis-order=%i --workset-size=%i --use-shared-mem-for-ad --no-check-order" % (nx, ny, nz, order, order, ws) +" >& "+filename
if args.run:
#print 'generating data...'
if args.verbose:
print " Running \""+command+"\" ...",
sys.stdout.flush()
os.system(command);
if args.verbose:
print "completed!"
sys.stdout.flush()
if args.analyze:
f = open(filename, mode='r')
lines = f.readlines()
for line in lines:
if args.verbose:
print line,
for key,value in timings.iteritems():
if key in line:
split_line = line.split()
timings[key][i] += float(split_line[-4])
if args.verbose:
print " found key: "+key+" = "+str(split_line[-4])
break
f.close()
do_jac = True
if args.analyze:
import matplotlib.pyplot as plt
fig = plt.figure()
plt.semilogy()
# maroon = #990033, light blue = #00ffff
#plt.plot(workset_range,timings["Jacobian Evaluation Time <<Host DAG>>"],label="Jac Total Time (Host DAG)",marker="o",color="#990033",markersize=8)
#plt.plot(workset_range,timings["Jacobian Evaluation Time <<Device DAG>>"],label="Jac Total Time (Device DAG)",marker="s",color="r",markersize=8)
#plt.plot(workset_range,timings["Residual Evaluation Time <<Host DAG>>"],label="Res Total Time (Host DAG)",marker="o",color="b",markersize=8)
plt.plot(workset_range,timings["panzer::AssemblyEngine::evaluate_volume(panzer::Traits::Jacobian)"],label="Jacobian Volume Assembly Total Time",marker="s",color="#00ffff",markersize=8)
plt.xlabel("Workset Size",fontsize=16)
plt.ylabel("Time (s)",fontsize=16)
plt.tick_params(labelsize=16)
title = "nel=%i,order=%i" % (nx*ny*nz,order)
plt.title(title)
#plt.legend(bbox_to_anchor=(1,1))
plt.legend(loc='upper center', bbox_to_anchor=(0.5,1.0),ncol=2,fancybox=True,shadow=True, prop={'size': 12})
plt.grid()
dag_timings_filename = "total_time_nx_%i_ny_%i_nz_%i_order_%i_ts_%i_vs_%i.png" % (nx, ny, nz ,order, ts, vs)
fig.savefig(dag_timings_filename)
#plt.show()
fig = plt.figure(2)
#plt.clf()
plt.semilogy()
plt.plot(workset_range,timings["[panzer::Traits::Jacobian] Integrator_DivBasisTimesScalar (EVALUATES): RESIDUAL_GRADPHI_FIELD"],label="Integrator DivBasisTimesScalar (eval)",marker='s')
plt.plot(workset_range,timings["[panzer::Traits::Jacobian] Integrator_DivBasisTimesScalar (CONTRIBUTES): RESIDUAL_GRADPHI_FIELD"],label="Integrator DivBasisTimesScalar (contrib)",marker='^')
plt.plot(workset_range,timings["[panzer::Traits::Jacobian] Integrator_GradBasisDotVector (EVALUATES): RESIDUAL_PHI_MASS_OP"],label="Integrator GradBasisDotVector (mass op)",marker='*')
plt.plot(workset_range,timings["[panzer::Traits::Jacobian] Integrator_GradBasisDotVector (EVALUATES): RESIDUAL_PHI_DIFFUSION_OP"],label="Integrator GradBasisDotVector (diff op)",marker='D')
plt.plot(workset_range,timings["[panzer::Traits::Jacobian] DOF: GRADPHI_FIELD accel_jac (panzer::Traits::Jacobian)"],label="DOF (GradPHI)",marker='+')
plt.plot(workset_range,timings["[panzer::Traits::Jacobian] DOFGradient: GRAD_PHI (panzer::Traits::Jacobian)"],label="DOFGradient (GradPhi)",marker='x')
#plt.plot(workset_range,timings["[panzer::Traits::Jacobian] DOFDiv: DIV_GRADPHI_FIELD (panzer::Traits::Jacobian)"],label="DOF Div (GradPhi)",marker='o')
#plt.plot(workset_range,timings[""],label="Res Scatter",marker='.',color="#ff6600")
plt.xlabel("Workset Size",fontsize=16)
plt.ylabel("Time (s)",fontsize=16)
plt.tick_params(labelsize=16)
plt.ylim(1.0e-4,1.0e1)
title = "nel=%i,order=%i" % (nx*ny*nz,order)
plt.title(title)
#plt.legend(bbox_to_anchor=(1,1))
plt.legend(loc='upper center', bbox_to_anchor=(0.5,0.25),ncol=2,fancybox=True,shadow=True, prop={'size': 10})
#plt.axis([0,2000,1.0e-4,0.1])
plt.grid()
res_evaluator_timings_filename = "kernel_timings_nx_%i_ny_%i_nz_%i_order_%i_ts_%i_vs_%i.png" % (nx, ny, nz, order, ts, vs)
fig.savefig(res_evaluator_timings_filename)
#print dir(plt)
# Plot to assess savings
count = 0;
for key,value in timings.iteritems():
filename_f = "raw_data_output_timer_%i_nx_%i_ny_%i_nz_%i_order_%i_ws_%i_ts_%i_vs_%i.csv" % (count, nx, ny, nz, order, ws, ts, vs)
write_file = open(filename_f,'w')
count += 1;
write_file.write(str(key)+"\n")
for i in range(len(workset_range)):
write_file.write(str(workset_range[i])+", "+str(timings[key][i])+"\n")
print "Finished Workset Analysis"
if args.verbose:
print timings
# f = open(filename, mode='r')
# lines = f.readlines()
# for line in lines:
# print line,
# split_line = line.split(" ")
# print split_line[1]
# f.close()
#os.chdir('/Users')
# Write timestamp for backup
#os.chdir('/Users/rppawlo')
#timestamp_file = open('BACKUP_DATE', 'w')
#today = datetime.datetime.today()
#date = today.strftime("YYYY.MM.DD: %Y.%m.%d at HH.MM.SS: %H.%M.%S")
#timestamp_file.write(date)
#timestamp_file.close()
print '****************************************'
print '* Finished Panzer Analysis!'
print '****************************************'
#############################################################################
# If called from the command line, call main()
#############################################################################
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1795756
|
<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'LedgerTagArgs',
'StreamKinesisConfigurationArgs',
'StreamTagArgs',
]
@pulumi.input_type
class LedgerTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class StreamKinesisConfigurationArgs:
def __init__(__self__, *,
aggregation_enabled: Optional[pulumi.Input[bool]] = None,
stream_arn: Optional[pulumi.Input[str]] = None):
if aggregation_enabled is not None:
pulumi.set(__self__, "aggregation_enabled", aggregation_enabled)
if stream_arn is not None:
pulumi.set(__self__, "stream_arn", stream_arn)
@property
@pulumi.getter(name="aggregationEnabled")
def aggregation_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "aggregation_enabled")
@aggregation_enabled.setter
def aggregation_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "aggregation_enabled", value)
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "stream_arn")
@stream_arn.setter
def stream_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_arn", value)
@pulumi.input_type
class StreamTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
"""
A key-value pair to associate with a resource.
:param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param pulumi.Input[str] value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
|
StarcoderdataPython
|
1682597
|
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of relstorage.adapters.mysql"""
from __future__ import absolute_import
import logging
import time
import unittest
from ZODB.utils import u64 as bytes8_to_int64
from ZODB.tests import StorageTestBase
from relstorage.adapters.mysql import MySQLAdapter
from relstorage.options import Options
from relstorage._util import timestamp_at_unixtime
from . import StorageCreatingMixin
from . import TestCase
from .util import skipOnCI
from .util import AbstractTestSuiteBuilder
from .util import DEFAULT_DATABASE_SERVER_HOST
class MySQLAdapterMixin(object):
# The MySQL schema adapter uses DROP TABLE
# and then CREATE TABLE to zap when ``zap_all(slow=True)``.
# This is *much* faster than ``DELETE FROM`` on large
# databases (since we can't use truncate.). But for small databases,
# it adds lots of extra overhead to re-create those tables all the
# time, and ``DELETE FROM`` is the way to go.
zap_slow = True
def __get_db_name(self):
if self.keep_history:
db = self.base_dbname
else:
db = self.base_dbname + '_hf'
return db
def __get_adapter_options(self, dbname=None):
dbname = dbname or self.__get_db_name()
assert isinstance(dbname, str), (dbname, type(dbname))
return {
'db': dbname,
'user': 'relstoragetest',
'passwd': '<PASSWORD>',
'host': DEFAULT_DATABASE_SERVER_HOST,
}
def make_adapter(self, options, db=None):
return MySQLAdapter(
options=options,
**self.__get_adapter_options(db)
)
def get_adapter_class(self):
return MySQLAdapter
def get_adapter_zconfig(self):
options = self.__get_adapter_options()
options['driver'] = self.driver_name
formatted_options = '\n'.join(
' %s %s' % (k, v)
for k, v in options.items()
)
return u"""
<mysql>
%s
</mysql>
""" % (formatted_options)
def verify_adapter_from_zconfig(self, adapter):
self.assertEqual(adapter._params, self.__get_adapter_options())
class TestGenerateTID(MySQLAdapterMixin,
StorageCreatingMixin,
TestCase,
StorageTestBase.StorageTestBase):
# pylint:disable=too-many-ancestors
def setUp(self):
super(TestGenerateTID, self).setUp()
self._storage = self._closing(self.make_storage())
def test_extract_parts(self):
unix_time = 1564063129.1277142
query = """
SELECT EXTRACT(year FROM ts) as YEAR,
EXTRACT(month FROM ts) AS month,
EXTRACT(day FROM ts) AS day,
EXTRACT(hour FROM ts) AS hour,
EXTRACT(minute FROM ts) AS minute,
%s MOD 60 AS seconds
FROM (
SELECT FROM_UNIXTIME(%s) + 0.0 AS ts
) t
"""
cursor = self._storage._load_connection.cursor
cursor.execute(query, (unix_time, unix_time))
year, month, day, hour, minute, seconds = cursor.fetchone()
self.assertEqual(year, 2019)
self.assertEqual(month, 7)
self.assertEqual(day, 25)
self.assertEqual(hour, 13) # If this is not 13, the time_zone is incorrect
self.assertEqual(minute, 58)
self.assertEqual(
round(float(seconds), 6),
49.127714)
def test_known_time(self):
now = 1564054182.277615
gmtime = (2019, 7, 25, 11, 29, 42, 3, 206, 0)
self.assertEqual(
time.gmtime(now),
gmtime
)
ts_now = timestamp_at_unixtime(now)
self.assertEqual(
ts_now.raw(),
b'\x03\xd1Oq\xb4bn\x00'
)
self.test_current_time(now)
# Problematic values due to rounding
# of minutes due to seconds
for now, gmtime in (
(1565774811.9655108,
(2019, 8, 14, 9, 26, 51, 2, 226, 0)),
(1565767799.607957,
(2019, 8, 14, 7, 29, 59, 2, 226, 0)),
(1565775177.915336,
(2019, 8, 14, 9, 32, 57, 2, 226, 0)),
(1565775299.106127,
(2019, 8, 14, 9, 34, 59, 2, 226, 0)),
(1565775479.180209,
(2019, 8, 14, 9, 37, 59, 2, 226, 0)),
):
self.assertEqual(time.gmtime(now), gmtime)
self.test_current_time(now)
def test_current_time(self, now=None):
from persistent.timestamp import TimeStamp
from relstorage._util import int64_to_8bytes
if now is None:
now = time.time()
storage = self._storage
ts_now = timestamp_at_unixtime(now)
expected_tid_int = bytes8_to_int64(ts_now.raw())
__traceback_info__ = now, now % 60.0, time.gmtime(now), ts_now, expected_tid_int
cursor = storage._load_connection.cursor
cursor.execute('CALL make_tid_for_epoch(%s, @tid)', (now,))
cursor.execute('SELECT @tid')
tid, = cursor.fetchall()[0]
tid_as_timetime = TimeStamp(int64_to_8bytes(tid)).timeTime()
__traceback_info__ += (tid_as_timetime - ts_now.timeTime(),)
self.assertEqual(
tid,
expected_tid_int
)
class MySQLTestSuiteBuilder(AbstractTestSuiteBuilder):
__name__ = 'MySQL'
def __init__(self):
from relstorage.adapters.mysql import drivers
super(MySQLTestSuiteBuilder, self).__init__(
drivers,
MySQLAdapterMixin,
extra_test_classes=(TestGenerateTID,)
)
def _compute_large_blob_size(self, use_small_blobs):
# MySQL is limited to the blob_chunk_size as there is no
# native blob streaming support. (Note: this depends on the
# max_allowed_packet size on the server as well as the driver;
# both values default to 1MB. So keep it small.)
return Options().blob_chunk_size
def _make_check_class_HistoryFreeRelStorageTests(self, bases, name, klass_dict=None):
bases = (GenericMySQLTestsMixin, ) + bases
klass_dict = {}
return self._default_make_check_class(bases, name, klass_dict=klass_dict)
# pylint:disable=line-too-long
def _make_check_class_HistoryPreservingRelStorageTests(self, bases, name, klass_dict=None):
return self._make_check_class_HistoryFreeRelStorageTests(bases, name, klass_dict)
class GenericMySQLTestsMixin(object):
@skipOnCI("Travis MySQL goes away error 2006")
def check16MObject(self):
# NOTE: If your mySQL goes away, check the server's value for
# `max_allowed_packet`, you probably need to increase it.
# JAM uses 64M.
# http://dev.mysql.com/doc/refman/5.7/en/packet-too-large.html
super(GenericMySQLTestsMixin, self).check16MObject()
def checkMyISAMTablesProduceErrorWhenNoCreate(self):
from ZODB.POSException import StorageError
def cb(_conn, cursor):
cursor.execute('ALTER TABLE new_oid ENGINE=MyISAM;')
self._storage._adapter.connmanager.open_and_call(cb)
# Now open a new storage that's not allowed to create
with self.assertRaisesRegex(
StorageError,
'MyISAM is no longer supported.*new_oid'
):
self.open(create_schema=False)
def checkMyISAMTablesAutoMigrate(self):
# Verify we have a broken state.
self.checkMyISAMTablesProduceErrorWhenNoCreate()
# Now a storage that can alter a table will do so.
storage = self.open()
storage.close()
storage = self.open(create_schema=False)
storage.close()
def checkIsolationLevels(self):
def assert_storage(storage):
load_cur = storage._load_connection.cursor
store_cur = storage._store_connection.cursor
version_detector = storage._adapter.version_detector
if not version_detector.supports_transaction_isolation(load_cur):
raise unittest.SkipTest("Needs MySQL better than %s" % (
version_detector.get_version(load_cur)
))
for cur, ex_iso, ex_ro, ex_timeout in (
# Timeout for load is mysql default.
[load_cur, 'REPEATABLE-READ', True, 50],
[store_cur, 'READ-COMMITTED', False, self.DEFAULT_COMMIT_LOCK_TIMEOUT],
):
cur.execute("""
SELECT @@transaction_isolation,
@@transaction_read_only,
@@innodb_lock_wait_timeout
""")
row, = cur.fetchall()
iso, ro, timeout = row
__traceback_info__ = row
iso = iso.decode('ascii') if not isinstance(iso, str) else iso
self.assertEqual(iso, ex_iso)
self.assertEqual(ro, ex_ro)
self.assertEqual(timeout, ex_timeout)
# By default
assert_storage(self._storage)
# In a new instance, and after we do a transaction with it.
from ZODB.DB import DB
import transaction
db = self._closing(DB(self._storage))
conn = self._closing(db.open())
assert_storage(conn._storage)
conn.root()['obj'] = 1
transaction.commit()
assert_storage(conn._storage)
def test_suite():
return MySQLTestSuiteBuilder().test_suite()
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger("zc.lockfile").setLevel(logging.CRITICAL)
unittest.main(defaultTest="test_suite")
|
StarcoderdataPython
|
185575
|
<filename>wgdi/retain.py
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import wgdi.base as base
class retain():
def __init__(self, options):
self.position = 'order'
for k, v in options:
setattr(self, str(k), v)
print(str(k), ' = ', v)
if hasattr(self, 'ylim'):
self.ylim = [float(k) for k in self.ylim.split(',')]
else:
self.ylim = [0,1]
self.colors = [str(k) for k in self.colors.split(',')]
self.figsize = [float(k) for k in self.figsize.split(',')]
def run(self):
gff = base.newgff(self.gff)
lens = base.newlens(self.lens, self.position)
gff = gff[gff['chr'].isin(lens.index)]
alignment = pd.read_csv(self.alignment,header=None, index_col=0)
alignment = alignment.join(gff[['chr',self.position]], how='left')
self.retain = self.align_chr(alignment)
self.retain[self.retain.columns[:-2]].to_csv(self.savefile, sep='\t', header=None)
fig, axs = plt.subplots(
len(lens), 1, sharex=True, sharey=True, figsize=tuple(self.figsize))
fig.add_subplot(111, frameon=False)
align = dict(family='Arial', verticalalignment="center", horizontalalignment="center")
plt.ylabel(self.ylabel+'\n\n\n\n', fontsize=20, **align)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.tick_params(top=False, bottom=False, left=False,
right=False, labelleft=False, labelbottom=False)
groups = self.retain.groupby(['chr'])
for i in range(len(lens)):
group = groups.get_group(lens.index[i])
for j in self.retain.columns[:-2]:
axs[i].plot(group['order'].values, group[j].values,
linestyle='-', color=self.colors[j-1], linewidth=1)
axs[i].spines['right'].set_visible(False)
axs[i].spines['top'].set_visible(False)
axs[i].set_ylim(self.ylim)
axs[i].tick_params(labelsize=12)
for i in range(len(lens)):
x, y = axs[i].get_xlim()[1]*0.95, axs[i].get_ylim()[1]*0.5
axs[i].text(x, y, self.refgenome+' ' +
str(lens.index[i]), fontsize=18, **align)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.05)
plt.savefig(self.savefig, dpi=500)
plt.show()
sys.exit(0)
def align_chr(self, alignment):
for i in alignment.columns[:-2]:
alignment.loc[alignment[i].str.contains(r'\w', na=False), i] = 1
alignment.loc[alignment[i] == '.', i] = 0
alignment.loc[alignment[i] == ' ', i] = 0
alignment[i].fillna(0, inplace=True)
for chr, group in alignment.groupby(['chr']):
a = self.retain(group[i].values.tolist())
alignment.loc[group.index, i] = a
return alignment
def retain(self, arr):
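# sliding-window mean: each output value is the mean of arr[start:end] with
# start = i - step and end = i + step, clipped to the bounds of the array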
a = []
for i in range(0, len(arr)):
start, end = i-int(self.step), i+int(self.step)
if start < 0:
start = 0
if end > len(arr):
end = len(arr)
ave = sum(arr[start:end])/(end-start)
a.append(ave)
return a
|
StarcoderdataPython
|
170853
|
a = int(input())
b = int(input())
print(b - (a % b) if a % b != 0 else 0)
|
StarcoderdataPython
|
3246436
|
<gh_stars>0
# -*- coding: utf-8 -*-
# author: <NAME>
# <NAME>
# email: <EMAIL>
#
|
StarcoderdataPython
|
1667978
|
# https://leetcode.com/problems/longest-palindromic-substring/
class Solution(object):
def longestPalindrome(self, s):
result = ""
for i in range(len(s)):
temp = self.helper(s,i,i)
if len(temp)> len(result):
result = temp
temp = self.helper(s,i,i+1)
if len(result)<len(temp):
result = temp
return result
def helper(self,s,l,r):
while l>=0 and r<len(s) and s[l]==s[r]:
l-=1
r+=1
return s[l+1:r]
|
StarcoderdataPython
|
1612881
|
#!/usr/bin/env python
"""
Code and Configuration Comments Parsing and Stripping
This module is designed to make it easy to remove or retrieve comments from a
source file. This is accomplished in a single pass using a relatively complex
regular expression. The module provides two interface functions: `get()`
and `strip()`.
If the pattern itself is of interest, it is a visible property of the module:
`pattern`.
"""
import re
import sys
__version__ = '0.0.0'
#=============================================================================
# Language Comment Documentation
# list of single-line comment markers in various languages
single_line_comments = [
'#', '//', ';', "'", '"', '--', 'REM', 'Rem', '::', '!', '%', '\\'
]
single_line_map = [
( 'shell', 'Perl', 'PHP', 'Python', 'conf', 'Apache', 'Ruby', 'Make',
'Bash', 'Bourne Shell', 'C Shell', 'Tcl' ),
( 'C', 'C++', 'Java', 'PHP', 'JavaScript', 'ActionScript', 'ECMAScript' ),
( 'ini', 'Lisp', 'assembly' ),
( 'Visual Basic', 'VBScript' ),
( 'Vimscript' ),
( 'SQL', 'Haskell', 'Ada', 'Lua', 'VHDL', 'SGML' ),
( 'batch' ),
( 'batch', 'Visual Basic', 'VBScript' ),
( 'batch' ),
( 'Fortran 90' ),
( 'MATLAB' ),
( 'Forth' )
]
# list of multi-line comment markers in various languages
multi_line_comments = [
'/*~*/', '<!-- ~ -->', '{-~-}', '%{\n~\n}%', '(*~*)', '=begin~=end',
'=begin~=cut', '#|~|#', '--[[~]]', '--[[=[~]=]', '<#~#>'
]
multi_line_map = [
( 'C', 'C++', 'Java', 'JavaScript', 'PHP', 'SQL', 'CSS', 'Objective-C',
'C#', 'ActionScript', 'ECMAScript' ),
( 'SGML', 'XML', 'HTML', 'XHTML' ),
( 'Haskell' ),
( 'MATLAB' ),
( 'Pascal', 'OCaml' ),
( 'Ruby' ),
( 'Perl' ),
( 'Lisp' ),
( 'Lua' ),
( 'Lua' ),
( 'Bash', 'Bourne Shell', 'C Shell', 'Tcl' )
]
#=============================================================================
# Utility Pattern Fragments
_quotes = '"\'`'
_whites = ' \t\r\n'
_missme = r'(?:(?!(?P=quote))|[^\\\r\n])'
_patterns = {
# double-quoted string (with potentially escaped double quotes inside)
'dqs' : r'"[^"\\\r\n]*(?:\\.[^"\\\r\n]*)*"',
# single-quoted string (same as above)
'sqs' : r"'[^'\\\r\n]*(?:\\.[^'\\\r\n]*)*'",
# backtick-quoted string (same as above)
'bqs' : r'`[^`\\\r\n]*(?:\\.[^`\\\r\n]*)*`',
# double-, single-, and backtick-quoted strings
#'mqs' : r'[{0}][^{0}\\\r\n]*(?:\\.[^{0}\\\r\n]*)*[{0}]'.format( _quotes ),
'mqs' : r'(?P<quote>[{q}]){m}*(?:\\.{m}*)*(?P=quote)'.format(
q = _quotes,
m = _missme
),
# C-style, multiline comments
'csc' : r'/\*(?:.|[\r\n])*?\*/',
# C++- and shell-style comments
'ssc' : r'(?://|#).*$',
# allow string formatting easy access to these
'quotes' : _quotes,
'whites' : _whites
}
#=============================================================================
# The Comment Stripping Pattern
#
# The strategy involves judiciously attempting to match string literals first.
# The two top-level alternatives are to match either a multi-line comment or a
# single-line comment (in that order). The replacer or consumer of the match
# must then evaluate if the pattern matched a string by checking if the first
# subgroup has been populated. If not, the (entire) pattern has matched a
# comment.
#
# This strategy allows us to protect the stripping system from nearly all
# conceivable (and valid) permutations of embedding string literals and
# comments.
_pattern = '({mqs})|[{whites}]?{csc}[{whites}]?|{ssc}'.format( **_patterns )
#=============================================================================
# The Compiled Pattern (for external users)
pattern = re.compile( _pattern, flags = re.MULTILINE )
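# A hedged sketch of the consumer-side check described above: group(1) of a
# match is populated only when a string literal was matched, so a consumer
# keeps strings and treats every other match as a comment.
# (`handle_comment` below is a hypothetical callback, not part of this module.)
#
#     for match in pattern.finditer( text ):
#         if match.group( 1 ) is None:
#             handle_comment( match.group( 0 ) )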
#=============================================================================
# Test Cases : [ key, input, expected ]
_test_cases = [
# no comments, just a line
[ 'base-single', 'a b c', 'a b c' ],
# no comments, just multiple lines
[ 'base-multi', 'a\nb\nc', 'a\nb\nc' ],
# multiple lines inside comment
[ 'multi', 'a /* b\nc */ d', 'a d' ],
# shell comment
[ 'shell', 'a\n# b\nc', 'a\n\nc' ],
# all shell comments on all lines
[ 'shell-all', '# a\n# b\n# c', '\n\n' ],
# mixed comment styles on all lines
[ 'mixed-all', '# a\n// b\n/* c */', '\n\n' ],
# mixed comments with nested comment symbols
[ 'mixed-emb',
'# a /* b */\n/* c // d */\ne /* f */\ng # h',
'\n\ne \ng ' ],
# valid tokens, artificial/injected separation
[ 'multi-inject', 'a/* b */c', 'a c' ],
# asterisk inside comment
[ 'multi-asterisk', 'a /* b*c */ d', 'a d' ],
# multiple asterisks
[ 'multi-asterisks', 'a /** b ** c **/ d', 'a d' ],
# island tokens between comments
[ 'multi-island', 'a /* b */ c /* d */ e', 'a c e' ],
# shell-style comment in a quoted string
[ 'str-emb-shell', 'a "b # c" d', 'a "b # c" d' ],
# C++-style comment in a quoted string
[ 'str-emb-c++', 'a "b // c" d', 'a "b // c" d' ],
# C-style comment in a single-quoted string
[ 'str-emb-c', "a 'b /* c */ d' e", "a 'b /* c */ d' e" ],
# strings with embedded, but valid quotes
[ 'str-emb-str', 'a "b \'c\' d" e', 'a "b \'c\' d" e' ],
# strings with embedded comments
[ 'str-emb-multi', 'a "b \'c\' /* d */ e" f', 'a "b \'c\' /* d */ e" f' ],
# embedded string with embedded comments
[ 'str-emb-str-multi',
'a "b \'c /* d */\' e" f',
'a "b \'c /* d */\' e" f' ],
# invalid string delimiter
[ 'str-invalid', 'a " # b', 'a " ' ],
# comments with embedded strings
[ 'shell-emb-str', 'a # b "c" d', 'a ' ],
[ 'multi-emb-str', 'a /* b "c" */ d', 'a d' ]
]
#=============================================================================
def get( string ):
"""
Iteratively yields all comments from the given string.
@param string A string from which all comments will be retrieved
@yield A string containing the current comment
"""
# perform an iterative search on the subject string
comments = re.finditer( _pattern, string, flags = re.MULTILINE )
# iterate through matched patterns
for comment in comments:
# retrieve the complete match, and the first subgroup
g0, g1 = comment.group( 0, 1 )
# the first subgroup is from string literal matching. if the subgroup
# is populated, ignore it, and advance to the next match. if the
# subgroup is None, the entire pattern has matched a comment.
if g1 is None:
# yield this comment (optionally matched whitespace is removed)
yield g0.strip()
#=============================================================================
def strip( string ):
"""
Removes all comments from the given string.
@param string A string from which all comments will be removed
@return A string with no comments
"""
# run the regular expression against the subject string
return re.sub( _pattern, _replacer, string, flags = re.MULTILINE )
#=============================================================================
def _replacer( match ):
"""
Replacement function for `re.sub()` callbacks.
@param match The MatchObject instance for the current match
@return The string to use in place of the current match
"""
#print( '## Match:', match.group( 0 ), 'Groups:', match.groups() )
# get the entire match string and the first subgroup
g0, g1 = match.group( 0, 1 )
# string literal was matched, do not remove it from the subject string
if g1 is not None:
return g1
# C-style comments with no surrounding space are replaced with a space
# to allow "BEFORE/* ... */AFTER" to become "BEFORE AFTER"
if g0.startswith( '/*' ) and g0.endswith( '*/' ):
return ' '
# restore optionally-matched surrounding whitespace characters
replace = ''
if g0[ 0 ] in _whites:
replace += g0[ 0 ]
if g0[ -1 ] in _whites:
replace += g0[ -1 ]
return replace
#=============================================================================
def _print_multiline( fh, left, right ):
"""
Prints a pair of multi-line strings side-by-side.
@param fh    The writable file-like object to print to
@param left  The left-hand multi-line string
@param right The right-hand multi-line string
"""
l_lines = left.split( '\n' )
r_lines = right.split( '\n' )
nl_lines = len( l_lines )
nr_lines = len( r_lines )
num_lines = max( nl_lines, nr_lines )
if nl_lines < num_lines:
l_lines.extend( [ '' ] * ( num_lines - nl_lines ) )
if nr_lines < num_lines:
r_lines.extend( [ '' ] * ( num_lines - nr_lines ) )
max_left = max( len( line ) for line in l_lines )
max_right = max( len( line ) for line in r_lines )
max_line = max( max_left, max_right )
box_bar = '-' * max_line
bar = '+{0}+{0}+\n'.format( box_bar )
fh.write( bar )
fmt = '|{{:{0}}}|{{:{0}}}|\n'.format( max_line )
for left_line, right_line in zip( l_lines, r_lines ):
fh.write( fmt.format( left_line, right_line ) )
fh.write( bar )
#=============================================================================
def _run_tests( run ):
"""
Runs each test case (for a selected test target) through each test pattern
providing feedback on what works, and how well.
@param run The test target to execute (strip or get)
@return The result of test execution (0 = success)
"""
# count the number of failures
failures = 0
# get testing
if ( run == 'all' ) or ( run == 'get' ):
# example of various styles of comments
case = """a
//b
c
#d
e
f /* g */ h
/*
* i j
* k l
*/
m"""
# this should be the resulting list of comments
expected = [ '//b', '#d', '/* g */', '/*\n * i j\n * k l\n */' ]
# fetch the comments all at once (not a typical usage pattern)
actual = list( get( case ) )
# check what was retrieved
if actual == expected:
print( 'PASSED get test "mixed"' )
else:
print( 'FAILED get test "mixed"' )
print( 'Expected:', expected )
print( 'Actual :', actual )
failures += 1
# strip testing
if ( run == 'all' ) or ( run == 'strip' ):
# iterate through each test case
for key, string, expected in _test_cases:
# strip the comments
actual = strip( string )
# test the results
if actual == expected:
print( 'PASSED strip test "{}"'.format( key ) )
else:
print( 'FAILED strip test "{}"'.format( key ) )
_print_multiline( sys.stdout, string, actual )
failures += 1
# display complete test result
if failures == 0:
print( '*** All Tests PASSED ***' )
else:
print( '*** {} Tests FAILED ***'.format( failures ) )
# return test status
return 0 if failures == 0 else 1
#=============================================================================
def main( argv ):
"""
Script execution entry point
@param argv List of arguments passed to the script
@return Shell exit code (0 = success)
"""
# imports when using this as a script
import argparse
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'Code and Configuration Comments Regular Expressions',
add_help = False
)
parser.add_argument(
'-h',
'--help',
default = False,
help = 'Display this help message and exit.',
action = 'help'
)
parser.add_argument(
'-r',
'--run',
default = 'all',
help = 'Specify the test to execute (all, get, or strip).',
nargs = '?'
)
parser.add_argument(
'-v',
'--version',
default = False,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# run all the tests, and return status to shell
return _run_tests( args.run )
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
|
StarcoderdataPython
|
3319111
|
# coding: utf-8
# flake8: noqa
"""
Swagger Petstore
This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "0.0.0"
# import apis into sdk package
from fds.sampleapi.api.pet_api import PetApi
from fds.sampleapi.api.store_api import StoreApi
from fds.sampleapi.api.user_api import UserApi
# import ApiClient
from fds.sampleapi.api_client import ApiClient
from fds.sampleapi.configuration import Configuration
from fds.sampleapi.exceptions import OpenApiException
from fds.sampleapi.exceptions import ApiTypeError
from fds.sampleapi.exceptions import ApiValueError
from fds.sampleapi.exceptions import ApiKeyError
from fds.sampleapi.exceptions import ApiException
# import models into sdk package
from fds.sampleapi.models.api_response import ApiResponse
from fds.sampleapi.models.category import Category
from fds.sampleapi.models.order import Order
from fds.sampleapi.models.pet import Pet
from fds.sampleapi.models.tag import Tag
from fds.sampleapi.models.user import User
|
StarcoderdataPython
|
3317158
|
<reponame>SciampiJacopo/py-test
import sys
import pygame
class UIImageClass:
def __init__(self):
info = pygame.display.Info()
self.screenW = info.current_w
self.screenH = info.current_h
def createBackgroundImage(self, imagePath):
image = pygame.image.load(sys.path[0] + imagePath)
image = pygame.transform.smoothscale(
image, (self.screenW, self.screenH))
return image
|
StarcoderdataPython
|
3290671
|
<reponame>fgitmichael/AutoregressiveModeDisentangling
import torch
from mode_disent.test.action_sampler import ActionSampler
from mode_disent_no_ssm.network.mode_model import ModeLatentNetwork as ModeLatentNetworkNoSSM
class ActionSamplerNoSSM(ActionSampler):
def __init__(self,
mode_model: ModeLatentNetworkNoSSM,
device):
self.mode_model = mode_model
self.device = device if torch.cuda.is_available() else 'cpu'
self._mode = None
self._mode_next = None
def reset(self, mode=None):
if mode is None:
mode_to_set = self.mode_model.sample_mode_prior(batch_size=1)['samples']
self.set_mode(mode_to_set)
else:
self.set_mode(mode)
def set_mode(self, mode):
self._mode = mode.to(self.device)
self._mode_next = mode.to(self.device)
def set_mode_next(self, mode):
self._mode_next = mode.to(self.device)
def update_mode_to_next(self):
self._mode = self._mode_next
def _get_action(self,
mode,
state_rep):
"""
Args:
mode : (1, mode_dim) tensor
state_rep : (1, state_rep_dim) tensor
Return:
action : (1, action_dim) tensor
"""
action_recon = self.mode_model.action_decoder(
state_rep_seq=state_rep,
mode_sample=mode
)
return action_recon['samples']
def __call__(self, state_rep):
"""
Args:
state_rep : (1, state_rep_dim) tensor
"""
# Action decoder needs sequences of type (N, S, dim)
state_rep = state_rep.unsqueeze(0)
return self._get_action(self._mode, state_rep)
|
StarcoderdataPython
|
1685815
|
<reponame>RootA/flask-cli
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime, timedelta
from routes import db, app
|
StarcoderdataPython
|
3203799
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
"""
try:
from collections.abc import Mapping as MappingABC
except ImportError:
from collections import Mapping as MappingABC
from ..utilities.future_from_2 import str, object, repr_compat, unicode
from ..utilities.unique import NOARG
from .deep_bunch import DeepBunch
class TagBunch(object):
"""
"""
__slots__ = ('_dict', '_tag_dicts',)
def __init__(
self,
write_dict = None,
gfunc = None,
**kwargs
):
if write_dict is None:
write_dict = DeepBunch()
self._dict = write_dict
self._tag_dicts = kwargs
if gfunc is True:
self.require_tag('gfunc')
elif gfunc is not None:
self.set_tag('gfunc', gfunc)
return
@property
def _gfunc(self):
return self.get_tag('gfunc')
def __getitem__(self, key):
gfunc = self._tag_dicts.get('gfunc', None)
if gfunc is not None and (key not in self._dict):
gval = gfunc.get(key, None)
if gval is not None:
item = gval()
self._dict[key] = item
item = self._dict[key]
if isinstance(item, MappingABC):
subtags = {}
for tagkey, tagdict in list(self._tag_dicts.items()):
if isinstance(tagdict, MappingABC):
try:
subtags[tagkey] = tagdict[key]
except KeyError:
continue
item = self.__class__(item, **subtags)
return item
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("'{0}' not in {1}".format(key, self))
def __setitem__(self, key, item):
self._dict[key] = item
def __setattr__(self, key, item):
if key in self.__slots__:
return super(TagBunch, self).__setattr__(key, item)
return self.__setitem__(key, item)
def __delitem__(self, key):
del self._dict[key]
def __delattr__(self, key):
return self.__delitem__(key)
def get(self, key, default = NOARG):
try:
return self[key]
except KeyError:
if default is not NOARG:
return default
raise
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def get_tag(self, tagkey, default = NOARG):
try:
return self._tag_dicts[tagkey]
except KeyError:
if default is not NOARG:
return default
raise
def has_tag(self, key):
return key in self._tag_dicts
def require_tag(self, tagkey):
if tagkey not in self._tag_dicts:
self._tag_dicts[tagkey] = DeepBunch({})
return
def set_tag(self, tagkey, obj):
self._tag_dicts[tagkey] = obj
return
def __contains__(self, key):
return (key in self._dict)
def has_key(self, key):
return key in self
def __dir__(self):
items = list(k for k in self._dict.keys() if isinstance(k, (str, unicode)))
items.sort()
#items += dir(super(Bunch, self))
return items
@repr_compat
def __repr__(self):
return (
'{0}({1}, {2})'
).format(
self.__class__.__name__,
self._dict,
self._tag_dicts,
)
#def __eq__(self, other):
# return
#
#def __ne__(self, other):
# return not (self == other)
def __iter__(self):
return iter(list(self.keys()))
def __len__(self):
return len(self._dict)
def iterkeys(self):
return iter(list(self._dict.keys()))
def keys(self):
return list(self._dict.keys())
def itervalues(self):
for key in list(self.keys()):
yield self[key]
return
def values(self):
return list(self.itervalues())
def iteritems(self):
for key in list(self.keys()):
yield key, self[key]
return
def items(self):
return list(self.iteritems())
MappingABC.register(TagBunch)
|
StarcoderdataPython
|
1641162
|
<reponame>altynbek07/python-qazaq-transliterator<filename>qazaq_transliterator/__init__.py
from .qazaq_transliterator import translit
|
StarcoderdataPython
|
4822121
|
<gh_stars>0
from django.db import models
from django import forms
# Create your models here.
class Categoria(models.Model):
nome_categoria = models.CharField('Nome Categoria', max_length=250)
descricao = models.TextField('Descricao')
# TODO: statistics fields still need to be added later
def __str__(self) -> str:
return self.nome_categoria
class DadosSede(models.Model):
nome_empresa = models.CharField('Nome da empresa', max_length=250)
texto_sobre = models.TextField('Sobre nós')
chamada = models.TextField('Chamada', null=True)
whatsapp = models.TextField('Whatsapp', null=True, blank=True)
localizacao = models.TextField('Localização', null=True, blank=True)
instagram = models.TextField("Instagram", null=True, blank=True)
email = models.TextField(null=True)
def __str__(self) -> str:
return self.nome_empresa
class Formulario(forms.Form):
DIAS_CHOICES =(
("SE", "Segunda"),
("TE", "Terça"),
("QA", "Quarta"),
("QI", "Quinta"),
("SX", "Sexta"),
("SA", "Sabado"),
("DM", "Domingo"),
)
dias = forms.MultipleChoiceField(choices=DIAS_CHOICES)
class Tarefa(models.Model):
categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, null=True)
nome_tarefa = models.CharField('Nome Tarefa', max_length=250)
descricao = models.TextField('Descrição')
duracao = models.TimeField()
hora_notificacao = models.TimeField()
hora_criacao = models.DateTimeField()
# frequencia_semana = models.OneToOneField(Formulario, on_delete=models.CASCADE)  # does not work this way
frequencia_dias = models.IntegerField(default=1)
concluida = models.BooleanField(default=False)
class Insignia(models.Model):
nome_insignia = models.CharField(max_length=100)
xp_maximo = models.IntegerField(default=500)
lvl_maximo = models.IntegerField(default=10)
lvl_atual = models.IntegerField(default=0)
class Medalha(models.Model):
nome_medalha = models.CharField(max_length=100)
xp_atribuido = models.IntegerField(default=100)
|
StarcoderdataPython
|
123195
|
<gh_stars>1-10
"""
Finetune goldenretriever on knwoledge bases in Elasticsearch
Sample usage:
------------
python -m src.finetune.main
"""
import os
import pickle
import datetime
import pandas as pd
import numpy as np
import logging
import random
import sys
import tarfile
import shutil
import tensorflow as tf
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
from scipy.stats.mstats import rankdata
from src.models import GoldenRetriever
from src.encoders import USEEncoder, ALBERTEncoder, BERTEncoder
from src.data_handler.kb_handler import kb, kb_handler
from src.minio_handler import MinioClient
from src.finetune.eval import eval_model
from src.finetune.generators import random_triplet_generator, hard_triplet_generator
from src.finetune.config import CONFIG
if __name__ == "__main__":
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
# To prevent duplicate logs
logger.propagate = False
# Define file/directory paths
concat_kb_names = "_".join(CONFIG.kb_names)
model_folder_name = "model_" + concat_kb_names
results_folder_name = "results_" + concat_kb_names
MAIN_DIR = CONFIG.save_dir
MODEL_DIR = os.path.join(MAIN_DIR, model_folder_name, CONFIG.model_name)
MODEL_BEST_DIR = os.path.join(MAIN_DIR, model_folder_name, CONFIG.model_name, 'best')
MODEL_LAST_DIR = os.path.join(MAIN_DIR, model_folder_name, CONFIG.model_name, 'last')
EVAL_DIR = os.path.join(MAIN_DIR, results_folder_name, CONFIG.model_name)
if not os.path.isdir(MODEL_LAST_DIR): os.makedirs(MODEL_LAST_DIR)
if not os.path.isdir(EVAL_DIR):os.makedirs(EVAL_DIR)
EVAL_SCORE_PATH = os.path.join(EVAL_DIR, '_eval_scores.xlsx')
EVAL_DICT_PATH = os.path.join(EVAL_DIR, '_eval_details.pickle')
logger.info(f'\nModels will be saved at: {MODEL_DIR}')
logger.info(f'Best model will be saved at: {MODEL_BEST_DIR}')
logger.info(f'Last trained model will be saved at {MODEL_LAST_DIR}')
logger.info(f'Saving Eval_Score at: {EVAL_SCORE_PATH}')
logger.info(f'Saving Eval_Dict at: {EVAL_DICT_PATH}\n')
# Create training set based on chosen random seed
logger.info("Generating training/ evaluation set")
"""
LOAD MODEL
"""
# Instantiate chosen model
logger.info(f"Instantiating model: {CONFIG.model_name}")
encoders = {
"albert": ALBERTEncoder,
"bert": BERTEncoder,
"USE": USEEncoder
}
if CONFIG.model_name not in encoders:
raise ValueError("Model not found: %s" % (CONFIG.model_name))
# init the model and encoder
enc = encoders[CONFIG.model_name](max_seq_length=CONFIG.max_seq_length)
model = GoldenRetriever(enc)
# Set optimizer parameters
model.opt_params = {'learning_rate': CONFIG.learning_rate,'beta_1': CONFIG.beta_1,'beta_2': CONFIG.beta_2,'epsilon': CONFIG.epsilon}
"""
PULL AND PARSE KB FROM SQL
"""
train_dict = dict()
test_dict = dict()
df_list = []
# Get df using kb_handler
kbh = kb_handler()
kbs = kbh.load_es_kb(CONFIG.kb_names)
df = pd.concat([single_kb.create_df() for single_kb in kbs]).reset_index(drop='True')
kb_names = df['kb_name'].unique()
for kb_name in kb_names:
kb_id = df[df['kb_name'] == kb_name].index.values
train_idx, test_idx = train_test_split(kb_id, test_size=0.4,
random_state=100)
train_dict[kb_name] = train_idx
test_dict[kb_name] = test_idx
"""
FINETUNE
"""
if CONFIG.task_type == 'train_eval':
logger.info("Fine-tuning model")
# see the performance of out of box model
OOB_overall_eval, eval_dict = eval_model(model, df, test_dict)
epoch_eval_score = OOB_overall_eval.loc['Across_all_kb','mrr_score']
logger.info(f'Eval Score for OOB: {epoch_eval_score}')
earlystopping_counter = 0
for i in range(CONFIG.num_epochs):
epoch_start_time = datetime.datetime.now()
logger.info(f'Running Epoch #: {i}')
cost_mean_total = 0
batch_counter = 0
epoch_start_time = datetime.datetime.now()
# train_dataset_loader = random_triplet_generator(df, train_dict, CONFIG)
train_dataset_loader = hard_triplet_generator(df, train_dict, model, CONFIG)
for q, r, neg_r in train_dataset_loader:
if random.randrange(100) <= 10:
logger.info(f'\nTRIPLET SPOT CHECK')
logger.info(f'{q[0]}')
logger.info(f'{r[0]}')
logger.info(f'{neg_r[0]}\n')
batch_start_time = datetime.datetime.now()
if batch_counter % 100 == 0:
logger.info(f'Running batch #{batch_counter}')
cost_mean_batch = model.finetune(question=q, answer=r, context=r, \
neg_answer=neg_r, neg_answer_context=neg_r, \
margin=CONFIG.margin, loss=CONFIG.loss_type)
cost_mean_total += cost_mean_batch
batch_end_time = datetime.datetime.now()
if batch_counter == 0 and i == 0:
len_training_triplets = sum([len(train_idxes) for kb, train_idxes in train_dict.items()])
num_batches = len_training_triplets // len(q)
logger.info(f'Training batches of size: {len(q)}')
logger.info(f'Number of batches per epoch: {num_batches}')
logger.info(f'Time taken for first batch: {batch_end_time - batch_start_time}')
if batch_counter == num_batches:
break
batch_counter += 1
epoch_overall_eval, eval_dict = eval_model(model, df, test_dict)
epoch_eval_score = epoch_overall_eval.loc['Across_all_kb','mrr_score']
print(epoch_eval_score)
logger.info(f'Number of batches trained: {batch_counter}')
logger.info(f'Loss for Epoch #{i}: {cost_mean_total}')
logger.info(f'Eval Score for Epoch #{i}: {epoch_eval_score}')
epoch_end_time = datetime.datetime.now()
logger.info(f'Time taken for Epoch #{i}: {epoch_end_time - epoch_start_time}')
# Save model for first epoch
if i == 0:
lowest_cost = cost_mean_total
highest_epoch_eval_score = epoch_eval_score
best_epoch = i
earlystopping_counter = 0
best_model_path = os.path.join(MODEL_BEST_DIR, str(i))
if os.path.exists(best_model_path) : shutil.rmtree(best_model_path)
os.makedirs(best_model_path)
model.export_encoder(best_model_path)
# Model checkpoint
if epoch_eval_score > highest_epoch_eval_score:
best_epoch = i
lowest_cost = cost_mean_total
highest_epoch_eval_score = epoch_eval_score
best_model_path = os.path.join(MODEL_BEST_DIR, str(i))
if os.path.exists(best_model_path) : shutil.rmtree(best_model_path)
os.makedirs(best_model_path)
model.export_encoder(best_model_path)
logger.info(f'Saved best model with cost of {lowest_cost} for Epoch #{i}')
logger.info(f'Saved best model with eval score of {highest_epoch_eval_score} for Epoch #{i}')
earlystopping_counter = 0
else:
# Activate early stopping counter
earlystopping_counter += 1
# Early stopping
if earlystopping_counter == CONFIG.early_stopping_steps:
logger.info("Early stop executed")
model.export_encoder(MODEL_LAST_DIR)
break
epoch_end_time = datetime.datetime.now()
logger.info(f'Time Taken for Epoch #{i}: {epoch_end_time - epoch_start_time}')
logger.info(f'Average time Taken for each batch: {(epoch_end_time - epoch_start_time)/batch_counter}')
# Restore best model. User will have to define path to model if only eval is done.
logger.info("Restoring model")
if CONFIG.task_type == 'train_eval':
model.restore_encoder(os.path.join(MODEL_BEST_DIR, str(best_epoch)))
else:
if CONFIG.eval_model_dir:
model.restore_encoder(CONFIG.eval_model_dir)
else:
logger.info("Using out-of-box model")
pass
"""
EVAL MODEL
"""
logger.info("Evaluating model")
overall_eval, eval_dict = eval_model(model, df, test_dict)
print("="*10 + ' OOB ' + "="*10)
print(OOB_overall_eval)
print("="*10 + ' FINETUNED ' + "="*10)
print(overall_eval)
# save the scores and details for later evaluation. WARNING: User will need to create the necessary directories to save df
overall_eval.to_excel(EVAL_SCORE_PATH)
with open(EVAL_DICT_PATH, 'wb') as handle:
pickle.dump(eval_dict, handle)
"""
SAVE MODEL IN MINIO
"""
minio = MinioClient(CONFIG.MINIO_URL, CONFIG.MINIO_ACCESS_KEY, CONFIG.MINIO_SECRET_KEY)
tar = tarfile.open("weights.tar.gz", mode="w:gz")
tar.add( os.path.join(MODEL_BEST_DIR, str(best_epoch)) )
tar.close()
minio.make_bucket("finetunedweights")
minio.upload_model_weights("finetunedweights",
CONFIG.SAVEDMODELNAME,
"weights.tar.gz")
os.remove("weights.tar.gz")
|
StarcoderdataPython
|
4802520
|
#!/usr/bin/env python
# Set card game
from random import shuffle
colors = ["red", "green", "blue"]
counts = range(1,4)
shapes = ["diamond", "squiggle", "oval"]
shades = ["blank", "filled", "hatched"]
deck = []
class Card:
def __init__(self, col, cnt, shp, shd):
self.col, self.cnt, self.shp, self.shd = col, cnt, shp, shd
def __repr__(self):
return "<%s %s %s %s>" % (self.col, self.cnt, self.shp, self.shd)
for col in colors:
for cnt in counts:
for shape in shapes:
for shd in shades:
deck.append(Card(col, cnt, shape, shd))
print("len(deck)", len(deck))
shuffle(deck)
cards, deck = deck[:12], deck[12:]
attrs = "col cnt shp shd".split()
for c in cards:
print(c)
def is_set(cards):
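# A valid Set (assuming `cards` holds exactly three cards): for every
# attribute, the values are either all the same (1 distinct value) or
# all different (3 distinct values).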
def same_or_diff(attr):
vals = set(getattr(c,attr) for c in cards)
return len(vals) in (1,3)
return all(same_or_diff(a) for a in attrs)
|
StarcoderdataPython
|
1791132
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-19 01:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import functools
import nc.models
import nc.validators
class Migration(migrations.Migration):
dependencies = [
('nc', '0005_auto_20180518_2231'),
]
operations = [
migrations.CreateModel(
name='Asset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=12)),
('color', models.CharField(max_length=6)),
('pic', models.ImageField(blank=True, default=None, null=True, upload_to=functools.partial(nc.models.profile_file_directory_path, *(), **{b'field': 'pic'}), validators=[nc.validators.FileSizeValidator(limit_value=2621440), nc.validators.MimeTypeValidator(allowed_mimetypes=[b'image/x-cmu-raster', b'image/x-xbitmap', b'image/gif', b'image/x-portable-bitmap', b'image/jpeg', b'application/x-hdf', b'application/postscript', b'image/png', b'image/vnd.microsoft.icon', b'image/x-rgb', b'video/mpeg', b'image/x-ms-bmp', b'image/x-xpixmap', b'image/x-portable-graymap', b'image/x-portable-pixmap', b'image/tiff', b'application/pdf'])])),
('issuer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assets_issued', to='nc.Account')),
],
),
]
|
StarcoderdataPython
|
3378179
|
<filename>lib/Topics.py<gh_stars>0
from enum import Enum
class Topics(Enum):
wsWriteMessage = 'wsWriteMessage'
wsReceivedMessage = 'wsReceivedMessage'
wsWriteMessageJSON = 'wsWriteMessageJSON'
wsReceivedMessageJSON = 'wsReceivedMessageJSON'
|
StarcoderdataPython
|
3227289
|
<gh_stars>0
from dataclasses import dataclass
from typing import Optional, Type
from datek_jaipur.application.adapters.base import BaseAdapter
from datek_jaipur.domain.compound_types.game import Game
@dataclass
class Scope:
adapter_class: Type[BaseAdapter]
game: Optional[Game] = None
|
StarcoderdataPython
|
1781361
|
<reponame>CyberZHG/mos-6502-restricted-assembler
from unittest import TestCase
from asm_6502 import Assembler
class TestAssembleLSR(TestCase):
def setUp(self) -> None:
self.assembler = Assembler()
def test_lsr_accumulator(self):
code = "LSR A"
results = self.assembler.assemble(code, add_entry=False)
self.assertEqual([
(0x0000, [0x4A]),
], results)
def test_lsr_zero_page(self):
code = "LSR $10"
results = self.assembler.assemble(code, add_entry=False)
self.assertEqual([
(0x0000, [0x46, 0x10]),
], results)
def test_lsr_zero_page_x(self):
code = "LSR $10,X"
results = self.assembler.assemble(code, add_entry=False)
self.assertEqual([
(0x0000, [0x56, 0x10]),
], results)
def test_lsr_absolute(self):
code = "LSR $ABCD"
results = self.assembler.assemble(code, add_entry=False)
self.assertEqual([
(0x0000, [0x4E, 0xCD, 0xAB]),
], results)
def test_lsr_absolute_indexed_x(self):
code = "LSR $ABCD,X"
results = self.assembler.assemble(code, add_entry=False)
self.assertEqual([
(0x0000, [0x5E, 0xCD, 0xAB]),
], results)
|
StarcoderdataPython
|
100679
|
import itertools
from unittest import skip
from django.core import urlresolvers
from rest_framework.test import APIClient, APIRequestFactory
from rest_framework.test import APITestCase, force_authenticate
from api.tests.factories import (
UserFactory, AnonymousUserFactory, IdentityFactory, ProviderFactory, AllocationSourceFactory,
UserAllocationSourceFactory
)
from api.v2.views import AllocationSourceViewSet as ViewSet
class AllocationSourceTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user_without_sources = UserFactory.create(username='test-username')
self.user_with_sources = UserFactory.create(username='test-username-with-sources')
self.provider = ProviderFactory.create()
self.user_identity = IdentityFactory.create_identity(
created_by=self.user_without_sources,
provider=self.provider)
self.user_identity = IdentityFactory.create_identity(
created_by=self.user_with_sources,
provider=self.provider)
self.allocation_source_1 = AllocationSourceFactory.create(name='TG-TRA110001',
compute_allowed=1000)
self.allocation_source_2 = AllocationSourceFactory.create(name='TG-TRA220002',
compute_allowed=2000)
self.allocation_source_3 = AllocationSourceFactory.create(name='TG-TRA330003',
compute_allowed=3000)
UserAllocationSourceFactory.create(user=self.user_with_sources, allocation_source=self.allocation_source_1)
UserAllocationSourceFactory.create(user=self.user_with_sources, allocation_source=self.allocation_source_2)
def test_can_create_allocation_source(self):
"""Can I even create an allocation source?"""
client = APIClient()
client.force_authenticate(user=self.user_without_sources)
allocation_source = AllocationSourceFactory.create(name='TG-TRA990001',
compute_allowed=9000)
expected_values = {
'name': 'TG-TRA990001',
'compute_allowed': 9000
}
self.assertDictContainsSubset(expected_values, allocation_source.__dict__)
def test_anonymous_user_cant_see_allocation_sources(self):
request_factory = APIRequestFactory()
list_view = ViewSet.as_view({'get': 'list'})
url = urlresolvers.reverse('api:v2:allocationsource-list')
self.assertEqual(url, '/api/v2/allocation_sources')
request = request_factory.get(url)
force_authenticate(request, user=self.anonymous_user)
response = list_view(request)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.status_text, 'Forbidden')
def test_loggedin_user_with_no_sources_cant_see_allocation_sources(self):
request_factory = APIRequestFactory()
list_view = ViewSet.as_view({'get': 'list'})
url = urlresolvers.reverse('api:v2:allocationsource-list')
self.assertEqual(url, '/api/v2/allocation_sources')
request = request_factory.get(url)
force_authenticate(request, user=self.user_without_sources)
response = list_view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_text, 'OK')
self.assertEqual(response.data['count'], 0)
def test_loggedin_user_can_list_allocation_sources(self):
request_factory = APIRequestFactory()
list_view = ViewSet.as_view({'get': 'list'})
url = urlresolvers.reverse('api:v2:allocationsource-list')
self.assertEqual(url, '/api/v2/allocation_sources')
request = request_factory.get(url)
force_authenticate(request, user=self.user_with_sources)
response = list_view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_text, 'OK')
expected_values = [
{
'name': 'TG-TRA110001',
'compute_allowed': 1000
},
{
'name': 'TG-TRA220002',
'compute_allowed': 2000
}
]
self.assertEqual(response.data['count'], len(expected_values))
for allocation_source, expected_dict in itertools.izip_longest(expected_values, response.data['results']):
self.assertDictContainsSubset(allocation_source, expected_dict)
@skip('TODO: Figure out why it fails')
def test_loggedin_user_can_get_allocation_source(self):
request_factory = APIRequestFactory()
retrieve_view = ViewSet.as_view({'get': 'retrieve'})
url = urlresolvers.reverse('api:v2:allocationsource-detail', args=(self.allocation_source_1.id,))
self.assertEqual(url, '/api/v2/allocation_sources/{}'.format(self.allocation_source_1.id))
request = request_factory.get(url)
force_authenticate(request, user=self.user_with_sources)
response = retrieve_view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_text, 'OK')
|
StarcoderdataPython
|
3207973
|
<reponame>peter-wangxu/python_play
import eventlet
from eventlet import wsgi
def app(environ,start_response):
start_response("200 OK",[("Content-Type","text/plain")])
return "Hello World\n"
if __name__ == "__main__":
wsgi.server(eventlet.listen(("localhost",6785)), app)
|
StarcoderdataPython
|
3209910
|
import numpy as np
import matplotlib.pyplot as plt
import visa
import time
import math
class DSO6012A(object):
def __init__(self):
scopeID = "USB0::0x0957::0x1722::MY45002264::INSTR" # For DSO6012A
#scopeID = "USB0::0x0957::0x1798::MY54231293::INSTR" # For DSO-X-2014A
rm = visa.ResourceManager()
self.inst = rm.open_resource(scopeID,read_termination='\n')
def write(self,command):
# print command
return self.inst.write(command)
def getChanData(self,channel):
self.write(":WAVEFORM:SOURCE CHAN"+str(int(channel)))
self.write(":WAVEFORM:FORMAT ASCii")
self.write(":WAVEFORM:DATA?")
data = self.inst.read()
numberOfDigit=int(data[1])
data=data[numberOfDigit+3:]
data = data.split(',')
data = np.array(data)
data = data.astype(np.float)
return data
def getWaveForm(self, channel):
self.write(":DIGITIZE CHANNEL"+str(int(channel)))
data = self.getChanData(channel)
self.write("RUN")
return data
def getAllChanWF(self):
self.write(":VIEW CHANNEL1;:VIEW CHANNEL2;:DIGITIZE")
data1 = self.getChanData(1)
data2 = self.getChanData(2)
self.write("RUN")
return data1,data2
def getPointNumber(self):
self.inst.write(":WAVEFORM:POINTS?")
pointNumber = self.inst.read()
pointNumber = int(pointNumber)
return pointNumber
def acquire(self,channel=None,plot=False,autoscale=True):
if autoscale:
if channel: self.myAutoScale(channel)
else :
self.myAutoScale(1)
self.myAutoScale(2)
x = self.getTimeRangeArray()
if channel:
y1 = self.getWaveForm(channel)
else:
y1,y2 = self.getAllChanWF()
if plot:
plt.plot(x,y1)
if not channel:
plt.plot(x,y2)
plt.show(block=False)
if channel:
table = np.eye(len(x),2)
else: table = np.eye(len(x),3)
table[:,0] = x
table[:,1] = y1
if not channel:
table[:,2] = y2
return table
def getTimeRange(self):
self.inst.write(":TIMEBASE:RANGE?")
timerange = self.inst.read()
timerange = float(timerange)
return timerange
def getTimeRangeArray(self):
pointNumber = self.getPointNumber()
timerange = self.getTimeRange()
x = np.linspace(-timerange/2.,timerange/2.,pointNumber)
return x
def getRange(self, channel):
self.inst.write(":CHANNEL"+str(int(channel))+":RANGE?")
range = self.inst.read()
range = float(range)
print "getRange: "+str(range)
return range
def setRange(self,range,channel):
print "Chan"+str(channel)+" setRange: "+str(range)
self.inst.write(":CHANNEL"+str(int(channel))+":RANGE "+str(range))
self.getRange(channel)
return
def getOffset(self, channel):
self.inst.write(":CHANNEL"+str(int(channel))+":OFFSET?")
offset = self.inst.read()
offset = float(offset)
print "getOffset: "+str(offset)
return offset
def setOffset(self,offset,channel):
print "Chan"+str(channel)+" setOffset: "+str(offset)
self.inst.write(":CHANNEL"+str(int(channel))+":OFFSET "+str(offset))
return
def getMinMax(self,channel):
data = self.getWaveForm(channel)
sigMin = min(data)
sigMax = max(data)
print "min: "+str(sigMin)+" max: "+str(sigMax)+" ampl: "+str(sigMax-sigMin)
return sigMin, sigMax
def getAverage(self,channel,autoscale=False):
if autoscale: self.myAutoScale(channel)
data = self.getWaveForm(channel)
avg = np.mean(data)
print "avg: "+str(avg)
return avg
def myAutoScale(self,channel):
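# Multi-pass autoscale: start from a wide range, fit the range and offset to
# the measured min/max, then tighten to a quantized range with extra headroom
# and re-center the offset around the signal.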
range = 4
offset = 0
self.setRange(range,channel)
self.setOffset(offset,channel)
sigMin, sigMax = self.getMinMax(channel)
range = max(0.1,sigMax-sigMin) #Prevent from narrowing the range too soon
offset = (sigMax+sigMin)/2
self.setRange(range,channel)
self.setOffset(offset,channel)
sigMin, sigMax = self.getMinMax(channel)
range = 1.2*math.ceil(1.2*(sigMax-sigMin)/0.008)*0.008 #Get the minimum range that fits the signal
offset = (sigMax+sigMin)/2
self.setRange(range,channel)
self.setOffset(offset,channel)
sigMin, sigMax = self.getMinMax(channel)
offset = (sigMax+sigMin)/2
self.setOffset(offset,channel)
return
if __name__=='__main__':
scope = DSO6012A()
|
StarcoderdataPython
|
1719429
|
<reponame>Piphi5/MHM-Country-Demo
from datetime import datetime
import numpy as np
import sys, os
import pandas as pd
from arcgis.gis import GIS
from arcgis import features
from arcgis.features import GeoAccessor
from autoupdater.utils import OverwriteFS
temp_layer_name = "Temp_layer"
class Country_Updater:
def __init__(self, protocol, inputid, outputid, username, password):
self.gis = GIS(
url="https://igestrategies.maps.arcgis.com",
username=username,
password=password,
)
self.inputid = inputid
self.outputid = outputid
self.filename = f"{protocol}CountryEnriched.csv"
def get_data(self):
item = self.gis.content.get(itemid=self.inputid)
countries_item = self.gis.content.get(itemid="2b93b06dc0dc4e809d3c8db5cb96ba69")
self.temp_layer = features.analysis.join_features(
target_layer=item,
join_layer=countries_item,
spatial_relationship="intersects",
join_operation="JoinOneToMany",
output_name=temp_layer_name,
)
self.temp_df = GeoAccessor.from_layer(self.temp_layer.layers[0])
self.temp_df = self.temp_df.drop(
labels=["AFF_ISO", "ISO", "Join_Count", "OBJECTID", "TARGET_FID", "SHAPE"],
axis=1,
)
return self.temp_df
def to_csv(self):
self.temp_df.to_csv(self.filename)
def update_layers(self):
self.temp_df.to_csv(self.filename)
item = self.gis.content.get(self.outputid)
overwrite_output = OverwriteFS.overwriteFeatureService(
item, updateFile=self.filename, touchItems=True, verbose=True
)
delete_output = self.temp_layer.delete()
return overwrite_output["success"], delete_output
def run(self):
self.get_data()
self.update_layers()
|
StarcoderdataPython
|
3340208
|
###########################################
###########################################
#### Function to generate reports in Reportlab
###########################################
###########################################
# libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
## report lab
from reportlab.pdfgen import canvas
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.utils import ImageReader
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
# text options
from textwrap import wrap
# import palettes
from pyADVISE import palettes
### Bring in the needed font
pdfmetrics.registerFont(TTFont('Gill Sans MT', 'gil_____.ttf'))
pdfmetrics.registerFont(TTFont('Gill Sans MT Bold', 'gilb____.ttf'))
pdfmetrics.registerFont(TTFont('Gill Sans MT It', 'gili____.ttf'))
palette= palettes.USAID_general()
def inch(a):
# convert a length in inches to ReportLab points (72 points per inch)
return a * 72
# split the notes at the bottom
def split(obs, n):
obs = "\n".join(wrap(obs, n)).split('\n')
return obs
def apply_scripting(textobject, text, rise, font='Gill Sans MT'):
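# Render `text` as a small (6 pt) superscript/subscript raised by `rise`
# points, then restore the normal 12 pt font and baseline for what follows.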
textobject.setFont(font, 6)
textobject.setRise(rise)
textobject.textOut(text)
textobject.setFont(font, 12)
textobject.setRise(0)
########################################
###### simple table
#######################################
def table(c, df, column_w, split_list, height, margin, width, gap, text_font='something', table_ystart=9.19, enter=.24, vert_lines = [3, 3.6, 4.1], font_color = ['Rich Black', 'Rich Black','Rich Black', 'Medium Gray', 'Medium Gray'], font_type = ['', '', '', 'It', 'It'], top_label=False, column_citation = ['footnote_1', None, None, None, None], pic_size=(.25,.25),
second_line=False, table_title='Table', font='Gill Sans MT', line_width=.5, image_folder='C:/Users/alightner/Documents/Shared/pyADVISE/images/',
picture=False, picture_name='Picture', column_labels =['country', 'SSA', 'Year']):
'''Draw a data table on the ReportLab canvas `c` in the style of the
country profile reports, and return the canvas together with the
y-coordinate (in points) where the table ends.'''
palette= palettes.USAID_general()
###################################
### mapping options
####################################
# we only want to plot the non-footnote columns, generate a different dataset, drop witin main
footnotes_df = df[[c for c in df.columns if c.lower()[:-2] == 'footnotes']]
df = df[[c for c in df.columns if c.lower()[:-2] != 'footnotes']]
#### determine the number of enters needed per column
enters_by_row = pd.DataFrame()
for col in range(0, len(list(df.columns))):
# access the values of the column
values = df.iloc[:,col].values
# split the values on the associated split_num
values_split = [split(values[i], split_list[col]) for i in range(0, len(values))]
# generate the lenght (number of enters for each row in each column)
length_of_values = [len(values_split[i]) for i in range(0, len(values_split))]
# add lengths to dataframe
enters_by_row[col] = length_of_values
# generate the max of each row, place into an array
table_gap = enters_by_row.max(axis=1).to_frame().reset_index()[0].values
# for some unknown reason, this only works after changing to a list
table_gap = list(table_gap)
# this var is called table gap in th rest of the code
# adjust for the difference between gaps for line space and between lines of text.
for i in range(0, len(table_gap)):
# because there is a header, I subtract one from the count
a = table_gap[i]-1
# make each extra line of text count as only 45 percent of a full enter
if a>0:
gap = 1+(a*.45)
table_gap[i] = gap
# starting point to be iterated on
y = table_ystart
########################################################
# generate horizontal lines for table
#######################################################
c.setLineWidth(line_width)
c.setFillColor(palette['Rich Black'])
c.setStrokeColor(palette['Rich Black'])
for i in range(0, len(table_gap)):
c.line(inch(margin), inch(y), inch(margin+column_w), inch(y))
#print(table_gap[i])
y= y-(enter*table_gap[i])
# final line outside of the loop
c.line(inch(margin), inch(y), inch(margin+column_w), inch(y))
# mark the end of the table => return w
end_of_table = inch(y)
##############################################
# generate values for the tables
##############################################
#################### formatting choices
# set font
c.setFont(font, size=10, leading = None)
# set the default text indent (in inches) from each column's left edge
indent_value = 0.09
# choose levels of *** the order matters, the order refers to where each line of text will begin relative to the margin.
indent = [indent_value]+ [vert_lines[i]+indent_value for i in range(0,len(vert_lines))]
# generate column_footnotes list for plotting
footnotes_dict = {str(i): footnotes_df['footnotes_'+str(i)].values for i in range(0, len(footnotes_df.columns))}
keys = list(footnotes_dict.keys())
# iterate over each colomn
for col in range(0, len(list(df))):
# select the values in the column of interest
values = df.iloc[:,col].values
# split each value into lines of at most split_list[col] characters, without cutting words apart
values = [split(values[i], split_list[col]) for i in range(0, len(values))]
#################################
#### Font Settings
#################################
### Select the font type
if font_type[col]=='It':
c.setFont(font+' It', size=10, leading = None)
elif font_type[col]=='Bold':
c.setFont(font+' Bold', size=10, leading = None)
else:
c.setFont(font, size=10, leading = None)
# Select color of the text
c.setFillColor(palette[font_color[col]])
c.setStrokeColor(palette[font_color[col]])
############################
###### place text
############################
# choose where the text starts relative to the first line in the table
y_s = table_ystart-0.16
# loop over each row in a particular column (for longer rows values)
for i in range(0, len(values)):
# number of rows
lines = len(values)
# place text in the respective row (think about generalizing this in the future -- now it just works)
if lines==1:
n = 0
# for the one value we have.
for g in values[i]:
# generate text with superscripting
textobject = c.beginText()
textobject.setTextOrigin(inch(margin+indent[col]),inch(y_s-(n*0.65*enter)))
textobject.textOut(g[0])
try:
apply_scripting(textobject, footnotes_dict[keys[col]][i], 4)
except (KeyError, IndexError):
# no footnote marker for this cell
pass
c.drawText(textobject)
n +=1
y_s = y_s-(enter*table_gap[i])
if lines>1:
n = 0
for f in range(0, len(values[i])):
# generate text with superscripting
textobject = c.beginText()
textobject.setTextOrigin(inch(margin+indent[col]),inch(y_s-(n*0.53*enter)))
textobject.textOut(values[i][f])
try:
if f == len(values[i])-1:
apply_scripting(textobject, footnotes_dict[keys[col]][i], 4)
except (KeyError, IndexError):
# no footnote marker for this cell
pass
c.drawText(textobject)
n +=1
y_s = y_s-(enter*table_gap[i])
########### draw column lines
column1 = vert_lines
c.setFillColor(palette['Rich Black'])
c.setStrokeColor(palette['Rich Black'])
for i in range(0, len(column1)):
c.line(inch(margin+column1[i]), inch(table_ystart),inch(margin+column1[i]), end_of_table)
if second_line == True:
# column1 is a list of x-offsets; use its first entry for the optional second rule
c.line(inch(margin+column1[0]+1.35), inch(table_ystart), inch(margin+column1[0]+1.35), inch(table_ystart-(enter*4)))
########### draw title
c.setFont(font+' Bold', size=12, leading = None)
c.setFillColor(palette['Rich Black'])
c.setStrokeColor(palette['Rich Black'])
c.drawString(inch(margin+pic_size[1]+0.08+0.2), inch(table_ystart+.075), table_title)
########### draw table labels
c.setFont(font+' Bold', size=8.5, leading = None)
if top_label ==True:
for i in range(0, len(column_labels)):
c.drawString(inch(margin+vert_lines[i]+0.05), inch(table_ystart+.075), column_labels[i])
############ place the visuals
############ default is the image folder in pyADVISE
if picture ==True:
image = image_folder+picture_name
c.drawImage(image, inch(margin+.08), inch(table_ystart+.03), width=np.round(inch(pic_size[1])), height=np.round(inch(pic_size[0])))
# return end of the table for reference for next table or plot
return c, end_of_table
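# Illustrative (commented-out) sketch of a call to `table`; the DataFrame
# contents and the layout numbers below are assumptions for demonstration only,
# and the Gill Sans TTF files registered at the top of this module must be
# available for the canvas fonts to load.
#
#     c = Canvas('profile.pdf')   # Canvas is imported at the top of this module
#     df = pd.DataFrame({
#         'indicator': ['GDP growth (%)', 'Population (millions)'],
#         'country': ['4.0', '18.6'],
#         'region': ['2.9', 'n/a'],
#         'footnotes_0': ['4', ''],
#         'footnotes_1': ['', ''],
#         'footnotes_2': ['', ''],
#     })
#     c, end_y = table(c, df, column_w=4.5, split_list=[30, 8, 8],
#                      height=11, margin=0.5, width=8.5, gap=0.2,
#                      table_title='Economic Growth')
#     c.save()
#
# The returned `end_y` (in points) marks where the table ends, so the next
# table or chart can be positioned below it on the same page.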
#####################################
###### USAID Header
#####################################
def USAID_header(c, height, margin, column_w, gap, width, country='Malawi', date='July 2018',):
palette= palettes.USAID_general()
# set current color, every fill color will be this color after this point until changed
c.setFillColor(palette['Medium Blue'])
c.setStrokeColor(palette['Medium Blue'])
# set the top of the box circle
top_box = height-0.7
# blue top
c.rect(0, inch(top_box), inch(9), inch(top_box), fill=1)
#blue line
c.setLineWidth(2)
c.line(0, inch(top_box-.55), inch(9), inch(top_box-.55))
c.setLineWidth(1)
# grey box
c.setFillColor(palette['Light Gray'])
c.setStrokeColor(palette['Light Gray'])
c.rect(inch(margin), inch(9.7), inch(margin+column_w-0.5), inch(.6), fill=1)
# title and country
c.setFont('OpenSans-Light', size=30, leading = None)
c.setFillColor(palette['White'])
c.setStrokeColor(palette['White'])
c.drawString(inch(margin+0.12), inch(top_box+.15), 'COUNTRY PROFILE')
c.setFillColor(palette['White'])
c.setStrokeColor(palette['White'])
c.setFont('OpenSans-Light', size=12, leading = None)
#c.drawString(inch(margin+0.12), inch(top_box+.15), 'COUNTRY PROFILE')
c.drawRightString(inch(width-margin), inch(top_box+.15), 'USAID Data Services (EADS)')
c.setFillColor(palette['Medium Blue'])
c.setStrokeColor(palette['Medium Blue'])
c.setFont('OpenSans-Bold', size=24, leading = None)
c.drawString(inch(margin+0.12), inch(top_box-.4), country.upper())
c.setFillColor(palette['Medium Blue'])
c.setStrokeColor(palette['Medium Blue'])
c.setFont('OpenSans-Bold', size=15, leading = None)
c.drawRightString(inch(width-margin), inch(top_box-.4), date.upper())
return c
def USAID_footer_text_page1(c, location=(150, 60), font='OpenSans-Light', size=8):
# begin the text object
textobject = c.beginText()
# place the text object
textobject.setTextOrigin(location[0], location[1])
# set font for the text options
textobject.setFont(font, size=size, leading = None)
textobject.textLines('''
Prepared by USAID Data Services with data from the International Data and Economic Analysis
website (https://idea.usaid.gov/). DISCLAIMER: The views expressed in this publication do not necessarily reflect
the views of the United States Agency for International Development (USAID) or the United States Government.
''')
c.drawText(textobject)
return c
def SDG_footer_text_page2(c, location=(150, 60), font='OpenSans-Light', size=8):
# set palette
c.setFillColor(palette['Rich Black'])
c.setStrokeColor(palette['Rich Black'])
# begin the text object
textobject = c.beginText()
# place the text object
textobject.setTextOrigin(location[0], location[1])
# set font for the text options
textobject.setFont(font, size=size, leading = None)
textobject.textLines('''
Sources: 1. Regions based on USAID classifications.; 2. World Bank, World Development Indicators (WDI); 3. Calculated by Data Services, based on World Bank, World Development Indicators; 4. International
Monetary Fund (IMF), World Economic Outlook Database (WEO); 5. World Economic Forum (WEF), Enabling Trade Index; 6. U.S. International Trade Commission (USITC), Trade DataWeb; 7. Food and Agri-
cultural Organization (FAO), FAOSTAT Land and Fertilizer Data; 8. World Economic Forum (WEF), Global Competitiveness Index; 9. Notre Dame Climate Adaptation Initiative (ND-GAIN) Country Index; 10. UN
Office for Disaster Risk Reduction (UNISDR), Global Assessment Report on Disaster Risk Reduction; 11. CIESIN and Yale, Environmental Performance Index (EPI); 12. Demographic and Health Surveys (DHS),
STATcompiler; 13. Food and Agricultural Organization (FAO), AQUASTAT; 14. WHO/UNICEF, Joint Monitoring Programme (JMP) for Water Supply, Sanitation, and Hygiene; 15. World Economic Forum (WEF),
Networked Readiness Index; 16. World Bank, Millennium Development Goals; 17. World Bank, Enterprise Surveys; 18. World Bank, Enabling the Business of Agriculture; 19. International Telecommun-
ication Union (ITU), World Telecommunication/ICT Indicators Database'''
)
c.drawText(textobject)
return c
#####################################
###### SDG Header
#####################################
def SDG_header(c, gray_list, height, margin, column_w, gap, width, country='Malawi', date='July 2018',
title_text='SUSTAINABLE DEVELOPMENT PROFILE', subtitle_text='PREPARED BY USAID DATA SERVICES'):
# set current color, every fill color will be this color after this point until changed
c.setFillColor(palette['Medium Blue'])
c.setStrokeColor(palette['Medium Blue'])
# set the top of the box circle
top_box = height-1
# blue top
c.rect(0, inch(top_box), inch(9), inch(top_box), fill=1)
#blue line
c.setLineWidth(1)
c.line(0, inch(top_box-.4), inch(9), inch(top_box-.4))
c.setLineWidth(1)
#####################
# grey box
#####################
gray_start = top_box-1.1
gray_height = top_box -gray_start- 0.45
c.setFillColor(palette['Light Gray'])
c.setStrokeColor(palette['Light Gray'])
c.rect(inch(margin), inch(gray_start), inch(margin+column_w-0.25), inch(gray_height), fill=1)
# gray texts = Region, Subregion, Income group
c.setFont('Gill Sans MT Bold', size=10, leading = None)
text_start = (gray_start + gray_height)
c.setFillColor(palette['Rich Black'])
c.setStrokeColor(palette['Rich Black'])
labels = ['blank(index at 0)', 'Region¹', 'Subregion', 'Income Group']
for i in [1,2,3]:
c.setFont('Gill Sans MT Bold', size=10, leading = None)
c.drawString(inch(margin+.14),inch(text_start+.09-(0.22*i)), labels[i])
c.setFont('Gill Sans MT', size=10, leading = None)
c.drawString(inch(margin+1.8),inch(text_start+.09-(0.22*i)), gray_list[i-1])
##############################
# title and country
################################
c.setFont('Gill Sans MT', size=33.5, leading = None)
c.setFillColor(palette['White'])
c.setStrokeColor(palette['White'])
c.drawString(inch(margin+0.02), inch(top_box+.48), title_text)
c.setFillColor(palette['White'])
c.setStrokeColor(palette['White'])
c.setFont('OpenSans-Light', size=12, leading = None)
c.drawString(inch(margin+0.02), inch(top_box+.17), subtitle_text)
c.setFillColor(palette['Medium Blue'])
c.setStrokeColor(palette['Medium Blue'])
c.setFont('Gill Sans MT Bold', size=24, leading = None)
c.drawString(inch(margin+0.02), inch(top_box-.33), country.upper())
c.setFont('Gill Sans MT Bold', size=15, leading = None)
c.drawRightString(inch(width-margin), inch(top_box-.26), date.upper())
return c
def SDG_header_page2(c, country, date, height, margin, width):
# usaid palette
palette= palettes.USAID_general()
# set start_point for the text
start = height-.3
# write country
c.setFillColor(palette['Medium Blue'])
c.setStrokeColor(palette['Medium Blue'])
c.setFont('Gill Sans MT Bold', size=14, leading = None)
c.drawString(inch(margin+0.02), inch(start), country.upper())
# write date
c.setFont('Gill Sans MT Bold', size=15, leading = None)
c.drawRightString(inch(width-margin), inch(start), date.upper())
return c
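# ------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module).
# It shows one way the header/footer helpers above might be combined on a
# single page. Assumptions: the module's own inch() unit helper is available,
# the 'OpenSans-*' fonts used above have already been registered with
# reportlab's pdfmetrics, and the page size, margins and output file name
# below are illustrative only.
# ------------------------------------------------------------------
if __name__ == '__main__':
    from reportlab.pdfgen import canvas

    page_w, page_h = 9.0, 11.5  # assumed page size in inches
    c = canvas.Canvas('profile_sketch.pdf',
                      pagesize=(inch(page_w), inch(page_h)))
    c = USAID_header(c, height=page_h, margin=0.4, column_w=4.0, gap=0.2,
                     width=page_w, country='Malawi', date='July 2018')
    c = USAID_footer_text_page1(c, location=(150, 60))
    c.showPage()
    c.save()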
|
StarcoderdataPython
|
3361346
|
<gh_stars>1-10
#!/usr/bin/env python
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
""" Manager for running the test library
This library offer a simple way to launch test.
To run a test/class of test/test file/module of test/...
you just have to launch
test_manager.run(NAME)
or
test_manager.run(LIST_OF_NAME)
the NAME can contain regular expression (in python re standard format)
"""
import sys
if not sys.version_info[0] == 2 or sys.version_info[1] < 6:
sys.exit('MadGraph5_aMC@NLO works only with python 2.6 or later (but not python 3.X).\n\
Please upgrade your version of python.')
import inspect
import tarfile
import logging
import logging.config
import optparse
import os
import re
import unittest
import time
import datetime
import shutil
import glob
from functools import wraps
#Add the ROOT dir to the current PYTHONPATH
root_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
sys.path.insert(0, root_path)
# Only for profiling with -m cProfile!
#root_path = os.path.split(os.path.dirname(os.path.realpath(sys.argv[0])))[0]
#sys.path.append(root_path)
import tests.IOTests
import aloha
import aloha.aloha_lib as aloha_lib
from madgraph import MG4DIR
from madgraph.interface.extended_cmd import Cmd
from madgraph.iolibs.files import cp, ln, mv
import madgraph.various.misc as misc
#position of MG_ME
MGME_dir = MG4DIR
colored = "\x1b[1;%dm%s\x1b[0m"
IOTestManager = tests.IOTests.IOTestManager
path = os.path
pjoin = path.join
_file_path = os.path.dirname(os.path.realpath(__file__))
_input_file_path = path.abspath(os.path.join(_file_path,'input_files'))
_hc_comparison_files = pjoin(_input_file_path,'IOTestsComparison')
_hc_comparison_tarball = pjoin(_input_file_path,'IOTestsComparison.tar.bz2')
_hc_comparison_modif_log = pjoin(_input_file_path,'IOTestsRefModifs.log')
class MyTextTestRunner(unittest.TextTestRunner):
bypassed = []
def run(self, test):
"Run the given test case or test suite."
keyboardstop=False
MyTextTestRunner.stream = self.stream
result = self._makeResult()
startTime = time.time()
try:
test(result)
except KeyboardInterrupt:
keyboardstop=True
pass
except:
raise
stopTime = time.time()
timeTaken = float(stopTime - startTime)
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.writeln("failures=%d)" % failed)
self.stream.writeln(' '.join([str(t[0]).split()[0] for t in result.failures]))
if errored:
if failed:
self.stream.write("FAILED ( ")
self.stream.writeln(" errors=%d)" % errored)
self.stream.writeln(' '.join([str(t[0]).split()[0] for t in result.errors]))
else:
self.stream.writeln("OK")
if self.bypassed:
self.stream.writeln("Bypassed %s:" % len(self.bypassed))
self.stream.writeln(" ".join(self.bypassed))
if keyboardstop:
self.stream.writeln("Some of the tests Bypassed due to Ctrl-C")
return result
def run_border(self, test, to_check):
"Run the given test case or test suite."
MyTextTestRunner.stream = self.stream
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = float(stopTime - startTime)
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
#self.stream.writeln("Ran %d test%s in %.3fs" %
# (run, run != 1 and "s" or "", timeTaken))
#self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
print to_check
to_check= to_check.rsplit('.',1)[1]
print to_check
if result.failures:
print 'fail', to_check,[str(R[0]) for R in result.failures]
if result.errors:
print 'errors',to_check,[str(R[0]) for R in result.errors]
if any(to_check in str(R[0]) for R in result.failures) or\
any(to_check in str(R[0]) for R in result.errors):
sys.exit(0)
#else:
# self.stream.writeln("OK")
#if self.bypassed:
# self.stream.writeln("Bypassed %s:" % len(self.bypassed))
# self.stream.writeln(" ".join(self.bypassed))
return result
#===============================================================================
# run
#===============================================================================
def run(expression='', re_opt=0, package='./tests/unit_tests', verbosity=1,
timelimit=[0,0]):
""" running the test associated to expression. By default, this launch all
test inherited from TestCase. Expression can be the name of directory,
module, class, function or event standard regular expression (in re format)
"""
#init a test suite
testsuite = unittest.TestSuite()
collect = unittest.TestLoader()
TestSuiteModified.time_limit = float(timelimit[1])
TestSuiteModified.mintime_limit = float(timelimit[0])
for test_fct in TestFinder(package=package, expression=expression, \
re_opt=re_opt):
data = collect.loadTestsFromName(test_fct)
assert(isinstance(data,unittest.TestSuite))
data.__class__ = TestSuiteModified
testsuite.addTest(data)
output = MyTextTestRunner(verbosity=verbosity).run(testsuite)
if TestSuiteModified.time_limit < 0:
ff = open(pjoin(root_path,'tests','time_db'), 'w')
ff.write('\n'.join(['%s %s' % a for a in TestSuiteModified.time_db.items()]))
ff.close()
return output
#import tests
#print 'runned %s checks' % tests.NBTEST
#return out
#===============================================================================
# run
#===============================================================================
def run_border_search(to_crash='',expression='', re_opt=0, package='./tests/unit_tests', verbosity=1,
timelimit=[0,0]):
""" running the test associated to expression one by one. and follow them by the to_crash one
up to the time that to_crash is actually crashing. Then the run stops and print the list of the
routine tested. Then the code re-run itself(via a fork) to restrict the list.
The code stops when the list is of order 1. The order of the test is randomize at each level!
"""
#init a test suite
collect = unittest.TestLoader()
TestSuiteModified.time_limit = float(timelimit[1])
TestSuiteModified.mintime_limit = float(timelimit[0])
all_test = TestFinder(package=package, expression=expression, re_opt=re_opt)
import random
random.shuffle(all_test)
print "to_crash"
to_crash = TestFinder(package=package, expression=to_crash, re_opt=re_opt)
to_crash.collect_dir(package, checking=True)
for test_fct in all_test:
testsuite = unittest.TestSuite()
data = collect.loadTestsFromName(test_fct)
assert(isinstance(data,unittest.TestSuite))
data.__class__ = TestSuiteModified
testsuite.addTest(data)
data = collect.loadTestsFromName(to_crash[0])
assert(isinstance(data,unittest.TestSuite))
data.__class__ = TestSuiteModified
testsuite.addTest(data)
# Running it
print "run it for %s" % test_fct
output = MyTextTestRunner(verbosity=verbosity).run_border(testsuite, to_crash[0])
return output
#import tests
#print 'runned %s checks' % tests.NBTEST
#return out
#===============================================================================
# listIOTests
#===============================================================================
def listIOTests(arg=['']):
""" Listing the IOtests associated to expression and returning them as a
list of tuples (folderName,testName).
"""
if len(arg)!=1 or not isinstance(arg[0],str):
print "Exactly one argument, and in must be a string, not %s."%arg
return
arg=arg[0]
IOTestManager.testFolders_filter = arg.split('/')[0].split('&')
IOTestManager.testNames_filter = arg.split('/')[1].split('&')
IOTestManager.filesChecked_filter = '/'.join(arg.split('/')[2:]).split('&')
all_tests = []
# The version below loads the tests, so it is slow because all tests are set up
# and therefore loaded. The other method is however less accurate because it
# might be that the reference files have not been generated yet
# for IOTestsClass in IOTestFinder():
# IOTestsClass().setUp()
# all_tests = IOTestManager.all_tests.keys()
# Extract the tarball for hardcoded comparison if necessary
if not path.isdir(_hc_comparison_files):
if path.isfile(_hc_comparison_tarball):
tar = tarfile.open(_hc_comparison_tarball,mode='r:bz2')
tar.extractall(path.dirname(_hc_comparison_files))
tar.close()
else:
os.makedirs(_hc_comparison_files)
# We look through the uncompressed tarball for the name of the folders and
# test. It is however less accurate since it might be that some test
# reference folders have not been generated yet
for dirPath in glob.glob(path.join(_hc_comparison_files,"*")):
if path.isdir(dirPath):
folderName=path.basename(dirPath)
for testPath in glob.glob(path.join(_hc_comparison_files,\
folderName,"*")):
if path.isdir(testPath):
all_tests.append((folderName,path.basename(testPath)))
return all_tests
#===============================================================================
# runIOTests
#===============================================================================
def runIOTests(arg=[''],update=True,force=0,synchronize=False):
""" running the IOtests associated to expression. By default, this launch all
the tests created in classes inheriting IOTests.
"""
# Update the tarball, while removing the .backups.
def noBackUps(tarinfo):
if tarinfo.name.endswith('.BackUp'):
return None
else:
return tarinfo
if synchronize:
print "Please, prefer updating the reference file automatically "+\
"rather than by hand."
tar = tarfile.open(_hc_comparison_tarball, "w:bz2")
tar.add(_hc_comparison_files, \
arcname=path.basename(_hc_comparison_files), filter=noBackUps)
tar.close()
# I am too lazy to work out the difference with the existing tarball and
# put it in the log. So this is why one should refrain from editing the
# reference files by hand.
text = " \nModifications performed by hand on %s at %s in"%(\
str(datetime.date.today()),misc.format_timer(0.0)[14:])
text += '\n MadGraph5_aMC@NLO v. %(version)s, %(date)s\n'%misc.get_pkg_info()
log = open(_hc_comparison_modif_log,mode='a')
log.write(text)
log.close()
print "INFO:: Ref. tarball %s updated"%str(_hc_comparison_tarball)
return
if len(arg)!=1 or not isinstance(arg[0],str):
print "Exactly one argument, and in must be a string, not %s."%arg
return
arg=arg[0]
# Extract the tarball for hardcoded comparison if necessary
if not path.isdir(_hc_comparison_files):
if path.isfile(_hc_comparison_tarball):
tar = tarfile.open(_hc_comparison_tarball,mode='r:bz2')
tar.extractall(path.dirname(_hc_comparison_files))
tar.close()
else:
os.makedirs(_hc_comparison_files)
# Make a backup of the comparison file directory in order to revert it if
# the user wants to ignore the changes detected (only when updating the refs)
hc_comparison_files_BackUp = _hc_comparison_files+'_BackUp'
if update and path.isdir(_hc_comparison_files):
if path.isdir(hc_comparison_files_BackUp):
shutil.rmtree(hc_comparison_files_BackUp)
shutil.copytree(_hc_comparison_files,hc_comparison_files_BackUp)
IOTestManager.testFolders_filter = arg.split('/')[0].split('&')
IOTestManager.testNames_filter = arg.split('/')[1].split('&')
IOTestManager.filesChecked_filter = '/'.join(arg.split('/')[2:]).split('&')
#print "INFO:: Using folders %s"%str(IOTestManager.testFolders_filter)
#print "INFO:: Using test names %s"%str(IOTestManager.testNames_filter)
#print "INFO:: Using file paths %s"%str(IOTestManager.filesChecked_filter)
# Initiate all the IOTests from all the setUp()
IOTestsInstances = []
start = time.time()
for IOTestsClass in IOTestFinder():
# Instantiate the class
IOTestsInstances.append(IOTestsClass())
# Run the setUp
IOTestsInstances[-1].setUp()
# Find the testIO defined and use them in load mode only, we will run
# them later here.
IOTestsFunctions = IOTestFinder()
IOTestsFunctions.collect_function(IOTestsClass,prefix='testIO')
if len(IOTestsFunctions) ==0:
continue
for IOTestFunction in IOTestsFunctions:
start = time.time()
# Add all the tests automatically (i.e. bypass filters) if the
# specified test is the name of the IOtest. the [7:] is to
# skip the testIO prefix
name_filer_bu = None
if IOTestFunction.split('.')[-1][7:] in \
IOTestManager.testNames_filter:
name_filer_bu = IOTestManager.testNames_filter
IOTestManager.testNames_filter = ['ALL']
existing_tests = IOTestManager.all_tests.keys()
eval('IOTestsInstances[-1].'+IOTestFunction.split('.')[-1]+\
'(load_only=True)')
if name_filer_bu:
new_tests = [test[0] for test in IOTestManager.all_tests.keys() \
if test not in existing_tests]
IOTestManager.testNames_filter = name_filer_bu + new_tests
name_filer_bu = None
setUp_time = time.time() - start
if setUp_time > 0.5:
print colored%(34,"Loading IOtest %s is slow (%s)"%
(colored%(32,'.'.join(IOTestFunction.split('.')[-3:])),
colored%(34,'%.2fs'%setUp_time)))
if len(IOTestsInstances)==0:
print "No IOTest found."
return
# runIOTests cannot be made a classmethod, so I use an instance, but it does
# not matter which one as no instance attribute will be used.
try:
modifications = IOTestsInstances[-1].runIOTests( update = update,
force = force, verbose=True, testKeys=IOTestManager.all_tests.keys())
except KeyboardInterrupt:
if update:
# Remove the BackUp of the reference files.
if not path.isdir(hc_comparison_files_BackUp):
print "\nWARNING:: Update interrupted and modifications already "+\
"performed could not be reverted."
else:
shutil.rmtree(_hc_comparison_files)
mv(hc_comparison_files_BackUp,_hc_comparison_files)
print colored%(34,
"\nINFO:: Update interrupted, existing modifications reverted.")
sys.exit(0)
else:
print "\nINFO:: IOTest runs interrupted."
sys.exit(0)
tot_time = time.time() - start
if modifications == 'test_over':
print colored%(32,"\n%d IOTests "%len(IOTestManager.all_tests.keys()))+\
"successfully tested in %s."%(colored%(34,'%.2fs'%tot_time))
sys.exit(0)
elif not isinstance(modifications,dict):
print "Error during the files update."
sys.exit(0)
if len(modifications['missing'])>0:
text = '\n'
text += colored%(31,
"The following files were not generated by the tests, fix this!")
text += '\n'+'\n'.join([" %s"%mod for mod in modifications['missing']])
print text
modifications['missing'] = []
if sum(len(v) for v in modifications.values())>0:
# Display the modifications
text = colored%(34, " \nModifications performed on %s at %s in"%(\
str(datetime.date.today()),misc.format_timer(0.0)[14:]))
text += colored%(34,
'\n MadGraph5_aMC@NLO v. %(version)s, %(date)s\n'%misc.get_pkg_info())
for key in modifications.keys():
if len(modifications[key])==0:
continue
text += colored%(32,"The following reference files have been %s :"%key)
text += '\n'+'\n'.join([" %s"%mod for mod in modifications[key]])
text += '\n'
print text
try:
answer = Cmd.timed_input(question=
"Do you want to apply the modifications listed above? [y/n] >",default="y")
except KeyboardInterrupt:
answer = 'n'
if answer == 'y':
log = open(_hc_comparison_modif_log,mode='a')
log.write(text)
log.close()
if IOTestManager._compress_ref_fodler:
tar = tarfile.open(_hc_comparison_tarball, "w:bz2")
tar.add(_hc_comparison_files, \
arcname=path.basename(_hc_comparison_files), filter=noBackUps)
tar.close()
print colored%(32,"INFO:: tarball %s updated"%str(_hc_comparison_tarball))
else:
print colored%(32,"INFO:: Reference %s updated"%\
str(os.path.basename(_hc_comparison_files)))
if len(modifications['created'])>0:
print colored%(31,"Some ref. files have been created; add "+\
"them to the revision with\n "+
"bzr add tests/input_files/IOTestsComparison")
# Make sure to remove the BackUp files
filelist = glob.glob(os.path.join(_hc_comparison_files,
'*','*','*.BackUp'))
for f in filelist:
os.remove(f)
else:
if path.isdir(hc_comparison_files_BackUp):
shutil.rmtree(_hc_comparison_files)
shutil.copytree(hc_comparison_files_BackUp,_hc_comparison_files)
print colored%(32,"INFO:: No modifications applied.")
else:
print colored%(31,
"ERROR:: Could not revert the modifications. No backup found.")
else:
print colored%(32,"\nNo modifications performed. No update necessary.")
# Remove the BackUp of the reference files.
if path.isdir(hc_comparison_files_BackUp):
shutil.rmtree(hc_comparison_files_BackUp)
class TimeLimit(Exception): pass
#===============================================================================
# TestSuiteModified
#===============================================================================
class TestSuiteModified(unittest.TestSuite):
""" This is a wrapper for the default implementation of unittest.TestSuite
so that we can add the decorator for the resetting of the global variables
everytime the TestSuite is __call__'ed., hence avoiding side effects from
them."""
time_limit = 1
mintime_limit=0
time_db = {}
stop_eval = False # bypass all following test when this is on True (but those in preserve)
@tests.IOTests.set_global()
def __call__(self, *args, **kwds):
bypass= []
to_preserve=[]
# if 'TESTLHEParser' in str(self):
# TestSuiteModified.stop_eval = False
if any(name in str(self) for name in bypass):
MyTextTestRunner.stream.write('s')
return
if TestSuiteModified.stop_eval and \
all(name not in str(self) for name in to_preserve):
MyTextTestRunner.stream.write('s')
return
time_db = TestSuiteModified.time_db
time_limit = TestSuiteModified.time_limit
mintime_limit = TestSuiteModified.mintime_limit
if not time_db and time_limit > 0:
if not os.path.exists(pjoin(root_path, 'tests','time_db')):
TestSuiteModified.time_limit = -1
else:
#for line in open(pjoin(root_path, 'tests','time_db')):
# print line.split()
TestSuiteModified.time_db = dict([(' '.join(line.split()[:-1]), float(line.split()[-1]))
for line in open(pjoin(root_path, 'tests','time_db'))
])
time_db = TestSuiteModified.time_db
if str(self) in time_db and (time_db[str(self)] > abs(time_limit) or\
time_db[str(self)] < abs(mintime_limit)):
if any(name in str(self) for name in to_preserve):
MyTextTestRunner.stream.write('T->R:')
# TestSuiteModified.stop_eval = True
else:
MyTextTestRunner.stream.write('T')
#print dir(self._tests[0]), type(self._tests[0]),self._tests[0]
# MyTextTestRunner.bypassed.append(str(self._tests[0]).split()[0])
return
start = time.time()
super(TestSuiteModified,self).__call__(*args,**kwds)
if not str(self) in time_db:
TestSuiteModified.time_db[str(self)] = time.time() - start
TestSuiteModified.time_limit *= -1
#===============================================================================
# TestFinder
#===============================================================================
class TestFinder(list):
""" Class introspecting the test module to find the available test.
The routine collect_dir looks in all module/file to find the different
functions in different test class. This produce a list, on which external
routines can loop on.
In order to authorize definition and loop on this object on the same time,
i.e: for test in TestFinder([opt])-. At each time a loop is started,
we check if a collect_dir ran before, and run it if necessary.
"""
search_class = unittest.TestCase
class TestFinderError(Exception):
"""Error associated to the TestFinder class."""
pass
def __init__(self, package='tests/', expression='', re_opt=0):
""" initialize global variable for the test """
list.__init__(self)
self.package = package
self.rule = []
if self.package[-1] != '/':
self.package += '/'
self.restrict_to(expression, re_opt)
self.launch_pos = ''
def _check_if_obj_build(self):
""" Check if a collect is already done
Uses to have smart __iter__ and __contain__ functions
"""
if len(self) == 0:
start = time.time()
self.collect_dir(self.package, checking=True)
print 'loading test takes %ss' % (time.time()-start)
def __iter__(self):
""" Check that a collect was performed (do it if needed) """
self._check_if_obj_build()
return list.__iter__(self)
def __contains__(self, value):
""" Check that a collect was performed (do it if needed) """
self._check_if_obj_build()
return list.__contains__(self, value)
def collect_dir(self, directory, checking=True):
""" Find the file and the subpackage in this package """
#ensures that we are at root position
move = False
if self.launch_pos == '':
move = True
self.go_to_root()
for name in os.listdir(os.path.join(root_path,directory)):
local_check = checking
status = self.status_file(os.path.join(root_path, directory,name))
#directory + '/' + name)
if status is None:
continue
if checking:
if self.check_valid(directory + '/' + name):
local_check = False #since now perform all the test
if status == 'file':
self.collect_file(directory + '/' + name, local_check)
elif status == "module":
self.collect_dir(directory + '/' + name, local_check)
if move:
self.go_to_initpos()
def collect_file(self, filename, checking=True):
""" Find the different class instance derivated of TestCase """
start = time.time()
pyname = self.passin_pyformat(filename)
__import__(pyname)
obj = sys.modules[pyname]
#look at class
for name in dir(obj):
class_ = getattr(obj, name)
if inspect.isclass(class_) and \
issubclass(class_, unittest.TestCase):
if checking:
if self.check_valid(name):
check_inside = False
else:
check_inside = True
else:
check_inside = False
self.collect_function(class_, checking=check_inside, \
base=pyname)
time_to_load = time.time() - start
if time_to_load > 0.1:
logging.critical("file %s takes a long time to load (%.4fs)" % (pyname, time_to_load))
def collect_function(self, class_, checking=True, base='', prefix='test'):
"""
Find the different test function in this class
test functions should start with test
"""
if not inspect.isclass(class_):
raise self.TestFinderError, 'wrong input class_'
if not issubclass(class_, unittest.TestCase):
raise self.TestFinderError, 'wrong input class_'
# build up the full dotted name
if base:
base += '.' + class_.__name__
else:
base = class_.__name__
candidate = [base + '.' + name for name in dir(class_) if \
name.startswith(prefix)\
and inspect.ismethod(eval('class_.' + name))]
if not checking:
self += candidate
else:
self += [name for name in candidate if self.check_valid(name)]
def restrict_to(self, expression, re_opt=0):
"""
store in global the expression to fill in order to be a valid test
"""
if isinstance(expression, list):
pass
elif isinstance(expression, basestring):
if expression in '':
expression = ['.*'] #made an re authorizing all regular name
else:
expression = [expression]
else:
raise self.TestFinderError, 'obj should be list or string'
self.rule = []
for expr in expression:
#fix the beginning/end of the regular expression
if not expr.startswith('^'):
expr = '^' + expr
if not expr.endswith('$'):
expr = expr + '$'
self.rule.append(re.compile(expr, re_opt))
def check_valid(self, name):
""" check if the name correspond to the rule """
if not isinstance(name, basestring):
raise self.TestFinderError, 'check valid take a string argument'
for specific_format in self.format_possibility(name):
for expr in self.rule:
if expr.search(specific_format):
return True
return False
@staticmethod
def status_file(name):
""" check if a name is a module/a python file and return the status """
if os.path.isfile(os.path.join(root_path, name)):
if name.endswith('.py') and '__init__' not in name:
return 'file'
elif os.path.isdir(os.path.join(root_path, name)):
if os.path.isfile(os.path.join(root_path, name , '__init__.py')):
return 'module'
@classmethod
def passin_pyformat(cls, name):
""" transform a relative position in a python import format """
if not isinstance(name, basestring):
raise cls.TestFinderError, 'collect_file takes a file position'
name = name.replace('//', '/') #sanity
#deal with begin/end
if name.startswith('./'):
name = name[2:]
if not name.endswith('.py'):
raise cls.TestFinderError, 'Python files should have .py extension'
else:
name = name[:-3]
if name.startswith('/'):
raise cls.TestFinderError, 'path must be relative'
if '..' in name:
raise cls.TestFinderError, 'relative position with \'..\' is' + \
' not supported for the moment'
#replace '/' by points -> Python format
name = name.replace('/', '.')
#position
return name
def format_possibility(self, name):
""" return the different string derivates from name in order to
scan all the different format authorizes for a restrict_to
format authorizes:
1) full file position
2) name of the file (with extension)
3) full file position whithour extension
4) name of the file (whithout extension)
5) name of the file (but suppose name in python format)
6) if name is a python file, try with a './' and with package pos
"""
def add_to_possibility(possibility, val):
""" add if not exist """
if val not in possibility:
possibility.append(val)
#end local def
#sanity
if name.startswith('./'):
name = name[2:]
name = name.replace('//', '/')
# init with solution #
out = [name]
# add solution 2
new_pos = name.split('/')[-1]
add_to_possibility(out, new_pos)
#remove extension and add solution3 and 6
if name.endswith('.py'):
add_to_possibility(out, './' + name)
add_to_possibility(out, self.package + name)
name = name[:-3]
add_to_possibility(out, name)
#add solution 4
new_pos = name.split('/')[-1]
add_to_possibility(out, new_pos)
#add solution 5
new_pos = name.split('.')[-1]
add_to_possibility(out, new_pos)
return out
def go_to_root(self):
"""
go to the root directory of the module.
This ensures that the script works correctly wherever it is
launched from
"""
#self.launch_pos = os.path.realpath(os.getcwd())
#self.root_path = root_path
#os.chdir(root_path)
def go_to_initpos(self):
"""
go back to the directory from which the script was launched.
This ensures that the script works correctly wherever it is
launched from
"""
#os.chdir(self.launch_pos)
#self.launch_pos = ''
#===============================================================================
# IOTestFinder
#===============================================================================
class IOTestFinder(TestFinder):
""" Class introspecting the test modules to find the available IOTest classes.
The routine collect_dir looks in all module/file to find the different
functions in different test class. This produce a list, on which external
routines can loop on.
In order to authorize definition and loop on this object on the same time,
i.e: for test in TestFinder([opt])-. At each time a loop is started,
we check if a collect_dir ran before, and run it if necessary.
"""
class IOTestFinderError(Exception):
"""Error associated to the TestFinder class."""
pass
def __init__(self, package='tests/', expression='', re_opt=0):
""" initialize global variable for the test """
if expression!='' or re_opt!=0:
raise IOTestFinderError('Only use IOTestFinder for searching for'+\
' all classes')
super(IOTestFinder,self).__init__(package,expression,re_opt)
def collect_file(self, filename, checking=True):
""" Find the different class instance derivated of TestCase """
start = time.time()
pyname = self.passin_pyformat(filename)
__import__(pyname)
obj = sys.modules[pyname]
#look at class
for name in dir(obj):
class_ = getattr(obj, name)
if inspect.isclass(class_) and class_!=tests.IOTests.IOTestManager and \
issubclass(class_, tests.IOTests.IOTestManager):
self.append(class_)
time_to_load = time.time() - start
if time_to_load > 0.1:
logging.critical("file %s takes a long time to load (%.4fs)" % \
(pyname, time_to_load))
if __name__ == "__main__":
help = """
Detailed information about the IOTests at the wiki webpage:
https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/DevelopmentPage/CodeTesting
Use the argument -U to update the hardcoded tests used by the IOTests.
When provided with no argument, it will update everything.
Otherwise it can be called like this:
./test_manager.py -U "folders/testNames/filePaths"
the arguments between '/' are specified according to this format
(For each of the three category, you can use the keyword 'ALL' to select
all of the IOTests in this category)
folders -> "folder1&folder2&folder3&etc..."
testNames -> "testName1&testName2&testName3&etc..."
filePaths -> "filePath1&filePath2&filePath3&etc..."
Notice that the filePath use a file path relative to
the position SubProcess/<P0_proc_name>/ in the output.
You are allowed to use the parent directory specification ".."
You can use the syntax [regexp] instead of a specific filename.
This includes only the files in this directory matching it.
> Ex. '../../Source/DHELAS/[.+\.(inc|f)]' matches any file in DHELAS
with extension .inc or .f
Also, you can prepend '-' to the folder or test name to veto it instead of
selecting it.
> Ex. '-longTest' considers all tests but the one named
'longTest' (syntax not available for filenames).
If you prepend '+' to the folder or test name, then you will include all
items in this category which starts with what follows '+'.
> Ex. '+short' includes all IOTests starting with 'short'
To bypass the monitoring of the modifications of the files with a name of
a file already reviewed, you can use -f. To bypass ALL monitoring, use -F
(this is not recommended).
Finally, you can also just run the tests from here. Same syntax as above,
but use the option -R
And you can quickly run a given IOTest with
./test_manager.py -R testName
or all IOTests of a given group with
./test_manager.py -R -g groupName
You can list all tests in the reference folder with
./test_manager.py -L
while possibly also specifying what kind of test to list with the same
syntax as for -R or -U, i.e.
./test_manager.py -L +short/ALL/ALL
for example.
"""
usage = "usage: %prog [expression1]... [expressionN] [options] "
parser = optparse.OptionParser(usage=usage)
parser.add_option("-v", "--verbose", default=1,
help="defined the verbosity level [%default]")
parser.add_option("-r", "--reopt", type="int", default=0,
help="regular expression tag [%default]")
parser.add_option("-p", "--path", default='tests/unit_tests',
help="position to start the search (from root) [%default]")
parser.add_option("-l", "--logging", default='CRITICAL',
help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
parser.add_option("-F", "--force", action="store_true", default=False,
help="Force the update, bypassing its monitoring by the user")
parser.add_option("-f", "--semiForce", action="store_true", default=False,
help="Bypass monitoring of ref. file only if another ref. file with "+\
"the same name has already been monitored.")
parser.add_option("-U", "--IOTestsUpdate", action="store_true", default=False,
help="Process the IOTests to update them.")
parser.add_option("-R", "--IOTestsRun", action="store_true", default=False,
help="Process the IOTests to Run them.")
parser.add_option("-L", "--ListIOTests", action="store_true", default=False,
help="Lists all IOTests in the reference Folder.")
parser.add_option("-g", "--group", action="store_true", default=False,
help="Specifies you want to run all tests belonging to a group of a "+\
"given name.")
parser.add_option("-s", "--synchronize", action="store_true", default=False,
help="Replace the IOTestsComparison.tar.bz2 tarball with the "+\
"content of the folder IOTestsComparison")
parser.add_option("-t", "--timed", default="Auto",
help="limit the duration of each test. Negative number re-writes the information file.")
parser.add_option("-T", "--mintime", default="0",
help="limit on the minimal duration of each test.")
parser.add_option("", "--border_effect", default=None,
help="Define the test which are sensitive to a border effect, the test will find which test creates this border effect")
parser.add_option("-N", "--notification", default=45,
help="Running time, below which no notification is raised. (-1 for no notification)")
(options, args) = parser.parse_args()
if options.IOTestsUpdate:
options.IOTests = 'U'
elif options.IOTestsRun:
options.IOTests = 'R'
else:
options.IOTests = 'No'
if options.ListIOTests:
options.IOTests = 'L'
if options.IOTests=='No':
if len(args) == 0:
args = ''
else:
if len(args)>1:
print "Specify at most one argument to specify what IOTests to run."
if len(args) == 0:
args = ['ALL/ALL/ALL']
if options.group:
if len(args)==1:
args = ['%s/ALL/ALL'%str(args[0])]
else:
print "Specify the name of the IOTest group you want to run."
else:
specs = args[0].split('/')
if len(specs)==1:
test_name = args[0] if args[0][:7]!='testIO_' else args[0][7:]
args = ['ALL/%s/ALL'%test_name]
elif len(specs)==2:
args = ['%s/%s/ALL'%(specs[0],
(specs[1] if specs[1][:7]!='testIO_' else specs[1][7:]))]
elif len(specs)==3:
args = ['%s/%s/%s'%(specs[0],
(specs[1] if specs[1][:7]!='testIO_' else specs[1][7:]),specs[2])]
else:
print "The IOTest specification can include at most two '/'."
if len(args) == 1 and args[0]=='help':
print help
sys.exit(0)
if options.path == 'U':
options.path = 'tests/unit_tests'
elif options.path == 'P':
options.path = 'tests/parallel_tests'
elif options.path == 'A':
options.path = 'tests/acceptance_tests'
if options.timed == "Auto":
if options.path == 'tests/unit_tests':
options.timed = 1
elif options.path == 'tests/parallel_tests':
options.timed = 400
elif options.path == 'tests/acceptance_tests':
options.timed = 10
else:
options.timed = 0
start_time = time.time()
try:
logging.config.fileConfig(os.path.join(root_path,'tests','.mg5_logging.conf'))
logging.root.setLevel(eval('logging.' + options.logging))
logging.getLogger('madgraph').setLevel(eval('logging.' + options.logging))
logging.getLogger('madevent7').setLevel(eval('logging.' + options.logging))
logging.getLogger('cmdprint').setLevel(eval('logging.' + options.logging))
logging.getLogger('tutorial').setLevel('ERROR')
except:
pass
if options.synchronize and IOTestManager._compress_ref_fodler:
print "The tarball synchronization is not necessary since"+ \
" MadGraph5_aMCatNLO is configured not to compress the references files."
if options.IOTests=='No' and not options.synchronize:
if not options.border_effect:
#logging.basicConfig(level=vars(logging)[options.logging])
output = run(args, re_opt=options.reopt, verbosity=options.verbose, \
package=options.path, timelimit=[options.mintime,options.timed])
else:
output = run_border_search(options.border_effect, args, re_opt=options.reopt, verbosity=options.verbose, \
package=options.path, timelimit=[options.mintime,options.timed])
else:
if options.IOTests=='L':
print "Listing all tests defined in the reference files ..."
print '\n'.join("> %s/%s"%(colored%(34,test[0]),colored%(32,test[1]))
for test in listIOTests(args) if IOTestManager.need(test[0],test[1]))
exit()
if options.force:
force = 10
elif options.semiForce:
force = 1
else:
force = 0
output = runIOTests(args,update=options.IOTests=='U',force=force,
synchronize=options.synchronize)
if 0 < float(options.notification) < time.time()-start_time:
if isinstance(output, unittest.runner.TextTestResult):
run = output.testsRun
failed, errored, skipped = map(len,
(output.failures, output.errors, output.skipped))
output = "run: %s, failed: %s error: %s, skipped: %s" % \
(run, failed, errored, skipped)
misc.apple_notify("tests finished", str(output))
#some example
# run('iolibs')
# run('test_test_manager.py')
# run('./tests/unit_tests/bin/test_test_manager.py')
# run('IOLibsMiscTest')
# run('TestTestFinder')
# run('test_check_valid_on_file')
# run('test_collect_dir.*') # '.*' stands for all possible char (re format)
# python tests/test_manager.py test_decay.py -l INFO|less
|
StarcoderdataPython
|
1704136
|
<reponame>lyarenei/mausmakro<gh_stars>1-10
import unittest
from unittest.mock import patch
from mausmakro.lib.enums import Opcode
from mausmakro.lib.exceptions import LabelException, ParserException
from mausmakro.lib.types import Command, Conditional
from mausmakro.parsing import Parser
# noinspection PyUnresolvedReferences
# Initialized in TestParser.setUp method
def mock_generate_label(_):
mock_generate_label.cnt += 1
return f'fbartest_{mock_generate_label.cnt}'
class TestParser(unittest.TestCase):
def setUp(self) -> None:
mock_generate_label.cnt = 0
def test_comments(self):
filename = 'test_macros/comments.txt'
ins, labels = Parser(filename).parse()
expected_labels = {'foobar': 0}
expected_ins = [
Command(Opcode.LABEL, 'foobar'),
Command(Opcode.WAIT, 1),
Command(Opcode.END),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
@patch.object(Parser, '_generate_label', mock_generate_label)
def test_conditional(self):
filename = 'test_macros/conditional.txt'
ins, labels = Parser(filename).parse()
cond = Conditional(Opcode.IF)
cond.condition = Command(Opcode.FIND, ('image.png', 5))
cond.end_label = 'fbartest_1'
cond.else_label = None
expected_labels = {'foobar': 0, cond.end_label: 3}
expected_ins = [
Command(Opcode.LABEL, 'foobar'),
cond,
Command(Opcode.WAIT, 4),
Command(Opcode.LABEL, cond.end_label),
Command(Opcode.END),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
@patch.object(Parser, '_generate_label', mock_generate_label)
def test_conditional_else(self):
filename = 'test_macros/conditional_else.txt'
ins, labels = Parser(filename).parse()
cond = Conditional(Opcode.IF)
cond.condition = Command(Opcode.FIND, ('image.png', 5))
cond.end_label = 'fbartest_1'
cond.else_label = 'fbartest_2'
expected_labels = {'foobar': 0, cond.else_label: 4, cond.end_label: 6}
expected_ins = [
Command(Opcode.LABEL, 'foobar'),
cond,
Command(Opcode.WAIT, 4),
Command(Opcode.JUMP, cond.end_label),
Command(Opcode.LABEL, cond.else_label),
Command(Opcode.WAIT, 5),
Command(Opcode.LABEL, cond.end_label),
Command(Opcode.END),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
def test_duplicate_label(self):
filename = 'test_macros/duplicate_label.txt'
parser = Parser(filename)
self.assertRaises(ParserException, parser.parse)
def test_duplicate_macro(self):
filename = 'test_macros/duplicate_macros.txt'
parser = Parser(filename)
self.assertRaises(ParserException, parser.parse)
def test_empty(self):
filename = 'test_macros/empty.txt'
self.assertRaises(ParserException, Parser, filename)
def test_empty_macro(self):
filename = 'test_macros/empty_macro.txt'
self.assertRaises(ParserException, Parser, filename)
def test_import_statement(self):
filename = 'test_macros/import_statement.txt'
ins, labels = Parser(filename).parse()
expected_labels = {'foobar': 0, 'foobaz': 4}
expected_ins = [
Command(Opcode.LABEL, 'foobar'),
Command(Opcode.WAIT, 1),
Command(Opcode.CLICK, (1, 1)),
Command(Opcode.END),
Command(Opcode.LABEL, 'foobaz'),
Command(Opcode.WAIT, 1),
Command(Opcode.END),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
def test_indents_newlines(self):
filename = 'test_macros/indents_newlines.txt'
ins, labels = Parser(filename).parse()
expected_labels = {'foobar': 0}
expected_ins = [
Command(Opcode.LABEL, 'foobar'),
Command(Opcode.WAIT, 5),
Command(Opcode.JUMP, 'foobar'),
Command(Opcode.END),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
def test_invalid_name(self):
filename = 'test_macros/invalid_name.txt'
self.assertRaises(ParserException, Parser, filename)
def test_procedure(self):
filename = 'test_macros/procedure.txt'
ins, labels = Parser(filename).parse()
expected_labels = {'foo': 0}
expected_ins = [
Command(Opcode.LABEL, 'foo'),
Command(Opcode.CLICK, (1, 1)),
Command(Opcode.RETURN),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
def test_valid_name(self):
filename = 'test_macros/valid_name.txt'
ins, labels = Parser(filename).parse()
expected_labels = {'fO0-bA_r': 0}
expected_ins = [
Command(Opcode.LABEL, 'fO0-bA_r'),
Command(Opcode.WAIT, 1),
Command(Opcode.END),
]
self.assertListEqual(ins, expected_ins)
self.assertDictEqual(labels, expected_labels)
def test_undefined_label(self):
filename = 'test_macros/undefined_label.txt'
parser = Parser(filename)
parser.parse()
self.assertRaises(LabelException, parser.perform_checks)
|
StarcoderdataPython
|
49154
|
"""Compile, run and lint files."""
import dataclasses
import logging
import os
import pathlib
import shlex
import sys
from functools import partial
from typing import List, Optional
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
from porcupine import get_tab_manager, menubar, tabs
from . import no_terminal, terminal
log = logging.getLogger(__name__)
@dataclasses.dataclass
class CommandsConfig:
compile: str = ""
run: str = ""
lint: str = ""
def get_command(
tab: tabs.FileTab, which_command: Literal["compile", "run", "lint"], basename: str
) -> Optional[List[str]]:
assert os.sep not in basename, f"{basename!r} is not a basename"
commands = tab.settings.get("commands", CommandsConfig)
assert isinstance(commands, CommandsConfig)
template = getattr(commands, which_command)
if not template.strip():
return None
exts = "".join(pathlib.Path(basename).suffixes)
no_ext = pathlib.Path(basename).stem
format_args = {
"file": basename,
"no_ext": no_ext,
"no_exts": basename[: -len(exts)] if exts else basename,
"python": "py" if sys.platform == "win32" else "python3",
"exe": f"{no_ext}.exe" if sys.platform == "win32" else f"./{no_ext}",
}
result = [
part.format(**format_args)
for part in shlex.split(template, posix=(sys.platform != "win32"))
]
return result
def do_something(something: Literal["compile", "run", "compilerun", "lint"]) -> None:
tab = get_tab_manager().select()
assert isinstance(tab, tabs.FileTab)
tab.save()
if tab.path is None:
# user cancelled a save as dialog
return
workingdir = tab.path.parent
basename = tab.path.name
if something == "run":
command = get_command(tab, "run", basename)
if command is not None:
terminal.run_command(workingdir, command)
elif something == "compilerun":
def run_after_compile() -> None:
assert isinstance(tab, tabs.FileTab)
command = get_command(tab, "run", basename)
if command is not None:
terminal.run_command(workingdir, command)
compile_command = get_command(tab, "compile", basename)
if compile_command is not None:
no_terminal.run_command(workingdir, compile_command, run_after_compile)
else:
command = get_command(tab, something, basename)
if command is not None:
no_terminal.run_command(workingdir, command)
def on_new_tab(tab: tabs.Tab) -> None:
if isinstance(tab, tabs.FileTab):
tab.settings.add_option("commands", CommandsConfig())
def setup() -> None:
get_tab_manager().add_tab_callback(on_new_tab)
menubar.get_menu("Run").add_command(label="Compile", command=partial(do_something, "compile"))
menubar.get_menu("Run").add_command(label="Run", command=partial(do_something, "run"))
menubar.get_menu("Run").add_command(
label="Compile and Run", command=partial(do_something, "compilerun")
)
menubar.get_menu("Run").add_command(label="Lint", command=partial(do_something, "compilerun"))
# TODO: disable the menu items when they don't correspond to actual commands
for label in {"Compile", "Run", "Compile and Run", "Lint"}:
menubar.set_enabled_based_on_tab(
f"Run/{label}", (lambda tab: isinstance(tab, tabs.FileTab))
)
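# ------------------------------------------------------------------
# Hedged illustration (editor addition, not part of the plugin).
# get_command() above substitutes {file}, {no_ext}, {no_exts}, {python} and
# {exe} into the per-tab command templates before splitting them with shlex.
# The self-contained sketch below reproduces only that substitution step for a
# hypothetical "hello.c" tab; the template strings are examples, not defaults
# shipped with the plugin.
# ------------------------------------------------------------------
def _expand_template_example() -> None:
    basename = "hello.c"
    exts = "".join(pathlib.Path(basename).suffixes)
    no_ext = pathlib.Path(basename).stem
    format_args = {
        "file": basename,
        "no_ext": no_ext,
        "no_exts": basename[: -len(exts)] if exts else basename,
        "python": "py" if sys.platform == "win32" else "python3",
        "exe": f"{no_ext}.exe" if sys.platform == "win32" else f"./{no_ext}",
    }
    compile_template = "gcc {file} -o {exe}"  # example template
    run_template = "{exe}"                    # example template
    # prints e.g. ['gcc', 'hello.c', '-o', './hello'] and ['./hello'] on Linux
    print([part.format(**format_args) for part in shlex.split(compile_template)])
    print([part.format(**format_args) for part in shlex.split(run_template)])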
|
StarcoderdataPython
|
3268859
|
from flask import Flask, render_template
import pymysql
import folium
from folium import plugins
import netifaces
app = Flask(__name__)
'''
* get_gateway_address() returns the localhost gateway information
* @ https://pypi.org/project/netifaces/
* @ e.g. returns the author's IP address, '192.168.0.10'
'''
def get_gateway_address():
return netifaces.gateways()['default'][2][0]
# Basic DB settings
HOST = get_gateway_address() #'192.168.0.10'
# HOST = 'localhost'
PORT = 3306
USER = "username"
PASSWD = "<PASSWORD>"
CHARSET = "utf8"
db_name = 'travel'
table_name = ['countries_location', 'status']
'''
* Accesses the database and fetches all information from the status table
* @ Uses the pymysql module to access the DB and returns the four regulation values
*   (entry rules, quarantine rules, isolation rules, transit rules)
* @ The cursor object's execute() method sends the written query to the DB server
'''
def get_datas(country: str) -> tuple:
with pymysql.connect(
host = HOST,
port = PORT,
user = USER,
password = PASSWD,
charset = CHARSET,
database = db_name
) as connection:
with connection.cursor() as cursor:
cursor.execute("select * from status where country='{}'".format(country))
entry = list(cursor.fetchall())
return entry[0][2:]
'''
* Accesses the database and fetches all information from the countries_location table
* @ Uses the pymysql module to access the DB and returns the longitude and latitude values
* @ The cursor object's execute() method sends the written query to the DB server
'''
def get_datas_country():
with pymysql.connect(
host = HOST,
port = PORT,
user = USER,
password = PASSWD,
charset = CHARSET,
database = db_name
) as connection:
with connection.cursor() as cursor:
cursor.execute("select * from countries_location")
entry = list(cursor.fetchall())
return entry
# First (main) page of the web site
@app.route('/')
def main():
entry = get_datas_country()
folium_map = folium.Map(
max_bounds= True,
min_zoom= 2,
min_lat= -84,
max_lat= 84,
min_lon= -175,
max_lon= 187,
height= 600,
width= 1200
)
for i in range(0,len(entry)):
country = entry[i][1]
latitude = entry[i][2]
latitude = latitude.replace(" ",".")
latitude = latitude[0: -2]
longitude = entry[i][3]
longitude = longitude.replace(" ",".")
longitude = longitude[0: -2]
# html = '<a href="http://'+HOST+':8282/'+country+'" target="_self">'+country+'</a>'
html = '<a href="http://localhost:8282/'+country+'" target="_self">'+country+'</a>'
folium.Marker(
location=[int(float(latitude)),int(float(longitude))],
tooltip= country,
popup=html,
icon=folium.Icon(color='blue', icon='star')
).add_to(folium_map)
return render_template('map.html', mymap=folium_map._repr_html_())
# Clicking a country name navigates to another page and displays the data stored in the DB
@app.route('/<country>')
def about(country):
print('connected')
tmp = [country]
tmp.append(['입국규정', '검역규정','격리규정','환승규정'])
tmp.append(get_datas(country))
return render_template('test.html', tmp = tmp)
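# ------------------------------------------------------------------
# Editor sketch (hedged): the queries above imply a `status` row of the form
# (id, country, entry_rule, quarantine_rule, isolation_rule, transit_rule),
# since get_datas() drops the first two columns and about() pairs the rest
# with four regulation labels. The column names and values below are
# illustrative assumptions; only the positions are taken from the code.
# ------------------------------------------------------------------
def example_status_row():
    example_row = (1, 'Malawi', 'PCR test required', 'On-arrival screening',
                   '10-day quarantine', 'Transit allowed')
    return example_row[2:]  # what get_datas('Malawi') would return for this row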
app.run(host='0.0.0.0', port=8282, debug=True)
|
StarcoderdataPython
|
4822999
|
from RFEM.initModel import Model
from RFEM.enums import ObjectTypes, SelectedObjectInformation
class ObjectInformation():
# missing def __init__( with definition of self and its variables
# object_type, no, parent_no, information, row_key and result.
def CentreOfGravity(self,
type = ObjectTypes.E_OBJECT_TYPE_MEMBER,
parent_no = 0,
no: int = 1,
coord: str = 'X'):
'''
This function returns the centre of gravity position (X, Y or Z) for a selected object.
Args:
type (enum): Object Type
parent_no (int): Object Parent Number
Note:
(1) A geometric object has, in general, a parent_no = 0
(2) The parent_no parameter becomes significant for example with loads
no (int): The Object Tag
coord (str): Desired global basis vector component of the Centre of Gravity (i.e. X, Y or Z)
'''
self.object_type = type
self.no = no
self.parent_no = parent_no
result = ObjectInformation.__BuildResultsArray(self)
if coord == 'X' or coord.lstrip().rstrip().upper() == 'X':
return result['section'][0].rows[0][0].value
elif coord == 'Y' or coord.lstrip().rstrip().upper() == 'Y':
return result['section'][0].rows[0][1].value
elif coord == 'Z' or coord.lstrip().rstrip().upper() == 'Z':
return result['section'][0].rows[0][2].value
else:
raise Exception('WARNING: The requested coordinate is not recognized. Please provide either "X", "Y" or "Z"')
def MemberInformation(self,
no: int = 1,
information = SelectedObjectInformation.LENGTH):
'''
This function returns further information associated with a member.
Args:
no (int): Member Tag
information (enum): Desired Information (Length / Volume / Mass)
'''
if information.name == 'AREA':
raise Exception ('WARNING: Area information is only relevant for Surface and Volume Information.')
self.object_type = ObjectTypes.E_OBJECT_TYPE_MEMBER
self.no = no
self.parent_no = 0
self.information = information
self.row_key = 2
self.result = ObjectInformation.__BuildResultsArray(self)
return ObjectInformation.__AreaVolumeMassInformationLength(self)
def SurfaceInformation(self,
no: int = 1,
information = SelectedObjectInformation.AREA):
'''
This function returns further information associated with a surface.
Args:
no (int): Surface Tag
information (enum): Desired Information (Area / Volume / Mass)
'''
if information.name == 'LENGTH':
raise Exception ('WARNING: Length information is only relevant for Member Information.')
self.object_type = ObjectTypes.E_OBJECT_TYPE_SURFACE
self.no = no
self.parent_no = 0
self.information = information
self.row_key = 3
self.result = ObjectInformation.__BuildResultsArray(self)
return ObjectInformation.__AreaVolumeMassInformationLength(self)
def SolidInformation(self,
no: int = 1,
information = SelectedObjectInformation.AREA):
'''
This function returns further information associated with a solid.
Args:
no (int): Solid Tag
information (enum): Desired Information (Area / Volume / Mass)
'''
if information.name == 'LENGTH':
raise Exception ('WARNING: Length information is only relevant for Member Information.')
self.object_type = ObjectTypes.E_OBJECT_TYPE_SOLID
self.no = no
self.parent_no = 0
self.information = information
self.row_key = 4
self.result = ObjectInformation.__BuildResultsArray(self)
return ObjectInformation.__AreaVolumeMassInformationLength(self)
def __BuildResultsArray(self):
elements = Model.clientModel.factory.create('ns0:array_of_get_center_of_gravity_and_objects_info_elements_type')
clientObject = Model.clientModel.factory.create('ns0:get_center_of_gravity_and_objects_info_element_type')
clientObject.parent_no = self.parent_no
clientObject.no = self.no
clientObject.type = self.object_type.name
elements.element.append(clientObject)
result = Model.clientModel.service.get_center_of_gravity_and_objects_info(elements)
result = Model.clientModel.dict(result)
return result
def __AreaVolumeMassInformationLength(self):
if self.information.name == "LENGTH" or self.information.name == "AREA":
return self.result['section'][self.row_key].rows[0][0].value
elif self.information.name == "VOLUME":
return self.result['section'][self.row_key].rows[0][1].value
elif self.information.name == "MASS":
return self.result['section'][self.row_key].rows[0][2].value
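# ------------------------------------------------------------------
# Hedged usage sketch (editor addition). As the comment at the top of the class
# notes, __init__ is not defined, so the instance attributes are set inside each
# query method. The calls below assume an RFEM model is already open and
# connected via Model(); the member/surface numbers are placeholders.
# ------------------------------------------------------------------
if __name__ == '__main__':
    info = ObjectInformation()
    # centre of gravity X-coordinate of member no. 1
    cog_x = info.CentreOfGravity(type=ObjectTypes.E_OBJECT_TYPE_MEMBER, no=1, coord='X')
    # length of member no. 1
    length = info.MemberInformation(no=1, information=SelectedObjectInformation.LENGTH)
    # area of surface no. 1
    area = info.SurfaceInformation(no=1, information=SelectedObjectInformation.AREA)
    print(cog_x, length, area)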
|
StarcoderdataPython
|
133614
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import smbus
import time
import datetime
import threading
import crc16
ADDRESS = 0x5c # 7bit address (will be left shifted to add the read write bit)
READ_INT = 10 # [sec], each reading interval is to be grater than 2 sec
LOG_INT = 600 # [sec]
DEBUG_MODE = True
#W_ADDR = ADDRESS << 1 + 0x00 # ADDRESS(7bit) + R/W bit(1bit)
#R_ADDR = ADDRESS << 1 + 0x01 # ADDRESS(7bit) + R/W bit(1bit)
# Print a given message with the date
def printDateMsg(msg):
d = datetime.datetime.today()
print d.strftime('%Y/%m/%d %H:%M:%S') + ' [TRMO] ' + msg
# am2320
class Thermo():
def __init__(self):
self.__i2c_bus = smbus.SMBus(1)
self.__hum = 0.0
self.__tmp = 0.0
self.tu = threading.Thread(target=self.__updateValue)
self.tu.setDaemon(True)
self.tu.start()
self.tl = threading.Thread(target=self.__logValue)
self.tl.setDaemon(True)
self.tl.start()
def __updateValue(self):
while True:
try:
self.__i2c_bus.write_i2c_block_data(ADDRESS, 0x00, []) # wake the sensor from sleep
except:
pass # by design, the sensor does not always return an ACK here
time.sleep(0.001) # Min: 800us
try:
self.__i2c_bus.write_i2c_block_data(ADDRESS,0x03,[0x00,0x04]) # read command
except:
if DEBUG_MODE: printDateMsg("[Error] am2320(1) ")
self.__hum = 0.0 # set to 0.0 when the read fails
self.__tmp = 0.0
time.sleep(READ_INT)
continue
time.sleep(0.002) # Min: 1.5ms
try:
block = self.__i2c_bus.read_i2c_block_data(ADDRESS,0,8) # receive data
except:
if DEBUG_MODE: printDateMsg("[Error] am2320(2) ")
self.__hum = 0.0 # set to 0.0 when the read fails
self.__tmp = 0.0
time.sleep(READ_INT)
continue
# # CRC check
# #block[0] = 0x03
# print(block)
# crc = crc16.crc16(block[0:6])
# print(crc & 0xFF, crc >> 8)
self.__hum = (block[2] << 8 | block[3])/10.0
self.__tmp = (block[4] << 8 | block[5])/10.0
time.sleep(READ_INT)
def __logValue(self):
while True:
time.sleep(LOG_INT)
printDateMsg(self.stringValue())
def getHum(self):
return self.__hum
def getTmp(self):
return self.__tmp
def stringValue(self):
return "Humidity: " + str(self.getHum()) + "%, " \
+ "Temparature: " + str(self.getTmp()) + "℃"
def displayValue(self):
print self.stringValue()
def main_loop():
while True:
thermo.displayValue()
time.sleep(1)
if __name__ == '__main__':
thermo = Thermo()
try:
main_loop()
except KeyboardInterrupt:
print "Keyboard Interrupt"
# finally:
# thermo.stop()
# ============= EOF ======================
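# ------------------------------------------------------------------
# Hedged sketch (editor addition, appended after the EOF marker): a pure-python
# CRC check that could replace the commented-out crc16 block in __updateValue().
# It assumes the AM2320 returns a CRC-16/MODBUS (poly 0xA001, init 0xFFFF) over
# the first 6 bytes, with the low byte sent first in block[6] and block[7];
# verify against the sensor datasheet before relying on it.
# ------------------------------------------------------------------
def crc16_modbus(data):
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            if crc & 0x0001:
                crc = (crc >> 1) ^ 0xA001
            else:
                crc >>= 1
    return crc
def block_crc_ok(block):
    expected = block[6] | (block[7] << 8)  # low byte first (assumed ordering)
    return crc16_modbus(block[0:6]) == expected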
|
StarcoderdataPython
|
3214850
|
# decide which modules the package exports
# __all__ = ['...']
|
StarcoderdataPython
|
3284955
|
<filename>grouper/fe/handlers/permission_view.py
from __future__ import annotations
from typing import TYPE_CHECKING
from grouper.fe.templates import PermissionTemplate
from grouper.fe.util import GrouperHandler
from grouper.usecases.view_permission import ViewPermissionUI
if TYPE_CHECKING:
from grouper.entities.audit_log_entry import AuditLogEntry
from grouper.entities.permission import Permission, PermissionAccess
from typing import Any, List
class PermissionView(GrouperHandler, ViewPermissionUI):
def view_permission_failed_not_found(self, name: str) -> None:
self.notfound()
def viewed_permission(
self,
permission: Permission,
access: PermissionAccess,
audit_log_entries: List[AuditLogEntry],
) -> None:
template = PermissionTemplate(
permission=permission, access=access, audit_log_entries=audit_log_entries
)
self.render_template_class(template)
def get(self, *args: Any, **kwargs: Any) -> None:
name = self.get_path_argument("name")
argument = self.get_argument("argument", None)
usecase = self.usecase_factory.create_view_permission_usecase(self)
usecase.view_permission(
name, self.current_user.username, audit_log_limit=20, argument=argument
)
|
StarcoderdataPython
|
1659625
|
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2021/12/1 7:47 下午
# Description :
"""
import math
from COMMON.model import MLP
import torch
import torch.nn as nn
import os
import numpy as np
import torch.optim as optim
from COMMON.memory import ReplayBuffer
import random
class DQN:
def __init__(self, cfg, state_dim, action_dim):
self.state_dim = state_dim
self.action_dim = action_dim
self.device = cfg.device
self.gamma = cfg.gamma
        self.frame_idx = 0 # frame counter used for epsilon decay
self.epsilon = lambda frame_idx: cfg.epsilon_end + \
(cfg.epsilon_start - cfg.epsilon_end) * math.exp(-1. * frame_idx / cfg.epsilon_decay)
self.batch_size = cfg.batch_size
self.policy_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
self.target_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        # policy (behaviour) network and target (evaluation) network
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()): # copy the policy parameters into the target network
target_param.data.copy_(param.data)
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
self.memory = ReplayBuffer(cfg.memory_capacity)
def choose_action(self, state):
self.frame_idx += 1
if random.random() > self.epsilon(self.frame_idx):
with torch.no_grad():
state = torch.tensor([state], device=self.device, dtype=torch.float32)
q_values = self.policy_net(state)
                action = q_values.max(1)[1].item() # pick the action with the largest Q-value
else:
action = random.randrange(self.action_dim)
return action
def predict(self, state):
with torch.no_grad():
state = torch.tensor([state],device=self.device, dtype=torch.float32)
q_values = self.policy_net(state)
action = q_values.max(1)[1].item()
return action
def update(self):
if len(self.memory) < self.batch_size:
return
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        # convert the sampled batch to tensors
state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
done_batch = torch.tensor(np.float32(done_batch), device=self.device)
q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
next_q_values = self.target_net(next_state_batch).max(1)[0].detach()
        # compute the expected Q-values; for terminal states done_batch = 1, so expected_q_value equals the reward
expected_q_values = reward_batch + self.gamma * next_q_values * (1 - done_batch)
loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))
        # optimize the model
self.optimizer.zero_grad()
loss.backward()
for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1) # in-place clamp; plain clamp() would discard the result
self.optimizer.step()
def save(self, path):
torch.save(self.target_net.state_dict(), os.path.join(path, "dqn_checkpoint.pth"))
def load(self, path):
self.target_net.load_state_dict(torch.load(os.path.join(path,"dqn_checkpoint.pth")))
for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
param.data.copy_(target_param.data)
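# Illustrative usage sketch only (not part of the original module). It assumes the COMMON
# imports at the top of this file are available; cfg is a hypothetical stand-in carrying the
# attribute names read by DQN.__init__, with placeholder values, and no training loop is shown.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(device='cpu', gamma=0.99, epsilon_start=0.95, epsilon_end=0.01,
                          epsilon_decay=500, batch_size=64, hidden_dim=128, lr=1e-3,
                          memory_capacity=10000)
    agent = DQN(cfg, state_dim=4, action_dim=2)
    print('sampled action:', agent.choose_action([0.0, 0.0, 0.0, 0.0]))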
|
StarcoderdataPython
|
3257496
|
# coding=utf-8
from __future__ import unicode_literals, print_function
from pylexibank.dataset import CldfDataset, TranscriptionReport
from pylexibank.cli import _readme
from pylexibank.util import download_and_unpack_zipfiles
from clldutils.path import Path
from pylexibank.lingpy_util import getEvoBibAsSource, iter_alignments
import lingpy as lp
TRANSCRIPTION_REPORT_CFG = dict(column='Segments', segmentized=True)
URL = "https://github.com/SequenceComparison/SupplementaryMaterial/zipball/master"
PATH = Path('SequenceComparison-SupplementaryMaterial-cc4bf85/benchmark/cognates/')
DSETS = ['SLV.csv', 'SIN.csv', 'ROM.csv', 'PIE.csv', 'PAN.csv', 'OUG.csv',
'KSL.csv', 'JAP.csv', 'IEL.csv', 'IDS.csv', 'GER.csv', 'BAI.csv']
sources = ['Starostin2005b', 'Hou2004', 'Starostin2005b', 'Starostin2005b',
'Greenhill2008', 'Zhivlov2011', 'Kessler2001', 'Hattori1973', 'Dunn2012', 'List2014c',
'Starostin2005', 'Wang2006']
correct_languages = {
"Guixian": "Guiyang",
"Berawan (Long Terawan)": "Berawan_Long_Terawan",
"Merina (Malagasy)": "Merina_Malagasy"
}
correct_concepts = {
"ear 1": "ear",
"i": "I",
"lie 1": "lie",
"light": "watery",
"soja sauce": "soya sauce",
"two pairs": "two ounces",
"walk (go)": "walk(go)",
"warm (hot)": "warm",
"gras": "grass",
"saliva (splits)": "saliva (spit)"
}
def download(dataset, **kw):
download_and_unpack_zipfiles(URL, dataset, *[PATH.joinpath(dset) for dset in DSETS])
def cldf(dataset, concepticon, **kw):
gloss2con = {x['GLOSS']: x['CONCEPTICON_ID'] for x in dataset.concepts}
lang2glot = {x['NAME']: x['GLOTTOCODE'] for x in dataset.languages}
for dset, srckey in zip(DSETS, sources):
wl = lp.Wordlist(dataset.raw.joinpath(dset).as_posix())
if 'tokens' not in wl.header:
wl.add_entries('tokens', 'ipa', lp.ipa2tokens, merge_vowels=False,
expand_nasals=True)
src = getEvoBibAsSource(srckey)
with CldfDataset((
'ID',
'Language_ID',
'Language_name',
'Language_iso',
'Parameter_ID',
'Parameter_name',
'Value',
'Source',
'Segments',
'Cognacy',
'Loan'
)
, dataset, subset=dset.split('.')[0]) as ds:
ds.sources.add(src)
errors = []
cognates = []
for k in wl:
concept = wl[k, 'concept']
if '(V)' in concept:
concept = concept[:-4]
concept = correct_concepts.get(concept, concept)
if concept not in gloss2con:
errors += [concept]
doculect = correct_languages.get(wl[k, 'doculect'], wl[k, 'doculect'])
loan = wl[k, 'cogid'] < 0
cogid = abs(wl[k, 'cogid'])
wid = '{0}-{1}'.format(dset.split('.')[0], k)
ds.add_row([
wid,
lang2glot[doculect],
wl[k, 'doculect'],
'',
                    gloss2con.get(concept, ''),
wl[k, 'concept'],
wl[k, 'ipa'],
srckey,
' '.join(wl[k, 'tokens'] or ['']),
cogid,
wl[k, 'loan']
])
cognates.append([
wid,
ds.name,
wl[k, 'ipa'],
cogid,
'borrowed' if loan else '',
'expert',
srckey,
'',
'',
''
])
dataset.cognates.extend(
iter_alignments(lp.Alignments(wl), cognates, method='library'))
for er in sorted(set(errors)):
print(er, dset)
|
StarcoderdataPython
|
120526
|
<filename>multilineage_organoid/utils.py
""" Utility functions used by multiple modules
* :py:func:`lowpass_filter`: Lowpass filter a signal with the filtfilt function
* :py:func:`calc_frequency_domain`: Convert a time domain signal to frequency
"""
# Imports
from typing import Tuple
# 3rd party
import numpy as np
from scipy.signal import butter, filtfilt, welch
# Our own imports
from .consts import FILTER_ORDER, FILTER_CUTOFF, DEBUG_OPTIMIZER
# Functions
def lowpass_filter(signals: np.ndarray,
sample_rate: float,
order: int = FILTER_ORDER,
cutoff: float = FILTER_CUTOFF):
""" Lowpass filter the data
:param ndarray signals:
A t x k array of k signals with t timepoints
:param float sample_rate:
The sample rate for the signals
:param int order:
The order for the butterworth filter
:param float cutoff:
the cutoff in Hz for the filter
"""
nyq = 0.5 * sample_rate
normal_cutoff = cutoff / nyq
if normal_cutoff <= 0.0 or normal_cutoff >= 1.0:
if DEBUG_OPTIMIZER:
print(f'Cannot filter, got -3dB: {normal_cutoff}')
print(f'Nyquist rate: {nyq}')
print(f'Sample rate (Hz): {sample_rate}')
return signals
b, a = butter(order, normal_cutoff, btype='low', analog=False)
filtered_signals = []
for i in range(signals.shape[1]):
signal = signals[:, i]
sigmask = ~np.isnan(signal)
yf = filtfilt(b, a, signal[sigmask])
yfinal = np.full_like(signal, np.nan)
yfinal[sigmask] = yf
filtered_signals.append(yfinal)
return np.stack(filtered_signals, axis=1)
def calc_frequency_domain(time: np.ndarray,
                          signal: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" Calculate the frequency domain data
:param ndarray time:
The time array in seconds
:param ndarray signal:
The signal intensity
:returns:
The frequency array, the power at each frequency
"""
dt = time[1] - time[0]
sample_rate = 1.0 / dt
xf, yf = welch(signal, fs=sample_rate) # Welch's power estimate method
return xf, 10.0 * np.log10(yf)
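# Minimal usage sketch (illustrative only, not part of the original module): filter two
# synthetic sine traces sampled at 100 Hz, then inspect the spectrum of the first one.
if __name__ == '__main__':
    t = np.arange(0, 10, 0.01)
    signals = np.stack([np.sin(2 * np.pi * 1.0 * t), np.sin(2 * np.pi * 3.0 * t)], axis=1)
    filtered = lowpass_filter(signals, sample_rate=100.0)
    freqs, power_db = calc_frequency_domain(t, filtered[:, 0])
    print(filtered.shape, freqs.shape, power_db.shape)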
|
StarcoderdataPython
|
110391
|
<reponame>saper0/scikit-hubness
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.neighbors import NearestCentroid
__all__ = ['NearestCentroid']
|
StarcoderdataPython
|
96158
|
<gh_stars>0
GITHUB_PULLS_PROVIDER_ID = 'github_pulls'
|
StarcoderdataPython
|
4824720
|
<gh_stars>0
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
from typing import Optional
from pip_services3_commons.data import FilterParams, PagingParams, DataPage
from pip_service_data_python.data.EntityV1 import EntityV1
class IEntitiesPersistence(ABC):
@abstractmethod
def __init__(self):
pass
def get_page_by_filter(self, correlation_id: Optional[str], filter_params: FilterParams, paging: PagingParams) -> DataPage:
pass
def get_one_by_id(self, correlation_id: Optional[str], entity_id: str) -> EntityV1:
pass
def get_one_by_name(self, correlation_id: Optional[str], entity_name: str) -> EntityV1:
pass
def create(self, correlation_id: Optional[str], entity: EntityV1) -> EntityV1:
pass
def update(self, correlation_id: Optional[str], entity: EntityV1) -> EntityV1:
pass
def delete_by_id(self, correlation_id: Optional[str], entity_id: str) -> EntityV1:
pass
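# Illustrative in-memory sketch of the interface above (not part of the original module).
# It assumes EntityV1 exposes an `id` attribute; paging, filtering and name lookups are omitted.
class _InMemoryEntitiesPersistenceSketch(IEntitiesPersistence):
    def __init__(self):
        self.__items = {}
    def create(self, correlation_id: Optional[str], entity: EntityV1) -> EntityV1:
        self.__items[entity.id] = entity
        return entity
    def get_one_by_id(self, correlation_id: Optional[str], entity_id: str) -> EntityV1:
        return self.__items.get(entity_id)
    def delete_by_id(self, correlation_id: Optional[str], entity_id: str) -> EntityV1:
        return self.__items.pop(entity_id, None)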
|
StarcoderdataPython
|
36766
|
<filename>qiling/qiling/os/windows/dlls/kernel32/fileapi.py
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import struct, time, os
from shutil import copyfile
from datetime import datetime
from qiling.exception import *
from qiling.os.windows.const import *
from qiling.os.const import *
from qiling.os.windows.fncc import *
from qiling.os.windows.utils import *
from qiling.os.windows.thread import *
from qiling.os.windows.handle import *
from qiling.exception import *
from qiling.os.windows.structs import *
dllname = 'kernel32_dll'
# DWORD GetFileType(
# HANDLE hFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetFileType(ql, address, params):
hFile = params["hFile"]
if hFile == STD_INPUT_HANDLE or hFile == STD_OUTPUT_HANDLE or hFile == STD_ERROR_HANDLE:
ret = FILE_TYPE_CHAR
else:
obj = ql.os.handle_manager.get(hFile)
if obj is None:
raise QlErrorNotImplemented("API not implemented")
else:
            # technically this is not always FILE_TYPE_CHAR, but it almost always is
ret = FILE_TYPE_CHAR
return ret
# HANDLE FindFirstFileA(
# LPCSTR lpFileName,
# LPWIN32_FIND_DATAA lpFindFileData
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_FindFirstFileA(ql, address, params):
filename = params['lpFileName']
pointer = params['lpFindFileData']
if filename == None:
return INVALID_HANDLE_VALUE
elif len(filename) >= MAX_PATH:
return ERROR_INVALID_PARAMETER
target_dir = os.path.join(ql.rootfs, filename.replace("\\", os.sep))
print('TARGET_DIR = %s' % target_dir)
real_path = ql.os.path.transform_to_real_path(filename)
    # Make sure the resolved path actually exists under the emulated rootfs
if not os.path.exists(real_path):
ql.os.last_error = ERROR_FILE_NOT_FOUND
return INVALID_HANDLE_VALUE
# Check if path exists
filesize = 0
try:
f = ql.os.fs_mapper.open(real_path, mode="r")
filesize = os.path.getsize(real_path).to_bytes(8, byteorder="little")
except FileNotFoundError:
ql.os.last_error = ERROR_FILE_NOT_FOUND
return INVALID_HANDLE_VALUE
# Get size of the file
    file_size_low = (int.from_bytes(filesize, "little")) & 0xffffffff # low 32 bits of the file size
file_size_high = (int.from_bytes(filesize, "little") >> 32)
# Create a handle for the path
new_handle = Handle(obj=f)
ql.os.handle_manager.append(new_handle)
# Spoof filetime values
filetime = datetime.now().microsecond.to_bytes(8, byteorder="little")
find_data = Win32FindData(
ql,
FILE_ATTRIBUTE_NORMAL,
filetime, filetime, filetime,
file_size_high, file_size_low,
0, 0,
filename,
0, 0, 0, 0,)
find_data.write(pointer)
ret = new_handle.id
return ret
# HANDLE FindFirstFileExA(
# LPCSTR lpFileName,
# FINDEX_INFO_LEVELS fInfoLevelId,
# FINDEX_SEARCH_OPS fSearchOp,
# LPVOID lpSearchFilter,
# DWORD dwAdditionalFlags
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_FindFirstFileExA(ql, address, params):
pass
# HANDLE FindNextFileA(
# LPCSTR lpFileName,
# LPWIN32_FIND_DATAA lpFindFileData
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_FindNextFileA(ql, address, params):
pass
# BOOL FindClose(
# HANDLE hFindFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_FindClose(ql, address, params):
pass
# BOOL ReadFile(
# HANDLE hFile,
# LPVOID lpBuffer,
# DWORD nNumberOfBytesToRead,
# LPDWORD lpNumberOfBytesRead,
# LPOVERLAPPED lpOverlapped
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_ReadFile(ql, address, params):
ret = 1
hFile = params["hFile"]
lpBuffer = params["lpBuffer"]
nNumberOfBytesToRead = params["nNumberOfBytesToRead"]
lpNumberOfBytesRead = params["lpNumberOfBytesRead"]
lpOverlapped = params["lpOverlapped"]
if hFile == STD_INPUT_HANDLE:
if ql.os.automatize_input:
# TODO maybe insert a good random generation input
s = (b"A" * (nNumberOfBytesToRead - 1)) + b"\x00"
else:
ql.log.debug("Insert input")
s = ql.os.stdin.read(nNumberOfBytesToRead)
slen = len(s)
read_len = slen
if slen > nNumberOfBytesToRead:
s = s[:nNumberOfBytesToRead]
read_len = nNumberOfBytesToRead
ql.mem.write(lpBuffer, s)
ql.mem.write(lpNumberOfBytesRead, ql.pack(read_len))
else:
f = ql.os.handle_manager.get(hFile).obj
data = f.read(nNumberOfBytesToRead)
ql.mem.write(lpBuffer, data)
        ql.mem.write(lpNumberOfBytesRead, ql.pack32(len(data))) # report the number of bytes actually read
return ret
# BOOL WriteFile(
# HANDLE hFile,
# LPCVOID lpBuffer,
# DWORD nNumberOfBytesToWrite,
# LPDWORD lpNumberOfBytesWritten,
# LPOVERLAPPED lpOverlapped
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"hFile": HANDLE,
"lpBuffer": POINTER,
"nNumberOfBytesToWrite": DWORD,
"lpNumberOfBytesWritten": POINTER,
"lpOverlapped": POINTER
})
def hook_WriteFile(ql, address, params):
ret = 1
hFile = params["hFile"]
lpBuffer = params["lpBuffer"]
nNumberOfBytesToWrite = params["nNumberOfBytesToWrite"]
lpNumberOfBytesWritten = params["lpNumberOfBytesWritten"]
lpOverlapped = params["lpOverlapped"]
if hFile == STD_OUTPUT_HANDLE:
s = ql.mem.read(lpBuffer, nNumberOfBytesToWrite)
ql.os.stdout.write(s)
ql.os.utils.string_appearance(s.decode())
ql.mem.write(lpNumberOfBytesWritten, ql.pack(nNumberOfBytesToWrite))
else:
f = ql.os.handle_manager.get(hFile)
if f is None:
# Invalid handle
ql.os.last_error = ERROR_INVALID_HANDLE
return 0
else:
f = f.obj
buffer = ql.mem.read(lpBuffer, nNumberOfBytesToWrite)
f.write(bytes(buffer))
ql.mem.write(lpNumberOfBytesWritten, ql.pack32(nNumberOfBytesToWrite))
return ret
def _CreateFile(ql, address, params, name):
ret = INVALID_HANDLE_VALUE
s_lpFileName = params["lpFileName"]
dwDesiredAccess = params["dwDesiredAccess"]
dwShareMode = params["dwShareMode"]
lpSecurityAttributes = params["lpSecurityAttributes"]
dwCreationDisposition = params["dwCreationDisposition"]
dwFlagsAndAttributes = params["dwFlagsAndAttributes"]
hTemplateFile = params["hTemplateFile"]
# access mask DesiredAccess
mode = ""
if dwDesiredAccess & GENERIC_WRITE:
mode += "wb"
else:
mode += "r"
try:
f = ql.os.fs_mapper.open(s_lpFileName, mode)
except FileNotFoundError:
ql.os.last_error = ERROR_FILE_NOT_FOUND
return INVALID_HANDLE_VALUE
new_handle = Handle(obj=f)
ql.os.handle_manager.append(new_handle)
ret = new_handle.id
return ret
# HANDLE CreateFileA(
# LPCSTR lpFileName,
# DWORD dwDesiredAccess,
# DWORD dwShareMode,
# LPSECURITY_ATTRIBUTES lpSecurityAttributes,
# DWORD dwCreationDisposition,
# DWORD dwFlagsAndAttributes,
# HANDLE hTemplateFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"lpFileName": STRING,
"dwDesiredAccess": DWORD,
"dwShareMode": DWORD,
"lpSecurityAttributes": POINTER,
"dwCreationDisposition": DWORD,
"dwFlagsAndAttributes": DWORD,
"hTemplateFile": HANDLE
})
def hook_CreateFileA(ql, address, params):
ret = _CreateFile(ql, address, params, "CreateFileA")
return ret
# HANDLE CreateFileW(
# LPCWSTR lpFileName,
# DWORD dwDesiredAccess,
# DWORD dwShareMode,
# LPSECURITY_ATTRIBUTES lpSecurityAttributes,
# DWORD dwCreationDisposition,
# DWORD dwFlagsAndAttributes,
# HANDLE hTemplateFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_CreateFileW(ql, address, params):
ret = _CreateFile(ql, address, params, "CreateFileW")
return ret
# DWORD GetTempPathW(
# DWORD nBufferLength,
# LPWSTR lpBuffer
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetTempPathW(ql, address, params):
temp = (ql.os.windir + "Temp" + "\\\x00").encode('utf-16le')
dest = params["lpBuffer"]
temp_path = os.path.join(ql.rootfs, "Windows", "Temp")
if not os.path.exists(temp_path):
os.makedirs(temp_path, 0o755)
ql.mem.write(dest, temp)
return len(temp)
# DWORD GetTempPathA(
# DWORD nBufferLength,
# LPSTR lpBuffer
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"nBufferLength": DWORD,
"lpBuffer": POINTER
})
def hook_GetTempPathA(ql, address, params):
temp = (ql.os.windir + "Temp" + "\\\x00").encode('utf-8')
dest = params["lpBuffer"]
temp_path = os.path.join(ql.rootfs, "Windows", "Temp")
if not os.path.exists(temp_path):
os.makedirs(temp_path, 0o755)
ql.mem.write(dest, temp)
return len(temp)
# DWORD GetShortPathNameW(
# LPCWSTR lpszLongPath,
# LPWSTR lpszShortPath,
# DWORD cchBuffer
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetShortPathNameW(ql, address, params):
paths = params["lpszLongPath"].split("\\")
dst = params["lpszShortPath"]
max_size = params["cchBuffer"]
res = paths[0]
for path in paths[1:]:
nameAndExt = path.split(".")
name = nameAndExt[0]
ext = "" if len(nameAndExt) == 1 else "." + nameAndExt[1]
if len(name) > 8:
name = name[:6] + "~1"
res += "\\" + name + ext
res += "\x00"
res = res.encode("utf-16le")
if max_size < len(res):
return len(res)
else:
ql.mem.write(dst, res)
return len(res) - 1
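# Example of the shortening above (illustrative): "C:\\Program Files\\SomeApplication.exe"
# comes back roughly as "C:\\Progra~1\\SomeAp~1.exe"; note this hook keeps the original case
# and always uses the "~1" suffix, unlike real Windows 8.3 name generation.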
# BOOL GetVolumeInformationW(
# LPCWSTR lpRootPathName,
# LPWSTR lpVolumeNameBuffer,
# DWORD nVolumeNameSize,
# LPDWORD lpVolumeSerialNumber,
# LPDWORD lpMaximumComponentLength,
# LPDWORD lpFileSystemFlags,
# LPWSTR lpFileSystemNameBuffer,
# DWORD nFileSystemNameSize
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetVolumeInformationW(ql, address, params):
root = params["lpRootPathName"]
if root != 0:
pt_volume_name = params["lpVolumeNameBuffer"]
if pt_volume_name != 0:
# TODO implement
volume_name = (ql.os.profile["VOLUME"]["name"] + "\x00").encode("utf-16le")
ql.mem.write(pt_volume_name, volume_name)
lpMaximumComponentLength = params["lpMaximumComponentLength"]
if lpMaximumComponentLength != 0:
ql.mem.write(lpMaximumComponentLength, (255).to_bytes(2, byteorder="little"))
pt_serial_number = params["lpVolumeSerialNumber"]
if pt_serial_number != 0:
# TODO maybe has to be int
serial_number = (ql.os.profile["VOLUME"]["serial_number"] + "\x00").encode("utf-16le")
ql.mem.write(pt_serial_number, serial_number)
pt_system_type = params["lpFileSystemNameBuffer"]
pt_flag = params["lpFileSystemFlags"]
if pt_flag != 0:
# TODO implement
flag = 0x00020000.to_bytes(4, byteorder="little")
ql.mem.write(pt_flag, flag)
if pt_system_type != 0:
system_type = (ql.os.profile["VOLUME"]["type"] + "\x00").encode("utf-16le")
ql.mem.write(pt_system_type, system_type)
else:
raise QlErrorNotImplemented("API not implemented")
return 1
# UINT GetDriveTypeW(
# LPCWSTR lpRootPathName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCWSTR': 'POINTER'})
def hook_GetDriveTypeW(ql, address, params):
path = params["lpRootPathName"]
if path != 0:
if path == ql.os.profile["PATH"]["systemdrive"]:
return DRIVE_FIXED
# TODO add configuration for drives
else:
raise QlErrorNotImplemented("API not implemented")
return DRIVE_NO_ROOT_DIR
# BOOL GetDiskFreeSpaceW(
# LPCWSTR lpRootPathName,
# LPDWORD lpSectorsPerCluster,
# LPDWORD lpBytesPerSector,
# LPDWORD lpNumberOfFreeClusters,
# LPDWORD lpTotalNumberOfClusters
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCWSTR': 'POINTER'})
def hook_GetDiskFreeSpaceW(ql, address, params):
path = params["lpRootPathName"]
if path == ql.os.profile["PATH"]["systemdrive"]:
pt_sectors = params["lpSectorsPerCluster"]
pt_bytes = params["lpBytesPerSector"]
pt_free_clust = params["lpNumberOfFreeClusters"]
pt_total_clust = params["lpTotalNumberOfClusters"]
sectors = ql.os.profile.getint("VOLUME", "sectors_per_cluster").to_bytes(4, byteorder="little")
bytes = ql.os.profile.getint("VOLUME", "bytes_per_sector").to_bytes(4, byteorder="little")
free_clust = ql.os.profile.getint("VOLUME", "number_of_free_clusters").to_bytes(4, byteorder="little")
total_clust = ql.os.profile.getint("VOLUME", "number_of_clusters").to_bytes(4, byteorder="little")
ql.mem.write(pt_sectors, sectors)
ql.mem.write(pt_bytes, bytes)
ql.mem.write(pt_free_clust, free_clust)
ql.mem.write(pt_total_clust, total_clust)
else:
raise QlErrorNotImplemented("API not implemented")
return 0
# BOOL CreateDirectoryA(
# LPCSTR lpPathName,
# LPSECURITY_ATTRIBUTES lpSecurityAttributes
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_CreateDirectoryA(ql, address, params):
path_name = params["lpPathName"]
target_dir = os.path.join(ql.rootfs, path_name.replace("\\", os.sep))
print('TARGET_DIR = %s' % target_dir)
real_path = ql.os.path.transform_to_real_path(path_name)
    # Create the directory only if it does not already exist under the emulated rootfs
if not os.path.exists(real_path):
os.mkdir(real_path)
return 1
else:
ql.os.last_error = ERROR_ALREADY_EXISTS
return 0
# DWORD GetFileSize(
# HANDLE hFile,
# LPDWORD lpFileSizeHigh
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPDWORD': 'DWORD'})
def hook_GetFileSize(ql, address, params):
try:
handle = ql.handle_manager.get(params['hFile'].file)
return os.path.getsize(handle.name)
except:
ql.os.last_error = ERROR_INVALID_HANDLE
return 0xFFFFFFFF #INVALID_FILE_SIZE
# HANDLE CreateFileMappingA(
# HANDLE hFile,
# LPSECURITY_ATTRIBUTES lpFileMappingAttributes,
# DWORD flProtect,
# DWORD dwMaximumSizeHigh,
# DWORD dwMaximumSizeLow,
# LPCSTR lpName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"hFile": HANDLE,
"lpFileMappingAttributes": POINTER,
"flProtect": DWORD,
"dwMaximumSizeHigh": DWORD,
"dwMaximumSizeLow": DWORD,
"lpName": STRING,
})
def hook_CreateFileMappingA(ql, address, params):
hFile = params['hFile']
lpName = params['lpName']
new_handle = Handle(obj=hFile, name=lpName)
ql.os.handle_manager.append(new_handle)
ret = new_handle.id
return ret
# HANDLE CreateFileMappingW(
# HANDLE hFile,
# LPSECURITY_ATTRIBUTES lpFileMappingAttributes,
# DWORD flProtect,
# DWORD dwMaximumSizeHigh,
# DWORD dwMaximumSizeLow,
# LPCWSTR lpName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"hFile": HANDLE,
"lpFileMappingAttributes": POINTER,
"flProtect": DWORD,
"dwMaximumSizeHigh": DWORD,
"dwMaximumSizeLow": DWORD,
"lpName": WSTRING,
})
def hook_CreateFileMappingW(ql, address, params):
hFile = params['hFile']
lpName = params['lpName']
new_handle = Handle(obj=hFile, name=lpName)
ql.os.handle_manager.append(new_handle)
ret = new_handle.id
return ret
# LPVOID MapViewOfFile(
# HANDLE hFileMappingObject,
# DWORD dwDesiredAccess,
# DWORD dwFileOffsetHigh,
# DWORD dwFileOffsetLow,
# SIZE_T dwNumberOfBytesToMap
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"hFileMappingObject": HANDLE,
"dwDesiredAccess": DWORD,
"dwFileOffsetHigh": DWORD,
"dwFileOffsetLow": DWORD,
"dwNumberOfBytesToMap": DWORD
})
def hook_MapViewOfFile(ql, address, params):
hFileMappingObject = params['hFileMappingObject']
dwFileOffsetLow = params['dwFileOffsetLow']
dwNumberOfBytesToMap = params['dwNumberOfBytesToMap']
map_file_handle = ql.os.handle_manager.search_by_obj(hFileMappingObject)
if map_file_handle is None:
ret = ql.os.heap.alloc(dwNumberOfBytesToMap)
new_handle = Handle(obj=hFileMappingObject, name=ret)
ql.os.handle_manager.append(new_handle)
else:
ret = map_file_handle.name
hFile = ql.os.handle_manager.get(hFileMappingObject).obj
if ql.os.handle_manager.get(hFile):
f = ql.os.handle_manager.get(hFile).obj
        if hasattr(f, "read") and hasattr(f, "seek"): # duck-type check for a file-like handle
f.seek(dwFileOffsetLow, 0)
data = f.read(dwNumberOfBytesToMap)
ql.mem.write(ret, data)
return ret
# BOOL UnmapViewOfFile(
# LPCVOID lpBaseAddress
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"lpBaseAddress": POINTER
})
def hook_UnmapViewOfFile(ql, address, params):
lpBaseAddress = params['lpBaseAddress']
map_file_hande = ql.os.handle_manager.search(lpBaseAddress)
if not map_file_hande:
return 0
ql.os.heap.free(map_file_hande.name)
ql.os.handle_manager.delete(map_file_hande.id)
return 1
# BOOL CopyFileA(
# LPCSTR lpExistingFileName,
# LPCSTR lpNewFileName,
# BOOL bFailIfExists
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"lpExistingFileName": STRING,
"lpNewFileName": STRING,
"bFailIfExists": DWORD
})
def hook_CopyFileA(ql, address, params):
lpExistingFileName = ql.os.path.transform_to_real_path(params["lpExistingFileName"])
lpNewFileName = ql.os.path.transform_to_real_path(params["lpNewFileName"])
bFailIfExists = params["bFailIfExists"]
if bFailIfExists and os.path.exists(lpNewFileName):
return 0
copyfile(lpExistingFileName, lpNewFileName)
return 1
# BOOL SetFileAttributesA(
# LPCSTR lpFileName,
# DWORD dwFileAttributes
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
"lpFileName": STRING,
"dwFileAttributes": DWORD
})
def hook_SetFileAttributesA(ql, address, params):
return 1
|
StarcoderdataPython
|
1604797
|
<filename>scripts/slave/recipe_modules/syzygy/chromium_config.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['chromium'].CONFIG_CTX
from recipe_engine.config_types import Path
SYZYGY_SLN = Path('[CHECKOUT]', 'syzygy', 'syzygy.sln')
# The common bits of configuration shared across all valid Syzygy
# configurations. This is included in _syzygy_msvs and _syzygy_ninja,
# exactly one of which is included in each valid usable configuration.
@CONFIG_CTX(includes=['msvs2013'])
def _syzygy_base(c):
c.project_generator.tool = 'gyp'
# We don't use a component build, so remove the GYP define.
c.gyp_env.GYP_DEFINES.pop('component', None)
@CONFIG_CTX(includes=['msvs', '_syzygy_base'])
def _syzygy_msvs(dummy_c):
pass
@CONFIG_CTX(includes=['ninja', '_syzygy_base'])
def _syzygy_ninja(c):
# Generate MSVS projects as well for ease of debugging on the bot.
c.gyp_env.GYP_GENERATORS.add('ninja')
c.gyp_env.GYP_GENERATORS.add('msvs-ninja')
# Inject a Ninja no-op build confirmation step.
c.compile_py.ninja_confirm_noop = True
# The common bits of configuration shared by continuous builder
# configurations: syzygy, syzygy_msvs.
@CONFIG_CTX()
def _syzygy_continuous(c):
assert 'official_build' not in c.gyp_env.GYP_DEFINES
c.compile_py.default_targets.clear()
c.compile_py.default_targets.add('build_all')
# Configuration to be used by continuous builders: Debug, Release and Coverage.
@CONFIG_CTX(includes=['_syzygy_ninja', '_syzygy_continuous'])
def syzygy(dummy_c):
pass
# Configuration to be used by continuous builders: Debug, Release and Coverage.
# Currently this is only used by the Debug builder to ensure at least one bot
# continues to build with MSVS.
@CONFIG_CTX(includes=['_syzygy_msvs', '_syzygy_continuous'])
def syzygy_msvs(c):
c.compile_py.solution = SYZYGY_SLN
@CONFIG_CTX(includes=['_syzygy_ninja'],
config_vars={'BUILD_CONFIG': 'Release'})
def syzygy_official(c):
c.compile_py.clobber = True
c.compile_py.default_targets.clear()
c.compile_py.default_targets.add('official_build')
c.gyp_env.GYP_DEFINES['official_build'] = 1
@CONFIG_CTX(includes=['_syzygy_ninja'],
config_vars={'BUILD_CONFIG': 'Release'})
def kasko_official(c):
c.compile_py.clobber = True
c.compile_py.default_targets.clear()
c.compile_py.default_targets.add('official_kasko_build')
c.gyp_env.GYP_DEFINES['official_build'] = 1
|
StarcoderdataPython
|
1728102
|
<reponame>BenSchZA/aquarius
# Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import copy
import json
import pytest
from aquarius.constants import BaseURLs
from aquarius.run import app
app = app
@pytest.fixture
def base_ddo_url():
return BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo'
@pytest.fixture
def client_with_no_data():
client = app.test_client()
client.delete(BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo')
yield client
@pytest.fixture
def client():
client = app.test_client()
client.delete(BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo')
post = client.post(BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo',
data=json.dumps(json_update),
content_type='application/json')
if post.status_code not in (200, 201):
raise AssertionError(f'register asset failed: {post}')
post2 = client.post(BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo',
data=json.dumps(json_dict),
content_type='application/json')
yield client
client.delete(
BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo/%s' % json.loads(post.data.decode('utf-8'))['id'])
client.delete(
BaseURLs.BASE_AQUARIUS_URL + '/assets/ddo/%s' % json.loads(post2.data.decode('utf-8'))[
'id'])
json_dict = {
"@context": "https://w3id.org/did/v1",
"id": "did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430",
"created": "2019-05-22T08:44:27Z",
"publicKey": [
{
"id": "did:op:<KEY>",
"type": "EthereumECDSAKey",
"owner": "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e"
}
],
"authentication": [
{
"type": "RsaSignatureAuthentication2018",
"publicKey": "did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430"
}
],
"service": [
{
"type": "authorization",
"serviceEndpoint": "http://localhost:12001",
"service": "SecretStore",
"index": 0
},
{
"type": "access",
"serviceEndpoint": "http://localhost:8030/api/v1/brizo/services/consume",
"purchaseEndpoint": "http://localhost:8030/api/v1/brizo/services/access/initialize",
"index": 1,
"templateId": "0x208aca4B0316C9996F085cbD57E01c11Bc0E7cb1",
"name": "dataAssetAccessServiceAgreement",
"creator": "",
"serviceAgreementTemplate": {
"contractName": "EscrowAccessSecretStoreTemplate",
"events": [
{
"name": "AgreementCreated",
"actorType": "consumer",
"handler": {
"moduleName": "escrowAccessSecretStoreTemplate",
"functionName": "fulfillLockRewardCondition",
"version": "0.1"
}
}
],
"fulfillmentOrder": [
"lockReward.fulfill",
"accessSecretStore.fulfill",
"escrowReward.fulfill"
],
"conditionDependency": {
"lockReward": [],
"accessSecretStore": [],
"escrowReward": [
"lockReward",
"accessSecretStore"
]
},
"conditions": [
{
"name": "lockReward",
"timelock": 0,
"timeout": 0,
"contractName": "LockRewardCondition",
"functionName": "fulfill",
"events": [
{
"name": "Fulfilled",
"actorType": "publisher",
"handler": {
"moduleName": "lockRewardCondition",
"functionName": "fulfillAccessSecretStoreCondition",
"version": "0.1"
}
}
],
"parameters": [
{
"name": "_rewardAddress",
"type": "address",
"value": "0x2AaC920AA4D10b80db9ed0E4EC04A3ff612F2bc6"
},
{
"name": "_amount",
"type": "uint256",
"value": "888000000000000000000000000000000"
}
]
},
{
"name": "accessSecretStore",
"timelock": 0,
"timeout": 0,
"contractName": "AccessSecretStoreCondition",
"functionName": "fulfill",
"events": [
{
"name": "Fulfilled",
"actorType": "publisher",
"handler": {
"moduleName": "accessSecretStore",
"functionName": "fulfillEscrowRewardCondition",
"version": "0.1"
}
},
{
"name": "TimedOut",
"actorType": "consumer",
"handler": {
"moduleName": "accessSecretStore",
"functionName": "fulfillEscrowRewardCondition",
"version": "0.1"
}
}
],
"parameters": [
{
"name": "_documentId",
"type": "bytes32",
"value": "0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430"
},
{
"name": "_grantee",
"type": "address",
"value": ""
}
]
},
{
"name": "escrowReward",
"timelock": 0,
"timeout": 0,
"contractName": "EscrowReward",
"functionName": "fulfill",
"events": [
{
"name": "Fulfilled",
"actorType": "publisher",
"handler": {
"moduleName": "escrowRewardCondition",
"functionName": "verifyRewardTokens",
"version": "0.1"
}
}
],
"parameters": [
{
"name": "_amount",
"type": "uint256",
"value": "888000000000000000000000000000000"
},
{
"name": "_receiver",
"type": "address",
"value": ""
},
{
"name": "_sender",
"type": "address",
"value": ""
},
{
"name": "_lockCondition",
"type": "bytes32",
"value": ""
},
{
"name": "_releaseCondition",
"type": "bytes32",
"value": ""
}
]
}
]
}
},
{
"type": "metadata",
"serviceEndpoint": "http://localhost:5000/api/v1/aquarius/assets/ddo/did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430",
"attributes": {
"main": {
"name": "Ocean protocol white paper",
"type": "dataset",
"dateCreated": "2012-10-10T17:00:00Z",
"datePublished": "2012-10-10T17:00:00Z",
"author": "Ocean Protocol Foundation Ltd.",
"license": "CC-BY",
"price": "888000000000000000000000000000000",
"files": [
{
"checksum": "efb2c764274b745f5fc37f97c6b0e761",
"contentType": "text/csv",
"checksumType": "MD5",
"contentLength": "4535431",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932",
"index": 0
},
{
"checksum": "efb2c764274b745f5fc37f97c6b0e761",
"contentType": "text/csv",
"contentLength": "4535431",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932",
"index": 1
},
{
"index": 2,
"contentType": "text/csv",
}
]
},
"encryptedFiles": "<tests.resources.mocks.secret_store_mock.SecretStoreMock object at 0x7f8146a94710>.0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430!![{\"url\": \"https://testocnfiles.blob.core.windows.net/testfiles/testzkp.pdf\", \"checksum\": \"efb2c764274b745f5fc37f97c6b0e761\", \"checksumType\": \"MD5\", \"contentLength\": \"4535431\", \"resourceId\": \"access-log2018-02-13-15-17-29-18386C502CAEA932\"}, {\"url\": \"s3://ocean-test-osmosis-data-plugin-dataseeding-1537375953/data.txt\", \"checksum\": \"efb2c764274b745f5fc37f97c6b0e761\", \"contentLength\": \"4535431\", \"resourceId\": \"access-log2018-02-13-15-17-29-18386C502CAEA932\"}, {\"url\": \"http://ipv4.download.thinkbroadband.com/5MB.zip\"}]!!0",
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"description": "Introduce the main concepts and vision behind ocean protocol",
"copyrightHolder": "Ocean Protocol Foundation Ltd.",
"workExample": "Text PDF",
"inLanguage": "en",
"categories": [
"white-papers"
],
"tags": ["data exchange", "sharing", "curation", "bonding curve"],
"links": [
{
"url": "http://data.ceda.ac.uk/badc/ukcp09/data/gridded-land-obs/gridded-land-obs"
"-daily/"
},
{
"url": "http://data.ceda.ac.uk/badc/ukcp09/data/gridded-land-obs/gridded-land-obs"
"-averages-25km/"
},
{
"url": "http://data.ceda.ac.uk/badc/ukcp09/"
}
],
"updateFrequency": "yearly",
"structuredMarkup": [
{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
},
"index": 2
}
],
"proof": {
"type": "DDOIntegritySignature",
"created": "2019-05-22T08:44:27Z",
"creator": "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e",
"signatureValue": "0xbd7b46b3ac664167bc70ac211b1a1da0baed9ead91613a5f02dfc25c1bb6e3ff40861b455017e8a587fd4e37b703436072598c3a81ec88be28bfe33b61554a471b"
}
}
json_dict2 = {
"@context": "https://w3id.org/did/v1",
"id": "did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430",
"created": "2019-05-22T08:44:27Z",
"publicKey": [
{
"id": "did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430",
"type": "EthereumECDSAKey",
"owner": "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e"
}
],
"authentication": [
{
"type": "RsaSignatureAuthentication2018",
"publicKey": "did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430"
}
],
"service": [
{
"type": "authorization",
"serviceEndpoint": "http://localhost:12001",
"service": "SecretStore",
"index": 0
},
{
"type": "access",
"serviceEndpoint": "http://localhost:8030/api/v1/brizo/services/consume",
"purchaseEndpoint": "http://localhost:8030/api/v1/brizo/services/access/initialize",
"index": 1,
"templateId": "0x208aca4B0316C9996F085cbD57E01c11Bc0E7cb1",
"name": "dataAssetAccessServiceAgreement",
"creator": "",
"serviceAgreementTemplate": {
"contractName": "EscrowAccessSecretStoreTemplate",
"events": [
{
"name": "AgreementCreated",
"actorType": "consumer",
"handler": {
"moduleName": "escrowAccessSecretStoreTemplate",
"functionName": "fulfillLockRewardCondition",
"version": "0.1"
}
}
],
"fulfillmentOrder": [
"lockReward.fulfill",
"accessSecretStore.fulfill",
"escrowReward.fulfill"
],
"conditionDependency": {
"lockReward": [],
"accessSecretStore": [],
"escrowReward": [
"lockReward",
"accessSecretStore"
]
},
"conditions": [
{
"name": "lockReward",
"timelock": 0,
"timeout": 0,
"contractName": "LockRewardCondition",
"functionName": "fulfill",
"events": [
{
"name": "Fulfilled",
"actorType": "publisher",
"handler": {
"moduleName": "lockRewardCondition",
"functionName": "fulfillAccessSecretStoreCondition",
"version": "0.1"
}
}
],
"parameters": [
{
"name": "_rewardAddress",
"type": "address",
"value": "0x2AaC920AA4D10b80db9ed0E4EC04A3ff612F2bc6"
},
{
"name": "_amount",
"type": "uint256",
"value": "888000000000000000000000000000000"
}
]
},
{
"name": "accessSecretStore",
"timelock": 0,
"timeout": 0,
"contractName": "AccessSecretStoreCondition",
"functionName": "fulfill",
"events": [
{
"name": "Fulfilled",
"actorType": "publisher",
"handler": {
"moduleName": "accessSecretStore",
"functionName": "fulfillEscrowRewardCondition",
"version": "0.1"
}
},
{
"name": "TimedOut",
"actorType": "consumer",
"handler": {
"moduleName": "accessSecretStore",
"functionName": "fulfillEscrowRewardCondition",
"version": "0.1"
}
}
],
"parameters": [
{
"name": "_documentId",
"type": "bytes32",
"value": "0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430"
},
{
"name": "_grantee",
"type": "address",
"value": ""
}
]
},
{
"name": "escrowReward",
"timelock": 0,
"timeout": 0,
"contractName": "EscrowReward",
"functionName": "fulfill",
"events": [
{
"name": "Fulfilled",
"actorType": "publisher",
"handler": {
"moduleName": "escrowRewardCondition",
"functionName": "verifyRewardTokens",
"version": "0.1"
}
}
],
"parameters": [
{
"name": "_amount",
"type": "uint256",
"value": "888000000000000000000000000000000"
},
{
"name": "_receiver",
"type": "address",
"value": ""
},
{
"name": "_sender",
"type": "address",
"value": ""
},
{
"name": "_lockCondition",
"type": "bytes32",
"value": ""
},
{
"name": "_releaseCondition",
"type": "bytes32",
"value": ""
}
]
}
]
}
},
{
"type": "metadata",
"serviceEndpoint": "http://localhost:5000/api/v1/aquarius/assets/ddo/did:op:0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430",
"attributes": {
"main": {
"name": "Ocean protocol white paper",
"type": "dataset",
"dateCreated": "2012-10-10T17:00:00Z",
"datePublished": "2012-10-10T17:00:00Z",
"author": "Ocean Protocol Foundation Ltd.",
"license": "CC-BY",
"price": "888000000000000000000000000000000",
"files": [
{
"checksum": "efb2c764274b745f5fc37f97c6b0e761",
"contentType": "text/csv",
"checksumType": "MD5",
"contentLength": "4535431",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932",
"index": 0
},
{
"checksum": "efb2c764274b745f5fc37f97c6b0e761",
"contentType": "text/csv",
"contentLength": "4535431",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932",
"index": 1
},
{
"index": 2,
"contentType": "text/csv",
}
],
},
"encryptedFiles": "<tests.resources.mocks.secret_store_mock.SecretStoreMock object at 0x7f8146a94710>.0c184915b07b44c888d468be85a9b28253e80070e5294b1aaed81c2f0264e430!![{\"url\": \"https://testocnfiles.blob.core.windows.net/testfiles/testzkp.pdf\", \"checksum\": \"efb2c764274b745f5fc37f97c6b0e761\", \"checksumType\": \"MD5\", \"contentLength\": \"4535431\", \"resourceId\": \"access-log2018-02-13-15-17-29-18386C502CAEA932\"}, {\"url\": \"s3://ocean-test-osmosis-data-plugin-dataseeding-1537375953/data.txt\", \"checksum\": \"efb2c764274b745f5fc37f97c6b0e761\", \"contentLength\": \"4535431\", \"resourceId\": \"access-log2018-02-13-15-17-29-18386C502CAEA932\"}, {\"url\": \"http://ipv4.download.thinkbroadband.com/5MB.zip\"}]!!0",
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting",
"isListed": False
},
"additionalInformation": {
"description": "Introduce the main concepts and vision behind ocean protocol",
"copyrightHolder": "Ocean Protocol Foundation Ltd.",
"workExample": "Text PDF",
"inLanguage": "en",
"categories": [
"white-papers"
],
"tags": ["data exchange", "sharing", "curation", "bonding curve"],
"links": [
{
"url": "http://data.ceda.ac.uk/badc/ukcp09/data/gridded-land-obs/gridded-land-obs"
"-daily/"
},
{
"url": "http://data.ceda.ac.uk/badc/ukcp09/data/gridded-land-obs/gridded-land-obs"
"-averages-25km/"
},
{
"url": "http://data.ceda.ac.uk/badc/ukcp09/"
}
],
"updateFrequency": "yearly",
"structuredMarkup": [
{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
},
"index": 2
}
],
"proof": {
"type": "DDOIntegritySignature",
"created": "2019-05-22T08:44:27Z",
"creator": "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e",
"signatureValue": "0xbd7b46b3ac664167bc70ac211b1a1da0baed9ead91613a5f02dfc25c1bb6e3ff40861b455017e8a587fd4e37b703436072598c3a81ec88be28bfe33b61554a471b"
}
}
json_dict_no_metadata = {"publisherId": "0x2"}
json_dict_no_valid_metadata = {"publisherId": "0x4",
"main": {},
"assetId": "002"
}
json_before = {
"@context": "https://w3id.org/future-method/v1",
"created": "2016-02-08T16:02:20Z",
"id": "did:op:112233445566778899",
"publicKey": [
{
"id": "did:op:123456789abcdefghi#keys-1",
"type": "RsaVerificationKey2018",
"owner": "did:op:123456789abcdefghi",
"publicKeyPem": "-----BEGIN PUBLIC KEY...END PUBLIC KEY-----\r\n"
},
{
"id": "did:op:123456789abcdefghi#keys-2",
"type": "Ed25519VerificationKey2018",
"owner": "did:op:123456789abcdefghi",
"publicKeyBase58": "<KEY>"
}
],
"authentication": [
{
"type": "RsaSignatureAuthentication2018",
"publicKey": "did:op:123456789abcdefghi#keys-1"
},
{
"type": "ieee2410Authentication2018",
"publicKey": "did:op:123456789abcdefghi#keys-2"
}
],
"proof": {
"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
},
"service": [
{
"type": "Consume",
"index": 0,
"serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${"
"pubKey}&serviceId={serviceId}&url={url}"
},
{
"type": "Compute",
"index": 1,
"serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${"
"pubKey}&serviceId={serviceId}&algo={algo}&container={container}"
},
{
"type": "metadata",
"index": 2,
"serviceEndpoint": "http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"attributes": {
"main": {
"name": "UK Weather information 2011",
"type": "dataset",
"dateCreated": "2012-10-10T17:00:00Z",
"datePublished": "2012-10-10T17:00:00Z",
"author": "Met Office",
"license": "CC-BY",
"files": [{
"index": 0,
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"price": "88888880000000000000",
},
"encryptedFiles": "0xkasdhfkljhasdfkjasdhf",
"curation": {
"rating": 0.0,
"numVotes": 0,
"schema": "Binary Votting",
"isListed": True
},
"additionalInformation": {
"description": "Weather information of UK including temperature and humidity",
"copyrightHolder": "Met Office",
"workExample": "stationId,latitude,longitude,datetime,temperature,"
"humidity /n 423432fsd,51.509865,-0.118092,"
"2011-01-01T10:55:11+00:00,7.2,68",
"inLanguage": "en",
"tags": ["weather", "uk", "2011", "temperature", "humidity"],
"updateFrequency": "yearly",
"structuredMarkup": [
{"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"},
{"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"}
],
"links": [
{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"url": "https://foo.com/sample2.csv"
}
]
}
}
}
]
}
json_update = {
"@context": "https://w3id.org/future-method/v1",
"created": "2016-02-08T16:02:20Z",
"id": "did:op:112233445566778899",
"publicKey": [
{
"id": "did:op:123456789abcdefghi#keys-1",
"type": "RsaVerificationKey2018",
"owner": "did:op:123456789abcdefghi",
"publicKeyPem": "-----BEGIN PUBLIC KEY...END PUBLIC KEY-----\r\n"
},
{
"id": "did:op:123456789abcdefghi#keys-2",
"type": "Ed25519VerificationKey2018",
"owner": "did:op:123456789abcdefghi",
"publicKeyBase58": "<KEY>"
}
],
"authentication": [
{
"type": "RsaSignatureAuthentication2018",
"publicKey": "did:op:123456789abcdefghi#keys-1"
},
{
"type": "ieee2410Authentication2018",
"publicKey": "did:op:123456789abcdefghi#keys-2"
}
],
"proof": {
"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
},
"service": [
{
"type": "Consume",
"index": 0,
"serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${"
"pubKey}&serviceId={serviceId}&url={url}"
},
{
"type": "Compute",
"index": 1,
"serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${"
"pubKey}&serviceId={serviceId}&algo={algo}&container={container}"
},
{
"type": "metadata",
"index": 2,
"serviceEndpoint": "http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"attributes": {
"main": {
"name": "UK Weather information 2012",
"type": "dataset",
"dateCreated": "2012-02-01T10:55:11Z",
"datePublished": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"files": [{
"index": 0,
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}],
"price": "15",
},
"encryptedFiles": "0xkasdhfkljhasdfkjasdhf",
"curation": {
"rating": 8.0,
"numVotes": 1,
"schema": "Binary Votting",
"isListed": True
},
"additionalInformation": {
"description": "Weather information of UK including temperature and humidity and white",
"copyrightHolder": "Met Office",
"workExample": "stationId,latitude,longitude,datetime,temperature,"
"humidity /n 423432fsd,51.509865,-0.118092,"
"2011-01-01T10:55:11+00:00,7.2,68",
"inLanguage": "en",
"tags": ["weather", "uk", "2011", "temperature", "humidity"],
"updateFrecuency": "yearly",
"structuredMarkup": [
{"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"},
{"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"}
],
"links": [
{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"url": "https://foo.com/sample2.csv"
}
]
}
}
}
]
}
json_valid = {
"main": {
"name": "10 Monkey Species Small",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Mario",
"license": "CC0: Public Domain",
"price": "10",
"files": [
{
"index": 0,
"contentType": "application/zip",
"encoding": "UTF-8",
"compression": "zip",
"checksum": "2bf9d229d110d1976cdf85e9f3256c7f",
"checksumType": "MD5",
"contentLength": "12057507",
"url": "https://s3.amazonaws.com/assets/training.zip"
},
{
"index": 1,
"contentType": "text/txt",
"encoding": "UTF-8",
"compression": "none",
"checksum": "354d19c0733c47ef3a6cce5b633116b0",
"checksumType": "MD5",
"contentLength": "928",
"url": "https://s3.amazonaws.com/datacommons/monkey_labels.txt"
},
{
"index": 2,
"contentType": "application/zip",
"url": "https://s3.amazonaws.com/datacommons/validation.zip"
}
],
"type": "dataset",
},
"additionalInformation":{
"description": "EXAMPLE ONLY ",
"categories": [
"image"
],
"tags": [
"image data",
"classification",
"animals"
],
"workExample": "image path, id, label",
"links": [
{
"name": "example model",
"url": "https://drive.google.com/open?id=1uuz50RGiAW8YxRcWeQVgQglZpyAebgSM"
},
{
"name": "example code",
"type": "example code",
"url": "https://github.com/slothkong/CNN_classification_10_monkey_species"
},
{
"url": "https://s3.amazonaws.com/datacommons/links/discovery/n5151.jpg",
"name": "n5151.jpg",
"type": "discovery"
},
{
"url": "https://s3.amazonaws.com/datacommons/links/sample/sample.zip",
"name": "sample.zip",
"type": "sample"
}
],
"copyrightHolder": "Unknown",
"inLanguage": "en"
}
}
test_assets = []
for i in range(10):
a = copy.deepcopy(json_dict)
a['id'] = a['id'][:-2] + str(i) +str(i)
test_assets.append(a)
json_request_consume = {
'requestId': "",
'consumerId': "",
'fixed_msg': "",
'sigEncJWT': ""
}
|
StarcoderdataPython
|
79596
|
<gh_stars>0
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.rules.setup_py_util import distutils_repr
testdata = {
'foo': 'bar',
'baz': {
'qux': [123, 456],
'quux': ('abc', b'xyz'),
'corge': {1, 2, 3}
},
'various_strings': [
"x'y",
'aaa\nbbb'
]
}
expected = """
{
'foo': 'bar',
'baz': {
'qux': [
123,
456,
],
'quux': (
'abc',
'xyz',
),
'corge': {
1,
2,
3,
},
},
'various_strings': [
'x\\\'y',
\"\"\"aaa\nbbb\"\"\",
],
}
""".strip()
def test_distutils_repr():
assert expected == distutils_repr(testdata)
|
StarcoderdataPython
|
3247122
|
<filename>lab_03/main.py
X, Y, Z = 0, 1, 2
def function(x, y):
return x**2 + y**2
#return x + y
def create_table(f):
    start_x = float(input('Enter the start of x: '))
    finish_x = float(input('Enter the end of x: '))
    start_y = float(input('Enter the start of y: '))
    finish_y = float(input('Enter the end of y: '))
    count = int(input('Enter the number of points: '))
table = [[], [], []]
step_x = (finish_x - start_x) / (count - 1)
step_y = (finish_y - start_y) / (count - 1)
x = start_x
while len(table[X]) < count:
y = start_y
table[X].append(x)
table[Y] = []
table_z = []
while len(table[Y]) < count:
z = f(x, y)
table[Y].append(y)
table_z.append(z)
y += step_y
table[Z].append(table_z)
x += step_x
return table
def print_table(table):
print('{:^10}│'.format('x\\y'), end = '')
for i in range(len(table[Y])):
print('{:<10.3f}'.format(table[Y][i]), end = '')
print()
print(end = '─' * 10 + '┼')
print('─' * (10 * len(table[Y])))
for i in range(len(table[X])):
print('{:<10.3f}│'.format(table[X][i]), end = '')
for j in range(len(table[Y])):
print('{:<10.3f}'.format(table[Z][i][j]), end = '')
print()
def find_start(table, x, n):
start = -1
for i in range(len(table)):
if table[i] >= x:
start = i
break
if start == -1:
return len(table) - n
if i <= n // 2:
return 0
start -= n // 2
if start + n >= len(table):
start = len(table) - n
return start
def approximation(table, x, n):
new_table = []
for i in range(2 * n + 1):
new_table.append([' '] * (n + 2))
j = 0
for i in range(0, len(new_table), 2):
new_table[i][0] = table[0][j]
new_table[i][1] = table[1][j]
j += 1
current = 2
for j in range(2, len(new_table[0])):
for i in range(1, len(new_table) - 1):
if new_table[i - 1][j - 1] != ' ' and new_table[i + 1][j - 1] != ' ':
new_table[i][j] = (new_table[i - 1][j - 1] - new_table[i + 1][j - 1]) /\
(new_table[0][0] - new_table[current][0])
current += 2
return new_table
def generate_polynomial(table, x):
coeff = []
j = 1
for i in range(0, len(table) // 2 + 1):
coeff.append(table[i][j])
j += 1
result = 0
for i in range(len(coeff)):
current = coeff[i]
for j in range(i):
current *= x - table[j * 2][0]
result += current
return result
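# The value assembled above is the Newton divided-difference form
#   P(x) = f[x0] + f[x0,x1]*(x - x0) + f[x0,x1,x2]*(x - x0)*(x - x1) + ...
# where the coefficients f[...] are taken from the top diagonal of the
# triangular table built in approximation().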
def polynomial(table, x, n_x, y, n_y):
start_x = find_start(table[X], x, n_x + 1)
start_y = find_start(table[Y], y, n_y + 1)
table[X] = table[X][start_x:start_x + n_x + 1]
table[Y] = table[Y][start_y:start_y + n_y + 1]
table[Z] = table[Z][start_x:start_x + n_x + 1]
for i in range(len(table[Z])):
table[Z][i] = table[Z][i][start_y:start_y + n_y + 1]
print_table(table)
    # For each fixed x, interpolate over y, then interpolate the intermediate results over x
table_x = []
for i in range(len(table[X])):
t = [table[Y], table[Z][i]]
new_table = approximation(t, y, n_y)
result = generate_polynomial(new_table, y)
table_x.append(result)
new_table = approximation([table[X], table_x], x, n_x)
result = generate_polynomial(new_table, x)
return result
def main():
table = create_table(function)
print()
print_table(table)
print()
    n_x = int(input('Enter the polynomial degree for x: '))
    n_y = int(input('Enter the polynomial degree for y: '))
    x = float(input('Enter x: '))
    y = float(input('Enter y: '))
print()
z = polynomial(table, x, n_x, y, n_y)
f = function(x, y)
print()
print('P({:.2f}; {:.2f}) = {:.5f}'.format(x, y, z))
print('f({:.2f}; {:.2f}) = {:.5f}'.format(x, y, f))
    print('Error: {:1.3e}'.format(abs(z - f) / (f) * 100))
return 0
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1620622
|
<gh_stars>1-10
#!/usr/bin/python
#
# Copyright 2017 The Goma Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--generator', help='generator')
parser.add_argument('--output-path', help='output file path')
parser.add_argument('inputs', metavar='INPUT', type=str, nargs='+',
help='input files')
args = parser.parse_args()
# cmd = "$(location :js_embed) $(SRCS) > $@",
cmd = [ args.generator ] + args.inputs
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout_data, _ = p.communicate()
if p.returncode != 0:
print >>sys.stderr, 'failed to run js_embed: exit_status=', p.returncode
sys.exit(1)
with open(args.output_path, 'wb') as f:
f.write(stdout_data)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1770317
|
from random import randint
computador = randint(0,10)
print('I am the COMPUTER. Try to guess the number I am thinking of, from 0 to 10')
acertou = False
palpite = 0
while not acertou:
    jogador = int(input('Enter your guess: '))
palpite +=1
if jogador == computador:
acertou = True
else:
if jogador < computador:
            print('Higher')
elif jogador > computador:
            print('Lower')
print('You got it in {} guesses'.format(palpite))
|
StarcoderdataPython
|
1778575
|
<reponame>Stafil0/vkbot<filename>vkresponses/__init__.py
import os
import importlib
__imports = os.path.dirname(__file__)
__module = os.path.basename(__imports)
__imported = [__importing for __importing in os.listdir(__imports) if __importing.endswith('.py')]
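# Dynamically import every .py file in this package directory so that anything the
# modules register at import time is loaded together with the package.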
for __import in __imported:
importlib.import_module(f'{__module}.{__import[0:-3]}')
|
StarcoderdataPython
|
178333
|
#-*- encoding: utf-8 -*-
# Given an array nums, write a function to move all 0's to the end of it while
# maintaining the relative order of the non-zero elements.
#
# Example:
#
# Input: [0,1,0,3,12]
# Output: [1,3,12,0,0]
#
# Note:
#
# You must do this in-place without making a copy of the array.
# Minimize the total number of operations.
#
# Related Topics: Array, Two Pointers
# leetcode submit region begin(Prohibit modification and deletion)
# class Solution(object):
# def moveZeroes(self, nums):
# """
# :type nums: List[int]
# :rtype: None Do not return anything, modify nums in-place instead.
# """
#
# j = 0
#
# for idx, d in enumerate(nums):
#
# if d != 0:
# nums[j] = nums[idx]
# if j != idx:
# nums[idx] = 0
#                 j only advances when d != 0; when d == 0 it stays put, so j always points at the first remaining zero
# j += 1
#
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
zeroCount = 0
for idx, d in enumerate(nums):
if d == 0:
zeroCount += 1
elif zeroCount != 0:
nums[idx - zeroCount] = d
nums[idx] = 0
class Solution2(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
zeroP = -1
for idx , data in enumerate(nums):
if data == 0 and zeroP == -1:
zeroP = idx
elif data != 0 and zeroP > -1:
nums[zeroP] = data
nums[idx] = 0
zeroP += 1
                if nums[zeroP] != 0:  # no zero gap remains, so reset the zero pointer
zeroP = -1
# leetcode submit region end(Prohibit modification and deletion)
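# Quick sanity check (illustrative, not part of the submission region): the zeros
# are moved to the end in place while the non-zero order is preserved.
if __name__ == '__main__':
    nums = [0, 1, 0, 3, 12]
    Solution().moveZeroes(nums)
    print(nums)  # expected: [1, 3, 12, 0, 0]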
|
StarcoderdataPython
|
3206119
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A predicate that count the number of values that satisfy another predicate.
"""
from . import predicate
from . import path_predicate as pp
from .path_predicate_result import HasPathPredicateResult
class CardinalityResult(predicate.PredicateResult, HasPathPredicateResult):
"""Denotes a PredicateResult from a CardinalityPredicate.
In practice, this is a base class that is further refined for specific
types of events.
Attributes:
    pred: The ValuePredicate generating the result.
found: A list of JSON objects the predicate was applied to.
In practice these are the matched objects.
"""
@property
def path_predicate_result(self):
"""The result of mapping the underlying predicate over the source."""
return self.__collect_values_result
@property
def pred(self):
"""Returns the cardinality predicate used to generate this result."""
return self.cardinality_pred
@property
def path_pred(self):
"""The underlying path predicate used to collect values."""
return self.__collect_values_result.pred
@property
def filter_pred(self):
"""The filter to the underlying path predicate."""
return self.__collect_values_result.pred.pred
@property
def cardinality_pred(self):
"""The actual CardinalityPredicate used to generate this result."""
return self.__cardinality_pred
@property
def count(self):
"""The number of elements that satisfied the predicate."""
return len(self.__collect_values_result.path_values)
@property
def source(self):
"""The source value (collection) that we are mapping the predicateover."""
return self.__collect_values_result.source
def __init__(self, cardinality_pred, path_pred_result, **kwargs):
"""Constructor.
Args:
cardinality_pred: [CardinalityPredicate] The predicate we used to
generate this result.
      path_pred_result: [CollectValuesResult]. The result of applying the
underlying PathPredicate bound to the |cardinality_pred|.
See the base class (PredicateResult) for additional kwargs.
"""
valid = kwargs.pop('valid', False)
super(CardinalityResult, self).__init__(valid=valid, **kwargs)
self.__cardinality_pred = cardinality_pred
self.__collect_values_result = path_pred_result
def __repr__(self):
return '{0} pred={1!r} result={2!r}'.format(
self.__class__.__name__,
self.__cardinality_pred, self.__collect_values_result)
def __str__(self):
return '{valid} count={count} of {min}...{max}'.format(
valid=self.valid, count=self.count,
min=self.__cardinality_pred.min, max=self.__cardinality_pred.max)
def __eq__(self, event):
return (self.__class__ == event.__class__
and self.__cardinality_pred == event.cardinality_pred
and self.__collect_values_result == event.path_predicate_result)
def export_to_json_snapshot(self, snapshot, entity):
"""Implements JsonSnapshotableEntity interface."""
builder = snapshot.edge_builder
count_relation = builder.determine_valid_relation(self)
result_relation = builder.determine_valid_relation(
self.__collect_values_result)
builder.make(entity, 'Count', self.count, relation=count_relation)
builder.make_mechanism(entity, 'Predicate', self.__cardinality_pred)
builder.make_input(entity, 'Source',
self.__collect_values_result.source, format='json')
builder.make(entity, 'Result',
self.__collect_values_result, relation=result_relation)
class ConfirmedCardinalityResult(CardinalityResult):
"""Denotes a CardinalityPredicate that was satisfied."""
def __init__(self, cardinality_pred, path_pred_result, **kwargs):
"""Constructor.
Args:
cardinality_pred: [CardinalityPredicate] The predicate we used to
generate this result.
      path_pred_result: [CollectValuesResult]. The result of applying the
underlying PathPredicate bound to the |cardinality_pred|.
See the base class (CardinalityResult) for additional kwargs.
"""
valid = kwargs.pop('valid', True)
super(ConfirmedCardinalityResult, self).__init__(
valid=valid,
cardinality_pred=cardinality_pred, path_pred_result=path_pred_result,
**kwargs)
def __str__(self):
if not self.count:
return 'Confirmed no {pred}.'.format(pred=self.path_pred)
return 'Confirmed pred={pred} with count={count}'.format(
pred=self.cardinality_pred, count=self.count)
class FailedCardinalityResult(CardinalityResult):
"""Denotes a CardinalityPredicate that was not satisfied.
In practice, this is a base class used to detect failures.
It is further specialized for the particular reason for failure.
"""
pass
class UnexpectedValueCardinalityResult(FailedCardinalityResult):
"""Denotes a failure because a value existed where none were expected."""
def __str__(self):
return 'Found unexpected count={count} pred={pred}'.format(
count=self.count, pred=self.cardinality_pred)
class MissingValueCardinalityResult(FailedCardinalityResult):
"""Denotes a failure because a value did not exist where one was expected."""
def __init__(self, source, cardinality_pred, path_pred_result, **kwargs):
valid = kwargs.pop('valid', False)
super(MissingValueCardinalityResult, self).__init__(
valid=valid, cardinality_pred=cardinality_pred,
path_pred_result=path_pred_result)
self.__source = source
def __str__(self):
return 'Expected to find {pred}. No values found.'.format(
pred=self.cardinality_pred)
class FailedCardinalityRangeResult(FailedCardinalityResult):
"""Denotes a failure because too few or too many values were found."""
def __str__(self):
# pred is a CardinalityPredicate
return ('Found {count} {criteria}'
' but expected {min}..{max}'.format(
count=self.count, criteria=self.path_pred,
min=self.cardinality_pred.min, max=self.cardinality_pred.max))
class CardinalityPredicate(predicate.ValuePredicate,
pp.ProducesPathPredicateResult):
"""Validates a JSON object value based on how many things are found within.
We implicitly wrap the predicate in a MapPredicate so that the results
  coming back have a structure that makes sense. But we don't bother passing
the MapPredicate in because it is implicit. Instead we just pass in the
predicate to be mapped.
Attributes:
    pred: The jc.ValuePredicate to apply; it is implicitly wrapped in a MapPredicate.
    min: Minimum number of object matches we expect.
    max: Maximum number of object matches we allow. < 0 indicates any.
"""
@property
def path_pred(self):
"""The underlying predicate that we are mapping."""
return self.__path_pred
@property
def filter_pred(self):
"""The filter, if any, for the underlying path predicate."""
return self.__path_pred.pred
@property
def min(self):
"""The minimum desired cardinality, or None for no lower bound."""
return self.__min
@property
def max(self):
"""The maximum desired cardinality, or None for no upper bound."""
return self.__max
def export_to_json_snapshot(self, snapshot, entity):
"""Implements JsonSnapshotableEntity interface."""
snapshot.edge_builder.make_mechanism(entity, 'Predicate', self.path_pred)
if self.__min is not None:
snapshot.edge_builder.make_control(entity, 'Min', self.__min)
if self.__max is not None:
snapshot.edge_builder.make_control(entity, 'Max',
'Any' if self.__max < 0 else self.__max)
def __init__(self, pred, min=0, max=None, **kwargs):
"""Constructor.
Args:
pred: The jc.ValuePredicate to apply.
min: The minimum number of path values we expect to find when applied.
max: The maximum number of path values we expect to find when applied.
"""
super(CardinalityPredicate, self).__init__(**kwargs)
if not isinstance(pred, predicate.ValuePredicate):
raise TypeError(
'Got {0}, expected jc.ValuePredicate'.format(pred.__class__))
self.__min = min
self.__max = max
if isinstance(pred, pp.PathPredicate):
self.__path_pred = pred
else:
self.__path_pred = pp.PathPredicate('', pred=pred)
def __eq__(self, pred):
return (self.__class__ == pred.__class__
and self.__min == pred.min
and self.__max == pred.max
and self.__path_pred == pred.path_pred)
def __str__(self):
return 'Cardinality({0}) {1}..{2}'.format(
self.__path_pred, self.__min, self.__max)
def __call__(self, context, obj):
"""Attempt to match object.
Args:
obj: JSON object to match.
Returns:
PredicateResponse
"""
collected_result = self.__path_pred(context, obj)
count = len(collected_result.path_values)
the_max = context.eval(self.__max)
the_min = context.eval(self.__min)
if not count:
if the_max != 0:
return MissingValueCardinalityResult(
obj, valid=False,
cardinality_pred=self, path_pred_result=collected_result)
else:
result_type = ConfirmedCardinalityResult
elif the_max == 0:
result_type = UnexpectedValueCardinalityResult
elif (count >= the_min
and (the_max is None or count <= the_max)):
result_type = ConfirmedCardinalityResult
else:
result_type = FailedCardinalityRangeResult
valid = result_type == ConfirmedCardinalityResult
return result_type(valid=valid, cardinality_pred=self,
path_pred_result=collected_result)
|
StarcoderdataPython
|
1670841
|
<filename>Project_Lvxincan/alltogether.py
#coding=utf-8
from __future__ import print_function
import cv2 as cv
import numpy as np
from matplotlib import pylab as plt
import argparse
import glob
from math import pi
import urx
import logging
import time
import sys
import math3d as m3d
def move_to_dui1():
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
v = 0.02
a = 0.1
#dui1 pose start
pose = [-0.015371047712964686, 0.16291898650384315, 0.3583823679602556, 0.729218924720313, 1.756045637777791, 1.72787322187088]
rob.movel(pose, acc=a, vel=v)
rob.close()
def robot_move_OnePose():
# to move up
print("moving to a known pose")
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
v = 0.01
a = 0.1
pose = rob.getl()
print("robot tcp is at: ", pose) # in the world coordinate position and rotation
# move in z direction up for 0.02
# print("moving in z")
pose[0] = 0.1
pose[1] = 0.1
pose[2] = 1.0
rob.movel(pose, acc=a, vel=v)
rob.close()
def rotate_forward():
print("rotate_forward")
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
v = 0.02
a = 0.1
pose = rob.getl()
print("robot tcp is at: ", pose) # in the world coordinate position and rotation
# move in x direction for 0.02
# print("moving in x")
pose[4] += pi/90
rob.movel(pose, acc=a, vel=v)
rob.close()
def rotate_left():
print("rotate_left")
def rotate_up():
print("rotate_up")
def robot_move_right1(baselength):
# to move right l unit:mm
print("moving right")
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
v = 0.02
a = 0.1
pose = rob.getl()
print("robot tcp is at: ", pose) # in the world coordinate position and rotation
# move in x direction for 0.02
# print("moving in x")
pose[0] += baselength
rob.movel(pose, acc=a, vel=v)
rob.close()
def robot_move_forward1():
# to move forward
print("moving forward")
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
v = 0.02
a = 0.1
pose = rob.getl()
print("robot tcp is at: ", pose) # in the world coordinate position and rotation
# move in y direction for 0.02
# print("moving in y")
pose[1] -= 0.02
rob.movel(pose, acc=a, vel=v)
rob.close()
# to find the known object and print the corner point location
def match(c):
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='rec_small1.jpg')
parser.add_argument('--input2', help='Path to input image 2.', default='frame' + str(c) + '.jpg')
args = parser.parse_args()
img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
print('Could not open or find the images!')
exit(0)
#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m,n in knn_matches:
if m.distance < ratio_thresh * n.distance:
good_matches.append(m)
#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
#-- Localize the object
obj = np.empty((len(good_matches),2), dtype=np.float32)
scene = np.empty((len(good_matches),2), dtype=np.float32)
for i in range(len(good_matches)):
#-- Get the keypoints from the good matches
obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
H, _ = cv.findHomography(obj, scene, cv.RANSAC)
#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4,1,2), dtype=np.float32)
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = img_object.shape[1]
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = img_object.shape[1]
obj_corners[2,0,1] = img_object.shape[0]
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = img_object.shape[0]
scene_corners = cv.perspectiveTransform(obj_corners, H)
#-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
(int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
(int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
(int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
(int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)
cv.imwrite('Good Matches&Object detection'+str(c) + '.jpg',img_matches)
# print(int(scene_corners[0,0,0]),img_object.shape[1],int(scene_corners[0,0,1]))
# print('Good Matches&Object detection1'+str(c) + '.jpg')
center_point0=(int(scene_corners[0,0,0] + img_object.shape[1])+int(scene_corners[2,0,0] + img_object.shape[1]))/2
center_point1=(int(scene_corners[0,0,1])+int(scene_corners[2,0,1]))/2
center_point=[center_point0,center_point1]
print(center_point)
return(center_point)
def match1():
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='rec_small1.jpg')
parser.add_argument('--input2', help='Path to input image 2.', default='facing1.jpg')
args = parser.parse_args()
img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
print('Could not open or find the images!')
exit(0)
#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m,n in knn_matches:
if m.distance < ratio_thresh * n.distance:
good_matches.append(m)
#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
#-- Localize the object
obj = np.empty((len(good_matches),2), dtype=np.float32)
scene = np.empty((len(good_matches),2), dtype=np.float32)
for i in range(len(good_matches)):
#-- Get the keypoints from the good matches
obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
H, _ = cv.findHomography(obj, scene, cv.RANSAC)
#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4,1,2), dtype=np.float32)
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = img_object.shape[1]
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = img_object.shape[1]
obj_corners[2,0,1] = img_object.shape[0]
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = img_object.shape[0]
scene_corners = cv.perspectiveTransform(obj_corners, H)
#-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
(int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
(int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
(int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
(int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)
# cv.imwrite('Good Matches&Object detection1.jpg',img_matches)
# cv.imshow('Good Matches&Object detection1.jpg',img_matches)
# print(int(scene_corners[0,0,0]),img_object.shape[1],int(scene_corners[0,0,1]))
# print('Good Matches&Object detection1'+str(c) + '.jpg')
center_point0=(int(scene_corners[0,0,0] + img_object.shape[1])+int(scene_corners[2,0,0] + img_object.shape[1]))/2
center_point1=(int(scene_corners[0,0,1])+int(scene_corners[2,0,1]))/2
center_point=[center_point0,center_point1]
# return(center_point)
print(center_point)
while(True):
# Press Q on keyboard to exit
if cv.waitKey(25) & 0xFF == ord('q'):
break
def move_point():
# to move up
print("moving to the point")
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
v = 0.02
a = 0.1
#close1-6
# pose = [0.11331817392983798, 0.2801514404360457, 0.40716238831066137, 0.7288518769893056, 1.75720949766542, 1.7288762714958366]
# rob.movel(pose, acc=a, vel=v)
rob.close()
def robot_move_task():
# robot_move_task_from_ReferencePosition
# to move as the task asked
print("moving to complate the task")
rob = urx.Robot("192.168.80.2")
rob.set_tcp((0,0,0,0,0,0))
rob.set_payload(0.5, (0,0,0))
l = 0.02
v = 0.02
a = 0.1
pose = rob.getl()
print("robot tcp is at: ", pose) # in the world coordinate position and rotation
#dui1 pose start
# pose = [-0.015371047712964686, 0.16291898650384315, 0.3583823679602556, 0.729218924720313, 1.756045637777791, 1.72787322187088]
# rob.movel(pose, acc=a, vel=v)
# step1
pose = [0.09843593470393579, 0.26510182111053154, 0.4028641488364185, 0.7293840999220157, 1.7575432907350066, 1.7280083427019985]
rob.movel(pose, acc=a, vel=v)
# step2
pose = [0.11349549374315637, 0.2802989236507926, 0.40720806884446564, 0.7290357525244547, 1.7569778326154977, 1.7287120933853668]
rob.movel(pose, acc=a, vel=v)
# step3
pose = [0.11460174179046292, 0.28153593539694216, 0.4074298765968609, 0.7295337903752471, 1.756310919064192, 1.7283559296692148]
rob.movel(pose, acc=a, vel=v)
pose = [0.12923785534015672, 0.2671670032288611, 0.40747408164237053, 0.7295000886040007, 1.7572500353651812, 1.7283636977205024]
rob.movel(pose, acc=a, vel=v)
pose = [0.11078068722321228, 0.25527572034962204, 0.40764048056389807, 0.7298588022893313, 1.7555891125498237, 1.7284234936835599]
rob.movel(pose, acc=a, vel=v)
pose = [0.07648318951482813, 0.22131664005025137, 0.4077336729334317, 0.7290399452789309, 1.7566098675466841, 1.7290427412939582]
rob.movel(pose, acc=a, vel=v)
time.sleep(2)
#in 1-4
pose = [0.1096277417105041, 0.2782563939867772, 0.40945510584463063, 0.7304462462673038, 1.7569070677822813, 1.7268994057911478]
rob.movel(pose, acc=a, vel=v)
pose = [0.11813817891258888, 0.2856367025277646, 0.41328282986770337, 0.7293913095346095, 1.757686739360187, 1.7281365277259495]
rob.movel(pose, acc=a, vel=v)
pose = [0.11939772214092868, 0.2885459066988721, 0.411258361377801, 0.729510577813205, 1.7552934806593876, 1.7289513919241533]
rob.movel(pose, acc=a, vel=v)
pose = [0.12358349828409654, 0.29316420912719965, 0.410826266327851, 0.7290838685610902, 1.757399470553258, 1.7285771052298267]
rob.movel(pose, acc=a, vel=v)
pose = [0.13147317314094595, 0.3011585845734863, 0.41078134124622057, 0.728876103815376, 1.7572176942540814, 1.7289702636115485]
rob.movel(pose, acc=a, vel=v)
pose = [0.1405024103151181, 0.310255323239094, 0.4110833042894985, 0.7287406831381755, 1.7573406460681256, 1.7291010739354038]
rob.movel(pose, acc=a, vel=v)
pose = [0.14239719241844506, 0.31252705462501135, 0.41209069321252606, 0.7291499575488868, 1.7628837802632797, 1.7265063352354237]
rob.movel(pose, acc=a, vel=v)
time.sleep(5)
#out 2-6
pose = [0.1334431445545815, 0.3026290175057595, 0.4118846240821475, 0.730741243829314, 1.760253414711346, 1.7255141352815433]
rob.movel(pose, acc=a, vel=v)
pose = [0.12437409880672887, 0.29382238237474473, 0.41159802946857316, 0.7305493146697993, 1.7623292137236457, 1.724746836319876]
rob.movel(pose, acc=a, vel=v)
pose = [0.11798124860681335, 0.28735259472234725, 0.4115068046183376, 0.7308702421744679, 1.7625799214084237, 1.7244085447547393]
rob.movel(pose, acc=a, vel=v)
pose = [0.08526943020955498, 0.2547964393826721, 0.4116081134616931, 0.7302656953431732, 1.761960373435015, 1.7250979247124252]
rob.movel(pose, acc=a, vel=v)
pose = [0.0592818549986954, 0.22880215454237002, 0.4115284040360025, 0.7307364002519257, 1.7630925285669465, 1.7241497309932778]
rob.movel(pose, acc=a, vel=v)
#close1-6
pose = [0.03907648121863697, 0.24824147478697123, 0.4113091730563136, 0.7301457328402742, 1.7633817788209647, 1.724228231133081]
rob.movel(pose, acc=a, vel=v)
pose = [0.020770591111322328, 0.2668176069070978, 0.4114559310892153, 0.730043416937704, 1.7621468652746528, 1.7246688495323277]
rob.movel(pose, acc=a, vel=v)
pose = [0.007834567566814908, 0.2799198500316586, 0.41113026596905333, 0.7296730780348917, 1.763338816584342, 1.7238073259520172]
rob.movel(pose, acc=a, vel=v)
pose = [-0.002584685455172076, 0.29037234929837574, 0.41119475375580194, 0.7297502879408158, 1.7632873208189492, 1.7238532499992436]
rob.movel(pose, acc=a, vel=v)
pose = [0.04506127505101218, 0.33840389742311, 0.41116776085679263, 0.7291835964187542, 1.7638554918217029, 1.724986842882753]
rob.movel(pose, acc=a, vel=v)
pose = [0.09526823207181682, 0.263550292508512, 0.41155533470618194, 0.729797114405243, 1.7631994432180738, 1.7253466398567225]
rob.movel(pose, acc=a, vel=v)
time.sleep(2)
pose = [0.11489694220364684, 0.2821064908742386, 0.406623292470202, 0.7287839192148127, 1.7599187617629883, 1.7278966961919806]
rob.movel(pose, acc=a, vel=v)
#close OK
time.sleep(2)
#return to the original point
pose = [-0.026091850507074233, 0.13637172277017537, 0.34192834637749725, 0.7168010678810264, 1.9995058927833398, 1.4523042091915035]
rob.movel(pose, acc=a, vel=v)
# rob.movel(pose, acc=a, vel=v)
rob.close()
def Updown2dui1():
cap = cv.VideoCapture(0)
c = 0
fps = 10 # FPS of the video
# rval=cap.isOpened()
fourcc = cv.VideoWriter_fourcc(*'MJPG')
# the last parameter is the size
# videoWriter = cv.VideoWriter('saveVideo.avi', fourcc, fps, (640, 480))
# Read until video is completed
# ideal_center_point=[600,242]
ideal_center_point=[450,211]
delta_point=[0,0]
while(cap.isOpened()):
c = c + 1
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
# Display the resulting frame
cv.imshow('Frame', frame)
cv.imwrite('frame'+str(c) + '.jpg',frame) # save as pictures
# frame1 = frame
# print("find the dui1 pose")
real_center_point=match(c)
delta_point[0]=real_center_point[0]-ideal_center_point[0]
delta_point[1]=real_center_point[1]-ideal_center_point[1]
print(delta_point[0])
if((delta_point[1]>15)|(delta_point[1]<-15)):
if (delta_point[1])>0:
robot_move_down()
if (delta_point[1])<0:
robot_move_up()
else:
print("Up and down is OK")
break
# Press Q on keyboard to exit
if cv.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv.destroyAllWindows()
def LeftRight2dui1():
cap = cv.VideoCapture(0)
c = 0
fps = 10 # FPS of the video
# rval=cap.isOpened()
fourcc = cv.VideoWriter_fourcc(*'MJPG')
# the last parameter is the size
# videoWriter = cv.VideoWriter('saveVideo.avi', fourcc, fps, (640, 480))
# Read until video is completed
# ideal_center_point=[600,242]
ideal_center_point=[450,211]
delta_point=[0,0]
while(cap.isOpened()):
c = c + 1
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
# Display the resulting frame
cv.imshow('Frame', frame)
cv.imwrite('frame'+str(c) + '.jpg',frame) # save as pictures
# frame1 = frame
# print("find the dui1 pose")
real_center_point=match(c)
delta_point[0]=real_center_point[0]-ideal_center_point[0]
delta_point[1]=real_center_point[1]-ideal_center_point[1]
print(delta_point[0])
if(((delta_point[0]>15))|((delta_point[0])<-15)):
if delta_point[0]>0:
robot_move_right()
if delta_point[0]<0:
robot_move_left()
else:
print("Left and right is OK")
break
# Press Q on keyboard to exit
if cv.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv.destroyAllWindows()
def ForwardBack2dui1():
cap = cv.VideoCapture(0)
c = 0
fps = 10 # FPS of the video
# rval=cap.isOpened()
fourcc = cv.VideoWriter_fourcc(*'MJPG')
# the last parameter is the size
# videoWriter = cv.VideoWriter('saveVideo.avi', fourcc, fps, (640, 480))
# Read until video is completed
ideal_center_point=[600,242]
delta_point=[0,0]
while(cap.isOpened()):
c = c + 1
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
# Display the resulting frame
cv.imshow('Frame', frame)
cv.imwrite('frame'+str(c) + '.jpg',frame) # save as pictures
# frame1 = frame
# print("find the dui1 pose")
real_center_point=match(c)
delta_point[0]=real_center_point[0]-ideal_center_point[0]
delta_point[1]=real_center_point[1]-ideal_center_point[1]
print(delta_point[0])
if((delta_point[1]>15)|(delta_point[1]<-15)):
if (delta_point[1])>0:
robot_move_down()
if (delta_point[1])<0:
robot_move_up()
else:
print("Up and down is OK")
break
# Press Q on keyboard to exit
if cv.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv.destroyAllWindows()
# pose_dui1 = [-0.015379972627099047, 0.16278659829816503, 0.35778023133243914, 0.7293855033267473, 1.7571449945285949, 1.7263708455148004]
# if __name__ == '__main__':
# match1()
# move_to_dui1()
# robot_move_task()
# print_robot_tcp()
# robot_move_up()
# robot_move_forward1()
# rotate_forward()
# get_Transformation()
# The Orientation printed here is the result of multiplying the Rx, Ry and Rz rotation matrices
# pose = [-0.015379972627099047, 0.16278659829816503, 0.35778023133243914, 0.7293855033267473, 1.7571449945285949, 1.7263708455148004]
# pose = [1.57, 0, 0, 1.57, 0, 0]
# print(m3d.Transform())
# robot_move_right1(0.01) # can move at most 50 cm
# delta_robot_tcp1 = print_delta_robot_tcp()
# delta_robot_tcp1 = [-8.38816606e-05, 4.68648427e-05, 4.68079196e-04, -2.88025478e-02, -7.31398710e-02, 5.52882549e-02]
# test_delta_robot_tcp(delta_robot_tcp1) # test_delta_robot_tcp() function OK
# Teach and peg in the hole_OK
# Updown2dui1()
# time.sleep(3)
# LeftRight2dui1()
# time.sleep(3)
# robot_move_task()
|
StarcoderdataPython
|
1742326
|
<filename>Lectures/DeepLearningClass/chapter5/train_neuralnet_cifar10.py
# coding: utf-8
import numpy as np
from DeepLearningClass.chapter5.two_layer_net_3_layer import TwoLayerNet
from DeepLearningClass.common.optimizer import Adam
train_file_list = ['data/train_data_' + str(i) + '.csv' for i in range(1, 51)]
test_file_list = ['data/test_data_' + str(i) + '.csv' for i in range(1, 11)]
def data_setting(data):
    # x: the data, y: the labels
x = (np.array(data[:, 0:-1]) / 255).tolist()
y_tmp = np.zeros([len(data), 10])
for i in range(0, len(data)):
label = int(data[i][-1])
y_tmp[i, label - 1] = 1
y = y_tmp.tolist()
return x, y
def read_data(filename):
####################################################################################################################
## ▣ Data Loading
    ## - Load each file and then run preprocessing on it
####################################################################################################################
data = np.loadtxt(filename, delimiter=',')
np.random.shuffle(data)
return data_setting(data)
network = TwoLayerNet(input_size=1024, hidden_size1=200, hidden_size2=200, output_size=10)
epochs = 5
batch_size = 100  # batch size
learning_rate = 0.1  # learning rate
train_loss_list = []  # stores the loss value for every batch
train_acc_list = []  # stores the training accuracy for every epoch
test_acc_list = []  # stores the test accuracy for every epoch
optimizer = Adam()
# Start training
print('Learning Started!')
for epoch in range(epochs):
tot_train_acc = []
for index in range(0, len(train_file_list)):
total_x, total_y = read_data(train_file_list[index])
for start_idx in range(0, 1000, batch_size):
            train_x_batch, train_y_batch = np.array(total_x[start_idx:start_idx + batch_size]), np.array(total_y[start_idx:start_idx + batch_size])  # load the data one batch at a time
            grad = network.gradient(train_x_batch, train_y_batch)  # compute the gradients
            # update the weights and biases
optimizer.update(network.params, grad)
# for key in network.params.keys():
# network.params[key] -= learning_rate * grad[key]
            loss = network.loss(train_x_batch, train_y_batch)  # compute the loss with the updated weights and biases
            train_loss_list.append(loss)  # store the loss value for every batch
            train_acc = network.accuracy(train_x_batch, train_y_batch)  # compute the accuracy on this training batch
            tot_train_acc.append(train_acc)  # store the accuracy computed for each batch
print('epoch - {} :'.format(epoch), np.mean(tot_train_acc))
    train_acc_list.append(np.mean(tot_train_acc))  # store the training accuracy for this epoch
# Start testing
print('Testing Started!')
tot_test_acc = []
for index in range(0, len(test_file_list)):
total_x, total_y = read_data(test_file_list[index])
for start_idx in range(0, 1000, batch_size):
test_x_batch, test_y_batch = np.array(total_x[start_idx:start_idx + batch_size]), np.array(total_y[start_idx:start_idx + batch_size])
        test_acc = network.accuracy(test_x_batch, test_y_batch)  # compute the accuracy on this test batch
        tot_test_acc.append(test_acc)  # store the accuracy computed for each batch
test_acc_list.append(np.mean(tot_test_acc))  # store the accuracy over the whole test set
print('train accuracy :', train_acc_list)
print('test accuracy :', test_acc_list)
|
StarcoderdataPython
|
1796475
|
from utils.singleton import Singleton
class Storage:
__metaclass__ = Singleton
def __init__(self):
pass
def download_file(self, key):
raise NotImplementedError
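# A hypothetical concrete backend, shown only to illustrate the intended
# interface; the real storage backends used in this project are unknown.
# class LocalStorage(Storage):
#     def download_file(self, key):
#         with open(key, 'rb') as f:
#             return f.read()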
|
StarcoderdataPython
|
4804280
|
<filename>simpleml/models/classifiers/external_models.py
from simpleml.models.external_models import ExternalModelMixin
import logging
__author__ = '<NAME>'
LOGGER = logging.getLogger(__name__)
class ClassificationExternalModelMixin(ExternalModelMixin):
'''
Wrapper class for a pickleable model with expected methods
Expected to be used as Mixin Class with default methods and
    overwritten by the model class if methods exist
Extended from base class to add classification methods
ex:
from some_model_library import ActualModelClass
class WrappedActualModelClass(ActualModelClass, ClassificationExternalModelMixin):
pass
class some_model_libraryActualModelClass(Model, [optional mixins]):
def _create_external_model(self, **kwargs):
return WrappedActualModelClass(**kwargs)
'''
def predict_proba(self, *args, **kwargs):
'''
By default fall back to predict method
'''
LOGGER.warning('No predict_proba method defined, using predict')
return self.predict(*args, **kwargs)
|
StarcoderdataPython
|
32013
|
<gh_stars>0
import torch
from torchvision import models
from torch import nn
class GoTurnRemix(nn.Module):
"""
Create a model based on GOTURN. The GOTURN architecture used a CaffeNet while GoTurnRemix uses AlexNet.
    The rest of the architecture is similar to GOTURN. A PyTorch implementation of GOTURN can be found at:
https://github.com/aakaashjois/PyTorch-GOTURN
"""
def __init__(self):
super(GoTurnRemix, self).__init__()
# Load an AlexNet model pretrained on ImageNet
self.features = nn.Sequential(*list(models.alexnet(pretrained=True).children())[:-1])
# Freeze the pretrained layers
for param in self.features.parameters():
param.requires_grad = False
self.regressor = nn.Sequential(
nn.Linear(256 * 6 * 6 * 2, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4),
)
# Initialize the biases of the Linear layers to 1
# Initialize weights to a normal distribution with 0 mean and 0.005 standard deviation
for m in self.regressor.modules():
if isinstance(m, nn.Linear):
m.bias.data.fill_(1)
m.weight.data.normal_(0, 0.005)
def forward(self, previous, current):
previous_features = self.features(previous)
current_features = self.features(current)
# Flatten, concatenate and pass to regressor the features
return self.regressor(torch.cat((previous_features.view(previous_features.size(0), 256 * 6 * 6),
current_features.view(current_features.size(0), 256 * 6 * 6)), 1))
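# A minimal usage sketch (illustrative; note that pretrained=True above downloads
# the ImageNet AlexNet weights on first use, and the input sizes here are assumptions):
# model = GoTurnRemix()
# previous = torch.randn(1, 3, 224, 224)
# current = torch.randn(1, 3, 224, 224)
# boxes = model(previous, current)  # expected shape: (1, 4)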
|
StarcoderdataPython
|
3335707
|
import networkx as nx
import pickle
import os
from threading import Thread
# DATA_PATH = '/data/split_name_hr/'
# DATA_PATH = '/data/backpage_only/'
# asexyservice.com
# eroticmugshots.com
# escortsincollege.com
# hoxnif.com
# liveescortreviews.com
DATA_PATH = 'asexyservice.com/'
# ENDING = '_backpage'
ENDING = '_degree'
# city, name, phone
VARIABLE = 'name'
# hp or hr
TYPE = 'hr'
NUMBER_OF_THREADS = 1
def batch_get_degree(list_of_files, batch_num):
print("Spawned:", batch_num)
# Process these files serially
os.system('python get_degree_subscript.py ' + ' '.join(list_of_files))
all_files = set()
to_remove = set()
pattern = VARIABLE + '_' + TYPE
for (dirpath, dirnames, filenames) in os.walk(DATA_PATH):
print(filenames)
if(dirpath == DATA_PATH):
for file in filenames:
if file.startswith(VARIABLE+'_'+TYPE) and file != VARIABLE+'_'+TYPE:
if file.endswith(ENDING):
print("To Remove:", dirpath+"/"+file[:-len(ENDING)])
to_remove.add(dirpath+"/"+file[:-len(ENDING)])
else:
print(dirpath+"/"+file)
all_files.add(dirpath+"/"+file)
break
all_files = all_files - to_remove
all_files = list(all_files)
print(all_files)
# exit()
length = len(all_files)
length_per_thread = length//NUMBER_OF_THREADS
curr = 0
all_threads = list()
while curr*length_per_thread < length:
curr += 1
start = (curr-1)*length_per_thread
end = curr*length_per_thread
print("Processing:", ((curr-1)*length_per_thread), ":", (curr*length_per_thread))
list_of_files = all_files[((curr-1)*length_per_thread):(curr*length_per_thread)]
thread_instance = Thread(target=batch_get_degree, args=(list_of_files, curr))
all_threads.append(thread_instance)
for thread_instance in all_threads:
thread_instance.start()
for thread_instance in all_threads:
thread_instance.join()
print("All Threads Complete")
print("Starting Combining Degrees")
os.system('python combine_degrees_subscript.py '+DATA_PATH+' '+VARIABLE+' '+TYPE)
|
StarcoderdataPython
|
38427
|
from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from contents.serializers import HotSKUListSerializer
from goods.models import SKU
class HomeAPIView(APIView):
pass
'''
List data
Hot-sale data: fetch the hot-sale items for whichever category is requested
1. Get the category id
2. Query the data for that id
3. Serialize the data into dictionaries
4. Return the response
'''
from rest_framework.generics import ListAPIView
class HotSKUListAPIView(ListAPIView):
def get_queryset(self):
category_id = self.kwargs['category_id']
return SKU.objects.filter(category_id=category_id).order_by('-sales')[:2]
serializer_class = HotSKUListSerializer
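# A hypothetical URL configuration sketch showing how `category_id` would reach
# this view via self.kwargs; the real route names in this project are unknown.
# from django.urls import re_path
# urlpatterns = [
#     re_path(r'^categories/(?P<category_id>\d+)/hotskus/$', HotSKUListAPIView.as_view()),
# ]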
|
StarcoderdataPython
|
54308
|
#!/usr/bin/env python3
from random import randint
class Caesar(object):
def shift(self, offset):
"""Shifts the alphabet using a random number.
Returns the value of the shift."""
self.alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'
]
self.new_alphabet = list(self.alphabet)
for i in range(26 - int(offset)):
# Takes first index and appends it to the end.
self.new_alphabet.insert(0, self.new_alphabet.pop(-1))
return offset
def encrypt(self, text, key):
"""The function takes an input then
        returns the encrypted output and key."""
text = text.lower()
key = self.shift(key)
encrypted_text = []
for c in text:
# Takes letter input then appends the output to a list.
print(self.new_alphabet[self.alphabet.index(c)])
encrypted_text.append(self.new_alphabet[self.alphabet.index(c)])
return "".join(encrypted_text), key # Returns the encrypted text and the key.
def decrypt(self, cypher, key):
"""This function takes the encrypted text and key then returns
the original text."""
decrypted_text = []
self.shift(key) # Shift alphabet using value from key.
for i in range(len(cypher)):
# Takes encrypted letter and returns original letter.
decrypted_text.append(self.alphabet[self.new_alphabet.index(
cypher[i])])
return "".join(decrypted_text)
class Ncaesar(object):
"""This encryption method is like the Caesar Cypher however it does a
different alphabet shift for each letter. This results in a more
secure encryption method, however the key is longer."""
def shift(self, offset):
self.alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'
]
self.new_alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'
]
for i in range(int(offset)):
self.new_alphabet.insert(0, self.new_alphabet.pop(-1))
return offset
def encrypt(self, text):
"""Does exactly the same as the Ceaser method but uses a
different key for each letter."""
text = text.lower()
"""Does exactly the same as the Caeser encrypt method but
uses a different key for each letter."""
key = []
encrypted_text = []
for c in text:
# Shifts alphabet for each letter and generates key + text
key.append(self.shift(randint(0, 26)))
encrypted_text.append(self.new_alphabet[self.alphabet.index(c)])
return "".join(encrypted_text), key
def decrypt(self, cypher, key):
# Decrypted each letter in text.
decrypted_text = []
for i in range(len(key)):
self.shift(key[i])
decrypted_text.append(self.alphabet[self.new_alphabet.index(
cypher[i])])
print(i)
return "".join(decrypted_text)
|
StarcoderdataPython
|
1644458
|
# This file, included with the VICE package, is protected under the terms of the
# associated MIT License, and any use or redistribution of this file in original
# or altered form is subject to the copyright terms therein.
"""
Asymptotic Giant Branch Star Nucleosynthetic Yield Tools
========================================================
In the current version of VICE, users are allowed to select
between two tables of nucleosynthetic yields from asymptotic
giant branch stars - those published by the Karakas (2010) and
Cristallo et al. (2011) studies.
Included Features
=================
grid :: <function>
    Return the stellar mass-metallicity grid of fractional nucleosynthetic
yields for a given element and study to the user.
References
==========
Cristallo (2011), ApJS, 197, 17
Karakas (2010), MNRAS, 403, 1413
"""
from __future__ import absolute_import
from .grid import yield_grid as grid
import sys
__all__ = ["grid"]
del absolute_import
if sys.version_info[0] < 3:
__all__ = [str(i) for i in __all__]
del i
else:
pass
del sys
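# A hypothetical usage sketch; check grid's own docstring for the exact call
# signature, as the element symbol and keyword below are assumptions:
# from vice.yields.agb import grid
# c_yields = grid("c", study="cristallo11")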
|
StarcoderdataPython
|
4811348
|
<gh_stars>1-10
import sys
a=sys.stdin.read().split()
|
StarcoderdataPython
|
42076
|
<filename>src/core/migrations/0006_auto_20190615_2123.py<gh_stars>0
# Generated by Django 2.2.2 on 2019-06-15 21:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20190615_2113'),
]
operations = [
migrations.RemoveField(
model_name='organisation',
name='files',
),
migrations.DeleteModel(
name='CSV_File',
),
]
|
StarcoderdataPython
|
4807019
|
from .scan import DocScanner
|
StarcoderdataPython
|
1622720
|
<filename>kcc3/hosts.py<gh_stars>1-10
from django_hosts import patterns, host
host_patterns = patterns(
'',
host(r'fanpai', 'kcc3.urls', name='root'),
host(r'yakuman', 'yakumans.urls', name='yakumans'),
)
|
StarcoderdataPython
|
187474
|
from math import inf
def river_travelling(cost_matrix):
N = len(cost_matrix)
M = [[0 for x in range(N)] for x in range(N)]
for steps in range(1, N):
for i in range(N - steps):
j = i + steps
lowest = cost_matrix[i][j]
for k in range(i + 1, j):
lowest = min(lowest, M[k][j] + M[i][k])
M[i][j] = lowest
return M[0][-1]
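# Worked example (illustrative): with the upper-triangular cost matrix
#   row 0: [0, 5, 15]
#   row 1: [-, 0, 5]
#   row 2: [-, -, 0]
# travelling 0 -> 2 directly costs 15, while stopping at 1 costs 5 + 5 = 10,
# so river_travelling(...) returns 10, the cheaper of the two.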
C = int(input())
for i in range(C):
P = int(input())
    cost_matrix = [[] for _ in range(P)]
for j in range(P):
cost_matrix[j] = [int(x) if x != '*' else inf for x in input().split()]
cost_matrix[j][j] = 0
print(river_travelling(cost_matrix))
|
StarcoderdataPython
|
3376189
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_alert_states.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from metadata_center_sdk.model.metadata_center import stream_alert_rule_pb2 as metadata__center__sdk_dot_model_dot_metadata__center_dot_stream__alert__rule__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_alert_states.proto',
package='stream',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x17list_alert_states.proto\x12\x06stream\x1a\x41metadata_center_sdk/model/metadata_center/stream_alert_rule.proto\"K\n\x17ListAlertStatesResponse\x12\x30\n\x06states\x18\x01 \x03(\x0b\x32 .metadata_center.StreamAlertRule\"\x81\x01\n\x1eListAlertStatesResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12-\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1f.stream.ListAlertStatesResponseb\x06proto3')
,
dependencies=[metadata__center__sdk_dot_model_dot_metadata__center_dot_stream__alert__rule__pb2.DESCRIPTOR,])
_LISTALERTSTATESRESPONSE = _descriptor.Descriptor(
name='ListAlertStatesResponse',
full_name='stream.ListAlertStatesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='states', full_name='stream.ListAlertStatesResponse.states', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=102,
serialized_end=177,
)
_LISTALERTSTATESRESPONSEWRAPPER = _descriptor.Descriptor(
name='ListAlertStatesResponseWrapper',
full_name='stream.ListAlertStatesResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='stream.ListAlertStatesResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='stream.ListAlertStatesResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='stream.ListAlertStatesResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='stream.ListAlertStatesResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=309,
)
_LISTALERTSTATESRESPONSE.fields_by_name['states'].message_type = metadata__center__sdk_dot_model_dot_metadata__center_dot_stream__alert__rule__pb2._STREAMALERTRULE
_LISTALERTSTATESRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTALERTSTATESRESPONSE
DESCRIPTOR.message_types_by_name['ListAlertStatesResponse'] = _LISTALERTSTATESRESPONSE
DESCRIPTOR.message_types_by_name['ListAlertStatesResponseWrapper'] = _LISTALERTSTATESRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListAlertStatesResponse = _reflection.GeneratedProtocolMessageType('ListAlertStatesResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTALERTSTATESRESPONSE,
'__module__' : 'list_alert_states_pb2'
# @@protoc_insertion_point(class_scope:stream.ListAlertStatesResponse)
})
_sym_db.RegisterMessage(ListAlertStatesResponse)
ListAlertStatesResponseWrapper = _reflection.GeneratedProtocolMessageType('ListAlertStatesResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTALERTSTATESRESPONSEWRAPPER,
'__module__' : 'list_alert_states_pb2'
# @@protoc_insertion_point(class_scope:stream.ListAlertStatesResponseWrapper)
})
_sym_db.RegisterMessage(ListAlertStatesResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
StarcoderdataPython
|
109160
|
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 3
Author: <NAME>
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from EulerFunctions import primelist
def run():
N = 600851475143
return max(p for p in primelist(int(N**0.5)) if N%p == 0)
if __name__ == "__main__":
print(run())
|
StarcoderdataPython
|
4827219
|
<filename>setup.py<gh_stars>1-10
from setuptools import setup
setup(
name='thai_sentiment',
packages=['thai_sentiment'],
    version='v0.1.3',  # Ideally should be the same as your GitHub release tag version
description='The naive sentiment classification function based on NBSVM trained on wisesight_sentiment',
author='cstorm125',
author_email='<EMAIL>',
url='https://github.com/cstorm125/thai_sentiment',
download_url='https://github.com/cstorm125/thai_sentiment/archive/refs/tags/v0.1.3.tar.gz',
keywords=['sentiment analysis', 'thai', 'nlp'],
classifiers=[],
install_requires=[
'pythainlp',
'sklearn',
'scipy',
],
include_package_data=True,
)
|
StarcoderdataPython
|
93433
|
NEPS_URL = 'https://neps.academy'
ENGLISH_BUTTON = '/html/body/div/div/div/div[2]/div/div/a/div[2]/div'
LOGIN_PAGE_BUTTON = '//*[@id="app"]/div/div/div/div[1]/div/header/div/div/div[3]/div/div/nav/ul/li[6]/button'
EMAIL_INPUT = '/html/body/div/div/div/div[3]/div/div/div/form/div[1]/div/div[1]/div/input'
PASSWORD_INPUT = '/html/body/div/div/div/div[3]/div/div/div/form/div[2]/div/div[1]/div[1]/input'
LOGIN_MODAL_BUTTON = '//*[@id="app"]/div[3]/div/div/div/form/div[3]/button'
|
StarcoderdataPython
|
11312
|
import os
from . import common
import cv2
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Video(data.Dataset):
def __init__(self, args, name='Video', train=False, benchmark=False):
self.args = args
self.name = name
self.scale = args.scale
self.idx_scale = 0
self.train = False
self.do_eval = False
self.benchmark = benchmark
self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
self.vidcap = cv2.VideoCapture(args.dir_demo)
self.n_frames = 0
self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
def __getitem__(self, idx):
success, lr = self.vidcap.read()
if success:
self.n_frames += 1
lr, = common.set_channel(lr, n_channels=self.args.n_colors)
lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
else:
            self.vidcap.release()
return None
def __len__(self):
return self.total_frames
def set_scale(self, idx_scale):
self.idx_scale = idx_scale
|
StarcoderdataPython
|
3274979
|
""" A module of useful, generic functions. """
from __future__ import annotations
from collections import deque
from fractions import Fraction
from typing import Iterable, TypeVar
IntFraction = TypeVar("IntFraction", int, Fraction)
T = TypeVar("T")
class Half:
"""A class for representing 1/2 in such a way that multiplication preserves types."""
def __mul__(self, other: IntFraction) -> IntFraction:
if isinstance(other, int):
int_result = other // 2
assert 2 * int_result == other, f"{other} is not halvable in its field"
return int_result
else: # isinstance(other, Fraction):
frac_result = other / 2
assert 2 * frac_result == other, f"{other} is not halvable in its field"
return frac_result
def __str__(self) -> str:
return "1/2"
def __repr__(self) -> str:
return str(self)
def __rmul__(self, other: IntFraction) -> IntFraction:
return self * other
def __call__(self, other: IntFraction) -> IntFraction:
return self * other
half = Half()
def maximin(*iterables: Iterable[int]) -> int:
"""Return the maximum of the minimum, terminating early.
This is equivalent to: max(min(iterable) for iterable in iterables)"""
iter_iterables = iter(iterables)
try:
result = min(next(iter_iterables)) # Get the first one through a full evaluation.
except StopIteration:
raise ValueError("max() arg is an empty sequence") from None
for iterable in iter_iterables:
iterable = iter(iterable)
try:
best = next(iterable)
except StopIteration:
raise ValueError("min() arg is an empty sequence") from None
if best <= result:
continue
for item in iterable:
if item <= result:
break
if item < best:
best = item
else: # We never broke out, so best > result
result = best
return result
def lookahead(iterable: Iterable[T], n: int) -> Iterable[tuple[T, T]]:
"""Yield items of iterable together with the item n steps in the future."""
iterable = iter(iterable)
queue: deque[T] = deque()
try:
queue.extend(next(iterable) for _ in range(n))
except StopIteration:
return
for item in iterable:
queue.append(item)
yield queue.popleft(), queue[-1]
def tail_enumerate(iterable: Iterable[T], tail: int = 0) -> Iterable[tuple[int, T]]:
"""Like enumerate but a negative index is used when items are within tail of the end of the iterable."""
iterable = iter(iterable)
queue: deque[T] = deque()
try:
for _ in range(tail):
queue.append(next(iterable))
except StopIteration:
yield from enumerate(queue)
return
for index, item in enumerate(iterable):
queue.append(item)
yield index, queue.popleft()
yield from zip(range(-tail, 0), queue)
|
StarcoderdataPython
|
1758998
|
<filename>tests/test_drivers/httpbin_client.py<gh_stars>10-100
from apiwrappers import Method, Request, Url
from apiwrappers.auth import TokenAuth
class HttpBin:
def __init__(self, host, driver):
self.url = Url(host)
self.driver = driver
def get(self, params=None):
"""The request's query parameters."""
request = Request(Method.GET, self.url("/get"), query_params=params)
return self.driver.fetch(request)
def post(self, data=None, files=None, json=None):
"""The request's POST parameters."""
request = Request(
Method.POST, self.url("/post"), data=data, files=files, json=json
)
return self.driver.fetch(request)
def headers(self, headers):
"""Return the incoming request's HTTP headers."""
request = Request(Method.GET, self.url("/headers"), headers=headers)
return self.driver.fetch(request)
def response_headers(self, headers):
"""Returns a set of response headers from the query string."""
request = Request(
Method.GET, self.url("/response-headers"), query_params=headers
)
return self.driver.fetch(request)
def cookies(self, cookies):
"""Returns cookie data."""
request = Request(Method.GET, self.url("/cookies"), cookies=cookies)
return self.driver.fetch(request)
def set_cookie(self, name, value):
"""Sets a cookie and redirects to cookie list."""
url = self.url("/cookies/set/{name}/{value}", name=name, value=value)
request = Request(Method.GET, url)
return self.driver.fetch(request)
def delay(self, delay, timeout):
"""Returns a delayed response (max of 10 seconds)."""
request = Request(Method.GET, self.url("/delay/{delay}", delay=delay))
return self.driver.fetch(request, timeout=timeout)
def html(self):
"""Returns a simple HTML document."""
request = Request(Method.GET, self.url("/html"))
return self.driver.fetch(request)
def basic_auth(self, login, password):
"""Prompts the user for authorization using HTTP Basic Auth."""
url = self.url("/basic-auth/{user}/{passwd}", user=login, passwd=password)
request = Request(Method.GET, url, auth=(login, password))
return self.driver.fetch(request)
def bearer_auth(self, token):
"""Prompts the user for authorization using bearer authentication."""
request = Request(Method.GET, self.url("/bearer"), auth=TokenAuth(token))
return self.driver.fetch(request)
def complex_auth_flow(self, token: str):
"""Echoes passed token and uses it for bearer authentication."""
def auth_flow():
response = yield Request(Method.POST, self.url("/anything"), data=token)
return TokenAuth(response.json()["data"])()
request = Request(Method.GET, self.url("/bearer"), auth=auth_flow)
return self.driver.fetch(request)
|
StarcoderdataPython
|
27189
|
# -*- coding: utf-8 -*-
'''
Created on Oct 23, 2015
@author: jrm
'''
from inkcut.device.plugin import DeviceProtocol
from inkcut.core.utils import async_sleep, log
class DebugProtocol(DeviceProtocol):
""" A protocol that just logs what is called """
def connection_made(self):
log.debug("protocol.connectionMade()")
def move(self, x, y, z, absolute=True):
log.debug("protocol.move({x},{y},{z})".format(x=x, y=y, z=z))
#: Wait some time before we get there
return async_sleep(0.1)
def set_pen(self, p):
log.debug("protocol.set_pen({p})".format(p=p))
def set_velocity(self, v):
log.debug("protocol.set_velocity({v})".format(v=v))
def set_force(self, f):
log.debug("protocol.set_force({f})".format(f=f))
def data_received(self, data):
log.debug("protocol.data_received({}".format(data))
def connection_lost(self):
log.debug("protocol.connection_lost()")
|
StarcoderdataPython
|
1785039
|
<reponame>Karamax/SAI1<filename>L3/irisPredictionV1.py
#!/usr/bin/env python
# coding: utf-8
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import mglearn
import pandas as pd
import numpy as np
def prediction(unicSize, measuringBorders):  # a single measurement and the classification zones to apply
    unicPredict = [0, 0, 0]  # votes for which class the measurement belongs to: Blue, Red, Green
if unicSize[measuringBorders[2]] < measuringBorders[0]:
unicPredict[0] += 1
if unicSize[measuringBorders[2]] > measuringBorders[0] and unicSize[measuringBorders[2]] < measuringBorders[1]:
unicPredict[1] += 1
if unicSize[measuringBorders[2]] > measuringBorders[1]:
unicPredict[2] += 1
return unicPredict
iris_dataset = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
measuringBorders = [  # decision boundaries and the feature column they apply to
[0.75, 1.75, 3],
[2, 4.80, 2]
]
successResult = 0
for lm in range(len(X_test)):  # iterate over the test samples
    d = [0, 0, 0]
    for i in measuringBorders:  # iterate over the significant zones
res = prediction(X_test[lm], i)
d = [d[0] + res[0], d[1] + res[1], d[2] + res[2]]
    if d.index(max(d)) == y_test[lm]:  # check whether the prediction is correct
successResult += 1
print(str(successResult / len(X_test) * 100) + " %")
|
StarcoderdataPython
|
3267210
|
<gh_stars>0
from django.contrib.contenttypes.models import ContentType
from nautobot.circuits.models import Circuit, CircuitTermination, CircuitType, Provider
from nautobot.dcim.models import PowerPanel, Site
from nautobot.extras.choices import RelationshipTypeChoices
from nautobot.extras.models import Relationship, RelationshipAssociation, Status
from nautobot.utilities.testing.integration import SplinterTestCase
class RelationshipsTestCase(SplinterTestCase):
"""
Integration test to check relationships show on a circuit termination in the UI
"""
def setUp(self):
super().setUp()
self.user.is_superuser = True
self.user.save()
self.login(self.user.username, self.password)
site_ct = ContentType.objects.get_for_model(Site)
circuit_termination_ct = ContentType.objects.get_for_model(CircuitTermination)
provider_ct = ContentType.objects.get_for_model(Provider)
power_panel_ct = ContentType.objects.get_for_model(PowerPanel)
active_circuit_status = Status.objects.get_for_model(Circuit).get(slug="active")
active_site_status = Status.objects.get_for_model(Site).get(slug="active")
provider1 = Provider.objects.create(
name="Test Provider 1",
slug="test-provider-1",
)
provider2 = Provider.objects.create(
name="Test Provider 2",
slug="test-provider-2",
)
circuit_type = CircuitType.objects.create(
name="Test Circuit Type",
slug="test-circuit-type",
)
circuit = Circuit.objects.create(
provider=provider1,
cid="1234",
type=circuit_type,
status=active_circuit_status,
)
site = Site.objects.create(
name="Test Site",
slug="test-site",
status=active_site_status,
)
circuit_termination = CircuitTermination.objects.create(
circuit=circuit,
term_side="A",
site=site,
)
power_panel = PowerPanel.objects.create(
site=site,
name="Test Power Panel",
)
m2m = Relationship.objects.create(
name="Termination 2 Provider m2m",
slug="termination-2-provider-m2m",
source_type=circuit_termination_ct,
destination_type=provider_ct,
type=RelationshipTypeChoices.TYPE_MANY_TO_MANY,
)
RelationshipAssociation.objects.create(
relationship=m2m,
source=circuit_termination,
destination=provider1,
)
RelationshipAssociation.objects.create(
relationship=m2m,
source=circuit_termination,
destination=provider2,
)
o2m = Relationship.objects.create(
name="Termination 2 Site o2m",
slug="termination-2-provider-o2m",
source_type=circuit_termination_ct,
destination_type=site_ct,
type=RelationshipTypeChoices.TYPE_ONE_TO_MANY,
)
RelationshipAssociation.objects.create(
relationship=o2m,
source=circuit_termination,
destination=site,
)
o2o = Relationship.objects.create(
name="Termination 2 Power Panel o2o",
slug="termination-2-power-panel-o2o",
source_type=circuit_termination_ct,
destination_type=power_panel_ct,
type=RelationshipTypeChoices.TYPE_ONE_TO_ONE,
)
RelationshipAssociation.objects.create(
relationship=o2o,
source=circuit_termination,
destination=power_panel,
)
def tearDown(self):
self.logout()
super().tearDown()
def test_relationships_are_visible(self):
"""
Navigate to the circuit created in setUp() and check that the relationships are showing on the page
"""
self.browser.visit(self.live_server_url)
# Click Circuits dropdown button
self.browser.links.find_by_partial_text("Circuits")[0].click()
# Click Circuits link
self.browser.links.find_by_partial_text("Circuits")[1].click()
# Click on the circuit link (circuit created in setUp)
self.browser.links.find_by_partial_text("1234").click()
# Verify custom relationships are visible
self.assertTrue(self.browser.is_text_present("Power Panel"))
self.assertTrue(self.browser.is_text_present("2 providers"))
self.assertTrue(self.browser.is_text_present("1 site"))
|
StarcoderdataPython
|
1679385
|
#!/usr/bin/env python
import argparse
import contextlib
from collections import defaultdict
import string
import sys
import os
def main():
script_path = os.path.realpath(__file__)
script_dir = os.path.dirname(script_path)
default_input = os.path.join(
script_dir, "UnitTests", "TestData", "gen", "completion"
)
default_output = os.path.join(script_dir, "LanguageServer", "Test", "GenTests.cs")
parser = argparse.ArgumentParser(
description="Generate completion and hover tests",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--ignore",
type=str,
help="comma separated list of tests to disable, of the form <filename>(:<linenum>)",
)
parser.add_argument(
"--only", type=str, help="comma separated list of tests to generate"
)
parser.add_argument(
"-o",
"--out",
nargs="?",
type=argparse.FileType("w"),
default=default_output,
help="output file",
)
parser.add_argument(
"-i",
"--input",
type=str,
default=default_input,
help="location of completions directory",
)
parser.add_argument(
"--table",
nargs="?",
type=argparse.FileType("w"),
default=os.devnull,
help="file to write test names to",
)
args = parser.parse_args()
if args.only:
to_generate = set(args.only.split(","))
else:
to_generate = set(DEFAULT_TEST_FILES)
line_skip = defaultdict(set)
if args.ignore:
for i in args.ignore.split(","):
if ":" not in i:
to_generate.discard(i)
else:
name, line = i.split(":")
try:
line = int(line)
except:
print(f"error in format of ignored item {i}", file=sys.stderr)
return
line_skip[name].add(line)
to_generate = sorted(to_generate)
with contextlib.redirect_stdout(args.out):
print(PREAMBLE)
for name in to_generate:
filename = os.path.join(args.input, name + ".py")
ignored_lines = line_skip[name]
create_tests(name, filename, ignored_lines, args.table)
print(POSTAMBLE)
def create_tests(name, filename, ignored_lines, table_f):
camel_name = snake_to_camel(name)
with open(filename) as fp:
lines = fp.read().splitlines()
width = len(str(len(lines)))
tests = []
for i, line in enumerate(lines):
if i in ignored_lines:
continue
line: str = line.strip()
if not line.startswith("#?"):
continue
line = line[2:].strip()
next_line = lines[i + 1]
col = len(next_line)
if " " in line:
maybe_num = line.split(" ", 1)
try:
col = int(maybe_num[0])
line = maybe_num[1]
except ValueError:
pass
filt = next_line[:col].lstrip()
filt = rightmost_token(filt, ". {[(\t@")
args = line.strip()
func_name = "Line_{0:0{pad}}".format(i + 1, pad=width)
func_name = camel_name + "_" + func_name
is_completion = args.startswith("[")
func_name += "_Completion" if is_completion else "_Hover"
tmpl = COMPLETION_TEST if is_completion else HOVER_TEST
print(func_name, file=table_f)
tests.append(
tmpl.format(
name=func_name,
module=csharp_str(name),
line=i + 1,
col=col,
args=csharp_str(args),
filter=csharp_str(filt),
)
)
if tests:
print(CLASS_PREAMBLE.format(name=camel_name))
for t in tests:
print(t)
print(CLASS_POSTAMBLE)
DEFAULT_TEST_FILES = [
"arrays",
"async_",
"basic",
"classes",
"completion",
"complex",
"comprehensions",
"context",
"decorators",
"definition",
"descriptors",
"docstring",
"dynamic_arrays",
"dynamic_params",
"flow_analysis",
"fstring",
"functions",
"generators",
"imports",
"invalid",
"isinstance",
"keywords",
"lambdas",
"named_param",
"on_import",
"ordering",
"parser",
"pep0484_basic",
"pep0484_comments",
"pep0484_typing",
"pep0526_variables",
"precedence",
"recursion",
"stdlib",
"stubs",
"sys_path",
"types",
]
PREAMBLE = """// Python Tools for Visual Studio
// Copyright(c) Microsoft Corporation
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the License); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
//
// See the Apache Version 2.0 License for specific language governing
// permissions and limitations under the License.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.Python.Analysis;
using Microsoft.Python.Analysis.Analyzer;
using Microsoft.Python.Analysis.Core.Interpreter;
using Microsoft.Python.Analysis.Documents;
using Microsoft.Python.Core.Idle;
using Microsoft.Python.Core.Services;
using Microsoft.Python.Core.Text;
using Microsoft.Python.LanguageServer;
using Microsoft.Python.LanguageServer.Completion;
using Microsoft.Python.LanguageServer.Sources;
using Microsoft.Python.LanguageServer.Tests;
using Microsoft.Python.Parsing;
using Microsoft.Python.Parsing.Tests;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using NSubstitute;
using TestUtilities;
namespace GenTests {"""
POSTAMBLE = """
public class GenTest : LanguageServerTestBase {
private static readonly Dictionary<string, Task<IDocumentAnalysis>> _analysis = new Dictionary<string, Task<IDocumentAnalysis>>();
private static readonly InterpreterConfiguration _interpreter = PythonVersions.LatestAvailable3X;
private static readonly PythonLanguageVersion _version = _interpreter.Version.ToLanguageVersion();
private static readonly CompletionSource _cs = new CompletionSource(new PlainTextDocumentationSource(), ServerSettings.completion);
private static readonly HoverSource _hs = new HoverSource(new PlainTextDocumentationSource());
static GenTest() {
_interpreter.TypeshedPath = TestData.GetDefaultTypeshedPath();
}
protected async Task<IDocumentAnalysis> GetGenAnalysisAsync(string module) {
// Create an analyzer per module. This is slower than creating a single
// analyzer shared between all GenTest instances, but sharing them makes
// the "types" module fail (due to a bug where the name "types" shadows
// a builtin module name).
if (_analysis.TryGetValue(module, out var analysis)) {
return await analysis;
}
var root = TestData.GetPath("TestData", "gen", "completion");
var sm = CreateServiceManager();
sm.AddService(new PythonAnalyzer(sm));
sm.AddService(await PythonInterpreter.CreateAsync(_interpreter, root, sm));
sm.AddService(new RunningDocumentTable(sm));
var src = TestData.GetPath("TestData", "gen", "completion", module + ".py");
analysis = GetAnalysisAsync(File.ReadAllText(src), sm, modulePath: src);
_analysis[module] = analysis;
return await analysis;
}
protected async Task DoCompletionTest(string module, int lineNum, int col, string args, string filter) {
filter = filter.ToLowerInvariant();
var tests = string.IsNullOrWhiteSpace(args) ? new List<string>() : ParseStringList(args).Select(s => s.ToLowerInvariant()).ToList();
var analysis = await GetGenAnalysisAsync(module);
var res = _cs.GetCompletions(analysis, new SourceLocation(lineNum + 1, col + 1));
var items = res?.Completions?.Select(item => item.insertText.ToLowerInvariant())
.Where(t => t.ToLowerInvariant().Contains(filter))
.ToList() ?? new List<string>();
if (tests.Count == 0) {
items.Should().BeEmpty();
} else {
items.Should().Contain(tests);
}
}
protected async Task DoHoverTest(string module, int lineNum, int col, string args) {
var tests = string.IsNullOrWhiteSpace(args)
? new List<string>()
: args.Split(' ', options: StringSplitOptions.RemoveEmptyEntries).Select(s => s.EndsWith("()") ? s.Substring(0, s.Length - 2) : s).ToList();
var analysis = await GetGenAnalysisAsync(module);
var res = _hs.GetHover(analysis, new SourceLocation(lineNum + 1, col + 1));
if (tests.Count == 0) {
res?.contents.value.Should().BeEmpty();
} else {
res.Should().NotBeNull();
res.contents.value.Should().ContainAll(tests);
}
}
protected List<string> ParseStringList(string s) {
var list = new List<string>();
using (var reader = new StringReader(s)) {
var tokenizer = new Tokenizer(_version);
tokenizer.Initialize(reader);
while (!tokenizer.IsEndOfFile) {
var token = tokenizer.GetNextToken();
if (token.Kind == TokenKind.EndOfFile) {
break;
}
switch (token.Kind) {
case TokenKind.Constant when token != Tokens.NoneToken && (token.Value is string || token.Value is AsciiString):
list.Add(token.Image);
break;
}
}
}
return list;
}
}
}
"""
CLASS_PREAMBLE = """ [TestClass]
public class {name}Tests : GenTest {{
public TestContext TestContext {{ get; set; }}
[TestInitialize]
public void TestInitialize() => TestEnvironmentImpl.TestInitialize($"{{TestContext.FullyQualifiedTestClassName}}.{{TestContext.TestName}}");
[TestCleanup]
public void TestCleanup() => TestEnvironmentImpl.TestCleanup();"""
CLASS_POSTAMBLE = """
}"""
COMPLETION_TEST = """
[TestMethod, Priority(0)] public async Task {name}() => await DoCompletionTest({module}, {line}, {col}, {args}, {filter});"""
HOVER_TEST = """
[TestMethod, Priority(0)] public async Task {name}() => await DoHoverTest({module}, {line}, {col}, {args});"""
def snake_to_camel(s):
return string.capwords(s, "_").replace("_", "")
def rightmost_token(s, cs):
for c in cs:
i = s.rfind(c)
if i != -1:
s = s[i + 1 :]
return s
def csharp_str(s):
if s is None:
return "null"
s = s.replace('"', '""')
return '@"{}"'.format(s)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3251372
|
#!/usr/bin/env python3
import math
import cv2
import shm
from mission.constants.config import wire as constants
from vision.modules.base import ModuleBase
from vision import options
options = [
options.IntOption('adaptive_thresh_block_size', constants.block_size, 1, 2500),
options.IntOption('adaptive_thresh_c', constants.thresh_c, 0, 20),
# options.IntOption('cmyk_y_min', 20, 0, 255),
# options.IntOption('cmyk_y_max', 255, 0, 255),
# options.IntOption('hsv_h_min', 18, 0, 255),
# options.IntOption('hsv_h_max', 59, 0, 255),
# options.IntOption('hsv_s_min', 0, 0, 255),
# options.IntOption('hsv_s_max', 255, 0, 255),
# options.IntOption('hsv_v_min', 0, 0, 255),
# options.IntOption('hsv_v_max', 255, 0, 255),
options.DoubleOption('min_area', constants.min_area, 0, 1000),
options.IntOption('kernel_size', constants.kernel_size, 1, 255),
options.DoubleOption('min_aspect_ratio', 6, 1, 40),
options.BoolOption('debugging', constants.debugging),
options.BoolOption('lab a', True),
options.BoolOption('lab b', False),
options.BoolOption('ycrcb cb', False),
options.BoolOption('yellow_debug', False),
options.IntOption('yellow_hls_h_min', 58, 0, 255),
options.IntOption('yellow_hls_h_max', 100, 0, 255),
options.IntOption('yellow_ycrcb_cb_min', 111, 0, 255),
options.IntOption('yellow_ycrcb_cb_max', 145, 0, 255),
options.IntOption('yellow_lab_a_min', 0, 0, 255),
options.IntOption('yellow_lab_a_max', 101, 0, 255),
options.IntOption('yellow_lab_b_min', 95, 0, 255),
options.IntOption('yellow_lab_b_max', 152, 0, 255)
]
DOWNSCALE_RATIO = .33
def tag(mat, text, pos):
cv2.putText(mat, text, pos, cv2.FONT_HERSHEY_DUPLEX, 1, (255, 50, 255), thickness=2)
class Bar:
def __init__(self, contour):
min_rect = cv2.boxPoints(cv2.minAreaRect(contour))
dist1 = self.distance(min_rect[1], min_rect[0]) *int(1 / DOWNSCALE_RATIO)
dist2 = self.distance(min_rect[1], min_rect[2]) *int(1 / DOWNSCALE_RATIO)
self.area = dist1 * dist2
if dist1 < dist2:
self.aspect_ratio = dist2 / dist1
self.end1 = self.avg_pt(min_rect[1], min_rect[0]) * int(1 / DOWNSCALE_RATIO)
self.end2 = self.avg_pt(min_rect[2], min_rect[3]) * int(1 / DOWNSCALE_RATIO)
else:
self.aspect_ratio = dist1 / dist2
self.end1 = self.avg_pt(min_rect[1], min_rect[2]) * int(1 / DOWNSCALE_RATIO)
self.end2 = self.avg_pt(min_rect[0], min_rect[3]) *int(1 / DOWNSCALE_RATIO)
def distance(self, p1, p2):
return (((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5) * int(1 / DOWNSCALE_RATIO)
def avg_pt(self, p1, p2):
return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) * int(1 / DOWNSCALE_RATIO)
def is_horiz(self):
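        # Treat the bar as horizontal when the line between its endpoints makes an
        # angle of less than 45 degrees with the x-axis (note that a perfectly
        # vertical bar would give a zero denominator here).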
ratio = abs((self.end1[1] - self.end2[1]) / (self.end1[0] - self.end2[0]))
return math.atan(ratio) < math.pi / 4
def center(self):
return self.avg_pt(self.end1, self.end2)
def draw(self, mat, label):
color = (255, 255, 0) if self.is_horiz() else (127, 255, 127)
int_pt = lambda pt: (int(pt[0]), int(pt[1]))
cv2.line(mat, int_pt(self.end1), int_pt(self.end2), color, thickness=3)
tag(mat, label, int_pt(self.center()))
class Wire(ModuleBase):
def process(self, mat):
results = shm.wire_results.get()
        camera_h, camera_w = mat.shape[:2]  # numpy image shape is (height, width)
mat = cv2.resize(mat, (int(mat.shape[1] * DOWNSCALE_RATIO), int(mat.shape[0] * DOWNSCALE_RATIO)))
self.post('original', mat)
lab = cv2.cvtColor(mat, cv2.COLOR_RGB2LAB)
ycrcb = cv2.cvtColor(mat, cv2.COLOR_RGB2YCR_CB)
hls = cv2.cvtColor(mat, cv2.COLOR_RGB2HLS)
hls_split = cv2.split(hls)
lab_split = cv2.split(lab)
ycrcb_split = cv2.split(ycrcb)
lab_a = cv2.split(lab)[1]
lab_b = cv2.split(lab)[2]
ycrcb_cb = cv2.split(ycrcb)[2]
if self.options['debugging']:
self.post('lab a', lab_a)
self.post('lab b', lab_b)
self.post('ycrcb cb', ycrcb_cb)
if self.options['lab a']:
threshed = cv2.adaptiveThreshold(
lab_a, 255,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,
self.options['adaptive_thresh_block_size'] * 2 + 1,
self.options['adaptive_thresh_c'],
)
elif self.options['lab b']:
threshed = cv2.adaptiveThreshold(
lab_b, 255,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,
self.options['adaptive_thresh_block_size'] * 2 + 1,
self.options['adaptive_thresh_c'],
)
elif self.options['ycrcb cb']:
threshed = cv2.adaptiveThreshold(
ycrcb_cb, 255,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
self.options['adaptive_thresh_block_size'] * 2 + 1,
self.options['adaptive_thresh_c'],
)
else:
yellow_a_threshed = cv2.inRange(lab_split[1], self.options['yellow_lab_a_min'],
self.options['yellow_lab_a_max'])
if self.options['yellow_debug']:
self.post('yellow_a_threshed', yellow_a_threshed)
yellow_b_threshed = cv2.inRange(lab_split[2], self.options['yellow_lab_b_min'],
self.options['yellow_lab_b_max'])
if self.options['yellow_debug']:
self.post('yellow_b_threshed', yellow_b_threshed)
yellow_cb_threshed = cv2.inRange(ycrcb_split[2], self.options['yellow_ycrcb_cb_min'],
self.options['yellow_ycrcb_cb_max'])
if self.options['yellow_debug']:
self.post('yellow_cb_threshed', yellow_cb_threshed)
yellow_h_threshed = cv2.inRange(hls_split[0], self.options['yellow_hls_h_min'],
self.options['yellow_hls_h_max'])
if self.options['yellow_debug']:
self.post('yellow_h_threshed', yellow_h_threshed)
threshed = yellow_a_threshed & yellow_b_threshed & yellow_h_threshed & yellow_cb_threshed
if self.options['debugging']:
self.post('threshed', threshed)
blurred = cv2.medianBlur(threshed, self.options['kernel_size'] * 2 + 1)
if self.options['debugging']:
self.post('blurred', blurred)
_, contours, hierarchy = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if self.options['debugging']:
contours_mat = mat.copy()
for c in contours:
cv2.drawContours(contours_mat, c, -1, (255, 255, 0), thickness=2)
self.post('contours', contours_mat)
bars = []
for c in contours:
bar = Bar(c)
if bar.area >= self.options['min_area'] and \
bar.aspect_ratio >= self.options['min_aspect_ratio']:
bars.append(bar)
bars = sorted(bars, key=lambda x: -x.area)
# Identify left, right, and/or horizontal bars
hbar, vbar1, vbar2 = None, None, None
for bar in bars[:3]:
if bar.is_horiz():
if hbar is None:
hbar = bar
else:
if vbar1 is None:
vbar1 = bar
elif vbar2 is None:
vbar2 = bar
def set_results(bar, loc):
vars = None
if bar is not None:
vars = {
'{}_x1'.format(loc): bar.end1[0] *int(1 / DOWNSCALE_RATIO),
'{}_y1'.format(loc): bar.end1[1] *int(1 / DOWNSCALE_RATIO),
'{}_x2'.format(loc): bar.end2[0] *int(1 / DOWNSCALE_RATIO),
'{}_y2'.format(loc): bar.end2[1]*int(1 / DOWNSCALE_RATIO),
'{}_area'.format(loc): bar.area,
'{}_prob'.format(loc): 1,
}
else:
vars = {'{}_prob'.format(loc): 0}
results.update(**vars)
if hbar is not None:
# Align vertical bars relative to horizontal bar
left_bar, right_bar = None, None
for bar in [vbar1, vbar2]:
if bar is not None:
if bar.center()[0] < hbar.center()[0]:
left_bar = bar
else:
right_bar = bar
vbar1, vbar2 = left_bar, right_bar
else:
# Align vertical bars relative to each other
if vbar1 is not None:
if vbar2 is not None:
# Orient two bars
if vbar1.center()[0] > vbar2.center()[0]:
vbar1, vbar2 = vbar2, vbar1
elif vbar1.center()[0] > camera_w / 2:
# Move lone bar to side of camera that it's on
vbar1, vbar2 = vbar2, vbar1
bars_mat = mat.copy()
if hbar is not None: hbar.draw(bars_mat, 'bottom bar')
if vbar1 is not None: vbar1.draw(bars_mat, 'left bar')
if vbar2 is not None: vbar2.draw(bars_mat, 'right bar')
self.post('barlines', bars_mat)
# Set bar locations, if found, to shm
set_results(hbar, 'bottom')
set_results(vbar1, 'left')
set_results(vbar2, 'right')
self.fill_single_camera_direction(results)
shm.wire_results.set(results)
if __name__ == '__main__':
Wire('forward', options=options)()
|
StarcoderdataPython
|
3224569
|
import re
import sys
from types import FunctionType
import inspect
import pathlib
import importlib
PATH_TO_PLUGINS = pathlib.Path('../..').resolve().absolute()
if str(PATH_TO_PLUGINS) not in sys.path:
sys.path.insert(0, str(PATH_TO_PLUGINS))
import pigor.plugins as plugins
[print(name) for name in dir(plugins) if inspect.ismodule(getattr(plugins, name))]
p = pathlib.Path('../plugins')
if p.is_dir():
print(p)
test = importlib.import_module('pigor.plugins.polarimeter')
print(dir(test))
def render(template: pathlib.Path, plot_function: FunctionType, extra_info_functions: [FunctionType]):
pass
|
StarcoderdataPython
|
4801242
|
import pygame
from level import Level
from game_loop import GameLoop
from event_queue import EventQueue
from renderer import Renderer
from clock import Clock
LEVEL_MAP_1 = [[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 2, 3, 4, 1],
[1, 1, 1, 1, 1]]
LEVEL_MAP_2 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 5, 1],
[1, 2, 3, 0, 0, 0, 1, 0, 0, 1, 2, 3, 0, 0, 0, 0, 1],
[1, 0, 0, 1, 2, 3, 0, 2, 3, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 4, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
CELL_SIZE = 50
def main():
level_map = LEVEL_MAP_2
height = len(level_map)
width = len(level_map[0])
display_height = height * CELL_SIZE
display_width = width * CELL_SIZE
display = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("Sokoban")
level = Level(level_map, CELL_SIZE)
event_queue = EventQueue()
renderer = Renderer(display, level)
clock = Clock()
game_loop = GameLoop(level, renderer, event_queue, clock, CELL_SIZE)
pygame.init()
game_loop.start()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3222563
|
import itertools
import six
import math
class PolylineCodec(object):
def _pcitr(self, iterable):
return six.moves.zip(iterable, itertools.islice(iterable, 1, None))
def _py2_round(self, x):
# The polyline algorithm uses Python 2's way of rounding
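        # e.g. _py2_round(2.5) == 3 and _py2_round(-2.5) == -3 (halves round away
        # from zero), whereas Python 3's built-in round() would give 2 and -2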
return int(math.copysign(math.floor(math.fabs(x) + 0.5), x))
def _write(self, output, curr_value, prev_value, factor):
curr_value = self._py2_round(curr_value * factor)
prev_value = self._py2_round(prev_value * factor)
coord = curr_value - prev_value
coord <<= 1
coord = coord if coord >= 0 else ~coord
while coord >= 0x20:
output.write(six.unichr((0x20 | (coord & 0x1f)) + 63))
coord >>= 5
output.write(six.unichr(coord + 63))
def _trans(self, value, index):
byte, result, shift = None, 0, 0
while byte is None or byte >= 0x20:
byte = ord(value[index]) - 63
index += 1
result |= (byte & 0x1f) << shift
shift += 5
comp = result & 1
return ~(result >> 1) if comp else (result >> 1), index
def decode(self, expression, precision=5, geojson=False):
coordinates, index, lat, lng, length, factor = [], 0, 0, 0, len(expression), float(10 ** precision)
while index < length:
lat_change, index = self._trans(expression, index)
lng_change, index = self._trans(expression, index)
lat += lat_change
lng += lng_change
coordinates.append((lat / factor, lng / factor))
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
return coordinates
def encode(self, coordinates, precision=5, geojson=False):
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
output, factor = six.StringIO(), int(10 ** precision)
self._write(output, coordinates[0][0], 0, factor)
self._write(output, coordinates[0][1], 0, factor)
for prev, curr in self._pcitr(coordinates):
self._write(output, curr[0], prev[0], factor)
self._write(output, curr[1], prev[1], factor)
return output.getvalue()
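# A minimal usage sketch; the coordinates below are illustrative (the classic
# encoded-polyline example points), and the round trip should reproduce them up to
# the chosen precision:
if __name__ == "__main__":
    codec = PolylineCodec()
    encoded = codec.encode([(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)])
    decoded = codec.decode(encoded)
    print(encoded)  # compact encoded string
    print(decoded)  # list of (lat, lng) tuples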
|
StarcoderdataPython
|
4820275
|
import requests
class Discovery(object):
"""docstring for Discovery"""
@staticmethod
def find():
r = requests.get('https://www.meethue.com/api/nupnp')
ips = []
for elm in r.json():
ips.append(elm['internalipaddress'])
return ips
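# Minimal usage sketch; this performs a real HTTP request to the meethue discovery
# service, and the list will be empty if no bridge is registered on the local network:
if __name__ == "__main__":
    for ip in Discovery.find():
        print(ip)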
|
StarcoderdataPython
|
1628893
|
<reponame>nathandaddio/puzzle_app
import pytest
from pyramid.exceptions import HTTPNotFound
from puzzle_app.views.hitori import hitori_boards_get, hitori_board_get
from factories import (
HitoriGameBoardFactory,
HitoriGameBoardCellFactory
)
class TestHitoriGameBoardsGet:
@pytest.fixture
def board(self, db_session):
board = HitoriGameBoardFactory(number_of_rows=5, number_of_columns=5)
db_session.add(board)
db_session.commit()
return board
@pytest.fixture
def cells(self, db_session, board):
cells = [
HitoriGameBoardCellFactory(hitori_game_board=board, row_number=3, column_number=4, value=6),
HitoriGameBoardCellFactory(hitori_game_board=board, row_number=2, column_number=5, value=6)
]
db_session.add_all(cells)
db_session.commit()
return cells
@pytest.fixture
def boards_response(self, dummy_request):
return hitori_boards_get(dummy_request)
@pytest.fixture
def expected_boards_response(self, board, cells):
return [
{
'id': board.id,
'number_of_rows': board.number_of_rows,
'number_of_columns': board.number_of_columns,
'solved': False,
'feasible': None,
'cells': [ # note that the order of the cells changes as we return (row, column) order of cells
{
'id': cells[1].id,
'row_number': cells[1].row_number,
'column_number': cells[1].column_number,
'value': cells[1].value,
'included_in_solution': None
},
{
'id': cells[0].id,
'row_number': cells[0].row_number,
'column_number': cells[0].column_number,
'value': cells[0].value,
'included_in_solution': None
}
]
}
]
def test_hitori_game_boards_get(self, board, cells, boards_response, expected_boards_response):
assert boards_response == expected_boards_response
@pytest.fixture
def board_request(self, board, dummy_request):
dummy_request.matchdict['board_id'] = board.id
return dummy_request
@pytest.fixture
def board_response(self, board_request):
return hitori_board_get(board_request)
def test_hitori_game_board_get(self, board, cells, board_response, expected_boards_response):
assert board_response == expected_boards_response[0]
@pytest.fixture
def bad_board_id_request(self, dummy_request):
dummy_request.matchdict['board_id'] = 100
return dummy_request
def test_board_get_bad_id(self, bad_board_id_request):
with pytest.raises(HTTPNotFound):
hitori_board_get(bad_board_id_request)
|
StarcoderdataPython
|
3225108
|
<reponame>funkyfuture/cerberuse-collections
__all__ = []
from cerberus_collections.error_handlers.json import JSONErrorHandler # noqa: E402
__all__.append(JSONErrorHandler.__name__)
try:
from cerberus_collections.error_handlers.xml import XMLErrorHandler
except ImportError:
pass
else:
__all__.append(XMLErrorHandler.__name__)
|
StarcoderdataPython
|
27314
|
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional
import numpy as np
class Rotate(nn.Module):
"""
    Rotate the image by a random angle between -degrees and +degrees.
"""
def __init__(self, degrees, interpolation_method='nearest'):
super(Rotate, self).__init__()
self.degrees = degrees
self.interpolation_method = interpolation_method
def forward(self, noised_and_cover):
rotation_angle = np.random.uniform(-self.degrees, self.degrees)
noised_image = noised_and_cover[0]
noised_and_cover[0] = functional.rotate(noised_image, rotation_angle)
return noised_and_cover
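# Minimal usage sketch; assumes a reasonably recent torchvision where
# torchvision.transforms.functional.rotate accepts tensors (older versions expect
# PIL images), and uses random tensors purely for illustration:
if __name__ == "__main__":
    import torch
    rotate = Rotate(degrees=15)
    noised, cover = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
    noised_and_cover = rotate([noised, cover])
    print(noised_and_cover[0].shape)  # rotated "noised" image, same spatial size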
|
StarcoderdataPython
|
65827
|
import requests
import xml.etree.ElementTree as ET
import logging
from logging.config import dictConfig
import json
import copy
import tempfile
import os
import calendar
import time
import sys
from requests.auth import HTTPBasicAuth
import xml.dom.minidom
import datetime
import shutil
from io import open
import platform
from splunkversioncontrol_utility import runOSProcess, get_password
"""
Restore Knowledge Objects
Query a remote lookup file to determine what items should be restored from git into a Splunk instance
In general this will be run against localhost, unless it is being tested, as the lookup file will be updated
by a user-accessible dashboard
Basic validation will be done to ensure someone without the required access cannot restore someone else's knowledge objects
"""
splunkLogsDir = os.environ['SPLUNK_HOME'] + "/var/log/splunk"
#Setup the logging
logging_config = dict(
version = 1,
formatters = {
'f': {'format':
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
},
handlers = {
'h': {'class': 'logging.StreamHandler',
'formatter': 'f',
'level': logging.WARN},
'file': {'class' : 'logging.handlers.RotatingFileHandler',
'filename' : splunkLogsDir + '/splunkversioncontrol_restore.log',
'formatter': 'f',
'maxBytes' : 2097152,
'level': logging.DEBUG,
'backupCount' : 5 }
},
root = {
'handlers': ['h','file'],
'level': logging.DEBUG,
},
)
dictConfig(logging_config)
logger = logging.getLogger()
logging.getLogger().setLevel(logging.INFO)
class SplunkVersionControlRestore:
splunk_rest = None
destUsername = None
destPassword = None
session_key = None
gitTempDir = None
gitRootDir = None
appName = "SplunkVersionControl"
gitRepoURL = None
stanzaName = None
    sslVerify = False
    proxies = {}  # assumed empty default so the requests calls below work when no proxy is configured
# read XML configuration passed from splunkd
def get_config(self):
config = {}
try:
# read everything from stdin
config_str = sys.stdin.read()
# parse the config XML
doc = xml.dom.minidom.parseString(config_str)
root = doc.documentElement
session_key = root.getElementsByTagName("session_key")[0].firstChild.data
#Grab the session key in case we need it
config['session_key'] = session_key
conf_node = root.getElementsByTagName("configuration")[0]
if conf_node:
logger.debug("XML: found configuration")
stanza = conf_node.getElementsByTagName("stanza")[0]
if stanza:
stanza_name = stanza.getAttribute("name")
if stanza_name:
logger.debug("XML: found stanza " + stanza_name)
config["name"] = stanza_name
shortName = stanza_name.replace("splunkversioncontrol_restore://", "")
params = stanza.getElementsByTagName("param")
for param in params:
param_name = param.getAttribute("name")
logger.debug("i=\"%s\" XML: found param=\"%s\"" % (shortName, param_name))
if param_name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
data = param.firstChild.data
config[param_name] = data
logger.debug("i=\"%s\" XML: \"%s\"=\"%s\"" % (shortName, param_name, data))
if not config:
raise Exception("Invalid configuration received from Splunk.")
except Exception as e:
raise Exception("Error getting Splunk configuration via STDIN: %s" % str(e))
return config
###########################
#
# runQueries (generic version)
# This attempts to read the config data from git (stored in json format), if found it will attempt to restore the config to the
# destination server
# This method works for everything excluding macros which have a different process
# Due to variations in the REST API there are a few hacks inside this method to handle specific use cases, however the majority are straightforward
#
###########################
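    # Illustrative call (hypothetical values; endpoint is the REST path fragment that
    # follows the app name, e.g. "/saved/searches" for saved searches):
    #   self.runQueries("search", "/saved/searches", "savedsearches", "My Report",
    #                   scope="user", user="admin", restoreAsUser="", adminLevel=True)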
def runQueries(self, app, endpoint, type, name, scope, user, restoreAsUser, adminLevel):
logger.info("i=\"%s\" user=%s, attempting to restore name=%s in app=%s of type=%s in scope=%s, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, app, type, scope, restoreAsUser, adminLevel))
url = None
#Check if the object exists or not
#Data models require a slightly different URL to just about everything else
if type=="datamodels" and (scope=="app" or scope=="global"):
url = self.splunk_rest + "/servicesNS/nobody/%s%s/%s?output_mode=json" % (app, endpoint, name)
elif type=="datamodels":
url = self.splunk_rest + "/servicesNS/%s/%s%s/%s?output_mode=json" % (user, app, endpoint, name)
else:
url = self.splunk_rest + "/servicesNS/-/%s%s/%s?output_mode=json" % (app, endpoint, name)
logger.debug("i=\"%s\" Running requests.get() on url=%s with user=%s in app=%s proxies_length=%s" % (self.stanzaName, url, self.destUsername, app, len(self.proxies)))
#Determine scope that we will attempt to restore
appScope = False
userScope = False
if scope == "all":
appScope = True
userScope = True
elif scope == "app":
appScope = True
elif scope == "user":
userScope = True
else:
logger.error("i=\"%s\" user=%s, while attempting to restore name=%s, found invalid scope of scope=%s" % (self.stanzaName, user, name, scope))
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
message = ""
res_result = False
        #Verify=false is hardcoded to work around local SSL issues
res = requests.get(url, auth=auth, headers=headers, verify=self.sslVerify, proxies=self.proxies)
objExists = False
#If we get 404 it definitely does not exist or it has a name override
if (res.status_code == 404):
logger.debug("i=\"%s\" URL=%s is throwing a 404, assuming new object creation" % (self.stanzaName, url))
elif (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" URL=%s in app=%s statuscode=%s reason=%s response=\"%s\"" % (self.stanzaName, url, app, res.status_code, res.reason, res.text))
else:
#However the fact that we did not get a 404 does not mean it exists in the context we expect it to, perhaps it's global and from another app context?
#or perhaps it's app level but we're restoring a private object...
logger.debug("i=\"%s\" Attempting to JSON loads on %s" % (self.stanzaName, res.text))
resDict = json.loads(res.text)
for entry in resDict['entry']:
sharingLevel = entry['acl']['sharing']
appContext = entry['acl']['app']
if appContext == app and appScope == True and (sharingLevel == 'app' or sharingLevel == 'global'):
objExists = True
elif appContext == app and userScope == True and sharingLevel == "user":
objExists = True
configList = []
foundAtAnyScope = False
#We need to work with user scope
if userScope == True:
userDir = self.gitTempDir + "/" + app + "/" + "user"
#user directory exists
if os.path.isdir(userDir):
typeFile = userDir + "/" + type
if os.path.isfile(typeFile):
#The file exists, open it and read the config
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
if configItem['name'] == name or ('origName' in configItem and configItem['origName'] == name):
#We found the configItem we need, run the restoration
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestore(configItem, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#Let the logs know we never found it at this scope
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=user in file=%s" % (self.stanzaName, user, name, typeFile))
#We never found a file that we could use to restore from at this scope
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no user level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" user directory of dir=%s does not exist" % (self.stanzaName, userDir))
        #It's either app level or globally scoped
if appScope == True:
appDir = self.gitTempDir + "/" + app + "/" + "app"
#app directory exists
if os.path.isdir(appDir):
typeFile = appDir + "/" + type
if os.path.isfile(typeFile):
#The file we need exists
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
#We found the required configuration file, now we restore the object
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestore(configItem, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the object we wanted to restore
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at app level scope in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#We did not find the file we wanted to restore from
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#The app level scope directory does not exist for this app
logger.info("i=\"%s\" app directory of dir=%s does not exist" % (self.stanzaName, appDir))
            #It could also be a global level restore...
globalDir = self.gitTempDir + "/" + app + "/" + "global"
#user directory exists
if os.path.isdir(globalDir):
typeFile = globalDir + "/" + type
if os.path.isfile(typeFile):
#We found the file to restore from
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
#We found the relevant piece of configuration to restore, now run the restore
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestore(configItem, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the config we wanted to restore
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=global in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#This type of configuration does not exist at the global level
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
#The global directory for this app does not exist
else:
logger.debug("i=\"%s\" global directory of dir=%s does not exist" % (self.stanzaName, globalDir))
if foundAtAnyScope == True and res_result!=False:
logger.info("i=\"%s\" user=%s restore has run successfully for name=%s, type=%s, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, type, restoreAsUser, adminLevel))
return True, message
elif res_result == False and foundAtAnyScope == True:
logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=%s, restoreAsUser=%s, adminLevel=%s the object was found, but the restore failed" % (self.stanzaName, user, name, type, restoreAsUser, adminLevel))
return False, message
else:
message = "The object was not found, the restore was unsuccessful. Perhaps check the restore date, scope & capitilisation before trying again?"
logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=%s, restoreAsUser=%s, adminLevel=%s however the object was not found, the restore was unsuccessful. Perhaps check the restore date, scope & capitilisation before trying again?" % (self.stanzaName, user, name, type, restoreAsUser, adminLevel))
return False, message
###########################
#
# runRestore (generic version)
# Once we have received the required configuration, type, app, endpoint, name et cetera we attempt
# to run the post to restore or create the object
#
###########################
def runRestore(self, config, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists):
result = True
#Only an admin can restore an object owned by someone else
if config['owner'] != user and adminLevel == False:
message = "Owner of the object is listed as owner=%s, however user user=%s requested the restore and is not an admin, rejected" % (config['owner'], user)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Only an admin can use the restoreAsUser option
if restoreAsUser != "" and restoreAsUser != user and adminLevel == False:
message = "restoreAsUser=%s which is not user=%s, this user is not an admin, rejected" % (restoreAsUser, user)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
        #Change the owner to the new owner
if restoreAsUser != "" and adminLevel == True:
config["owner"] = restoreAsUser
logger.info("i=\"%s\" Attempting to run restore for name=%s of type=%s with endpoint=%s user=%s, restoreAsUser=%s, adminLevel=%s, objExists=%s" % (self.stanzaName, name, type, endpoint, user, restoreAsUser, adminLevel, objExists))
sharing = config["sharing"]
owner = config["owner"]
message = ""
createOrUpdate = None
if objExists == True:
createOrUpdate = "update"
else:
createOrUpdate = "create"
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
#We cannot post the sharing/owner information to the REST API, we use them later
del config["sharing"]
del config["owner"]
        #App / Global scope requires the /nobody/ context to be used for POST requests (GET requests do not care)
url = ""
if sharing == "user":
url = "%s/servicesNS/%s/%s%s" % (self.splunk_rest, owner, app, endpoint)
else:
url = "%s/servicesNS/nobody/%s%s" % (self.splunk_rest, app, endpoint)
payload = config
        #The config has an origName in it, therefore the object-exists lookup may not have worked as expected
#repeat it here for the edge cases (field extractions, field transforms and automatic lookups)
origName = None
if 'origName' in config:
origName = config['origName']
del config['origName']
objExistsURL = "%s/%s?output_mode=json" % (url, origName)
logger.debug("i=\"%s\" URL=%s re-checking object exists URL due to name override from %s to original name of %s proxies_length=%s" % (self.stanzaName, objExistsURL, name, origName, len(self.proxies)))
            #Verify=false is hardcoded to work around local SSL issues
res = requests.get(objExistsURL, auth=auth, headers=headers, verify=self.sslVerify, proxies=self.proxies)
#If we get 404 it definitely does not exist or it has a name override
if (res.status_code == 404):
logger.debug("i=\"%s\" URL=%s is throwing a 404, assuming new object creation" % (self.stanzaName, objExistsURL))
objExists = False
elif (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" URL=%s in app=%s statuscode=%s reason=%s response=\"%s\"" % (self.stanzaName, objExistsURL, app, res.status_code, res.reason, res.text))
else:
#However the fact that we did not get a 404 does not mean it exists in the context we expect it to, perhaps it's global and from another app context?
#or perhaps it's app level but we're restoring a private object...
logger.debug("i=\"%s\" Attempting to JSON loads on %s" % (self.stanzaName, res.text))
resDict = json.loads(res.text)
for entry in resDict['entry']:
sharingLevel = entry['acl']['sharing']
appContext = entry['acl']['app']
appScope = False
userScope = False
if sharing == "global" or sharing == "app":
appScope = True
else:
userScope = True
if appContext == app and appScope == True and (sharingLevel == 'app' or sharingLevel == 'global'):
objExists = True
elif appContext == app and userScope == True and sharingLevel == "user":
objExists = True
logger.debug("i=\"%s\" app=%s objExists=%s after re-checking on %s" % (self.stanzaName, app, objExists, objExistsURL))
#This is an existing object we are modifying
if objExists == True:
createOrUpdate = "update"
if origName:
url = url + "/" + origName
else:
url = url + "/" + name
del config["name"]
#Cannot post type/stanza when updating field extractions or a few other object types, but require them for creation?!
if 'type' in config:
del config['type']
if 'stanza' in config:
del config['stanza']
#Hack to handle the times (conf-times) not including required attributes for creation in existing entries
#not sure how this happens but it fails to create in 7.0.5 but works fine in 7.2.x, fixing for the older versions
if type=="times_conf-times" and "is_sub_menu" not in payload:
payload["is_sub_menu"] = "0"
elif type=="collections_kvstore" and 'disabled' in payload:
del payload['disabled']
logger.debug("i=\"%s\" Attempting to %s type=%s with name=%s on URL=%s with payload=\"%s\" in app=%s proxies_length=%s" % (self.stanzaName, createOrUpdate, type, name, url, payload, app, len(self.proxies)))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
logger.error("i=\"%s\" user=%s, name=%s of type=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", in app=%s, owner=%s" % (self.stanzaName, user, name, type, url, res.status_code, res.reason, res.text, app, owner))
#Saved Searches sometimes fail due to the VSID field, auto-retry in case that solves the problem...
if type=="savedsearches":
if 'vsid' in payload:
del payload['vsid']
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
logger.error("i=\"%s\" user=%s, re-attempted without vsid but result for name=%s of type=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", in app=%s, owner=%s" % (self.stanzaName, user, name, type, url, res.status_code, res.reason, res.text, app, owner))
result = False
else:
logger.info("i=\"%s\" user=%s, name=%s of type=%s with URL=%s successfully %s with the vsid field removed, feel free to ignore the previous error" % (self.stanzaName, user, name, type, url, createOrUpdate))
else:
logger.debug("i=\"%s\" %s name=%s of type=%s in app=%s with URL=%s result=\"%s\" owner=%s" % (self.stanzaName, createOrUpdate, name, type, app, url, res.text, owner))
            #Parse the result to re-confirm the URL and check for messages from Splunk (and log warnings about them)
root = ET.fromstring(res.text)
objURL = None
for child in root:
#Working per entry in the results
if child.tag.endswith("entry"):
#Down to each entry level
for innerChild in child:
#print innerChild.tag
if innerChild.tag.endswith("link") and innerChild.attrib["rel"]=="list":
objURL = "%s/%s" % (self.splunk_rest, innerChild.attrib["href"])
logger.debug("i=\"%s\" name=%s of type=%s in app=%s URL=%s" % (self.stanzaName, name, type, app, objURL))
elif child.tag.endswith("messages"):
for innerChild in child:
if innerChild.tag.endswith("msg") and innerChild.attrib["type"]=="ERROR" or "WARN" in innerChild.attrib:
logger.warn("i=\"%s\" name=%s of type=%s in app=%s had a warn/error message of '%s' owner=%s" % (self.stanzaName, name, type, app, innerChild.text, owner))
            #Sometimes the object appears to be created but is unusable, which is annoying; at least provide the warning in the logs
if not objURL:
message = "never found objURL so cannot complete ACL change with url=%s, response text=\"%s\" when looking for name=%s, type=%s app=%s, owner=%s" % (url, res.text, name, type, app, owner)
logger.warn("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Re-owning it to the previous owner and sharing level
url = "%s/acl" % (objURL)
payload = { "owner": owner, "sharing" : sharing }
logger.info("i=\"%s\" Attempting to change ownership of type=%s with name=%s via URL=%s to owner=%s in app=%s with sharing=%s" % (self.stanzaName, type, name, url, owner, app, sharing))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
#If re-own fails log this for investigation
if (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" user=%s, name=%s of type=%s in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", owner=%s" % (self.stanzaName, user, name, type, app, url, res.status_code, res.reason, res.text, owner))
result = False
else:
logger.debug("i=\"%s\" user=%s, name=%s of type=%s in app=%s, ownership changed with response=\"%s\", owner=%s, sharing=%s" % (self.stanzaName, user, name, type, app, res.text, owner, sharing))
logger.info("i=\"%s\" %s name=%s of type=%s in app=%s owner=%s sharing=%s" % (self.stanzaName, createOrUpdate, name, type, app, owner, sharing))
return result, message
###########################
#
# macroCreation
# Runs the required queries to create or update the macro knowledge objects and then re-owns them to the correct user
#
###########################
def runRestoreMacro(self, config, app, name, username, restoreAsUser, adminLevel, objExists):
result = True
#Only admins can restore objects on behalf of someone else
if config['owner'] != username and adminLevel == False:
message = "Owner of the object is listed as owner=%s, however user=%s requested the restore and is not an admin, rejected" % (config['owner'], username)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Only admins can restore objects into someone else's name
if restoreAsUser != "" and restoreAsUser != username and adminLevel == False:
message = "restoreAsUser=%s which is not the user=%s, this user is not an admin, rejected" % (restoreAsUser, username)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
logger.info("i=\"%s\" Attempting to run macro restore with name=%s, user=%s, restoreAsUser=%s, adminLevel=%s, objExists=%s" % (self.stanzaName, name, username, restoreAsUser, adminLevel, objExists))
        #Change the owner to the new owner
if restoreAsUser != "" and adminLevel == True:
config["owner"] = restoreAsUser
sharing = config["sharing"]
name = config["name"]
owner = config["owner"]
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
message = ""
#We are creating the macro
if objExists == False:
url = "%s/servicesNS/%s/%s/properties/macros" % (self.splunk_rest, owner, app)
logger.info("i=\"%s\" Attempting to create type=macro name=%s on URL=%s in app=%s" % (self.stanzaName, name, url, app))
payload = { "__stanza" : name }
#Create macro
#I cannot seem to get this working on the /conf URL but this works so good enough, and it's in the REST API manual...
#servicesNS/-/search/properties/macros
#__stanza = <name>
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
message = "name=%s of type=macro in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", owner=%s" % (name, app, url, res.status_code, res.reason, res.text, owner)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
else:
#Macros always have the username in this URL context
objURL = "%s/servicesNS/%s/%s/configs/conf-macros/%s" % (self.splunk_rest, owner, app, name)
logger.debug("i=\"%s\" name=%s of type=macro in app=%s URL=%s with owner=%s" % (self.stanzaName, name, app, objURL, owner))
logger.debug("i=\"%s\" name=%s of type=macro in app=%s, received response=\"%s\"" % (self.stanzaName, name, app, res.text))
#Now we have created the macro, modify it so it has some real content (or it's an existing macro we're fixing)
#If this is an app or globally scoped object use the nobody in the URL
url = ""
if objExists == True and sharing != "user":
url = "%s/servicesNS/nobody/%s/properties/macros/%s" % (self.splunk_rest, app, name)
else:
url = "%s/servicesNS/%s/%s/properties/macros/%s" % (self.splunk_rest, owner, app, name)
#Remove parts that cannot be posted to the REST API, sharing/owner we change later
del config["sharing"]
del config["name"]
del config["owner"]
payload = config
logger.debug("i=\"%s\" Attempting to modify type=macro name=%s on URL=%s with payload=\"%s\" in app=%s proxies_length=%s" % (self.stanzaName, name, url, payload, app, len(self.proxies)))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
logger.error("i=\"%s\" name=%s of type=macro in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\"" % (self.stanzaName, name, app, url, res.status_code, res.reason, res.text))
result = False
else:
            #Re-owning it, I've switched URLs again here but it seems to be working so will not change it
url = "%s/servicesNS/%s/%s/configs/conf-macros/%s/acl" % (self.splunk_rest, owner, app, name)
payload = { "owner": owner, "sharing" : sharing }
logger.info("i=\"%s\" Attempting to change ownership of type=macro name=%s via URL=%s to owner=%s in app=%s with sharing=%s" % (self.stanzaName, name, url, owner, app, sharing))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" name=%s of type=macro in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", owner=%s sharing=%s" % (self.stanzaName, name, app, url, res.status_code, res.reason, res.text, owner, sharing))
else:
logger.debug("i=\"%s\" name=%s of type=macro in app=%s, ownership changed with response=\"%s\", newOwner=%s and sharing=%s" % (self.stanzaName, name, app, res.text, owner, sharing))
return result, ""
###########################
#
# macros
#
###########################
#macro use cases are slightly different to everything else on the REST API
#enough that this code has not been integrated into the runQuery() function
def macros(self, app, name, scope, user, restoreAsUser, adminLevel):
logger.info("i=\"%s\" user=%s, attempting to restore name=%s in app=%s of type=macro in scope=%s, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, app, scope, restoreAsUser, adminLevel))
#servicesNS/-/-/properties/macros doesn't show private macros so using /configs/conf-macros to find all the macros
#again with count=-1 to find all the available macros
url = self.splunk_rest + "/servicesNS/-/" + app + "/configs/conf-macros/" + name + "?output_mode=json"
logger.debug("i=\"%s\" Running requests.get() on url=%s with user=%s in app=%s for type=macro proxies_length=%s" % (self.stanzaName, url, self.destUsername, app, len(self.proxies)))
#Determine scope that we will attempt to restore
appScope = False
userScope = False
if scope == "all":
appScope = True
userScope = True
elif scope == "app":
appScope = True
elif scope == "user":
userScope = True
else:
logger.error("i=\"%s\" user=%s, while attempting to restore name=%s, found invalid scope=%s" % (self.stanzaName, user, name, scope))
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
        #Verify=false is hardcoded to work around local SSL issues
res = requests.get(url, auth=auth, headers=headers, verify=self.sslVerify, proxies=self.proxies)
objExists = False
if (res.status_code == 404):
logger.debug("i=\"%s\" URL=%s is throwing a 404, assuming new object creation" % (self.stanzaName, url))
elif (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" type=macro in app=%s, URL=%s statuscode=%s reason=%s, response=\"%s\"" % (self.stanzaName, app, url, res.status_code, res.reason, res.text))
else:
#However the fact that we did not get a 404 does not mean it exists in the context we expect it to, perhaps it's global and from another app context?
#or perhaps it's app level but we're restoring a private object...
logger.debug("i=\"%s\" Attempting to JSON loads on %s" % (self.stanzaName, res.text))
resDict = json.loads(res.text)
for entry in resDict['entry']:
sharingLevel = entry['acl']['sharing']
appContext = entry['acl']['app']
if appContext == app and appScope == True and (sharingLevel == 'app' or sharingLevel == 'global'):
objExists = True
elif appContext == app and userScope == True and sharingLevel == "user":
objExists = True
configList = []
foundAtAnyScope = False
#This object is at user scope or may be at user scope
if userScope == True:
userDir = self.gitTempDir + "/" + app + "/" + "user"
#user directory exists
if os.path.isdir(userDir):
typeFile = userDir + "/macros"
#We found the file, now open it to obtain the contents
if os.path.isfile(typeFile):
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
#We found the relevant item, now restore it
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary=\"%s\"" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestoreMacro(configItem, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the relevant item
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=user in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#The config file did not exist
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no user level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" user directory of dir=%s does not exist" % (self.stanzaName, userDir))
#The object is either app or globally scoped
if appScope == True:
appDir = self.gitTempDir + "/" + app + "/" + "app"
#app directory exists
if os.path.isdir(appDir):
typeFile = appDir + "/macros"
#We found the file, open it and load the config
if os.path.isfile(typeFile):
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
#We found the item, now restore it
for configItem in configList:
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestoreMacro(configItem, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the item
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=app in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#We never found the file to restore from
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no app level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" app directory of dir=%s does not exist" % (self.stanzaName, appDir))
globalDir = self.gitTempDir + "/" + app + "/" + "global"
#global directory exists
if os.path.isdir(globalDir):
typeFile = globalDir + "/macros"
#We found the file, attempt to load the config
if os.path.isfile(typeFile):
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
#We found the item, now restore it
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestoreMacro(configItem, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the item
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=global in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#We did not find the file to restore from
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no global level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" global directory of dir=%s does not exist" % (self.stanzaName, globalDir))
if foundAtAnyScope == True and res_result!=False:
logger.info("i=\"%s\" user=%s restore has run successfully for name=%s, type=macro, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, restoreAsUser, adminLevel))
return True, message
elif res_result == False and foundAtAnyScope == True:
logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=macro, restoreAsUser=%s, adminLevel=%s the object was found, but the restore was unsuccessful" % (self.stanzaName, user, name, restoreAsUser, adminLevel))
return False, message
else:
logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=macro, restoreAsUser=%s, adminLevel=%s however the object was not found, the restore was unsuccessful. Perhaps check the restore date, scope & capitalisation before trying again?" % (self.stanzaName, user, name, restoreAsUser, adminLevel))
return False, message
###########################
#
# Migration functions
# These functions migrate the various knowledge objects mainly by calling the runQueries
# with the appropriate options for that type
# Excluding macros, they have their own function
#
###########################
###########################
#
# Dashboards
#
###########################
def dashboards(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/views", "dashboards", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# Saved Searches
#
###########################
def savedsearches(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/saved/searches", "savedsearches",name, scope, username, restoreAsUser, adminLevel)
###########################
#
# field definitions
#
###########################
def calcfields(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/calcfields", "calcfields", name, scope, username, restoreAsUser, adminLevel)
def fieldaliases(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/fieldaliases", "fieldaliases", name, scope, username, restoreAsUser, adminLevel)
def fieldextractions(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/extractions", "fieldextractions", name, scope, username, restoreAsUser, adminLevel)
def fieldtransformations(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/transforms/extractions", "fieldtransformations", name, scope, username, restoreAsUser, adminLevel)
def workflowactions(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/workflow-actions", "workflow-actions", name, scope, username, restoreAsUser, adminLevel)
def sourcetyperenaming(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/sourcetype-rename", "sourcetype-rename", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# tags
#
##########################
def tags(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/configs/conf-tags", "tags", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# eventtypes
#
##########################
def eventtypes(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/saved/eventtypes", "eventtypes", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# navMenus
#
##########################
def navMenu(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/nav", "navMenu", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# data models
#
##########################
def datamodels(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/datamodel/model", "datamodels", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# collections
#
##########################
def collections(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/storage/collections/config", "collections_kvstore", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# viewstates
#
##########################
def viewstates(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/configs/conf-viewstates", "viewstates", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# time labels (conf-times)
#
##########################
def times(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/configs/conf-times", "times_conf-times", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# panels
#
##########################
def panels(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/panels", "pre-built_dashboard_panels", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# lookups (definition/automatic)
#
##########################
def lookupDefinitions(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/transforms/lookups", "lookup_definition", name, scope, username, restoreAsUser, adminLevel)
def automaticLookups(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/lookups", "automatic_lookups", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# Helper/utility functions
#
##########################
#helper function as per https://stackoverflow.com/questions/31433989/return-copy-of-dictionary-excluding-specified-keys
def without_keys(self, d, keys):
return {x: d[x] for x in d if x not in keys}
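    # Illustrative example for without_keys above (values are made up): stripping sensitive keys before logging,
    #   self.without_keys({"destUsername": "svc", "destPassword": "secret"}, ["destPassword"])
    #   returns {"destUsername": "svc"}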
#Run a Splunk query via the search/jobs endpoint
def runSearchJob(self, query, earliest_time="-1h"):
url = self.splunk_rest + "/servicesNS/-/%s/search/jobs" % (self.appName)
logger.debug("i=\"%s\" Running requests.post() on url=%s with user=%s query=\"%s\" proxies_length=%s" % (self.stanzaName, url, self.destUsername, query, len(self.proxies)))
data = { "search" : query, "output_mode" : "json", "exec_mode" : "oneshot", "earliest_time" : earliest_time }
#no destUsername, use the session_key method
headers = {}
auth = None
if not self.destUsername:
headers = {'Authorization': 'Splunk %s' % self.session_key }
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=data, proxies=self.proxies)
if (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" URL=%s statuscode=%s reason=%s, response=\"%s\"" % (self.stanzaName, url, res.status_code, res.reason, res.text))
res = json.loads(res.text)
#Log return messages from Splunk, often these advise of an issue but not always...
if len(res["messages"]) > 0:
firstMessage = res["messages"][0]
if 'type' in firstMessage and firstMessage['type'] == "INFO":
                #This is a harmless info message, most other messages are likely an issue
logger.info("i=\"%s\" messages from query=\"%s\" were messages=\"%s\"" % (self.stanzaName, query, res["messages"]))
else:
logger.warn("i=\"%s\" messages from query=\"%s\" were messages=\"%s\"" % (self.stanzaName, query, res["messages"]))
return res
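    # Illustrative usage (mirrors the calls made from run_script below):
    #   res = self.runSearchJob("| inputlookup splunkversioncontrol_restorelist")
    #   for row in res["results"]: ...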
###########################
#
# Main logic section
#
##########################
    #restlist_override is when we are passed a dictionary with info on the restore requirements rather than obtaining this via a lookup command
#config_override is for when we are passed a configuration dictionary and we do not need to read our config from stdin (i.e. we were not called by Splunk in the normal fashion)
def run_script(self, restlist_override=None, config_override=None):
if not config_override:
config = self.get_config()
else:
config = config_override
#If we want debugMode, keep the debug logging, otherwise drop back to INFO level
if 'debugMode' in config:
debugMode = config['debugMode'].lower()
if debugMode == "true" or debugMode == "t":
logging.getLogger().setLevel(logging.DEBUG)
self.stanzaName = config["name"].replace("splunkversioncontrol_restore://", "")
useLocalAuth = False
if 'useLocalAuth' in config:
useLocalAuth = config['useLocalAuth'].lower()
if useLocalAuth == "true" or useLocalAuth=="t":
useLocalAuth = True
else:
useLocalAuth = False
#If we're not using the useLocalAuth we must have a username/password to work with
if useLocalAuth == False and ('destUsername' not in config or 'destPassword' not in config):
logger.fatal("i=\"%s\" useLocalAuth is not set to true and destUsername/destPassword not set, exiting with failure" % (self.stanzaName))
sys.exit(1)
if useLocalAuth == False:
self.destUsername = config['destUsername']
self.destPassword = config['destPassword']
if 'remoteAppName' in config:
self.appName = config['remoteAppName']
auditLogsLookupBackTime = "-1h"
if 'auditLogsLookupBackTime' in config:
auditLogsLookupBackTime = config['auditLogsLookupBackTime']
self.gitRepoURL = config['gitRepoURL']
#From server
self.splunk_rest = config['destURL']
excludedList = [ "destPassword", "session_key" ]
cleanArgs = self.without_keys(config, excludedList)
logger.info("i=\"%s\" Splunk Version Control Restore run with arguments=\"%s\"" % (self.stanzaName, cleanArgs))
self.session_key = config['session_key']
if not useLocalAuth and self.destPassword.find("password:") == 0:
self.destPassword = get_password(self.destPassword[9:], self.session_key, logger)
knownAppList = []
self.gitTempDir = config['gitTempDir']
self.gitRootDir = config['gitTempDir']
if 'git_command' in config:
self.git_command = config['git_command'].strip()
logger.debug("Overriding git command to %s" % (self.git_command))
else:
self.git_command = "git"
if 'ssh_command' in config:
self.ssh_command = config['ssh_command'].strip()
logger.debug("Overriding ssh command to %s" % (self.ssh_command))
else:
self.ssh_command = "ssh"
gitFailure = False
if platform.system() == "Windows":
self.windows = True
else:
self.windows = False
proxies = {}
if 'proxy' in config:
proxies['https'] = config['proxy']
if proxies['https'].find("password:") != -1:
start = proxies['https'].find("password:") + 9
end = proxies['https'].find("@")
                logger.debug("Attempting to replace proxy=%s by substituting=%s with a password" % (proxies['https'], proxies['https'][start:end]))
                temp_password = get_password(proxies['https'][start:end], self.session_key, logger)
proxies['https'] = proxies['https'][0:start-9] + temp_password + proxies['https'][end:]
self.proxies = proxies
if 'sslVerify' in config:
self.sslVerify = config['sslVerify']
dirExists = os.path.isdir(self.gitTempDir)
if dirExists and len(os.listdir(self.gitTempDir)) != 0:
if not ".git" in os.listdir(self.gitTempDir):
#include the subdirectory which is the git repo
self.gitTempDir = self.gitTempDir + "/" + os.listdir(self.gitTempDir)[0]
logger.info("gitTempDir=%s" % (self.gitTempDir))
else:
if not dirExists:
#make the directory and clone under here
os.mkdir(self.gitTempDir)
#Initially we must trust our remote repo URL
(output, stderrout, res) = runOSProcess(self.ssh_command + " -n -o \"BatchMode yes\" -o StrictHostKeyChecking=no " + self.gitRepoURL[:self.gitRepoURL.find(":")], logger)
if res == False:
logger.warn("i=\"%s\" Unexpected failure while attempting to trust the remote git repo?! stdout '%s' stderr '%s'" % (self.stanzaName, output, stderrout))
#Clone the remote git repo
(output, stderrout, res) = runOSProcess("%s clone %s %s" % (self.git_command, self.gitRepoURL, self.gitRootDir), logger, timeout=300)
if res == False:
logger.fatal("i=\"%s\" git clone failed for some reason...on url=%s stdout of '%s' with stderrout of '%s'" % (self.stanzaName, self.gitRepoURL, output, stderrout))
sys.exit(1)
else:
logger.debug("i=\"%s\" result from git command: %s, output '%s' with stderroutput of '%s'" % (self.stanzaName, res, output, stderrout))
logger.info("i=\"%s\" Successfully cloned the git URL=%s into directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitTempDir))
if not ".git" in os.listdir(self.gitTempDir):
#include the subdirectory which is the git repo
self.gitTempDir = self.gitTempDir + "/" + os.listdir(self.gitTempDir)[0]
logger.debug("gitTempDir=%s" % (self.gitTempDir))
if stderrout.find("error:") != -1 or stderrout.find("fatal:") != -1 or stderrout.find("timeout after") != -1:
logger.warn("i=\"%s\" error/fatal messages in git stderroutput please review. stderrout=\"%s\"" % (self.stanzaName, stderrout))
gitFailure = True
if not restlist_override:
#Version Control File that lists what restore we need to do...
restoreList = "splunkversioncontrol_restorelist"
res = self.runSearchJob("| inputlookup %s" % (restoreList))
resList = res["results"]
else:
resList = restlist_override
result = False
if len(resList) == 0:
logger.info("i=\"%s\" No restore required at this point in time" % (self.stanzaName))
else:
#Do a git pull to ensure we are up-to-date
if self.windows:
(output, stderrout, res) = runOSProcess("cd /d %s & %s checkout master & %s pull" % (self.gitTempDir, self.git_command, self.git_command), logger, timeout=300, shell=True)
else:
(output, stderrout, res) = runOSProcess("cd %s; %s checkout master; %s pull" % (self.gitTempDir, self.git_command, self.git_command), logger, timeout=300, shell=True)
if res == False:
logger.fatal("i=\"%s\" git pull failed for some reason...on url=%s stdout of '%s' with stderrout of '%s'. Wiping the git directory to re-clone" % (self.stanzaName, self.gitRepoURL, output, stderrout))
shutil.rmtree(self.gitTempDir)
                #The local copy was wiped above, so re-clone it rather than attempting to pull into a directory that no longer exists
                (output, stderrout, res) = runOSProcess("%s clone %s %s" % (self.git_command, self.gitRepoURL, self.gitRootDir), logger, timeout=300)
if res == False:
logger.fatal("i=\"%s\" git clone failed for some reason...on url=%s stdout of '%s' with stderrout of '%s'" % (self.stanzaName, self.gitRepoURL, output, stderrout))
sys.exit(1)
else:
logger.debug("i=\"%s\" result from git command: %s, output '%s' with stderroutput of '%s'" % (self.stanzaName, res, output, stderrout))
logger.info("i=\"%s\" Successfully cloned the git URL=%s into directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitRootDir))
else:
logger.info("i=\"%s\" Successfully ran the git pull for URL=%s from directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitRootDir))
if stderrout.find("error:") != -1 or stderrout.find("fatal:") != -1 or stderrout.find("timeout after") != -1:
logger.warn("i=\"%s\" error/fatal messages in git stderroutput please review. stderrout=\"%s\"" % (self.stanzaName, stderrout))
gitFailure = True
if stderrout.find("timeout after") != -1:
return (False, "git command timed out")
logger.debug("i=\"%s\" The restore list is %s" % (self.stanzaName, resList))
#Attempt to determine all users involved in this restore so we can run a single query and determine if they are admins or not
userList = []
for aRes in resList:
user = aRes['user']
userList.append(user)
#obtain a list of unique user id's
userList = list(set(userList))
ldapFilter = None
usernameFilter = None
for user in userList:
if not ldapFilter:
ldapFilter = "*%s*" % (user)
usernameFilter = user
else:
ldapFilter = "%s, *%s*" % (ldapFilter, user)
usernameFilter = "%s, %s" % (usernameFilter, user)
#Query Splunk and determine if the mentioned users have the required admin role, if not they can only restore the objects they own
res = self.runSearchJob("| savedsearch \"SplunkVersionControl CheckAdmin\" ldapFilter=\"%s\", usernameFilter=\"%s\"" % (ldapFilter, usernameFilter))
userResList = []
if 'results' not in res:
logger.warn("i=\"%s\" Unable to run 'SplunkVersionControl CheckAdmin' for some reason with ldapFilter=%s and usernameFilter=%s" % (self.stanzaName, ldapFilter, usernameFilter))
else:
userResList = res["results"]
#Create a list of admins
adminList = []
for userRes in userResList:
username = userRes["username"]
logger.debug("i=\"%s\" Adding user=%s as an admin username" % (self.stanzaName, username))
adminList.append(username)
if not restlist_override:
# Run yet another query, this one provides a list of times/usernames at which valid entries were added to the lookup file
# if the addition to the lookup file was not done via the required report then the restore is not done (as anyone can add a new role
# and put the username as an admin user!)
res = self.runSearchJob("| savedsearch \"SplunkVersionControl Audit Query\"", earliest_time=auditLogsLookupBackTime)
auditEntries = []
if 'results' not in res:
logger.warn("i=\"%s\" Unable to run 'SplunkVersionControl Audit Query' for some reason with earliest_time=%s" % (self.stanzaName, auditLogsLookupBackTime))
else:
auditEntries = res["results"]
logger.debug("i=\"%s\" Audit Entries are: '%s'" % (self.stanzaName, auditEntries))
#Cycle through each result from the earlier lookup and run the required restoration
for aRes in resList:
if not all (entry in aRes for entry in ('time', 'app', 'name', 'restoreAsUser', 'tag', 'type', 'user', 'scope')):
logger.warn("i=\"%s\" this row is invalid, skipping this row of the results, res=\"%s\"" % (self.stanzaName, aRes))
continue
time = aRes['time']
app = aRes['app']
name = aRes['name']
restoreAsUser = aRes['restoreAsUser']
tag = aRes['tag']
type = aRes['type']
user = aRes['user']
scope = aRes['scope']
logger.info("i=\"%s\" user=%s has requested the object with name=%s of type=%s to be restored from tag=%s and scope=%s, restoreAsUser=%s, this was requested at time=%s in app context of app=%s" % (self.stanzaName, user, name, type, tag, scope, restoreAsUser, time, app))
if not restlist_override:
#If we have an entry in the lookup file it should be listed in the audit entries file
found = False
for entry in auditEntries:
#The audit logs are accurate to milliseconds, the lookup *is not* so sometimes it's off by about a second
timeEntry = entry['time']
timeEntryPlus1 = str(int(entry['time']) + 1)
timeEntryMinus1 = str(int(entry['time']) - 1)
if timeEntry == time or timeEntryPlus1 == time or timeEntryMinus1 == time:
found = True
auditUser = entry['user']
if user != auditUser:
logger.warn("i=\"%s\" user=%s found time entry of time=%s with auditUser=%s, this does not match the expected username (%s), rejecting this entry for name=%s of type=%s in app=%s with restoreAsUser=%s" % (self.stanzaName, user, time, auditUser, user, name, type, app, restoreAsUser))
found = False
else:
logger.debug("i=\"%s\" user=%s, found time entry of time=%s, considering this a valid entry and proceeding to restore" % (self.stanzaName, user, time))
if found == False:
logger.warn("i=\"%s\" user=%s, unable to find a time entry of time=%s matching the auditEntries list of %s, skipping this entry" % (self.stanzaName, user, time, auditEntries))
continue
#else we were provided with the override list and the username/audit logs were already checked
adminLevel = False
if user in adminList:
logger.debug("i=\"%s\" user=%s is an admin and has requested object name=%s of type=%s in app=%s to be restored with user=%s and time=%s" % (self.stanzaName, user, name, type, app, restoreAsUser, time))
adminLevel = True
#Only admins can restore objects as another user
if restoreAsUser != "" and restoreAsUser != user and adminLevel == False:
logger.error("i=\"%s\" user=%s is not an admin and has attempted to restore as a different user, requested user=%s, object=%s of type=%s in app=%s to be restored with restoreAsUser=%s time=%s, rejected" % (self.stanzaName, user, restoreAsUser, name, type, app, restoreAsUser, time))
continue
#Do a git pull to ensure we are up-to-date
if self.windows:
(output, stderrout, res) = runOSProcess("cd /d %s & %s checkout %s" % (self.gitTempDir, self.git_command, tag), logger, shell=True)
else:
(output, stderrout, res) = runOSProcess("cd %s; %s checkout %s" % (self.gitTempDir, self.git_command, tag), logger, shell=True)
if res == False:
logger.error("i=\"%s\" user=%s, object name=%s, type=%s, time=%s, git checkout of tag=%s failed in directory dir=%s stdout of '%s' with stderrout of '%s'" % (self.stanzaName, user, name, type, time, tag, self.gitTempDir, output, stderrout))
else:
logger.info("i=\"%s\" Successfully ran the git checkout for URL=%s from directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitTempDir))
if stderrout.find("error:") != -1 or stderrout.find("fatal:") != -1 or stderrout.find("timeout after") != -1:
logger.warn("i=\"%s\" error/fatal messages in git stderroutput please review. stderrout=\"%s\"" % (self.stanzaName, stderrout))
gitFailure = True
if stderrout.find("timeout after") != -1:
return (False, "git command timed out")
knownAppList = []
if os.path.isdir(self.gitTempDir):
#include the subdirectory which is the git repo
knownAppList = os.listdir(self.gitTempDir)
logger.debug("i=\"%s\" Known app list is %s" % (self.stanzaName, knownAppList))
#If the app is not known, the restore stops here as we have nothing to restore from!
if app not in knownAppList:
logger.error("i=\"%s\" user=%s requested a restore from app=%s but this is not in the knownAppList therefore restore cannot occur, object=%s of type=%s to be restored with user=%s and time=%s" % (self.stanzaName, user, app, name, type, restoreAsUser, time))
continue
#Deal with the different types of restores that might be required, we only do one row at a time...
if type == "dashboard":
(result, message) = self.dashboards(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "savedsearch":
(result, message) = self.savedsearches(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "macro":
(result, message) = self.macros(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "fieldalias":
(result, message) = self.fieldaliases(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "fieldextraction":
(result, message) = self.fieldextractions(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "fieldtransformation":
(result, message) = self.fieldtransformations(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "navmenu":
(result, message) = self.navMenu(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "datamodel":
(result, message) = self.datamodels(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "panels":
(result, message) = self.panels(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "calcfields":
(result, message) = self.calcfields(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "workflowaction":
(result, message) = self.workflowactions(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "sourcetyperenaming":
(result, message) = self.sourcetyperenaming(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "tags":
(result, message) = self.tags(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "eventtypes":
(result, message) = self.eventtypes(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "lookupdef":
(result, message) = self.lookupDefinitions(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "automaticlookup":
(result, message) = self.automaticLookups(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "collection":
(result, message) = self.collections(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "viewstate":
(result, message) = self.viewstates(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "times":
(result, message) = self.times(app, name, scope, user, restoreAsUser, adminLevel)
else:
logger.error("i=\"%s\" user=%s, unknown type, no restore will occur for object=%s of type=%s in app=%s to be restored with restoreAsUser=%s and time=%s" % (self.stanzaName, user, name, type, app, restoreAsUser, time))
if not restlist_override:
#Wipe the lookup file so we do not attempt to restore these entries again
if len(resList) != 0:
if not gitFailure:
res = self.runSearchJob("| makeresults | fields - _time | outputlookup %s" % (restoreList))
logger.info("i=\"%s\" Cleared the lookup file to ensure we do not attempt to restore the same entries again" % (self.stanzaName))
else:
logger.error("i=\"%s\" git failure occurred during runtime, not wiping the lookup value. This failure may require investigation, please refer to the WARNING messages in the logs" % (self.stanzaName))
if gitFailure:
logger.warn("i=\"%s\" wiping the git directory, dir=%s to allow re-cloning on next run of the script" % (self.stanzaName, self.gitTempDir))
shutil.rmtree(self.gitTempDir)
logger.info("i=\"%s\" Done" % (self.stanzaName))
return (result, message)
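# Hypothetical invocation sketch (the object name below is an assumption; only the dictionary keys are
# taken from the checks in run_script above):
#   restore_entry = {"time": "1629900000", "app": "search", "name": "My dashboard", "restoreAsUser": "",
#                    "tag": "2021-08-25_12-00", "type": "dashboard", "user": "jdoe", "scope": "app"}
#   svc_restore.run_script(restlist_override=[restore_entry], config_override=config_dict)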
import argparse
import math
import numpy as np
import scipy.interpolate as interpolate
import torch
import torch.nn as nn
import torch.nn.functional as F
import lib.layers as layers
from .regularization import create_regularization_fns
from .layers.elemwise import _logit as logit
from .layers.elemwise import _sigmoid as sigmoid
from .utils import logpx_to_bpd
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def normal_logprob(x, mu=0, sigma=1):
if sigma is None:
sigma = 1.0
logZ = -math.log(sigma) -0.5 * math.log(2 * math.pi)
return logZ - ((x - mu)/sigma).pow(2) / 2
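# Quick sanity check (illustrative; assumes scipy.stats is available): normal_logprob(torch.tensor(0.3), mu=0.1, sigma=2.0)
# should agree with scipy.stats.norm(loc=0.1, scale=2.0).logpdf(0.3).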
def avg2d(x):
bs, c, w, h = x.shape
if x.shape[1:] == (3, 1, 1):
return x.mean(1, keepdim=True)
else:
kernel = torch.tensor([[0.25, 0.25], [0.25, 0.25]]).unsqueeze(0).unsqueeze(0).expand(c, 1, 2, 2).to(x.device)
return F.conv2d(x.float(), kernel, stride=2, groups=c)
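# Note (added): avg2d above is 2x2 average pooling via a depthwise convolution, e.g. an input of shape
# (B, 3, 32, 32) comes out as (B, 3, 16, 16); the (3, 1, 1) special case collapses RGB down to one channel.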
def avg_2d_in_1d(x, ch='height'):
assert ch in ['height', 'width']
if x.shape[1:] == (3, 1, 1):
return x.mean(1, keepdim=True)
else:
if ch == 'width':
return (x[:, :, :, ::2] + x[:, :, :, 1::2])/2
else:
            return (x[:, :, ::2, :] + x[:, :, 1::2, :])/2
class Downsample(nn.Module):
def __init__(self, tau=0.5, iters=1):
super().__init__()
self.heat = Heat(tau, iters)
#self.pick = Pick()
def forward(self, X, sh):
Y, _ = self.heat(X)
out = F.interpolate(Y, size=sh, mode='nearest')
return out
class Pyramid(nn.Module):
def __init__(self, image_shapes, mode='image'):
super().__init__()
self.image_shapes = image_shapes
self.mode = mode
def forward(self, img):
# img: [B, ch, height, width]
imgs = []
current = img.float()
imgs.append(current)
if self.mode == '1d':
l = 0
while l < len(self.image_shapes) - 1:
if l % 2 == 0:
current = avg_2d_in_1d(current, ch='height')
else:
current = avg_2d_in_1d(current, ch='width')
imgs.append(current)
l += 1
else:
for i in range(len(self.image_shapes)-1):
current = avg2d(current)
imgs.append(current)
imgs.reverse()
return imgs
def make_image_shapes(max_scales, im_size, im_ch, factor=0.5, mode='image'):
# Data shapes
image_shapes = []
if mode == '1d':
MAX = int(np.log2(im_size)*2 + 1)
assert max_scales <= (MAX+1 if im_ch == 3 else MAX), f"max_scales cannot be greater than {MAX+1 if im_ch == 3 else MAX}, given {max_scales}"
image_shapes.append((im_ch, im_size, im_size))
size_old = im_size
l = 0
while l < MAX-1:
if l % 2 == 0:
size = int(round(size_old * factor))
image_shapes.append((im_ch, size, size_old))
else:
image_shapes.append((im_ch, size, size))
size_old = size
l += 1
if im_ch == 3:
image_shapes.append((1, 1, 1))
else:
MAX = int(np.log2(im_size) + 1)
assert max_scales <= (MAX+1 if im_ch == 3 else MAX), f"max_scales cannot be greater than {MAX+1 if im_ch == 3 else MAX}, given {max_scales}"
for l in range(MAX):
size = int(round(im_size * factor**l))
image_shapes.append((im_ch, size, size))
if im_ch == 3:
image_shapes.append((1, 1, 1))
image_shapes = image_shapes[:max_scales]
image_shapes.reverse()
return image_shapes
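# Illustrative example for make_image_shapes above: make_image_shapes(max_scales=3, im_size=32, im_ch=3)
# returns [(3, 8, 8), (3, 16, 16), (3, 32, 32)], i.e. shapes ordered from coarsest to finest.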
def std_for_shapes_1d(norm_res, input_shapes):
# Actual norm_res (128) is double the default (64)! Because std formula has an erroneous "+ 1".
# Retaining it for legacy.
stds = []
for shape in input_shapes:
stds.append(np.sqrt(1/2**(2*np.log2(norm_res) - np.log2(shape[1]) - np.log2(shape[2]) + 1)))
if input_shapes[-1][0] == 3 and input_shapes[0] == (1, 1, 1):
stds[0], stds[1] = np.sqrt(1/3) * stds[0], np.sqrt(2/3) * stds[1]
return stds
def std_for_shapes_2d(norm_res, input_shapes):
stds = []
for shape in input_shapes:
stds.append(np.sqrt(3/4**(np.log2(norm_res) - np.log2(shape[1]))))
stds[0] = stds[0]/np.sqrt(3)
if input_shapes[-1][0] == 9 and input_shapes[0] == (1, 1, 1):
stds[0], stds[1] = np.sqrt(1/3) * stds[0], np.sqrt(2/3) * stds[0]
return stds
def combine1d(y, xbar):
xa = xbar + y
xb = xbar - y
y_shape = list(y.shape)
cat_dim = -1 if y_shape[-1] == y_shape[-2] else -2
y_shape[cat_dim] = int(y_shape[cat_dim]*2)
x = torch.cat((xa.unsqueeze(cat_dim), xb.unsqueeze(cat_dim)), dim=cat_dim).reshape(y_shape)
return x
def combine1ch2ch(y1, y2, xbar):
x1 = xbar + y1
x2 = xbar - y1/2 + np.sqrt(3)/2*y2
x3 = xbar - y1/2 - np.sqrt(3)/2*y2
return torch.cat([x1, x2, x3], dim=1)
def combine2d(y1, y2, y3, xbar):
# y1, y2, y3 = y[:, 0:xbar.shape[1]], y[:, xbar.shape[1]:2*xbar.shape[1]], y[:, 2*xbar.shape[1]:3*xbar.shape[1]]
x1 = y1 + xbar
x2 = - 1/3*y1 + 2*np.sqrt(2)/3*y2 + xbar
x3 = - 1/3*y1 - np.sqrt(2)/3*y2 + np.sqrt(6)/3*y3 + xbar
x4 = - 1/3*y1 - np.sqrt(2)/3*y2 - np.sqrt(6)/3*y3 + xbar
x = torch.empty(*xbar.shape[:2], xbar.shape[2]*2, xbar.shape[3]*2).to(xbar)
    x[:, :, ::2, ::2] = x1
    x[:, :, ::2, 1::2] = x2
    x[:, :, 1::2, ::2] = x3
    x[:, :, 1::2, 1::2] = x4
return x
def split2d(x):
    x1 = x[:, :, ::2, ::2]
    x2 = x[:, :, ::2, 1::2]
    x3 = x[:, :, 1::2, ::2]
    x4 = x[:, :, 1::2, 1::2]
y1 = 3/4*x1 - 1/4*x2 - 1/4*x3 - 1/4*x4
y2 = 2*np.sqrt(2)/4*x2 - np.sqrt(2)/4*(x3 + x4)
y3 = np.sqrt(6)/4*(x3 - x4)
return y1, y2, y3
def split2d_wavelet(x):
    x1 = x[:, :, ::2, ::2]
    x2 = x[:, :, ::2, 1::2]
    x3 = x[:, :, 1::2, ::2]
    x4 = x[:, :, 1::2, 1::2]
y1 = 1/2*x1 + 1/2*x2 - 1/2*x3 - 1/2*x4
y2 = 1/2*x1 - 1/2*x2 + 1/2*x3 - 1/2*x4
y3 = 1/2*x1 - 1/2*x2 - 1/2*x3 + 1/2*x4
xbar = 1/4*x1 + 1/4*x2 + 1/4*x3 + 1/4*x4
return y1, y2, y3, xbar
def combine2d_wavelet(y1, y2, y3, xbar):
# y1, y2, y3 = y[:, 0:xbar.shape[1]], y[:, xbar.shape[1]:2*xbar.shape[1]], y[:, 2*xbar.shape[1]:3*xbar.shape[1]]
x1 = y1/2 + y2/2 + y3/2 + xbar
x2 = y1/2 - y2/2 - y3/2 + xbar
x3 = -y1/2 + y2/2 - y3/2 + xbar
x4 = -y1/2 - y2/2 + y3/2 + xbar
x = torch.empty(*xbar.shape[:2], xbar.shape[2]*2, xbar.shape[3]*2).to(xbar)
    x[:, :, ::2, ::2] = x1
    x[:, :, ::2, 1::2] = x2
    x[:, :, 1::2, ::2] = x3
    x[:, :, 1::2, 1::2] = x4
return x
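# Illustrative sanity check (added, not part of the original module): the Haar-style split/combine pair
# above should be exact inverses of each other up to floating-point error.
def _wavelet_roundtrip_ok(x=None):
    # hypothetical helper name; uses only split2d_wavelet/combine2d_wavelet defined above
    x = torch.randn(2, 3, 8, 8) if x is None else x
    y1, y2, y3, xbar = split2d_wavelet(x)
    return torch.allclose(combine2d_wavelet(y1, y2, y3, xbar), x, atol=1e-5)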
def split2d_mrcnf(x):
c = math.pow(2, 2/3)
    x1 = x[:, :, ::2, ::2]
    x2 = x[:, :, ::2, 1::2]
    x3 = x[:, :, 1::2, ::2]
    x4 = x[:, :, 1::2, 1::2]
y1 = 1/c*x1 + 1/c*x2 - 1/c*x3 - 1/c*x4
y2 = 1/c*x1 - 1/c*x2 + 1/c*x3 - 1/c*x4
y3 = 1/c*x1 - 1/c*x2 - 1/c*x3 + 1/c*x4
xbar = 1/4*x1 + 1/4*x2 + 1/4*x3 + 1/4*x4
return y1, y2, y3, xbar
def combine2d_mrcnf(y1, y2, y3, xbar):
c = math.pow(2, 2/3)
# y1, y2, y3 = y[:, 0:xbar.shape[1]], y[:, xbar.shape[1]:2*xbar.shape[1]], y[:, 2*xbar.shape[1]:3*xbar.shape[1]]
x1 = c*y1/4 + c*y2/4 + c*y3/4 + xbar
x2 = c*y1/4 - c*y2/4 - c*y3/4 + xbar
x3 = -c*y1/4 + c*y2/4 - c*y3/4 + xbar
x4 = -c*y1/4 - c*y2/4 + c*y3/4 + xbar
x = torch.empty(*xbar.shape[:2], xbar.shape[2]*2, xbar.shape[3]*2).to(xbar)
    x[:, :, ::2, ::2] = x1
    x[:, :, ::2, 1::2] = x2
    x[:, :, 1::2, ::2] = x3
    x[:, :, 1::2, 1::2] = x4
return x
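# Illustrative sanity check (added, not part of the original module): split2d_mrcnf/combine2d_mrcnf should
# also reconstruct their input exactly, despite the different scaling constant c.
def _mrcnf_roundtrip_ok(x=None):
    # hypothetical helper name; uses only split2d_mrcnf/combine2d_mrcnf defined above
    x = torch.randn(2, 3, 8, 8) if x is None else x
    y1, y2, y3, xbar = split2d_mrcnf(x)
    return torch.allclose(combine2d_mrcnf(y1, y2, y3, xbar), x, atol=1e-5)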
class CNFMultiscale(nn.Module):
def __init__(self, max_scales=2, factor=0.5, concat_input=True,
mode='image', std_scale=True, joint=False,
regs=argparse.Namespace(kinetic_energy=0.0, jacobian_norm2=0.0),
bn=False, im_ch=3, im_size=32, nbits=8,
dims="64,64,64", strides="1,1,1,1", num_blocks="2,2",
zero_last=True, conv=True, layer_type="concat", nonlinearity="softplus",
time_length=1.0, train_T=False, steer_b=0.0,
div_samples=1, divergence_fn="approximate",
logit=True, alpha=0.05, normal_resolution=64,
solver='bosh3',
disable_cuda=False,
**kwargs):
super().__init__()
self.max_scales = max_scales
self.factor = factor
self.concat_input = concat_input
self.mode = mode
assert self.mode in ['wavelet', 'mrcnf']
self.std_scale = std_scale
self.joint = joint
self.regs = regs
self.bn = bn
self.im_ch, self.im_size, self.nbits = im_ch, im_size, nbits
self.dims, self.strides, self.num_blocks = dims, strides, num_blocks
self.zero_last, self.conv, self.layer_type, self.nonlinearity = zero_last, conv, layer_type, nonlinearity
self.time_length, self.train_T, self.steer_b = time_length, train_T, steer_b
self.div_samples, self.divergence_fn = div_samples, divergence_fn
self.logit, self.alpha = logit, alpha
self.normal_resolution = normal_resolution
self.solver = solver
self.disable_cuda = disable_cuda
self._scale = -1
self.device = torch.device("cuda:%d"%torch.cuda.current_device() if torch.cuda.is_available() and not disable_cuda else "cpu")
self.cvt = lambda x: x.type(torch.float32).to(self.device, non_blocking=True)
# Set image shapes
self.image_shapes = make_image_shapes(max_scales=max_scales, im_size=im_size, im_ch=im_ch, mode=mode)
self.num_scales = len(self.image_shapes)
self.pyramid = Pyramid(image_shapes=self.image_shapes, mode=mode)
MAX = int(np.log2(im_size) + 1)
self.input_shapes = [self.image_shapes[-min(MAX, max_scales)]] + self.image_shapes[-min(MAX, max_scales):-1]
self.input_shapes = [(sh[0] if i==0 else sh[0]*3, sh[1], sh[2]) for i, sh in enumerate(self.input_shapes)]
self.ch1toch3 = False
if max_scales == MAX+1 and im_ch == 3:
self.ch1toch3 = True
self.input_shapes = [(1, 1, 1), (2, 1, 1)] + self.input_shapes[1:]
if self.mode == 'wavelet':
self.z_stds = [np.sqrt(1/4**(np.log2(self.normal_resolution) - np.log2(sh[-1]))) for sh in self.image_shapes] if self.std_scale else [None] * self.num_scales
elif self.mode == 'mrcnf':
c = math.pow(2, 2/3)
self.z_stds = [np.sqrt((1 if s == 0 else c)*1/4**(np.log2(self.normal_resolution) - np.log2(sh[-1]))) for s, sh in enumerate(self.image_shapes)] if self.std_scale else [None] * self.num_scales
self.bns = None
self.coarse_bns = None
if self.concat_input:
self.concat_shapes = [None] + self.image_shapes[:-1]
else:
self.concat_shapes = [None] * len(self.image_shapes)
self.regularization_fns, self.regularization_coeffs = create_regularization_fns(self.regs)
# Create models
models = []
first = True
for input_sh, concat_sh, bl, std in zip(self.input_shapes, self.concat_shapes, self.num_blocks, self.z_stds):
models.append(self.create_model(input_sh, concat_sh, bl, first=first, std=std))
first = False
self.scale_models = nn.ModuleList(models) # TODO: may be safer to use dict keyed by image size
def create_model(self, input_shape, concat_shape=None, num_blocks=2, first=False, std=None):
hidden_dims = tuple(map(int, self.dims.split(",")))
strides = tuple(map(int, self.strides.split(",")))
def build_cnf():
diffeq = layers.ODEnet(
hidden_dims=hidden_dims,
input_shape=input_shape,
concat_shape=concat_shape,
strides=strides,
zero_last_weight=self.zero_last,
conv=self.conv,
layer_type=self.layer_type,
nonlinearity=self.nonlinearity,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
div_samples=self.div_samples,
divergence_fn=self.divergence_fn,
)
cnf = layers.CNF(
odefunc=odefunc,
T=self.time_length,
train_T=self.train_T,
steer_b=self.steer_b,
regularization_fns=self.regularization_fns,
solver=self.solver,
)
return cnf
chain = []
if self.mode == 'wavelet':
chain = [layers.LogitTransform(alpha=self.alpha)] if self.logit else [layers.ZeroMeanTransform()]
elif self.mode == 'mrcnf' and first:
            chain = chain + ([layers.LogitTransform(alpha=self.alpha)] if self.logit else [layers.ZeroMeanTransform()])
chain = chain + [build_cnf() for _ in range(num_blocks)]
if std is not None:
chain = chain + [layers.AffineTransform(scale=std)]
model = layers.SequentialFlow(chain)
return model
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, val):
assert type(val) == int, f"scale can only be set to an int, given: {type(val)}, {val}"
assert val < self.num_scales
self._scale = val
if val < 0:
print(f"ACTIVATING ALL {len(self.scale_models)} scale_models! (JOINT)")
for sc in range(len(self.scale_models)):
for p in self.scale_models[sc].parameters():
p.requires_grad_(True)
else:
for sc in range(len(self.scale_models)):
if sc != val:
for p in self.scale_models[sc].parameters():
p.requires_grad_(False)
else:
# Turn on learning
for p in self.scale_models[sc].parameters():
p.requires_grad_(True)
def density(self, img, noisy=True):
"""Takes in a uint8 img with pixel range [0, 255]"""
data_list = self.pyramid(img)
z_dict, bpd_dict, logpz_dict, deltalogp_dict = {}, {}, {}, {}
logpx = None
reg_states = tuple([0.] * len(self.regularization_coeffs))
for sc, (model, x) in enumerate(zip(self.scale_models, data_list)):
if not self.joint and sc > self.scale:
break
x255 = x.clone()
if not self.training or self.joint or sc == self.scale:
x = self.cvt(x)
if sc != 0:
coarser_up = self.cvt(coarser_up)
# Add noise
x = x // (2**(8-self.nbits)) if self.nbits < 8 else x
noise = x.new().resize_as_(x).uniform_() if noisy else 0.5
x = x.add_(noise).div_(2**self.nbits)
# bsz, c, w, h = x.shape
# Make y
if sc > 0:
if self.mode == 'wavelet':
y1, y2, y3, _ = split2d_wavelet(x)
# y1 in [-1, 1] -> [0, 1] => /2 + 0.5
y1 = y1/2 + 0.5
y2 = y2/2 + 0.5
y3 = y3/2 + 0.5
y = torch.cat([y1, y2, y3], dim=1).clamp_(0, 1)
elif self.mode == 'mrcnf':
y1, y2, y3, _ = split2d_mrcnf(x)
y = torch.cat([y1, y2, y3], dim=1)
else:
y = x
if sc > 0:
concat_var = coarser_up if self.concat_input else None
# Forward through model
if sc == 0:
z, deltalogp, reg_tup = model(y, reverse=False)
else:
z, deltalogp, reg_tup = model(y, reverse=False, concat_var=concat_var)
# LOGPROB
logpz = normal_logprob(z, mu=0, sigma=self.z_stds[sc]).reshape(len(z), -1).sum(1, keepdim=True)
z_dict[sc] = z.detach().cpu()
logpz_dict[sc] = logpz.detach().cpu()
deltalogp_dict[sc] = -deltalogp.detach().cpu()
logpx_scale = logpz - deltalogp
# Compensating logp for x->y tx, and y scaling
if sc > 0 and self.mode == 'wavelet':
logpx_scale += np.prod(coarser_up.shape[-3:]) * np.log(1/2) + np.prod(coarser_up.shape[-3:]) * np.log(1/2 * 1/2 * 1/2)
if not self.training:
logpx_scale = logpx_scale.detach()
if logpx is None:
logpx = logpx_scale
else:
if self.joint:
logpx += logpx_scale
else:
logpx = logpx.detach() + logpx_scale
dims = np.prod(self.image_shapes[sc])
bpd_dict[sc] = logpx_to_bpd(logpx.detach(), dims, self.nbits).cpu()
# Regularization
if not self.training:
reg_states = ()
elif self.joint:
reg_states = tuple(r0 + rs.mean() for r0, rs in zip(reg_states, reg_tup)) if len(self.regularization_coeffs) else ()
elif not self.joint and sc == self.scale:
reg_states = tuple(rs.mean() for rs in reg_tup) if len(self.regularization_coeffs) else ()
else:
reg_states = ()
# Make coarse_image for next scale
# If training, only do this at scale just before current scale
if (not self.training or self.joint or sc == self.scale-1) and (sc+1 < self.num_scales):
noise = x255.new().resize_as_(x255).float().uniform_() if noisy else 0.5
coarser_up = (x255.float()/256.0 + noise/float(2**self.nbits)).clamp_(0, 1)
return logpx, reg_states, bpd_dict, z_dict, logpz_dict, deltalogp_dict
def log_prob(self, img, return_dicts=True, noisy=True, at_sc=-1):
"""Takes in a uint8 img with pixel range [0, 255]"""
data_list = self.pyramid(img)
z_dict, bpd_dict, logpz_dict, deltalogp_dict = {}, {}, {}, {}
logpx = None
reg_states = tuple([0.] * len(self.regularization_coeffs))
for sc, (model, x) in enumerate(zip(self.scale_models, data_list)):
# if not self.joint and sc > self.scale:
# break
# if self.mode != 'wavelet':
x255 = x.clone()
if at_sc == -1 or (at_sc > -1 and sc == at_sc):
x = self.cvt(x)
if sc != 0:
coarser_up = self.cvt(coarser_up)
# # Init logp
# deltalogp = torch.zeros(x.size(0), 1, device=x.device)
# if self.mode != 'wavelet':
# Add noise
x = x // (2**(8-self.nbits)) if self.nbits < 8 else x
noise = x.new().resize_as_(x).uniform_() if noisy else 0.5
x = x.add_(noise).div_(2**self.nbits)
# bsz, c, w, h = x.shape
# Make y
if sc > 0:
if self.mode == 'wavelet':
y1, y2, y3, _ = split2d_wavelet(x)
# y1 in [-1, 1] -> [0, 1] => /2 + 0.5
y1 = y1/2 + 0.5
y2 = y2/2 + 0.5
y3 = y3/2 + 0.5
y = torch.cat([y1, y2, y3], dim=1).clamp_(0, 1)
elif self.mode == 'mrcnf':
y1, y2, y3, _ = split2d_mrcnf(x)
y = torch.cat([y1, y2, y3], dim=1)
else:
y = x
if sc > 0:
concat_var = coarser_up if self.concat_input else None
# Forward through model
if sc == 0:
z, deltalogp, _ = model(y, reverse=False)
else:
z, deltalogp, _ = model(y, reverse=False, concat_var=concat_var)
# LOGPROB
logpz = normal_logprob(z, mu=0, sigma=self.z_stds[sc]).reshape(len(z), -1).sum(1, keepdim=True)
if return_dicts:
z_dict[sc] = z.detach().cpu()
logpz_dict[sc] = logpz.detach().cpu()
deltalogp_dict[sc] = -deltalogp.detach().cpu()
logpx_scale = logpz - deltalogp
# Compensating logp for x->y tx, and y scaling
if sc > 0 and self.mode == 'wavelet':
logpx_scale += np.prod(coarser_up.shape[-3:]) * np.log(1/2) + np.prod(coarser_up.shape[-3:]) * np.log(1/2 * 1/2 * 1/2)
# if not self.training:
if logpx is None:
logpx = logpx_scale.detach().cpu()
else:
# if self.joint:
logpx += logpx_scale.detach().cpu()
if return_dicts:
dims = np.prod(self.image_shapes[sc])
bpd_dict[sc] = logpx_to_bpd(logpx, dims, self.nbits).cpu()
# Make coarse_image for next scale
# If training, only do this at scale just before current scale
# if (not self.training or self.joint or sc == self.scale-1) and (sc+1 < self.num_scales):
if sc+1 < self.num_scales:
noise = x255.new().resize_as_(x255).float().uniform_() if noisy else 0.5
coarser_up = (x255.float()/256.0 + noise/float(2**self.nbits)).clamp_(0, 1)
if return_dicts:
return logpx, bpd_dict, z_dict, logpz_dict, deltalogp_dict
else:
return logpx
def generate_noise(self, batch_size):
noise = [torch.randn(batch_size, *sh) * (std or 1.0) for sh, std in zip(self.input_shapes, self.z_stds)]
return noise
def generate(self, noise_list, temp=1.0):
# noise_list : [z_0, z_1, z_2, z_3] (from coarser to finer scales)
x_dict = {}
y_dict = {}
for sc, (model, z) in enumerate(zip(self.scale_models, noise_list)):
z = self.cvt(z*temp)
if not self.joint and sc > self.scale:
break
if sc == 0:
y, _, _ = model(z, reverse=True)
else:
concat_var = coarse_bn if self.bns is not None else coarser_up if self.concat_input else None
y, _, _ = model(z, reverse=True, concat_var=concat_var)
if self.bns is not None:
mu = self.bns[sc].running_mean.reshape(1,-1, 1, 1)
var = self.bns[sc].running_var
eps = self.bns[sc].eps
std = (var + eps).sqrt().reshape(1,-1, 1, 1)
y = y*std + mu
if sc == 0:
x = y
elif self.mode == 'wavelet':
ch = coarser_up.shape[1]
y11 = y[:, 0:ch]
y22 = y[:, ch:2*ch]
y33 = y[:, 2*ch:3*ch]
# y1 in [-1, 1] -> [0, 1] => /2 + 0.5
y1 = (y11 - 0.5)*2
y2 = (y22 - 0.5)*2
y3 = (y33 - 0.5)*2
x = combine2d_wavelet(y1, y2, y3, coarser_up)
elif self.mode == 'mrcnf':
ch = coarser_up.shape[1]
y1 = y[:, 0:ch]
y2 = y[:, ch:2*ch]
y3 = y[:, 2*ch:3*ch]
x = combine2d_mrcnf(y1, y2, y3, coarser_up)
if sc > 0:
if self.mode == 'wavelet':
y_dict[sc] = [y11.detach().cpu(), y22.detach().cpu(), y33.detach().cpu()]
del y11, y22, y33
else:
y_dict[sc] = y.detach().cpu()
# Make coarser_up
if sc+1 < self.max_scales:
coarser_up = x.detach()
# To compensate for addition of noise
x = (x - 0.5/2**self.nbits).clamp_(0, 1)
x_dict[sc] = x.detach().cpu()
return x_dict, y_dict, x
def forward(self, img, reverse=False, noisy=True, temp=1.0):
if reverse:
return self.generate(img, temp=temp)
else:
return self.density(img, noisy=noisy)
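# Minimal usage sketch for CNFMultiscale (illustrative only; argument values are assumptions and
# lib.layers must provide the ODEnet/ODEfunc/CNF layers referenced above):
#   model = CNFMultiscale(max_scales=3, mode='mrcnf', im_ch=3, im_size=32)
#   model.scale = 0                                          # train one scale at a time, coarsest first
#   logpx, reg_states, bpd_dict, *_ = model(uint8_images)    # density / training pass
#   x_dict, y_dict, x = model(model.generate_noise(16), reverse=True)   # sampling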
<gh_stars>0
from impute import MissForestImputation
from randomforest import RandomForest
import numpy as np
class MissForestImputationLocal(MissForestImputation):
"""private class, missforest subclass for local machine"""
def __init__(self, mf_params, rf_params):
super().__init__(**mf_params)
self.params = rf_params
def miss_forest_imputation(self, matrix_for_impute):
"""impute dataset and return self"""
self.matrix_for_impute = matrix_for_impute
self.raw_fill()
self.previous_iter_matrix = np.copy(self.initial_guess_matrix)
self.cur_iter_matrix = np.copy(self.initial_guess_matrix)
cur_iter = 1
while True:
if cur_iter > self.max_iter:
self.result_matrix = self.previous_iter_matrix
return
print("Iteration " + str(cur_iter))
for var in self.vari:
p = len(self.vart_)
vt = self.vart_[var]
cur_X = self.cur_iter_matrix
cur_obsi = self.obsi[var]
cur_misi = self.misi[var]
if (len(cur_misi) == 0):
continue
p_train = np.delete(np.arange(p), var)
X_train = cur_X[cur_obsi, :][:, p_train]
y_train = cur_X[cur_obsi, :][:, var]
X_test = cur_X[cur_misi, :][:, p_train]
rf = RandomForest(self.params)
imp = rf.fit_predict(X_train, y_train, X_test, vt)
self.cur_iter_matrix[cur_misi, var] = imp
if self.check_converge() == True:
self.result_matrix = self.previous_iter_matrix
return
else:
self.previous_iter_matrix = np.copy(self.cur_iter_matrix)
cur_iter = cur_iter + 1
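# Hypothetical usage sketch (the parameter dictionaries are assumptions; only max_iter, vari, vart_,
# obsi, misi and result_matrix are taken from the code above):
#   imputer = MissForestImputationLocal(mf_params, rf_params)
#   imputer.miss_forest_imputation(matrix_with_missing_values)
#   completed = imputer.result_matrix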
<gh_stars>1-10
from game_effect_modifier.base_game_effect_modifier import BaseGameEffectModifier
from game_effect_modifier.game_effect_type import GameEffectType
from sims4.tuning.tunable import HasTunableSingletonFactory, TunableReference
import services
import sims4.resources
import zone_types
class RelationshipTrackDecayLocker(HasTunableSingletonFactory, BaseGameEffectModifier):
FACTORY_TUNABLES = {'description': '\n A modifier for locking the decay of a relationship track.\n ', 'relationship_track': TunableReference(description='\n The relationship track to lock.\n ', manager=services.statistic_manager(), class_restrictions=('RelationshipTrack',))}
tunable = (TunableReference(manager=services.get_instance_manager(sims4.resources.Types.ACTION), class_restrictions=('LootActions',)),)
def __init__(self, relationship_track, **kwargs):
super().__init__(GameEffectType.RELATIONSHIP_TRACK_DECAY_LOCKER)
self._track_type = relationship_track
def apply_modifier(self, sim_info):
def _all_sim_infos_loaded_callback(*arg, **kwargs):
zone = services.current_zone()
zone.unregister_callback(zone_types.ZoneState.HOUSEHOLDS_AND_SIM_INFOS_LOADED, _all_sim_infos_loaded_callback)
self._set_decay_lock_all_relationships(sim_info, lock=True)
self._initialize_create_relationship_callback(sim_info)
zone = services.current_zone()
if not zone.is_households_and_sim_infos_loaded and not zone.is_zone_running:
zone.register_callback(zone_types.ZoneState.HOUSEHOLDS_AND_SIM_INFOS_LOADED, _all_sim_infos_loaded_callback)
return
self._set_decay_lock_all_relationships(sim_info)
self._initialize_create_relationship_callback(sim_info)
def _initialize_create_relationship_callback(self, owner):
tracker = owner.relationship_tracker
tracker.add_create_relationship_listener(self._relationship_added_callback)
def _set_decay_lock_all_relationships(self, owner, lock=True):
tracker = owner.relationship_tracker
sim_info_manager = services.sim_info_manager()
for other_sim_id in tracker.target_sim_gen():
other_sim_info = sim_info_manager.get(other_sim_id)
if other_sim_info is None:
continue
track = tracker.get_relationship_track(other_sim_id, self._track_type, add=True)
other_tracker = other_sim_info.relationship_tracker
other_track = other_tracker.get_relationship_track(owner.id, self._track_type, add=True)
if not track is None:
if other_track is None:
continue
if lock:
track.add_decay_rate_modifier(0)
other_track.add_decay_rate_modifier(0)
else:
track.remove_decay_rate_modifier(0)
other_track.remove_decay_rate_modifier(0)
def _relationship_added_callback(self, relationship):
sim_a_track = relationship.get_track(self._track_type, add=True)
if sim_a_track is not None:
sim_a_track.add_decay_rate_modifier(0)
def remove_modifier(self, sim_info, handle):
tracker = sim_info.relationship_tracker
tracker.remove_create_relationship_listener(self._relationship_added_callback)
self._set_decay_lock_all_relationships(sim_info, lock=False)
<filename>stanovanja/home/migrations/0054_auto_20210825_1417.py
# Generated by Django 3.2.6 on 2021-08-25 12:17
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0053_auto_20210825_1343'),
]
operations = [
migrations.RemoveField(
model_name='solutionpage',
name='newsletter_section',
),
migrations.AddField(
model_name='solutionpage',
name='new_problem_section',
field=wagtail.core.fields.StreamField([('new_problem', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Naslov')), ('description', wagtail.core.blocks.RichTextBlock(label='Opis', required=False)), ('submit_button', wagtail.core.blocks.CharBlock(label='Tekst na gumbu')), ('checkbox_text', wagtail.core.blocks.CharBlock(label='Tekst ob checkboxu'))], label='Sekcija'))], blank=True, null=True, verbose_name='Nov problem'),
),
]
import sys
import pytest
import numpy as np
from pyinlinemodule.module import InlineModule
def function_with_cpp_args_kwargs(a, b, c=None, d=3, e=(None, "test")):
"""this is a doctring
"""
__cpp__ = """
return Py_BuildValue("(O,O,O,O,O)", a, b, c, d, e);
"""
return None
def function_with_cpp_args(a, b):
"""this is a doctring
"""
__cpp__ = """
return Py_BuildValue("(O,O)", a, b);
"""
return None
def function_with_cpp_single_arg(a):
"""this is a doctring
"""
__cpp__ = """
return Py_BuildValue("(O,O)", a, a);
"""
return None
def function_with_cpp_noargs():
"""this is a doctring
"""
__cpp__ = """
return Py_BuildValue("(i,i,i)", 1, 2, 3);
"""
i = 0
return 5
def function_with_cpp_numpy_returns_arange(start, stop, step):
"""this is a doctring
"""
__cpp__ = """
return PyArray_Arange(PyFloat_AsDouble(start), PyFloat_AsDouble(stop), PyFloat_AsDouble(step), NPY_FLOAT64);
"""
i = 0
return 5
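# Note (illustrative, mirroring the fixtures below): each function above gets compiled by adding it to an
# InlineModule and importing the generated extension module, e.g.
#   m = InlineModule('demo'); m.add_function(function_with_cpp_noargs)
#   demo = m.import_module(); demo.function_with_cpp_noargs()   # -> (1, 2, 3)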
@pytest.fixture(scope='module')
def compiled_function_with_cpp_args_kwargs():
inline_module = InlineModule('compiled_function_with_cpp_args_kwargs')
inline_module.add_function(function_with_cpp_args_kwargs)
return inline_module.import_module(), 'function_with_cpp_args_kwargs'
@pytest.fixture(scope='module')
def compiled_function_with_cpp_args():
inline_module = InlineModule('compiled_function_with_cpp_args')
inline_module.add_function(function_with_cpp_args)
return inline_module.import_module(), 'function_with_cpp_args'
@pytest.fixture(scope='module')
def compiled_function_with_cpp_single_arg():
inline_module = InlineModule('compiled_function_with_cpp_single_arg')
inline_module.add_function(function_with_cpp_single_arg)
return inline_module.import_module(), 'function_with_cpp_single_arg'
@pytest.fixture(scope='module')
def compiled_function_with_cpp_noargs():
inline_module = InlineModule('compiled_function_with_cpp_noargs')
inline_module.add_function(function_with_cpp_noargs)
return inline_module.import_module(), 'function_with_cpp_noargs'
@pytest.mark.parametrize('return_value', [(1, 2, 3)])
def test_compile_single_function_noargs(compiled_function_with_cpp_noargs, return_value):
tested_module, func_name = compiled_function_with_cpp_noargs
assert hasattr(tested_module, func_name)
compiled_function = getattr(tested_module, func_name)
result = compiled_function()
assert result == return_value
    # ensuring only one reference exists, plus the reference in the sys.getrefcount() function
assert sys.getrefcount(result) == 2
@pytest.mark.parametrize('args,kwargs,return_value', [
((1, 2), dict(), (1, 2, None, 3, (None, "test"))),
((1, 2), dict(e=5), (1, 2, None, 3, 5)),
((1, 2, 7), dict(), (1, 2, 7, 3, (None, "test"))),
((1, 2, 'str'), dict(e=None), (1, 2, 'str', 3, None)),
])
def test_compile_single_function_with_kwargs(compiled_function_with_cpp_args_kwargs, args, kwargs, return_value):
tested_module, func_name = compiled_function_with_cpp_args_kwargs
assert hasattr(tested_module, func_name)
compiled_function = getattr(tested_module, func_name)
result = compiled_function(*args, **kwargs)
assert result == return_value
# ensuring only one reference exists, plus the reference in the sys.getrefcount() function
assert sys.getrefcount(result) == 2
@pytest.mark.parametrize('args,return_value', [
((1, 2), (1, 2)),
((1, []), (1, [])),
])
def test_compile_single_function_with_args(compiled_function_with_cpp_args, args, return_value):
tested_module, func_name = compiled_function_with_cpp_args
assert hasattr(tested_module, func_name)
compiled_function = getattr(tested_module, func_name)
result = compiled_function(*args)
assert result == return_value
# ensuring only one reference exists, plus the reference in the sys.getrefcount() function
assert sys.getrefcount(result) == 2
@pytest.mark.parametrize('arg,return_value', [(1, (1, 1)), ([1], ([1], [1]))])
def test_compile_single_function_with_single_args(compiled_function_with_cpp_single_arg, arg, return_value):
tested_module, func_name = compiled_function_with_cpp_single_arg
assert hasattr(tested_module, func_name)
compiled_function = getattr(tested_module, func_name)
result = compiled_function(arg)
assert result == return_value
# ensuring only one reference exists, plus the reference in the sys.getrefcount() function
assert sys.getrefcount(result) == 2
@pytest.mark.parametrize('py_function,function_name,args,return_value', [
(
function_with_cpp_numpy_returns_arange,
'function_with_cpp_numpy_returns_arange',
(0., 10., 1.),
np.arange(0., 10., 1., dtype=np.float64)
),
])
def test_compile_single_function_with_numpy(py_function, function_name, args, return_value):
inline_module = InlineModule('test_compile_single_function_with_numpy', enable_numpy=True)
inline_module.add_function(py_function)
tested_module = inline_module.import_module()
assert hasattr(tested_module, function_name)
compiled_function = getattr(tested_module, function_name)
result = compiled_function(*args)
assert np.all(result == return_value)
<reponame>zhiming-shen/Xen-Blanket-NG
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: <NAME> <<EMAIL>>
import re
from XmTestLib import *
status, output = traceCommand("xm destroy 0")
if status == 0:
FAIL("xm destroy returned bad status, expected non 0, status is: %i" % status)
elif not re.search("Error", output, re.I):
FAIL("xm destroy returned bad output, expected Error:, output is: %s" % output)
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from . import nodes
from .graph import Graph