Dataset columns: content (string, 35 to 762k characters), sha1 (string, 40 characters), id (int64, 0 to 3.66M)
def likelihood(sent, ai, domain, temperature): """Computes likelihood of a given sentence according to the given model.""" enc = ai._encode(sent, ai.model.word_dict) score, _, _ = ai.model.score_sent(enc, ai.lang_h, ai.ctx_h, temperature) return score
8332dfc8c2dba18a117768043dff67e632cc22ff
8,500
def simulator( theta, model="angle", n_samples=1000, delta_t=0.001, # n_trials max_t=20, no_noise=False, bin_dim=None, bin_pointwise=False, ): """Basic data simulator for the models included in HDDM. :Arguments: theta : list or numpy.array or panda.DataFrame Parameters of the simulator. If 2d array, each row is treated as a 'trial' and the function runs n_sample * n_trials simulations. model: str <default='angle'> Determines the model that will be simulated. n_samples: int <default=1000> Number of simulation runs (for each trial if supplied n_trials > 1) n_trials: int <default=1> Number of trials in a simulations run (this specifically addresses trial by trial parameterizations) delta_t: float Size fo timesteps in simulator (conceptually measured in seconds) max_t: float Maximum reaction the simulator can reach no_noise: bool <default=False> Turn noise of (useful for plotting purposes mostly) bin_dim: int <default=None> Number of bins to use (in case the simulator output is supposed to come out as a count histogram) bin_pointwise: bool <default=False> Wheter or not to bin the output data pointwise. If true the 'RT' part of the data is now specifies the 'bin-number' of a given trial instead of the 'RT' directly. You need to specify bin_dim as some number for this to work. :Return: tuple can be (rts, responses, metadata) or (rt-response histogram, metadata) or (rts binned pointwise, responses, metadata) """ # Useful for sbi if type(theta) == list: print("theta is supplied as list --> simulator assumes n_trials = 1") theta = np.asarray(theta).astype(np.float32) elif type(theta) == np.ndarray: theta = theta.astype(np.float32) elif type(theta) == pd.core.frame.DataFrame: theta = theta[model_config[model]["params"]].values.astype(np.float32) else: theta = theta.numpy().astype(float32) if len(theta.shape) < 2: theta = np.expand_dims(theta, axis=0) if theta.ndim > 1: n_trials = theta.shape[0] else: n_trials = 1 # 2 choice models if no_noise: s = 0.0 else: s = 1.0 if model == "test": x = ddm_flexbound( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, boundary_params={}, boundary_fun=bf.constant, boundary_multiplicative=True, max_t=max_t, ) if model == "ddm" or model == "ddm_elife" or model == "ddm_analytic": x = ddm_flexbound( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, boundary_params={}, boundary_fun=bf.constant, boundary_multiplicative=True, max_t=max_t, ) if model == "ddm_legacy" or model == "ddm_vanilla": x = ddm( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, ) if model == "full_ddm_legacy" or model == "full_ddm_vanilla": x = full_ddm_vanilla( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], sz=theta[:, 4], sv=theta[:, 5], st=theta[:, 6], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, ) if model == "angle" or model == "angle2": x = ddm_flexbound( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], s=s, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 4]}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if ( model == "weibull_cdf" or model == "weibull_cdf2" or model == "weibull_cdf_ext" or model == "weibull_cdf_concave" or model == "weibull" ): x = ddm_flexbound( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], s=s, 
boundary_fun=bf.weibull_cdf, boundary_multiplicative=True, boundary_params={"alpha": theta[:, 4], "beta": theta[:, 5]}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "levy": x = levy_flexbound( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], alpha_diff=theta[:, 3], t=theta[:, 4], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "full_ddm" or model == "full_ddm2": x = full_ddm( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], sz=theta[:, 4], sv=theta[:, 5], st=theta[:, 6], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "ddm_sdv": x = ddm_sdv( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], t=theta[:, 3], sv=theta[:, 4], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "ornstein" or model == "ornstein_uhlenbeck": x = ornstein_uhlenbeck( v=theta[:, 0], a=theta[:, 1], z=theta[:, 2], g=theta[:, 3], t=theta[:, 4], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) # 3 Choice models if no_noise: s = np.tile(np.array([0.0, 0.0, 0.0], dtype=np.float32), (n_trials, 1)) else: s = np.tile(np.array([1.0, 1.0, 1.0], dtype=np.float32), (n_trials, 1)) if model == "race_3": x = race_model( v=theta[:, :3], a=theta[:, [3]], z=theta[:, 4:7], t=theta[:, [7]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "race_no_bias_3": x = race_model( v=theta[:, :3], a=theta[:, [3]], z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]), t=theta[:, [5]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "race_no_bias_angle_3": x = race_model( v=theta[:, :3], a=theta[:, [3]], z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]), t=theta[:, [5]], s=s, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 6]}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "lca_3": x = lca( v=theta[:, :3], a=theta[:, [3]], z=theta[:, 4:7], g=theta[:, [7]], b=theta[:, [8]], t=theta[:, [9]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "lca_no_bias_3": x = lca( v=theta[:, :3], a=theta[:, [3]], z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]), g=theta[:, [5]], b=theta[:, [6]], t=theta[:, [7]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "lca_no_bias_angle_3": x = lca( v=theta[:, :3], a=theta[:, [3]], z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]), g=theta[:, [5]], b=theta[:, [6]], t=theta[:, [7]], s=s, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 8]}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) # 4 Choice models if no_noise: s = np.tile(np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32), (n_trials, 
1)) else: s = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32), (n_trials, 1)) if model == "race_4": x = race_model( v=theta[:, :4], a=theta[:, [4]], z=theta[:, 5:9], t=theta[:, [9]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "race_no_bias_4": x = race_model( v=theta[:, :4], a=theta[:, [4]], z=np.column_stack( [theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]] ), t=theta[:, [6]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "race_no_bias_angle_4": x = race_model( v=theta[:, :4], a=theta[:, [4]], z=np.column_stack( [theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]] ), t=theta[:, [6]], s=s, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 7]}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "lca_4": x = lca( v=theta[:, :4], a=theta[:, [4]], z=theta[:, 5:9], g=theta[:, [9]], b=theta[:, [10]], t=theta[:, [11]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "lca_no_bias_4": x = lca( v=theta[:, :4], a=theta[:, [4]], z=np.column_stack( [theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]] ), g=theta[:, [6]], b=theta[:, [7]], t=theta[:, [8]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) if model == "lca_no_bias_angle_4": x = lca( v=theta[:, :4], a=theta[:, [4]], z=np.column_stack( [theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]] ), g=theta[:, [6]], b=theta[:, [7]], t=theta[:, [8]], s=s, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 9]}, delta_t=delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t, ) # Seq / Parallel models (4 choice) if no_noise: s = 0.0 else: s = 1.0 # Precompute z_vector for no_bias models z_vec = np.tile(np.array([0.5], dtype=np.float32), reps=n_trials) if model == "ddm_seq2": x = ddm_flexbound_seq2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=theta[:, 4], z_l_1=theta[:, 5], z_l_2=theta[:, 6], t=theta[:, 7], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, ) if model == "ddm_seq2_no_bias": x = ddm_flexbound_seq2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, t=theta[:, 4], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, ) if model == "ddm_seq2_angle_no_bias": x = ddm_flexbound_seq2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, t=theta[:, 4], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 5]}, ) if model == "ddm_seq2_weibull_no_bias": x = ddm_flexbound_seq2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, t=theta[:, 4], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.weibull_cdf, 
boundary_multiplicative=True, boundary_params={"alpha": theta[:, 5], "beta": theta[:, 6]}, ) if model == "ddm_par2": x = ddm_flexbound_par2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=theta[:, 4], z_l_1=theta[:, 5], z_l_2=theta[:, 6], t=theta[:, 7], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, ) if model == "ddm_par2_no_bias": x = ddm_flexbound_par2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, t=theta[:, 4], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, ) if model == "ddm_par2_angle_no_bias": x = ddm_flexbound_par2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, t=theta[:, 4], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 5]}, ) if model == "ddm_par2_weibull_no_bias": x = ddm_flexbound_par2( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, t=theta[:, 4], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.weibull_cdf, boundary_multiplicative=True, boundary_params={"alpha": theta[:, 5], "beta": theta[:, 6]}, ) if model == "ddm_mic2_adj": x = ddm_flexbound_mic2_adj( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=theta[:, 4], # np.array([0.5], dtype = np.float32), z_l_1=theta[:, 5], # np.array([0.5], dtype = np.float32), z_l_2=theta[:, 6], # np.array([0.5], dtype = np.float32), d=theta[:, 7], t=theta[:, 8], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, ) if model == "ddm_mic2_adj_no_bias": x = ddm_flexbound_mic2_adj( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec[:], z_l_1=z_vec[:], z_l_2=z_vec[:], d=theta[:, 4], t=theta[:, 5], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative=True, boundary_params={}, ) if model == "ddm_mic2_adj_angle_no_bias": x = ddm_flexbound_mic2_adj( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, d=theta[:, 4], t=theta[:, 5], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.angle, boundary_multiplicative=False, boundary_params={"theta": theta[:, 6]}, ) if model == "ddm_mic2_adj_weibull_no_bias": x = ddm_flexbound_mic2_adj( v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a=theta[:, 3], z_h=z_vec, z_l_1=z_vec, z_l_2=z_vec, d=theta[:, 4], t=theta[:, 5], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.weibull_cdf, boundary_multiplicative=True, boundary_params={"alpha": theta[:, 6], "beta": theta[:, 7]}, ) # Output compatibility if n_trials == 1: x = (np.squeeze(x[0], axis=1), np.squeeze(x[1], axis=1), x[2]) if n_trials > 1 and n_samples == 1: x = (np.squeeze(x[0], axis=0), np.squeeze(x[1], axis=0), x[2]) x[2]["model"] = model if bin_dim == 0 or bin_dim == None: return x elif bin_dim > 0 and n_trials == 1 and not bin_pointwise: binned_out = bin_simulator_output(x, nbins=bin_dim) return (binned_out, x[2]) elif bin_dim > 0 
and n_trials == 1 and bin_pointwise: binned_out = bin_simulator_output_pointwise(x, nbins=bin_dim) return ( np.expand_dims(binned_out[:, 0], axis=1), np.expand_dims(binned_out[:, 1], axis=1), x[2], ) elif bin_dim > 0 and n_trials > 1 and n_samples == 1 and bin_pointwise: binned_out = bin_simulator_output_pointwise(x, nbins=bin_dim) return ( np.expand_dims(binned_out[:, 0], axis=1), np.expand_dims(binned_out[:, 1], axis=1), x[2], ) elif bin_dim > 0 and n_trials > 1 and n_samples > 1 and bin_pointwise: return "currently n_trials > 1 and n_samples > 1 will not work together with bin_pointwise" elif bin_dim > 0 and n_trials > 1 and not bin_pointwise: return "currently binned outputs not implemented for multi-trial simulators" elif bin_dim == -1: return "invalid bin_dim"
370e45499f85bd406a2f80230389dd6aa9866cf0
8,501
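A minimal usage sketch for the `simulator` snippet above (id 8,501), assuming the function and its compiled model back-ends (`ddm_flexbound`, the `bf` boundary functions, `model_config`, `bin_simulator_output`, etc.) are importable from the same package; the parameter values are invented and follow the column order the snippet itself uses for the "angle" model (v, a, z, t, theta).

import numpy as np

# Single "trial": a plain list is accepted and treated as n_trials = 1.
theta = [0.7, 1.5, 0.5, 0.3, 0.2]
rts, choices, metadata = simulator(theta, model="angle", n_samples=500)
print(rts.shape, choices.shape, metadata["model"])

# Trial-wise parameters: one row per trial, n_samples draws for each row.
theta_trials = np.array([[0.7, 1.5, 0.5, 0.3, 0.2],
                         [1.0, 1.2, 0.5, 0.25, 0.1]], dtype=np.float32)
rts, choices, metadata = simulator(theta_trials, model="angle", n_samples=100)

# Binned histogram output instead of raw RTs (returns (histogram, metadata)).
hist, metadata = simulator(theta, model="angle", n_samples=500, bin_dim=256)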
from pytato.utils import with_indices_for_broadcasted_shape from typing import Union def logical_not(x: ArrayOrScalar) -> Union[Array, bool]: """ Returns the element-wise logical NOT of *x*. """ if isinstance(x, SCALAR_CLASSES): # https://github.com/python/mypy/issues/3186 return np.logical_not(x) # type: ignore assert isinstance(x, Array) return IndexLambda(with_indices_for_broadcasted_shape(prim.Variable("_in0"), x.shape, x.shape), shape=x.shape, dtype=np.dtype(np.bool8), bindings={"_in0": x})
922f7a0688590fad9492b7e654f97b2f34717ca8
8,502
def _build_xyz_pow(name, pref, l, m, n, shift=2): """ Builds an individual row contraction line. name = pref * xc_pow[l] * yc_pow[m] * zc_pow[n] """ l = l - shift m = m - shift n = n - shift if (pref <= 0) or (l < 0) or (n < 0) or (m < 0): return None mul = " " if pref == 1: ret = name + " =" else: # Basically always an int ret = name + " = %2.1f" % float(pref) mul = " * " if l > 0: ret += mul + "xc_pow[%d]" % (l - 1) mul = " * " if m > 0: ret += mul + "yc_pow[%d]" % (m - 1) mul = " * " if n > 0: ret += mul + "zc_pow[%d]" % (n - 1) mul = " * " if mul == " ": ret += " 1" return ret
0dbae02252b27845e795a586e2e28b58c948fa1d
8,503
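A quick look at the strings emitted by `_build_xyz_pow` above (id 8,503), assuming the function is importable; the output names and exponents are arbitrary. With the default shift=2, the exponents (l, m, n) are each reduced by 2 before the line is built.

print(_build_xyz_pow("out[0]", 2, 3, 2, 2))   # out[0] = 2.0 * xc_pow[0]
print(_build_xyz_pow("out[1]", 1, 2, 3, 2))   # out[1] = yc_pow[0]   (unit prefactor is dropped)
print(_build_xyz_pow("out[2]", 1, 2, 2, 2))   # out[2] = 1           (all reduced powers are zero)
print(_build_xyz_pow("out[3]", 1, 1, 2, 2))   # None                 (negative reduced power)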
def create_decode_network(width=width, height=height, Din=Din, Dout=Dout, d_range=d_range): """ data flow with traffic on: input IO -> tag horn -> (pre-fifo valve) -> FIFO -> (post-fifo valve) -> TAT -> AER_tx -> neurons -> AER_rx -> (neuron output valve) -> PAT -> accumulator -> (pre-fifo valve) -> FIFO -> (post-fifo valve) -> TAT -> tag funnel -> output IO """ N = width * height net = graph.Network("net") min_d, max_d = d_range decoders = np.ones((Dout, N)) * (max_d - min_d) + min_d tap_matrix = np.zeros((N, Din)) if Din == 1: # one synapse per 4 neurons for x in range(0, width, 2): for y in range(0, height, 2): n = y * width + x if x < width // 2: tap_matrix[n, 0] = 1 else: tap_matrix[n, 0] = -1 else: print("need to implement reasonable taps for Din > 1") assert(False) i1 = net.create_input("i1", Din) p1 = net.create_pool("p1", tap_matrix) b1 = net.create_bucket("b1", Dout) o1 = net.create_output("o1", Dout) net.create_connection("c_i1_to_p1", i1, p1, None) decoder_conn = net.create_connection("c_p1_to_b1", p1, b1, decoders) net.create_connection("c_b1_to_o1", b1, o1, None) return net
bce65caa463bea8a582426bfe9fac08617fca812
8,504
def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold)
df3ede87458939e7648090517828e2056cd9cfd6
8,505
import os import subprocess def pscmd(item, pid=os.getpid()): """Invoke ps -o %(item)s -p %(pid)d and return the result""" pscmd = PSCMD if item == 'sid' and os.uname()[0] == 'AIX': pscmd = '/usr/sysv/bin/ps' if item == 'sid' and os.uname()[0] == 'Darwin': item = 'sess' assert pscmd, 'ps command not found (%s), can not run test' % pscmd if item == 'ni' and os.uname()[0] == 'SunOS': item = 'nice' if item == 'rssize' and os.uname()[0] in ['SunOS', 'Darwin']: item = 'rss' if item == 'pgrp' and os.uname()[0] in ['SunOS', 'AIX', 'Darwin']: item = 'pgid' cmdl = [pscmd, '-o', item, '-p', str(pid)] if HAVE_SUBPROCESS: val = subprocess.Popen(cmdl, stdout=subprocess.PIPE).communicate()[0] else: val = os.popen(' '.join(cmdl)).read() val = val.decode() val = val.strip().split()[-1] if item == 'sess' and os.uname()[0] == 'Darwin': # 'ps -o sess' on Darwin returns a hex value val = int(val, 16) return val
db9f5384984b74381cef2b34f5d9aa07acdefe83
8,506
def dGcthetalnorm(w: Wilson, cthetal): """Normalized distribution 1D cthetal""" return tauBp / Btaul * dGcthetal(w, cthetal)
b925c6dad2dd6327f3fe250771c19018ecedcf14
8,507
from typing import Optional def user_deposit_address_fixture( deploy_smart_contract_bundle_concurrently: FixtureSmartContracts, ) -> Optional[UserDepositAddress]: """ Deploy UserDeposit and fund accounts with some balances """ services_smart_contracts = deploy_smart_contract_bundle_concurrently.services_smart_contracts if services_smart_contracts: return services_smart_contracts.user_deposit_proxy.address return None
496f27fd9576191e91ac90c6e17c2b07fae629ab
8,508
import numpy as np def vonNeumann(t, rho, H): """(quantum Liouville-)von Neumann equation""" H = H(t) rho = rho.reshape(H.shape) rho_dot = -1j*(np.dot(H, rho) - np.dot(rho, H)) return rho_dot.flatten()
e00f9cdadacf36ba40240018d4b1dac1a7ebbba3
8,509
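A small integration sketch for the `vonNeumann` right-hand side above (id 8,509), assuming the function is importable; the two-level Hamiltonian is a made-up Rabi example, and `scipy.integrate.solve_ivp` (SciPy >= 1.4 for the `args` keyword) propagates the complex-valued flattened density matrix.

import numpy as np
from scipy.integrate import solve_ivp

# Time-independent sigma_x Hamiltonian, wrapped as a callable of t as the snippet expects.
H = lambda t: 0.5 * np.array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)

# Start in the ground state |0><0|; the solver works on the flattened density matrix.
rho0 = np.array([[1.0, 0.0], [0.0, 0.0]], dtype=complex).flatten()

sol = solve_ivp(vonNeumann, (0.0, np.pi), rho0, args=(H,),
                t_eval=np.linspace(0.0, np.pi, 50), rtol=1e-8)

rho_final = sol.y[:, -1].reshape(2, 2)
print(np.real(np.trace(rho_final)))   # trace is preserved (~1)
print(np.real(rho_final[1, 1]))       # excited-state population after half a Rabi period (~1)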
import tqdm def conjure_categories(path): """ Look for all pngs in the path. They are generated by quicklook.py and organised into folders by resolution. Each resolution has a number of variables associated to it and each variable can have a number of vertical levels and lead times associated to it. We want to get all of these associations as a nested dictionary so that we can use them for drop downs when selecting images. Args: path: Returns: """ # Get a list of the resolutions (each directory in the path) resolutions = [d.name for d in path.glob("*") if d.is_dir()] # For each directory find a list of all unique variables using the file patterns # from quicklook.py lookup = dict() for resolution in resolutions: lookup[resolution] = dict( varnames=[], lead_times=[], levels=dict(), ) plots = [f.name for f in (path / resolution).glob("*.png")] for plot in tqdm(plots): # TODO: Only have altitude at the moment but will need to format the # filenames in a more parseable way in future for more coordinates if "altitude" in plot: result = parse.parse( "{name}_altitude{vertical_level:d}_T+{lead_time:02d}.png", plot ).named if result["name"] not in lookup[resolution]["levels"].keys(): lookup[resolution]["levels"][result["name"]] = [] if ( result["vertical_level"] not in lookup[resolution]["levels"][result["name"]] ): lookup[resolution]["levels"][result["name"]].append( result["vertical_level"] ) else: result = parse.parse("{name}_T+{lead_time:02d}.png", plot).named if result["name"] not in lookup[resolution]["varnames"]: lookup[resolution]["varnames"].append(result["name"]) if result["lead_time"] not in lookup[resolution]["lead_times"]: lookup[resolution]["lead_times"].append(result["lead_time"]) return lookup
c36fdba3dd0ac159ee2178a0eda2a617356f0c52
8,510
def nicer_array(a, mm_cutoff=0.3): """ Returns a scaled array, the scaling, and a unit prefix Example: nicer_array( np.array([2e-10, 3e-10]) ) Returns: (array([200., 300.]), 1e-12, 'p') """ if np.isscalar(a): x = a elif len(a) == 1: x = a[0] else: x = np.array(a) fac, prefix = nicer_scale_prefix( x, mm_cutoff=mm_cutoff ) return a/fac, fac, prefix
e5abe6b45a4c80d8eb84d9f9f5aed1b11f19684e
8,511
import uuid import pickle def build_playground(): """ build a playground based on user's input building and algorithm type input: userid, algorithm, target building output: none """ userid, building, algo_type = request.form['userid'], request.form['building'], request.form['algo_type'] user = User.objects(userid=userid).first() pgid = str(uuid.uuid4()) algo_instance = get_algo_instance(algo_type=algo_type, target_building=building, pgid=pgid) algo_binaries = pickle.dumps(algo_instance, protocol=pickle.HIGHEST_PROTOCOL) objs = RawMetadata.objects(building=building) pg = Playground( userid=userid, pgid=pgid, building=building, algo_type=algo_type, algo_model=algo_binaries, playground_labeled_metadata=[] ).save() # add playground to user's record user.playground.append(pg) user.save() logger.info('build playground={} for user={}'.format(pg.pgid, user.userid)) message = { 'userid': userid, 'new_playground': pgid } resp = jsonify(message) return resp
31b73b3c505d27dca07569dc95c31d78822da452
8,512
def menuItemDirective(_context, menu, for_, action, title, description=u'', icon=None, filter=None, permission=None, layer=IDefaultBrowserLayer, extra=None, order=0, item_class=None): """Register a single menu item.""" return menuItemsDirective(_context, menu, for_, layer).menuItem( _context, action, title, description, icon, filter, permission, extra, order, item_class)
9ca19bd71cef30db9f8cd2a1154154965cf31b7d
8,513
def getQueueStatistics (): """ Returns a 4-tuple containing the numbers of identifiers in the Crossref queue by status: (awaiting submission, submitted, registered with warning, registration failed). """ q = ezidapp.models.CrossrefQueue.objects.values("status").\ annotate(django.db.models.Count("status")) d = {} for r in q: d[r["status"]] = r["status__count"] return (d.get("U", 0), d.get("S", 0), d.get("W", 0), d.get("F", 0))
2693365e24dc28b57ddbc8db5315779acee2d617
8,514
def assign_targeting_score_v2( base, manual_selected_objids=None, gmm_parameters=None, ignore_specs=False, debug=False, n_random=50, seed=123, remove_lists=None, low_priority_objids=None, **kwargs, ): """ Last updated: 05/19/2020 100 Human selection and Special targets 150 sats without AAT/MMT/PAL specs 180 low-z (z < 0.05) but ZQUALITY = 2 200 within host, r < 17.77, gri/grz cuts OR others with very low SB 300 within host, r < 20.75, high p_GMM or GMM outliers or very high priority 400 within host, r < 20.75, main targeting cuts 500 within host, r < 20.75, gri/grz cuts, low-SB, random selection of 50 600 outwith host, r < 17.77 OR very high p_GMM, low SB 700 within host, r < 20.75, gri/grz cuts, low SB 800 within host, r < 20.75, gri/grz cuts, everything else 900 outwith host, r < 20.75, gri/grz cuts 1000 everything else 1100 Not in gri/grz cut 1200 Not galaxy 1300 Not clean 1350 Removed by hand 1400 Has spec already """ basic_cut = (C.relaxed_targeting_cuts | C.paper1_targeting_cut) & C.is_clean2 & C.is_galaxy2 & Query("r_mag < 21") if not ignore_specs: basic_cut &= ~C.has_spec base = add_cut_scores(base) base["P_GMM"] = np.float64(0) base["log_L_GMM"] = np.float64(0) base["TARGETING_SCORE"] = np.int32(1000) base["index"] = np.arange(len(base)) surveys = [col[6:] for col in base.colnames if col.startswith("OBJID_")] if gmm_parameters is not None: for survey in surveys: gmm_parameters_this = gmm_parameters.get(survey) if gmm_parameters_this is None: continue postfix = "_" + survey base_this = Query( basic_cut, "OBJID{} != -1".format(postfix), "REMOVE{} == 0".format(postfix), "is_galaxy{}".format(postfix), ).filter(base) for color in get_all_colors(): b1, b2 = color n1 = "".join((b1, "_mag", postfix)) n2 = "".join((b2, "_mag", postfix)) if n1 not in base_this.colnames or n2 not in base_this.colnames: continue with np.errstate(invalid="ignore"): base_this[color] = base_this[n1] - base_this[n2] base_this[color + "_err"] = np.hypot( base_this["".join((b1, "_err", postfix))], base_this["".join((b2, "_err", postfix))], ) bands = getattr(utils, "get_{}_bands".format(survey))() # pylint: disable=not-callable base_this["P_GMM"] = ensure_proper_prob( calc_gmm_satellite_probability( base_this, gmm_parameters_this, bands=bands, mag_err_postfix="_err" + postfix, ) ) base_this["log_L_GMM"] = calc_log_likelihood( *get_input_data( base_this, bands=bands, mag_err_postfix="_err" + postfix, ), *(gmm_parameters_this[n] for n in param_labels_nosat), ) to_update_mask = base_this["P_GMM"] > base["P_GMM"][base_this["index"]] if to_update_mask.any(): to_update_idx = base_this["index"][to_update_mask] for col in ("P_GMM", "log_L_GMM"): base[col][to_update_idx] = base_this[col][to_update_mask] del base_this, to_update_mask del base["index"] bright = C.sdss_limit exclusion_cuts = Query() if low_priority_objids is not None: exclusion_cuts = Query(exclusion_cuts, QueryMaker.in1d("OBJID", low_priority_objids, invert=True)) if "sdss" in surveys and ("decals" in surveys or "des" in surveys): deep_survey = "des" if "des" in surveys else "decals" has_good_deep = Query( "OBJID_{} != -1".format(deep_survey), "REMOVE_{} == 0".format(deep_survey), ) over_subtraction = Query( QueryMaker.equals("survey", "sdss"), Query(has_good_deep, "r_mag_{} > 20.8".format(deep_survey)) | Query(~has_good_deep, "u_mag > r_mag + 3.5"), ) exclusion_cuts = Query(exclusion_cuts, ~over_subtraction) if "des" in surveys: des_bright_stars = Query( QueryMaker.equals("survey", "des"), "0.7 * (r_mag + 10.2) > sb_r", "gr < 0.6", "r_mag < 17", 
C.valid_g_mag, C.valid_sb, ) bright = Query(bright, ~des_bright_stars) exclusion_cuts = Query(exclusion_cuts, ~des_bright_stars) veryhigh_p_gmm = Query("P_GMM >= 0.95", "log_L_GMM >= -7") high_p_gmm = Query("P_GMM >= 0.7") | Query("log_L_GMM < -7") low_sb_cut = Query(Query("score_sb_r >= 20"), C.valid_sb) very_low_sb_cut = Query( "r_mag < 20.8", ( Query( C.high_priority_cuts, Query("score_sb_r >= 21.25") | Query("sb_r >= 25.25"), ) | Query( QueryMaker.equals("survey", "des"), Query("score_sb_r >= 21.5") | Query("sb_r >= 25.5"), ) ), C.valid_sb, exclusion_cuts, ) fill_values_by_query(base, C.faint_end_limit, {"TARGETING_SCORE": 900}) fill_values_by_query(base, Query(C.sat_rcut, C.faint_end_limit), {"TARGETING_SCORE": 800}) fill_values_by_query( base, Query( C.sat_rcut, C.faint_end_limit, C.relaxed_targeting_cuts, exclusion_cuts, ), {"TARGETING_SCORE": 700}, ) fill_values_by_query( base, (bright | Query(veryhigh_p_gmm, C.relaxed_cut_sb, exclusion_cuts)), {"TARGETING_SCORE": 600}, ) fill_values_by_query( base, Query(C.sat_rcut, C.high_priority_cuts, C.faint_end_limit, exclusion_cuts), {"TARGETING_SCORE": 400}, ) fill_values_by_query( base, Query("TARGETING_SCORE == 400", (high_p_gmm | low_sb_cut)), {"TARGETING_SCORE": 300}, ) fill_values_by_query(base, Query(C.sat_rcut, (bright | very_low_sb_cut)), {"TARGETING_SCORE": 200}) need_random_selection = np.flatnonzero( Query(basic_cut, "TARGETING_SCORE >= 700", "TARGETING_SCORE < 800").mask(base) ) if len(need_random_selection) > n_random: random_mask = np.zeros(len(need_random_selection), dtype=bool) random_mask[:n_random] = True np.random.RandomState(seed).shuffle(random_mask) # pylint: disable=no-member need_random_selection = need_random_selection[random_mask] base["TARGETING_SCORE"][need_random_selection] = 500 base["TARGETING_SCORE"] += (8 - np.digitize(base["score_sb_r"], np.linspace(19.25, 22, 7))) * 10 + ( 9 - np.floor(base["P_GMM"] * 10).astype(np.int32) ) fill_values_by_query(base, ~basic_cut, {"TARGETING_SCORE": 1100}) fill_values_by_query(base, ~C.is_galaxy2, {"TARGETING_SCORE": 1200}) fill_values_by_query(base, ~C.is_clean2, {"TARGETING_SCORE": 1300}) if not ignore_specs: fill_values_by_query(base, C.has_spec, {"TARGETING_SCORE": 1400}) fill_values_by_query( base, Query(basic_cut, "ZQUALITY == 2", "SPEC_Z < 0.05"), {"TARGETING_SCORE": 180}, ) fill_values_by_query( base, Query( C.is_sat, (lambda x: (x != "AAT") & (x != "MMT") & (x != "PAL"), "TELNAME"), ), {"TARGETING_SCORE": 150}, ) if remove_lists is not None: for survey in surveys: if survey not in remove_lists: continue fill_values_by_query( base, Query( C.is_clean2, (lambda x: np.in1d(x, remove_lists[survey]), "OBJID"), (lambda x: x == survey, "survey"), ), {"TARGETING_SCORE": 1350}, ) if manual_selected_objids is not None: q = Query((lambda x: np.in1d(x, manual_selected_objids), "OBJID")) if not ignore_specs: q &= ~C.has_spec fill_values_by_query(base, q, {"TARGETING_SCORE": 100}) base.sort("TARGETING_SCORE") return base
88ab3ff423eb8d0ce64906d330e42b037ff8cad5
8,515
from typing import List from typing import Union def create_compressed_generator( original_generator: CompressorArg, compressed_cse_list: List[List[Union[List[uint64], List[Union[bytes, None, Program]]]]], ) -> BlockGenerator: """ Bind the generator block program template to a particular reference block, template bytes offsets, and SpendBundle. """ start = original_generator.start end = original_generator.end program = DECOMPRESS_BLOCK.curry( DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list ) generator_arg = GeneratorArg(original_generator.block_height, original_generator.generator) return BlockGenerator(program, [generator_arg])
c2eb437caefa53452df61e1f5b4115ab4220a323
8,516
def run_mcmc(meas, x, nsamples, covm=None, scales=None): """ Sample the likelihood space with a Markov Chain Monte Carlo. :param meas: TemplateMeasurement measurement whose spectrum likelihood space is to be probe :param x: [float] parameter values where to start the chain :param covm: [[float]] covariance matrix values if sampling transformed space :param scales: [float] parameter scales if not sampling transformed space :return: [float], [float], [float], pymcmc.MCMC posterior mean, lower CI, upper CI for each parameter, and the MCMC object used for sampling """ mcmc = MCMC(meas.spec.npars) mcmc.set_values(x) if covm is not None and scales is None: mcmc.set_covm(covm) elif scales is not None: mcmc.set_scales(scales) else: raise ValueError("Must provide covariance OR scales") mcmc.rescale = 2 # good starting point mcmc.learn_scale(meas.spec.ll, 1000) mcmc.run(meas.spec.ll, nsamples) mean = list() mean_down = list() mean_up = list() for ipar in range(meas.spec.npars): mean.append(np.mean(mcmc.data[:, ipar])) low, high, _, _ = npinterval.interval(mcmc.data[:, ipar], 0.6827) mean_down.append(low-mean[-1]) mean_up.append(high-mean[-1]) return mean, mean_down, mean_up, mcmc
79f7806d3c5c84693dfbfcd6d4236734ec7921de
8,517
def get_vlan_list(dut, cli_type="click"): """ Get list of VLANs Author : Prudvi Mangadu ([email protected]) :param dut: :param cli_type: :return: """ st.log("show vlan to get vlan list") rv = show_vlan_config(dut, cli_type=cli_type) vlan_list = list(set([eac['vid'] for eac in rv])) return vlan_list
5ce768bc8a30fa73fb2f4930384197535584de64
8,518
def begin_organization_creation_task(registered_id): """ Asynchronously create our tenant schema. Email owner when process completes. """ # Run the sub-routine for taking the OrganizationRegistration object # creating our Tenant from it. call_command('populate_organization', str(registered_id)) # foundation_public/management/commands/populate_organization.py # Send email to the owner of the Organization letting them know we've successfully # finished setting up their tenancy. call_command('send_organization_ready_email', str(registered_id)) # foundation_email/management/commands/send_organization_ready_email.py # Delete the registered organization. PublicOrganizationRegistration.objects.get(id=registered_id).delete() # Return nothing. return None
fcaccc4e44def7a0d5ce83ac179899d0b288ac9c
8,519
import itertools def rewrite_blockwise(inputs): """Rewrite a stack of Blockwise expressions into a single blockwise expression Given a set of Blockwise layers, combine them into a single layer. The provided layers are expected to fit well together. That job is handled by ``optimize_blockwise`` Parameters ---------- inputs : List[Blockwise] Returns ------- blockwise: Blockwise See Also -------- optimize_blockwise """ if len(inputs) == 1: # Fast path: if there's only one input we can just use it as-is. return inputs[0] inputs = {inp.output: inp for inp in inputs} dependencies = { inp.output: {d for d, v in inp.indices if v is not None and d in inputs} for inp in inputs.values() } dependents = reverse_dict(dependencies) new_index_iter = ( c + (str(d) if d else "") # A, B, ... A1, B1, ... for d in itertools.count() for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" ) [root] = [k for k, v in dependents.items() if not v] # Our final results. These will change during fusion below indices = list(inputs[root].indices) new_axes = inputs[root].new_axes concatenate = inputs[root].concatenate dsk = dict(inputs[root].dsk) changed = True while changed: changed = False for i, (dep, ind) in enumerate(indices): if ind is None: continue if dep not in inputs: continue changed = True # Replace _n with dep name in existing tasks # (inc, _0) -> (inc, 'b') dsk = {k: subs(v, {blockwise_token(i): dep}) for k, v in dsk.items()} # Remove current input from input indices # [('a', 'i'), ('b', 'i')] -> [('a', 'i')] _, current_dep_indices = indices.pop(i) sub = { blockwise_token(i): blockwise_token(i - 1) for i in range(i + 1, len(indices) + 1) } dsk = subs(dsk, sub) # Change new input_indices to match give index from current computation # [('c', j')] -> [('c', 'i')] new_indices = inputs[dep].indices sub = dict(zip(inputs[dep].output_indices, current_dep_indices)) contracted = { x for _, j in new_indices if j is not None for x in j if x not in inputs[dep].output_indices } extra = dict(zip(contracted, new_index_iter)) sub.update(extra) new_indices = [(x, index_subs(j, sub)) for x, j in new_indices] # Update new_axes for k, v in inputs[dep].new_axes.items(): new_axes[sub[k]] = v # Bump new inputs up in list sub = {} # Map from (id(key), inds or None) -> index in indices. Used to deduplicate indices. 
index_map = {(id(k), inds): n for n, (k, inds) in enumerate(indices)} for i, index in enumerate(new_indices): id_key = (id(index[0]), index[1]) if id_key in index_map: # use old inputs if available sub[blockwise_token(i)] = blockwise_token(index_map[id_key]) else: index_map[id_key] = len(indices) sub[blockwise_token(i)] = blockwise_token(len(indices)) indices.append(index) new_dsk = subs(inputs[dep].dsk, sub) # indices.extend(new_indices) dsk.update(new_dsk) # De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)] # Make sure that we map everything else appropriately as we remove inputs new_indices = [] seen = {} sub = {} # like {_0: _0, _1: _0, _2: _1} for i, x in enumerate(indices): if x[1] is not None and x in seen: sub[i] = seen[x] else: if x[1] is not None: seen[x] = len(new_indices) sub[i] = len(new_indices) new_indices.append(x) sub = {blockwise_token(k): blockwise_token(v) for k, v in sub.items()} dsk = {k: subs(v, sub) for k, v in dsk.items()} indices_check = {k for k, v in indices if v is not None} numblocks = toolz.merge([inp.numblocks for inp in inputs.values()]) numblocks = {k: v for k, v in numblocks.items() if v is None or k in indices_check} # Update IO-dependency information io_deps = {} for v in inputs.values(): io_deps.update(v.io_deps) return Blockwise( root, inputs[root].output_indices, dsk, new_indices, numblocks=numblocks, new_axes=new_axes, concatenate=concatenate, annotations=inputs[root].annotations, io_deps=io_deps, )
dc80aa6c55d3ac6fafe780e5c58f3961d5d92b66
8,520
def sort_drugs(processed_data, alpha_sort, **kwargs): """ Sorts all drug names, as primary keys of processed data dictionary. Sorting is governed by primary criteria of decreasing cost, then secondary criteria of alphabetical order. Secondary criteria ignores unsafe characters if "alpha_sort" is True; and does not ignore unsafe characters if False. Requires sort_criteria() inner function. Args: processed_data (dictionary): contains all analyzed data. Primary key is drug name (string), and primary value is tuple containing number of prescribers (integer, index 0) and total cost (float, index 1). alpha_sort (boolean): if True, special characters are not considered during sorting. If False, special characters are considered during sorting. safe_char (list of strings): contains all characters considered safe. Returns: all_drugs_sorted (list of strings): contains all drug names in sequential list sorted by drug cost and alphanumeric name. """ def sort_criteria(drug): """ Determines mapped sorting value of cost and alphanumeric name for all drugs, as keys of processed data dictionary. Required by sort_drugs() outer function. Args: drug (string): drug name. Returns: (tuple): ordered and mapped sorting criteria of cost and name. """ # Sets first criteria of decreasing drug cost cost_criteria = - processed_data[drug][1] # Sets second criteria of alphanumeric drug name name_criteria = drug.upper() # If True, does not consider special characters in alphanumeric order if alpha_sort: # Iterates over all characters in drug name for char in drug: # If character is not in safe list, remove from name criteria if char not in safe_char: # Removes special characters name_criteria = name_criteria.replace(char,"") # Returns primary and secondary sorting criteria return (cost_criteria, name_criteria) # Sets safe characters for evaluation of name criteria safe_char = kwargs['ch'] # Sorts drug names by decreasing cost then alphanumeric order all_drugs_sorted = sorted(processed_data, key=sort_criteria) # Returns list of sorted drug names return all_drugs_sorted
aa3727dc52f0204c7c39807982a998cc03fabd2d
8,521
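A small self-contained check of the ordering produced by `sort_drugs` above (id 8,521), assuming it is importable; note that the safe-character list is passed through the `ch` keyword, since the function reads `kwargs['ch']`. The drug records are invented.

import string

processed_data = {
    "BENZTROPINE MESYLATE": (1, 1500.0),   # (prescriber count, total cost)
    "DR.REDDY": (1, 300.0),
    "AMBIEN": (2, 300.0),
}
safe_chars = list(string.ascii_uppercase + string.digits + " ")

# Primary key: decreasing cost; the 300.0 tie is broken alphabetically with '.' ignored.
print(sort_drugs(processed_data, alpha_sort=True, ch=safe_chars))
# ['BENZTROPINE MESYLATE', 'AMBIEN', 'DR.REDDY']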
def log_k2ex_and_get_msg(ex, prefix, topology): """ LOG K2 exception and extracted message. Return NLS message """ LOG.exception(ex) detail = {} k2msg = _("None") if isinstance(ex, K2Error) and ex.k2response: detail['Request_headers'] = ex.k2response.reqheaders detail['Response_headers'] = ex.k2response.headers detail['Response_body'] = ex.k2response.body detail['Response_status'] = ex.k2response.status if hasattr(ex.k2response, 'k2err'): m = ex.k2response.k2err.find('./Message') if m is not None: k2msg = m.text msg = _("%(prefix)s ***K2 Operator Error***: %(ex_msg)s [K2 Error body " "Message: %(k2msg)s]") %\ dict(prefix=prefix, ex_msg=ex, k2msg=k2msg) LOG.error(msg) if detail: LOG.error(_("Error details: %s") % detail) if topology is not None: if 'error' in topology: topology['error'].append(msg) else: topology['error'] = [msg] return msg
a1a827ac38980e593e58236ce8d60eb01b957050
8,522
import pandas as pd def fetch_ticket(identifier): """Return data of ticket with given identifier as pandas dataframe.""" try: return pd.read_csv(f'./data/tickets/{identifier}.csv') except Exception: return None
46d776eab0e7867dd14079147a6101c9b8fddfa5
8,523
import torch import torch.nn.functional as F def dice_loss(logits, targets, smooth=1.0): """ logits: (torch.float32) shape (N, C, H, W) targets: (torch.float32) shape (N, H, W), value {0,1,...,C-1} """ outputs = F.softmax(logits, dim=1) targets = torch.unsqueeze(targets, dim=1) targets = torch.zeros_like(logits).scatter_(1, targets.type(torch.int64), 1.0) inter = outputs * targets dice = 1 - ((2*inter.sum(dim=(2,3)) + smooth) / (outputs.sum(dim=(2,3))+targets.sum(dim=(2,3)) + smooth)) return dice.mean()
4ac40e87fe048dbc3232bb82c7fa16d9c03a8439
8,524
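A short smoke test for `dice_loss` above (id 8,524), assuming it is importable; shapes follow its docstring (logits N x C x H x W, integer-valued float targets N x H x W), and the tensors are random.

import torch

torch.manual_seed(0)
logits = torch.randn(2, 3, 8, 8)                      # N=2, C=3 classes, 8x8 maps
targets = torch.randint(0, 3, (2, 8, 8)).float()      # class indices stored as floats

print(dice_loss(logits, targets))                     # scalar loss for random predictions

# A (near-)perfect prediction drives the loss toward 0.
perfect = torch.full((2, 3, 8, 8), -50.0).scatter_(1, targets.long().unsqueeze(1), 50.0)
print(dice_loss(perfect, targets))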
def deltaG_methanogenesis_early_Earth(T, pCO2, pH2, pCH4): """ Equation: CO2 (g) + 4H2 (g) --> CH4 (g)+ 2H2O (l) Assumes bar, 1bar total pressure, for gases. T must be array (even if just 1 entry) """ R=8.314E-3 #kJ mol^-1 K^-1 deltaG_0=deltaG_F_PSat_T_CH4_g(T)+2.0*deltaG_F_PSat_T_H2O_l(T) - (deltaG_F_PSat_T_CO2_g(T) + 4.0*deltaG_F_PSat_T_H2_g(T))# Standard free energy Q=(pCH4*(1.0)**2.0)/(pCO2*pH2**4.0) #Reaction quotient Q deltaG=deltaG_0+R*T*np.log(Q) deltaG[deltaG > life_threshold_deltaG]=np.nan #if at least 10 kJ/mol not generated, bugs not known to live. return deltaG
d54f15296a17a927f4e8f924b4620b2fdab4082a
8,525
import matplotlib.pyplot as plt import time def optimize_on_joints(j2d, model, cam, img, prior, try_both_orient, body_orient, n_betas=10, regs=None, conf=None, viz=False): """Fit the model to the given set of joints, given the estimated camera :param j2d: 14x2 array of CNN joints :param model: SMPL model :param cam: estimated camera :param img: h x w x 3 image :param prior: mixture of gaussians pose prior :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit :param body_orient: 3D vector, initialization for the body orientation :param n_betas: number of shape coefficients considered during optimization :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term :param conf: 14D vector storing the confidence values from the CNN :param viz: boolean, if True enables visualization during optimization :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation """ t0 = time() # define the mapping LSP joints -> SMPL joints # cids are joints ids for LSP: cids = range(12) + [13] # joint ids for SMPL # SMPL does not have a joint for head, instead we use a vertex for the head # and append it later. smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20] # the vertex id for the joint corresponding to the head head_id = 411 # weights assigned to each joint during optimization; # the definition of hips in SMPL and LSP is significantly different so set # their weights to zero base_weights = np.array( [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64) if try_both_orient: flipped_orient = cv2.Rodrigues(body_orient)[0].dot( cv2.Rodrigues(np.array([0., np.pi, 0]))[0]) flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel() orientations = [body_orient, flipped_orient] else: orientations = [body_orient] if try_both_orient: # store here the final error for both orientations, # and pick the orientation resulting in the lowest error errors = [] svs = [] cams = [] for o_id, orient in enumerate(orientations): # initialize the shape to the mean shape in the SMPL training set betas = ch.zeros(n_betas) # initialize the pose by using the optimized body orientation and the # pose prior init_pose = np.hstack((orient, prior.weights.dot(prior.means))) # instantiate the model: # verts_decorated allows us to define how many # shape coefficients (directions) we want to consider (here, n_betas) sv = verts_decorated( trans=ch.zeros(3), pose=ch.array(init_pose), v_template=model.v_template, J=model.J_regressor, betas=betas, shapedirs=model.shapedirs[:, :, :n_betas], weights=model.weights, kintree_table=model.kintree_table, bs_style=model.bs_style, f=model.f, bs_type=model.bs_type, posedirs=model.posedirs) # make the SMPL joints depend on betas Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i]) for i in range(len(betas))]) J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot( model.v_template.r) # get joint positions as a function of model pose, betas and trans (_, A_global) = global_rigid_transformation( sv.pose, J_onbetas, model.kintree_table, xp=ch) Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans # add the head joint, corresponding to a vertex... Jtr = ch.vstack((Jtr, sv[head_id])) # ... 
and add the joint id to the list if o_id == 0: smpl_ids.append(len(Jtr) - 1) # update the weights using confidence values weights = base_weights * conf[ cids] if conf is not None else base_weights # project SMPL joints on the image plane using the estimated camera cam.v = Jtr # data term: distance between observed and estimated joints in 2D obj_j2d = lambda w, sigma: ( w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma)) # mixture of gaussians pose prior pprior = lambda w: w * prior(sv.pose) # joint angles pose prior, defined over a subset of pose parameters: # 55: left elbow, 90deg bend at -np.pi/2 # 58: right elbow, 90deg bend at np.pi/2 # 12: left knee, 90deg bend at np.pi/2 # 15: right knee, 90deg bend at np.pi/2 alpha = 10 my_exp = lambda x: alpha * ch.exp(x) obj_angle = lambda w: w * ch.concatenate([my_exp(sv.pose[55]), my_exp(-sv.pose[ 58]), my_exp(-sv.pose[12]), my_exp(-sv.pose[15])]) if viz: plt.ion() def on_step(_): """Create visualization.""" plt.figure(1, figsize=(10, 10)) plt.subplot(1, 2, 1) # show optimized joints in 2D tmp_img = img.copy() for coord, target_coord in zip( np.around(cam.r[smpl_ids]).astype(int), np.around(j2d[cids]).astype(int)): if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and coord[1] < tmp_img.shape[0] and coord[1] >= 0): cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255]) if (target_coord[0] < tmp_img.shape[1] and target_coord[0] >= 0 and target_coord[1] < tmp_img.shape[0] and target_coord[1] >= 0): cv2.circle(tmp_img, tuple(target_coord), 3, [0, 255, 0]) plt.imshow(tmp_img[:, :, ::-1]) plt.draw() plt.show() plt.pause(1e-2) on_step(_) else: on_step = None if regs is not None: # interpenetration term sp = SphereCollisions( pose=sv.pose, betas=sv.betas, model=model, regs=regs) sp.no_hands = True # weight configuration used in the paper, with joints + confidence values from the CNN # (all the weights used in the code were obtained via grid search, see the paper for more details) # the first list contains the weights for the pose priors, # the second list contains the weights for the shape prior opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78], [1e2, 5 * 1e1, 1e1, .5 * 1e1]) # run the optimization in 4 stages, progressively decreasing the # weights for the priors for stage, (w, wbetas) in enumerate(opt_weights): _LOGGER.info('stage %01d', stage) objs = {} objs['j2d'] = obj_j2d(1., 100) objs['pose'] = pprior(w) objs['pose_exp'] = obj_angle(0.317 * w) objs['betas'] = wbetas * betas if regs is not None: objs['sph_coll'] = 1e3 * sp ch.minimize( objs, x0=[sv.betas, sv.pose], method='dogleg', callback=on_step, options={'maxiter': 100, 'e_3': .0001, 'disp': 0}) t1 = time() _LOGGER.info('elapsed %.05f', (t1 - t0)) if try_both_orient: errors.append((objs['j2d'].r**2).sum()) svs.append(sv) cams.append(cam) if try_both_orient and errors[0] > errors[1]: choose_id = 1 else: choose_id = 0 if viz: plt.ioff() return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
2ce29fae66bd7898414194012a1b33e8768605b0
8,526
import math def make_axis_angle_matrix(axis, angle): """construct a matrix that rotates around axis by angle (in radians)""" #[RMS] ported from WildMagic4 fCos = math.cos(angle) fSin = math.sin(angle) fX2 = axis[0]*axis[0] fY2 = axis[1]*axis[1] fZ2 = axis[2]*axis[2] fXYM = axis[0]*axis[1]*(1-fCos) fXZM = axis[0]*axis[2]*(1-fCos) fYZM = axis[1]*axis[2]*(1-fCos) fXSin = axis[0]*fSin fYSin = axis[1]*fSin fZSin = axis[2]*fSin return ( fX2*(1-fCos)+fCos, fXYM-fZSin, fXZM+fYSin, fXYM+fZSin, fY2*(1-fCos)+fCos, fYZM-fXSin, fXZM-fYSin, fYZM+fXSin, fZ2*(1-fCos)+fCos )
1bef075e63b26559184025a69f47d8c1b6dccf1d
8,527
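A quick numerical check of `make_axis_angle_matrix` above (id 8,527), assuming it is importable; the returned 9-tuple reads row by row, so reshaping it to 3x3 gives an ordinary rotation matrix.

import math
import numpy as np

# A 90-degree rotation about the z axis should carry the x axis onto the y axis.
m = np.array(make_axis_angle_matrix((0.0, 0.0, 1.0), math.pi / 2)).reshape(3, 3)
print(np.allclose(m @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0]))            # True

# Rotation matrices are orthogonal with determinant +1.
print(np.allclose(m @ m.T, np.eye(3)), np.isclose(np.linalg.det(m), 1.0))     # True True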
def get_agent_type_from_project_type(): """ use project type to determine agent type """ if 'METRIC' in if_config_vars['project_type']: if if_config_vars['is_replay']: return 'MetricFileReplay' else: return 'CUSTOM' elif if_config_vars['is_replay']: return 'LogFileReplay' else: return 'LogStreaming' # INCIDENT and DEPLOYMENT don't use this
a2ea351fcba68dde4db2b9200636c937a58ab960
8,528
import traceback def close_server(is_rebooting = False): """ Close the Unity server and tell clients to react appropriately. Set `is_rebooting` to handle cases like domain reload when Unity is expected to come back shortly. Returns True if the server was closed by this call, False if it was already closed. """ global server global clients if server is None: return False # Tell all the clients to quit. client_shutdown_async = [] clients_to_shutdown = [] with clients_lock: for client_list in clients.values(): for c in client_list: try: shutdown_result = c.async_shutdown(is_rebooting) # Give the client a half-second to tell us there was a problem. # If they don't tell us in that time, we just ignore the problem. shutdown_result.set_expiry(0.5) client_shutdown_async.append(shutdown_result) clients_to_shutdown.append(c) except EOFError: pass for a in client_shutdown_async: try: a.wait() a.value except EOFError: # The client shut down when we told it to shut down -- pretty normal. pass except: print("Exception while shutting down a client: {}".format(traceback.format_exc())) server.close() # Process all jobs pending. Client threads might be waiting for jobs to be # run on the main thread while not jobs.empty(): process_jobs(); server.thread.join() for c in clients_to_shutdown: c.wait_for_thread() # Finally release the lock file. server.lockfile.release() server = None clients = dict() return True
77fbd9ecd8ed7489d4f5763c5bb417c7cb5ddb15
8,529
import types def dict_decode(node_dict: dict) -> Node: """Convert a dictionary to an `Entity` node (if it has a `type` item).""" if "type" not in node_dict: return node_dict node_type = node_dict.pop("type") class_ = getattr(types, node_type, None) if class_ is None: return node_dict node_kwargs = {} for key, val in node_dict.items(): if isinstance(val, dict): val = dict_decode(val) elif isinstance(val, list): processed_list = [] for sub_val in val: if isinstance(sub_val, dict): processed_list.append(dict_decode(sub_val)) else: processed_list.append(sub_val) val = processed_list node_kwargs[key] = val return class_(**node_kwargs)
00b790e431cdf080c0a6220c2913fd511983904d
8,530
from datetime import datetime def compute_purges(snapshots, pattern, now): """Return the list of snapshots to purge, given a list of snapshots, a purge pattern and a now time """ snapshots = sorted(snapshots) pattern = sorted(pattern, reverse=True) purge_list = [] max_age = pattern[0] # Age of the snapshots in minutes. # Example : [30, 70, 90, 150, 210, ..., 4000] snapshots_age = [] valid_snapshots = [] for s in snapshots: try: snapshots_age.append( int((now - datetime.strptime( s.split('@')[1], DTFORMAT)).total_seconds() )/60) valid_snapshots.append(s) except: log.info("Skipping purge of %s with invalid date format", s) continue if not valid_snapshots: return purge_list # pattern = 3600:180:60 # age segments = [(3600, 180), (180, 60)] for age_segment in [(pattern[i], pattern[i+1]) for i, p in enumerate(pattern[:-1])]: last_timeframe = -1 for i, age in enumerate(snapshots_age): # if the age is outside the age_segment, delete nothing. # Only 70 and 90 are inside the age_segment (60, 180) if age > age_segment[0] < max_age or age < age_segment[1]: continue # Now get the timeframe number of the snapshot. # Ages 70 and 90 are in the same timeframe (70//60 == 90//60) timeframe = age // age_segment[1] # delete if we already had a snapshot in the same timeframe # or if the snapshot is very old if timeframe == last_timeframe or age > max_age: purge_list.append(valid_snapshots[i]) last_timeframe = timeframe return purge_list
710a65ef7068d57470fb72ff171a1f1eb3480d65
8,531
import logging def design_partial_factorial(k: int, res: int) -> DataFrame: """ design_partial_factorial This function helps design 2 level partial factorial experiments. These experiments are often described using the syntax l**(k-p) where l represents the level of each factor, k represents the total number of factors considered, and p represents a scaling factor relative to the full factorial design. This function assumes that l=2. Users are not asked to set p, instead the user sets a minimum desired resolution for their experiment. Resolution describes the kind of aliasing incurred by scaling down from a full to a partial factorial design. Higher resolutions have less potential aliasing (confounding). Resolution number is determined through the defining relation of the partial factorial design. For the 6 factor design 2**(6-p) with factors ABCDEF, example defining relations (I) are shown below. The resolution cannot exceed the number of factors in the experiment. So a 6 factor experiment can be at most a resolution 6 (otherwise it would be a full factorial experiment). * Res I: I = A * Res II: I = AB * Res III: I = ABC * Res IV: I = ABCD * Res V: I = ABCDE * Res VI: I = ABCDEF Practically we tend to use resolution III-, IV- and V-designs. * Res I: Cannot distinguish between levels within main effects (not useful). * Res II: Main effects may be aliased with other main effects (not useful). * Res III: Main effects may be aliased with two-way interactions. * Res IV: Two-way interactions may be aliased with each other. * Res V: Two-way interactions may be aliased with three-way interactions. * Res VI: Three-way interactions may be aliased with each other. Parameters ---------- k : int the total number of factors considered in the experiment res : int the desired minimum resolution of the experiment Returns ------- pd.DataFrame A dataframe with the partial factorial design Examples -------- >>> # create partial factorial design for a 2 level 4 factor resolution III experiment >>> design_df = design_partial_factorial(k=4, res=3) """ _check_int_input(k, "k") _check_int_input(res, "res") assert res <= k, "Resolution must be smaller than or equal to the number of factors." 
# Assume l=2 and use k specified by user to solve for p in design n = arange(res - 1, k, 1) k_minus_p = k - 1 if res == k else n[~(_k_combo_vec(n, res) < k)][0] logging.info("Partial Factorial Design: l=2, k={}, p={}".format(k, k - k_minus_p)) logging.info("Ratio to Full Factorial Design: {}".format(Fraction(2**k_minus_p / 2**k))) # identify the main effects and interactions for the design main_factors = arange(k_minus_p) clean = lambda x: x.replace(" ", " ").strip(" ").replace(" ", ":") interactions = [clean(_array_to_string(main_factors))] if res == k else \ [ clean(_array_to_string(c)) for r in range(res - 1, k_minus_p) for c in combinations(main_factors, r) ][:k - k_minus_p] # combine main effects and interactions into a single design string (format inspired by patsy) factors = " ".join([_array_to_string(main_factors)] + interactions) logging.info("Design string: {}".format(factors)) main_factors = [i for i in factors.split(" ") if i and ":" not in i] two_level_full_factorial = [[-1, 1] for _ in main_factors] full_factorial_design = design_full_factorial(two_level_full_factorial) interactions = [ ["x" + i for i in j.split(":")] for j in [i for i in factors.split(" ") if i and ":" in i] ] design = "+".join(full_factorial_design.columns.tolist() + [":".join(i) for i in interactions]) partial_factorial_design = dmatrix(design, full_factorial_design, return_type='dataframe').drop( columns=["Intercept"], axis=1) partial_factorial_design.columns = \ ["x{}".format(i) for i in range(partial_factorial_design.shape[1])] return partial_factorial_design
a9c93cf696c33f0eb74cb092d1f340d5732dc994
8,532
from pathlib import Path import os import json def find_latest(message_ts: str, post_dir: Path) -> str: """Retrieves the latest POST request timestamp for a given message.""" latest_ts = message_ts for postfile in os.listdir(os.fsencode(post_dir)): if (filename := os.fsdecode(postfile)).endswith('.json'): request_ts = filename[:-len('.json')] if request_ts < latest_ts: continue else: with open(os.path.join(post_dir, filename), 'r') as file: request = json.load(file) if request['container']['message_ts'] == message_ts: if request_ts > latest_ts: latest_ts = request_ts else: continue else: continue return latest_ts
5c5203cf1adc572cf7e9908dcd3c856de7c0f0da
8,533
def get_trending_queries(filename):
    """Extract trends from a file."""
    trend_tuples_list = []
    with open(filename, 'r') as f:
        for line in f:
            trend_tuples_list.append(tuple(line.strip().split(',')))
    return trend_tuples_list
6f5828d4bf0092c0a43804ca7ffb9ee4aa67e607
8,534
def get_bio(x, lang='en'):
    """Get the one-sentence introduction"""
    bio = x.loc[16][lang]
    return bio
8c9ddabd2e6ada790af2b85a3fb656291f3ee5bd
8,535
import io def create_tf_example(filename, source_id, encoded_jpeg, annotations, resize=True): """ This function creates a tf.train.Example in object detection api format from a Waymo data frame. args: - filename [str]: name of the original tfrecord file - source_id [str]: original image source id (here: frame context name + camera name + frame index) - encoded_jpeg [bytes]: jpeg encoded image - annotations [protobuf object]: bboxes and classes returns: - tf_example [tf.Train.Example]: tf example in the objection detection api format. """ if not resize: encoded_jpg_io = io.BytesIO(encoded_jpeg) image = Image.open(encoded_jpg_io) width, height = image.size width_factor, height_factor = image.size else: image_tensor = tf.io.decode_jpeg(encoded_jpeg) height_factor, width_factor, _ = image_tensor.shape image_res = tf.cast(tf.image.resize(image_tensor, (640, 640)), tf.uint8) encoded_jpeg = tf.io.encode_jpeg(image_res).numpy() width, height = 640, 640 mapping = {1: 'vehicle', 2: 'pedestrian', 4: 'cyclist'} image_format = b'jpg' xmins = [] xmaxs = [] ymins = [] ymaxs = [] classes_text = [] classes = [] filename = filename.encode('utf8') # convert to bytes in utf8 format source_id = source_id.encode('utf8') # convert to bytes in utf8 format for ann in annotations: xmin, ymin = ann.box.center_x - 0.5 * ann.box.length, ann.box.center_y - 0.5 * ann.box.width xmax, ymax = ann.box.center_x + 0.5 * ann.box.length, ann.box.center_y + 0.5 * ann.box.width xmins.append(xmin / width_factor) xmaxs.append(xmax / width_factor) ymins.append(ymin / height_factor) ymaxs.append(ymax / height_factor) classes.append(ann.type) classes_text.append(mapping[ann.type].encode('utf8')) tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': int64_feature(height), 'image/width': int64_feature(width), 'image/filename': bytes_feature(filename), 'image/source_id': bytes_feature(source_id), 'image/encoded': bytes_feature(encoded_jpeg), 'image/format': bytes_feature(image_format), 'image/object/bbox/xmin': float_list_feature(xmins), 'image/object/bbox/xmax': float_list_feature(xmaxs), 'image/object/bbox/ymin': float_list_feature(ymins), 'image/object/bbox/ymax': float_list_feature(ymaxs), 'image/object/class/text': bytes_list_feature(classes_text), 'image/object/class/label': int64_list_feature(classes), })) return tf_example
b757fc1e4d51fac5722eb170357ea36388d40d5d
8,536
import re def format_oids(oids_parameters): """ Format dictionary OIDs to ``cryptography.x509.oid.NameOID`` object list :param oids_parameters: CA Object Identifiers (OIDs). The are typically seen in X.509 names. Allowed keys/values: ``'country_name': str (two letters)``, ``'locality_name': str``, ``'state_or_province': str``, ``'street_address': str``, ``'organization_name': str``, ``'organization_unit_name': str``, ``'email_address': str``, :type oids_parameters: dict, required :return: ``cryptography.x509.oid.NameOID`` object list :rtype: object ``cryptography.x509.oid.NameOID`` object list """ oids = list() for oid in oids_parameters: if oid in OIDS: current_oid = oids_parameters[oid] if type(current_oid) is not str: raise TypeError(f"'{oid}' must be str") if oid == "country_name": # country name ISO 3166-1 (alfa-2) if not re.match(COUNTRY_REGEX, current_oid): raise OwnCAInvalidOID( f"'{oid}' must be ISO 3166-1 (alfa-2)" ) else: oids.append( x509.NameAttribute(NameOID.COUNTRY_NAME, current_oid) ) elif oid == "locality_name": oids.append( x509.NameAttribute(NameOID.LOCALITY_NAME, current_oid) ) elif oid == "state_or_province": oids.append( x509.NameAttribute( NameOID.STATE_OR_PROVINCE_NAME, current_oid ) ) elif oid == "street_address": oids.append( x509.NameAttribute(NameOID.STREET_ADDRESS, current_oid) ) elif oid == "organization_name": oids.append( x509.NameAttribute(NameOID.ORGANIZATION_NAME, current_oid) ) elif oid == "organization_unit_name": oids.append( x509.NameAttribute( NameOID.ORGANIZATIONAL_UNIT_NAME, current_oid ) ) elif oid == "email_address": oids.append( x509.NameAttribute(NameOID.EMAIL_ADDRESS, current_oid) ) else: raise OwnCAInvalidOID( f"The '{oid}' is Invalid. Allowed OIDs: {', '.join(OIDS)}." ) return oids
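A small usage sketch, assuming the cryptography package and the module-level OIDS list, COUNTRY_REGEX and OwnCAInvalidOID referenced above are available; the dictionary values are illustrative only:

# Hypothetical OID dictionary; keys must match the allowed set documented above.
oids_parameters = {
    "country_name": "BR",
    "state_or_province": "Minas Gerais",
    "locality_name": "Uba",
    "organization_name": "Example Org",
}
name_attributes = format_oids(oids_parameters)
# The resulting list of x509.NameAttribute objects can be used to build an
# x509.Name for a certificate subject or issuer.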
08641ffb1c431c13e23f2b9498ce1cb1a896f955
8,537
def Phases(*args):
    """Number of phases"""
    # Getter
    if len(args) == 0:
        return lib.Generators_Get_Phases()

    # Setter
    Value, = args
    lib.Generators_Set_Phases(Value)
d1610c5b2ab19cf2b3018850fe685bb9fcbc11ad
8,538
import requests


def create_channel(logger: Logger, connection: komand.connection, team_id: str, channel_name: str, description: str) -> bool:
    """
    Creates a channel for a given team

    :param logger: (logging.logger)
    :param connection: Object (komand.connection)
    :param team_id: String
    :param channel_name: String
    :param description: String
    :return: boolean
    """
    create_channel_endpoint = f"https://graph.microsoft.com/beta/teams/{team_id}/channels"
    create_channel_payload = {
        "description": description,
        "displayName": channel_name
    }
    headers = connection.get_headers()
    logger.info(f"Creating channel with: {create_channel_endpoint}")
    result = requests.post(create_channel_endpoint, json=create_channel_payload, headers=headers)
    try:
        result.raise_for_status()
    except Exception as e:
        raise PluginException(cause=f"Create channel {channel_name} failed.", assistance=result.text) from e
    if result.status_code != 201:
        raise PluginException(cause="Create channel returned an unexpected result.", assistance=result.text)
    return True
6cdd37a7fdc131433f9f75ba10e523bc719a34aa
8,539
import sys def getMoveValue(board, table, depth, move): """ Sort criteria is as follows. 1. The move from the hash table 2. Captures as above. 3. Killers. 4. History. 5. Moves to the centre. """ # As we only return directly from transposition table if hashf == hashfEXACT # There could be a non hashfEXACT very promising move for us to test if table.isHashMove(depth, move): return sys.maxsize fcord = (move >> 6) & 63 tcord = move & 63 flag = move >> 12 arBoard = board.arBoard fpiece = fcord if flag == DROP else arBoard[fcord] tpiece = arBoard[tcord] if tpiece != EMPTY: if board.variant == ATOMICCHESS: if kingExplode(board, move, board.color): return MATE_VALUE # We add some extra to ensure also bad captures will be searched early if board.variant in ASEAN_VARIANTS: return ASEAN_PIECE_VALUES[tpiece] - PIECE_VALUES[fpiece] + 1000 else: return PIECE_VALUES[tpiece] - PIECE_VALUES[fpiece] + 1000 if flag in PROMOTIONS: if board.variant in ASEAN_VARIANTS: return ASEAN_PIECE_VALUES[flag - 3] - PAWN_VALUE + 1000 else: return PIECE_VALUES[flag - 3] - PAWN_VALUE + 1000 if flag == DROP: return PIECE_VALUES[tpiece] + 1000 killervalue = table.isKiller(depth, move) if killervalue: return 1000 + killervalue # King tropism - a move that brings us nearer to the enemy king, is probably # a good move # opking = board.kings[1-board.color] # score = distance[fpiece][fcord][opking] - distance[fpiece][tcord][opking] if fpiece not in position_values: # That is, fpiece == EMPTY print(fcord, tcord) print(board) if board.variant in ASEAN_VARIANTS: score = 0 else: score = ( position_values[fpiece][board.color][tcord] - position_values[fpiece][board.color][fcord] ) # History heuristic score += table.getButterfly(move) return score
31f530733908c39eace8a2b3d857b2b7c42b47be
8,540
def activate_user(username): """Activate a user account.""" user = annotator.credentials.find_one({'username': username}) if not user['active']: annotator.credentials.update_one(user, {'$set': {'active': True}}) flash("User {0} activated successfully".format(username), 'success') else: flash("User {0} is already active".format(username), 'warning') return redirect(url_for('manage_users'))
58b70edc4a098a7409e1c2e62f9710b3da3c95af
8,541
def query_all():
    """Queries all matches in Elasticsearch, to be used further for
    suggesting product names when a user is not aware of them.
    """
    # Use a distinct local name so the dict does not shadow the function itself.
    query = {
        "query": {"match_all": {}},
    }
    return query
9d15297cf82d813ff0a0688f5c25e2ca6fa145d3
8,542
def _mesh_homogeneous_cell(cell_vect, mesh_path):
    """Generate a simple mesh for a homogeneous cell.

    cell_vect: 2x2 np.array whose columns are the periodicity vectors.
    """
    name = mesh_path.stem
    geometry.init_geo_tools()
    geometry.set_gmsh_option("Mesh.MshFileVersion", 4.1)
    # Mesh.Algorithm = 6: Frontal-Delaunay for 2D meshes
    geometry.set_gmsh_option("Mesh.Algorithm", 6)
    geometry.set_gmsh_option("Mesh.MeshSizeMin", 0.05)
    geometry.set_gmsh_option("Mesh.MeshSizeMax", 0.05)
    rve = Gmsh2DRVE([], cell_vect, (1, 1), np.zeros(2), [], False, name)
    rve.mesh_generate()
    gmsh.model.mesh.renumberNodes()
    gmsh.model.mesh.renumberElements()
    gmsh.write(str(mesh_path))
    mesh_path = msh_conversion(mesh_path, ".xdmf")
    geometry.reset()
    return mesh_path
98c63d7764bcca7baad81de1fe7c3fac16ff6ffd
8,543
async def async_setup_entry(hass, config_entry): """Initialize the sharkiq platform via config entry.""" ayla_api = get_ayla_api( username=config_entry.data[CONF_USERNAME], password=config_entry.data[CONF_PASSWORD], websession=hass.helpers.aiohttp_client.async_get_clientsession(), ) try: if not await async_connect_or_timeout(ayla_api): return False except CannotConnect as exc: raise exceptions.ConfigEntryNotReady from exc shark_vacs = await ayla_api.async_get_devices(False) device_names = ", ".join(d.name for d in shark_vacs) _LOGGER.debug("Found %d Shark IQ device(s): %s", len(shark_vacs), device_names) coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs) await coordinator.async_config_entry_first_refresh() hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][config_entry.entry_id] = coordinator hass.config_entries.async_setup_platforms(config_entry, PLATFORMS) return True
c260e59ba86c72e84bef3f51d21b2500195b1a08
8,544
from typing import Dict from typing import Union from typing import Optional from typing import List from typing import Tuple from typing import cast from typing import Any import json def fetch_incidents(client: Client, max_incidents: int, last_run: Dict[str, Union[Optional[int], Optional[str]]], first_fetch: Optional[int], priority: Optional[str], activity_status: Optional[str], progress_status: Optional[str], business_units: Optional[str], issue_types: Optional[str], tags: Optional[str], cloud_management_status: Optional[str], mirror_direction: Optional[str], sync_tags: Optional[List[str]], fetch_details: Optional[bool] ) -> Tuple[Dict[str, Union[Optional[int], Optional[str]]], List[dict]]: """This function retrieves new alerts every interval (default is 1 minute). This function has to implement the logic of making sure that incidents are fetched only onces and no incidents are missed. By default it's invoked by XSOAR every minute. It will use last_run to save the timestamp of the last incident it processed. If last_run is not provided, it should use the integration parameter first_fetch to determine when to start fetching the first time. Uses "createdAfter" in the Expanse API for timestamp. :return: A tuple containing two elements: next_run (``Dict[str, int]``): Contains the timestamp that will be used in ``last_run`` on the next fetch, and the last issue id. incidents (``List[dict]``): List of incidents that will be created in XSOAR :rtype: ``Tuple[Dict[str, Union[Optional[int], Optional[str]]], List[dict]]`` """ last_fetch = last_run.get('last_fetch') if last_fetch is None: last_fetch = cast(int, first_fetch) else: last_fetch = cast(int, last_fetch) latest_created_time = last_fetch last_issue_id = last_run.get('last_issue_id') latest_issue_id: Optional[str] = None incidents: List[Dict[str, Any]] = [] arg_list = argToList(priority) if arg_list and not all(i in ISSUE_PRIORITY for i in arg_list): raise ValueError(f'priority must include: {", ".join(ISSUE_PRIORITY)}') _priority = ','.join(arg_list) arg_list = argToList(progress_status) if arg_list and not all(i in ISSUE_PROGRESS_STATUS for i in arg_list): raise ValueError(f'progressStatus must include: {", ".join(ISSUE_PROGRESS_STATUS)}') _progress_status = ','.join(arg_list) arg_list = argToList(activity_status) if arg_list and not all(i in ISSUE_ACTIVITY_STATUS for i in arg_list): raise ValueError(f'activityStatus must include: {", ".join(ISSUE_ACTIVITY_STATUS)}') _activity_status = ','.join(arg_list) arg_list = argToList(cloud_management_status) if arg_list and not all(i in CLOUD_MANAGEMENT_STATUS for i in arg_list): raise ValueError(f'cloudManagementStatus must include: {", ".join(CLOUD_MANAGEMENT_STATUS)}') _cloud_management_status = ','.join(arg_list) created_after = timestamp_us_to_datestring_utc(latest_created_time, DATE_FORMAT) r = client.get_issues( limit=max_incidents if not last_issue_id else max_incidents + 1, # workaround to avoid unnecessary API calls priority=_priority, business_units=business_units, progress_status=_progress_status, activity_status=_activity_status, tags=tags, issue_type=issue_types, cloud_management_status=_cloud_management_status, created_after=created_after, sort='created' ) broken = False issues: List = [] skip = cast(str, last_issue_id) for i in r: if skip and not broken: if 'id' not in i or 'created' not in i: continue # fix created time to make sure precision is the same to microsecond with no rounding i['created'] = timestamp_us_to_datestring_utc(datestring_to_timestamp_us(i['created']), 
DATE_FORMAT) if i['created'] != created_after: issues.append(i) broken = True elif i['id'] == skip: broken = True else: issues.append(i) if len(issues) == max_incidents: break for issue in issues: ml_feature_list: List[str] = [] if 'created' not in issue or 'id' not in issue: continue incident_created_time = datestring_to_timestamp_us(issue.get('created')) if last_fetch: if incident_created_time < last_fetch: continue incident_name = issue.get('headline') if 'headline' in issue else issue.get('id') # Mirroring issue['xsoar_mirroring'] = { 'mirror_direction': mirror_direction, 'mirror_id': issue.get('id'), 'mirror_instance': demisto.integrationInstance(), 'sync_tags': sync_tags } issue['xsoar_severity'] = convert_priority_to_xsoar_severity(issue.get('priority', 'Unknown')) # Handle asset information issue['assets'], ml_feature_list, _ = client.parse_asset_data(issue, fetch_details) # add issue specific information to ml key if ( (provider := issue.get('providers')) and isinstance(provider, list) and 'name' in provider[0] ): ml_feature_list.append(provider[0].get('name')) if ( (latest_evidence := issue.get('latestEvidence')) and isinstance(latest_evidence, dict) ): if ( (geolocation := latest_evidence.get('geolocation')) and isinstance(geolocation, dict) ): for f in ['countryCode', 'city']: if (x := geolocation.get(f)): ml_feature_list.append(x) # dedup, sort and join ml feature list issue['ml_features'] = ' '.join(sorted(list(set(ml_feature_list)))) incident = { 'name': incident_name, 'details': issue.get('helpText'), 'occurred': issue.get('created'), 'rawJSON': json.dumps(issue), 'severity': issue.get('xsoar_severity') } latest_issue_id = issue.get('id') incidents.append(incident) if incident_created_time > latest_created_time: latest_created_time = incident_created_time next_run = { 'last_fetch': latest_created_time, 'last_issue_id': latest_issue_id if latest_issue_id else last_issue_id} return next_run, incidents
e273d69611331c9f2eb5b2c0c9c27e805c9d7e4f
8,545
import collections def extractWordFeatures(x): """ Extract word features for a string x. Words are delimited by whitespace characters only. @param string x: @return dict: feature vector representation of x. Example: "I am what I am" --> {'I': 2, 'am': 2, 'what': 1} """ # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this) mydict = collections.defaultdict(float) for s in x.split(' '): if s.isalnum() and s[0:4] != "http": mydict[s] += 1 return mydict # END_YOUR_CODE
dd5247dbf7ef69043b200acbefec996107de00f7
8,546
def delete_user(user_id):
    """ Delete user specified in user ID
    Note: Always return the appropriate response for the action requested.
    """
    user = mongo_mgr.db.user.find_one({'_id': user_id})
    if user:
        # Delete on the collection, not on the returned document dict.
        mongo_mgr.db.user.delete_one({'_id': user_id})
        result = {'id': user_id}
    else:
        result = "No result."
    return jsonify({'result': result})
b3244aeafcddd6c5be1d209c89ef7ed7969da989
8,547
from operator import and_
import logging

from sqlalchemy import func


def query_attention_one(**kwargs):
    """
    Query whether the current user follows the specified object.

    :param kwargs: {'user_id': user_id, 'object_id': object_id}
    :return: 0 or 1
    """
    session = None
    try:
        session = get_session()
        results = session.query(func.count('*')).filter(
            and_(Attention.OPEN_ID == kwargs['user_id'],
                 Attention.OBJECT_ID == kwargs['object_id'])).scalar()
        # Commit to persist to the database
        session.commit()
        logging.info('OK : attention.py--->query_attention_one() succeeded')
        return str(results)
    except Exception as e:
        logging.critical('Error : attention.py--->query_attention_one() failed: {}'.format(e))
        return RESULT_ERROR
    finally:
        if session is not None:
            session.close()
44db7006eec38c2524fe5a74dba46086c63c79c5
8,548
def _dict_empty_map_helper(values, empty, delim, av_separator, v_delimiter, parser): """ A helper to consolidate logic between singleton and non-singleton mapping. Args: values: The value to parse. empty: The empty representation for this value in CoNLL-U format. delim: The delimiter between components of the value. av_separator: The separator between attribute and value in each component. v_delimiter: The delimiter between values for the same attribute. parser: The parser of the value from the attribute value pair. Returns: An empty dict if the value is empty and otherwise a parsed equivalent. Raises: ParseError: If the dict format was unable to parsed. This error will be raised by the provided parser. """ if values == empty: return {} d = {} for el in values.split(delim): parts = el.split(av_separator, 1) if len(parts) == 1 or (len(parts) == 2 and parts[1] == ''): k = parts[0] v = None elif len(parts) == 2: k, v = parts parsed = parser(v, v_delimiter) d[k] = parsed return d
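A short sketch of how this helper might be called for a CoNLL-U FEATS-style column, with a hypothetical value parser (the real parser and delimiters live elsewhere in the module):

# Hypothetical parser: split multi-values on the value delimiter.
def _parse_values(v, v_delimiter):
    return None if v is None else tuple(v.split(v_delimiter))

feats = _dict_empty_map_helper(
    "Case=Nom|Number=Sing", empty="_", delim="|",
    av_separator="=", v_delimiter=",", parser=_parse_values)
# feats -> {'Case': ('Nom',), 'Number': ('Sing',)}
_dict_empty_map_helper("_", "_", "|", "=", ",", _parse_values)  # -> {}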
cb5550eb606beb47f31236b827e78f2a7fc4ba40
8,549
import json def get_full_json(msa, component, sessionkey, pretty=False, human=False): """ Form text in JSON with storage component data. :param msa: MSA DNS name and IP address. :type msa: tuple :param sessionkey: Session key. :type sessionkey: str :param pretty: Print in pretty format :type pretty: int :param component: Name of storage component. :type component: str :param human: Expand result dict keys in human readable format :type: bool :return: JSON with all found data. :rtype: str """ # Forming URL msa_conn = msa[1] if VERIFY_SSL else msa[0] url = '{strg}/api/show/{comp}'.format(strg=msa_conn, comp=component) # Making request to API resp_return_code, resp_description, xml = query_xmlapi(url, sessionkey) if resp_return_code != '0': raise SystemExit('ERROR: {rc} : {rd}'.format(rc=resp_return_code, rd=resp_description)) # Processing XML all_components = {} if component == 'disks': for PROP in xml.findall("./OBJECT[@name='drive']"): # Processing main properties disk_location = PROP.find("./PROPERTY[@name='location']").text disk_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text disk_full_data = { "h": disk_health_num } # Processing advanced properties disk_ext = dict() disk_ext['t'] = PROP.find("./PROPERTY[@name='temperature-numeric']") disk_ext['ts'] = PROP.find("./PROPERTY[@name='temperature-status-numeric']") disk_ext['cj'] = PROP.find("./PROPERTY[@name='job-running-numeric']") disk_ext['poh'] = PROP.find("./PROPERTY[@name='power-on-hours']") for prop, value in disk_ext.items(): if value is not None: disk_full_data[prop] = value.text all_components[disk_location] = disk_full_data elif component == 'vdisks': for PROP in xml.findall("./OBJECT[@name='virtual-disk']"): vdisk_name = PROP.find("./PROPERTY[@name='name']").text vdisk_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text vdisk_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text vdisk_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text vdisk_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text vdisk_full_data = { "h": vdisk_health_num, "s": vdisk_status_num, "ow": vdisk_owner_num, "owp": vdisk_owner_pref_num } all_components[vdisk_name] = vdisk_full_data elif component == 'pools': for PROP in xml.findall("./OBJECT[@name='pools']"): pool_sn = PROP.find("./PROPERTY[@name='serial-number']").text pool_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text pool_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text pool_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text pool_full_data = { "h": pool_health_num, "ow": pool_owner_num, "owp": pool_owner_pref_num } all_components[pool_sn] = pool_full_data elif component == 'disk-groups': for PROP in xml.findall("./OBJECT[@name='disk-group']"): dg_sn = PROP.find(".PROPERTY[@name='serial-number']").text dg_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text dg_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text dg_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text dg_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text dg_curr_job_num = PROP.find("./PROPERTY[@name='current-job-numeric']").text dg_curr_job_pct = PROP.find("./PROPERTY[@name='current-job-completion']").text # current job completion return None if job isn't running, so I'm replacing it with zero if None if dg_curr_job_pct is None: dg_curr_job_pct = '0' dg_full_data = { "h": dg_health_num, "s": dg_status_num, "ow": dg_owner_num, "owp": 
dg_owner_pref_num, "cj": dg_curr_job_num, "cjp": dg_curr_job_pct.rstrip('%') } all_components[dg_sn] = dg_full_data elif component == 'volumes': for PROP in xml.findall("./OBJECT[@name='volume']"): vol_sn = PROP.find("./PROPERTY[@name='serial-number']").text vol_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text vol_owner_num = PROP.find("./PROPERTY[@name='owner-numeric']").text vol_owner_pref_num = PROP.find("./PROPERTY[@name='preferred-owner-numeric']").text vol_full_data = { "h": vol_health_num, "ow": vol_owner_num, "owp": vol_owner_pref_num } all_components[vol_sn] = vol_full_data elif component == 'controllers': for PROP in xml.findall("./OBJECT[@name='controllers']"): # Processing main controller properties ctrl_id = PROP.find("./PROPERTY[@name='controller-id']").text ctrl_sc_fw = PROP.find("./PROPERTY[@name='sc-fw']").text ctrl_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text ctrl_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text ctrl_rd_status_num = PROP.find("./PROPERTY[@name='redundancy-status-numeric']").text # Get controller statistics url = '{strg}/api/show/{comp}/{ctrl}'.format(strg=msa_conn, comp='controller-statistics', ctrl=ctrl_id) # Making request to API stats_ret_code, stats_descr, stats_xml = query_xmlapi(url, sessionkey) if stats_ret_code != '0': raise SystemExit('ERROR: {} : {}'.format(stats_ret_code, stats_descr)) # TODO: I don't know, is it good solution, but it's one more query to XML API ctrl_cpu_load = stats_xml.find("./OBJECT[@name='controller-statistics']/PROPERTY[@name='cpu-load']").text ctrl_iops = stats_xml.find("./OBJECT[@name='controller-statistics']/PROPERTY[@name='iops']").text # Making full controller dict ctrl_full_data = { "h": ctrl_health_num, "s": ctrl_status_num, "rs": ctrl_rd_status_num, "cpu": ctrl_cpu_load, "io": ctrl_iops, "fw": ctrl_sc_fw } # Processing advanced controller properties ctrl_ext = dict() ctrl_ext['fh'] = PROP.find("./OBJECT[@basetype='compact-flash']/PROPERTY[@name='health-numeric']") ctrl_ext['fs'] = PROP.find("./OBJECT[@basetype='compact-flash']/PROPERTY[@name='status-numeric']") for prop, value in ctrl_ext.items(): if value is not None: ctrl_full_data[prop] = value.text all_components[ctrl_id] = ctrl_full_data elif component == 'enclosures': for PROP in xml.findall("./OBJECT[@name='enclosures']"): # Processing main enclosure properties encl_id = PROP.find("./PROPERTY[@name='enclosure-id']").text encl_health_num = PROP.find("./PROPERTY[@name='health-numeric']").text encl_status_num = PROP.find("./PROPERTY[@name='status-numeric']").text # Making full enclosure dict encl_full_data = { "h": encl_health_num, "s": encl_status_num } all_components[encl_id] = encl_full_data elif component == 'power-supplies': # Getting info about all power supplies for PS in xml.findall("./OBJECT[@name='power-supplies']"): # Processing main power supplies properties ps_id = PS.find("./PROPERTY[@name='durable-id']").text ps_name = PS.find("./PROPERTY[@name='name']").text # Exclude voltage regulators if ps_name.lower().find('voltage regulator') == -1: ps_health_num = PS.find("./PROPERTY[@name='health-numeric']").text ps_status_num = PS.find("./PROPERTY[@name='status-numeric']").text ps_dc12v = PS.find("./PROPERTY[@name='dc12v']").text ps_dc5v = PS.find("./PROPERTY[@name='dc5v']").text ps_dc33v = PS.find("./PROPERTY[@name='dc33v']").text ps_dc12i = PS.find("./PROPERTY[@name='dc12i']").text ps_dc5i = PS.find("./PROPERTY[@name='dc5i']").text ps_full_data = { "h": ps_health_num, "s": ps_status_num, "12v": 
ps_dc12v, "5v": ps_dc5v, "33v": ps_dc33v, "12i": ps_dc12i, "5i": ps_dc5i } # Processing advanced power supplies properties ps_ext = dict() ps_ext['t'] = PS.find("./PROPERTY[@name='dctemp']") for prop, value in ps_ext.items(): if value is not None: ps_full_data[prop] = value.text all_components[ps_id] = ps_full_data elif component == 'fans': # Getting info about all fans for FAN in xml.findall("./OBJECT[@name='fan-details']"): # Processing main fan properties fan_id = FAN.find(".PROPERTY[@name='durable-id']").text fan_health_num = FAN.find(".PROPERTY[@name='health-numeric']").text fan_status_num = FAN.find(".PROPERTY[@name='status-numeric']").text fan_speed = FAN.find(".PROPERTY[@name='speed']").text fan_full_data = { "h": fan_health_num, "s": fan_status_num, "sp": fan_speed } all_components[fan_id] = fan_full_data elif component == 'ports': for FC in xml.findall("./OBJECT[@name='ports']"): # Processing main ports properties port_name = FC.find("./PROPERTY[@name='port']").text port_health_num = FC.find("./PROPERTY[@name='health-numeric']").text port_full_data = { "h": port_health_num } # Processing advanced ports properties port_ext = dict() port_ext['ps'] = FC.find("./PROPERTY[@name='status-numeric']") for prop, value in port_ext.items(): if value is not None: port_full_data[prop] = value.text # SFP Status # Because of before 1050/2050 API has no numeric property for sfp-status, creating mapping self sfp_status_map = {"Not compatible": '0', "Incorrect protocol": '1', "Not present": '2', "OK": '3'} sfp_status_char = FC.find("./OBJECT[@name='port-details']/PROPERTY[@name='sfp-status']") sfp_status_num = FC.find("./OBJECT[@name='port-details']/PROPERTY[@name='sfp-status-numeric']") if sfp_status_num is not None: port_full_data['ss'] = sfp_status_num.text else: if sfp_status_char is not None: port_full_data['ss'] = sfp_status_map[sfp_status_char.text] all_components[port_name] = port_full_data # Transform dict keys to human readable format if '--human' argument is given if human: all_components = expand_dict(all_components) return json.dumps(all_components, separators=(',', ':'), indent=pretty)
2b02bd3c30ee9986cc25ca2b4fa822dc483e52c6
8,550
def get_restricted_area(path1, path2, restricted_pos1, restricted_pos2, time_step): """Computes the restricted area and the start- and end-time steps for both agents. * start time-step: The first time step where an agent occupies a position within the restricted area. * end time-step: The last time step where an agent occupies a position with the restricted area :param path1: Path (previous solution) from the first agent. :param path2: Path (previous solution) from the second agent. :param restricted_pos1: The first position which agent one would occupy within the restricted area. :param restricted_pos2: The first position which agent two would occupy within the restricted area. :param time_step: The time step where the agents would collide. :return: The positions included within the restricted area, the start time steps for both agents and the end time steps for both agents. """ sub_sequence1 = find_stop_position(path1[:time_step + 2][::-1], restricted_pos1)[::-1] sub_sequence2 = find_stop_position(path2[:time_step + 2][::-1], restricted_pos2) restricted_area = list(dict.fromkeys(sub_sequence1)) + list(dict.fromkeys(sub_sequence2)) # Determine time step where agent enters restricted area fst_enter_r = find_stop_position( list(zip(path1, range(len(path1))))[:time_step + 2], restricted_pos1 )[-1][1] snd_enter_r = find_stop_position( list(zip(path2, range(len(path2))))[:time_step + 2], restricted_pos2 )[-1][1] start_time_steps = [fst_enter_r, snd_enter_r] # Determine how long the agent remains within the restricted area end_time_steps = [] for path, r, enter in [ (path1, restricted_area, fst_enter_r), (path2, restricted_area[::-1], snd_enter_r) ]: path_idx = 0 for idx in range(len(restricted_area)): # Agent might wait in the restricted area because of other constraints while path_idx < len(path[enter:]) \ and path[enter:][path_idx] == path[enter:][path_idx - 1]: path_idx += 1 # The last position of the agent is within the restricted area if path_idx >= len(path[enter:]) - 1: path_idx = len(path[enter:]) break if path[enter:][path_idx] != r[idx]: break path_idx += 1 end_time_steps.append(path_idx) end_time_steps[0] += start_time_steps[0] end_time_steps[1] += start_time_steps[1] return restricted_area, start_time_steps, end_time_steps
39104a44e8d5354799e45feb1ba6371f3423fecc
8,551
import argparse from typing import List import os def execute_config(config_subparser: argparse.ArgumentParser, argv: List[str]) -> int: """ Boolean logic of config subparser triggering. """ args = config_subparser.parse_args(argv[1:]) if args.show_settings: print(settings_msg) return 0 if args.turn_log_on: config['LOG-SETTINGS']['logging_turned_on'] = args.turn_log_on.capitalize() with open(settings_file, 'w') as fp: config.write(fp) log_state = config.getboolean('LOG-SETTINGS', 'logging_turned_on') if log_state: print('Logging is activated.') else: print('Logging is deactivated.') return 0 if args.log_name: old_logger_path = get_logger_path() config['LOG-SETTINGS']['logger_filename'] = args.log_name with open(settings_file, 'w') as fp: config.write(fp) new_logger_path = get_logger_path() os.rename(old_logger_path, new_logger_path) print(f"The new log filename is {config.get('LOG-SETTINGS', 'logger_filename')!r}.",) return 0 if args.log_location: old_logger_path = get_logger_path() log_location = args.log_location if '~' in args.log_location: log_location = os.path.expanduser(args.log_location) if not os.path.isdir(log_location): print(f'The given path {args.log_location!r} is not a valid directory!') return 1 config['LOG-SETTINGS']['logger_location'] = log_location with open(settings_file, 'w') as fp: config.write(fp) new_logger_path = get_logger_path() os.rename(old_logger_path, new_logger_path) print(f"The new log location is {config.get('LOG-SETTINGS', 'logger_location')!r}.",) return 0 if args.set_search_value: if args.set_search_value == ' ': config['VALUE-SETTINGS']['search_value'] = "' '" with open(settings_file, 'w') as fp: config.write(fp) print(f"The new search-value is {config.get('VALUE-SETTINGS', 'search_value')}.",) else: config['VALUE-SETTINGS']['search_value'] = args.set_search_value with open(settings_file, 'w') as fp: config.write(fp) print(f"The new search-value is {config.get('VALUE-SETTINGS', 'search_value')!r}.",) return 0 if args.set_new_value == '': config['VALUE-SETTINGS']['new_value'] = "''" with open(settings_file, 'w') as fp: config.write(fp) print(f"The new 'new-value' is {config.get('VALUE-SETTINGS', 'new_value')}.") return 0 if args.set_new_value: config['VALUE-SETTINGS']['new_value'] = args.set_new_value with open(settings_file, 'w') as fp: config.write(fp) print(f"The new 'new-value' is {config.get('VALUE-SETTINGS', 'new_value')!r}.") return 0 config_subparser.print_help() return 1
591cce029374a5063d101927fae7641e0ce6c422
8,552
import numpy as np


def FR_highpass(freq: np.ndarray, hp_freq: float, trans_width: float) -> np.ndarray:
    """Frequency response for a highpass filter.

    Parameters
    ----------
    ``freq``: np.ndarray
        frequency array
    ``hp_freq``: float
        highpass frequency
    ``trans_width``: float
        width of the transition region between bands

    Returns
    -------
    np.ndarray with values in [0, 1]
    """
    sigma = trans_width / 6.
    return 1 / (1 + np.exp((hp_freq - freq) / sigma))
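A quick sketch of evaluating the response on a frequency grid (numpy only; the cut-off and transition-width values are made up):

import numpy as np

freqs = np.linspace(0.0, 100.0, 1001)            # Hz grid (illustrative)
response = FR_highpass(freqs, hp_freq=30.0, trans_width=12.0)
# The logistic response is ~0 well below 30 Hz, 0.5 at the cut-off and ~1 above it.
print(FR_highpass(np.array([30.0]), hp_freq=30.0, trans_width=12.0))  # -> [0.5]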
a57058a3fdf257ee68efe0c99d668e4f5b4fbf60
8,553
import subprocess


def _rexec(params):
    """Start a subprocess shell to execute the specified command and return its output.

    params - a one element list ["/bin/cat /etc/hosts"]
    """
    # check that params is a non-empty list
    if not isinstance(params, list) or len(params) == 0:
        return "Parameter must be a non-empty list"
    command = params[0]
    try:
        subprocess.check_call(command, shell=True)
        out = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
        return '\n' + out.decode()
    except Exception as e:
        print(e)
        return "{\"msg\":\"Invalid command.\"}"
e8338dc94b177f5d39d5307a88da7aa040a3a7e1
8,554
def _get_compose_template(manifest): """ Build the service entry for each one of the functions in the given context. Each docker-compose entry will depend on the same image and it's just a static definition that gets built from a template. The template is in the artifacts folder. """ artifact = get_artifact('compose-template.yml') def build_section(label): return [ { 'name': name, 'image': _get_docker_image(manifest, sls_section), 'volumes': _get_volumes(manifest, sls_section) } for name, sls_section in manifest.get(label, {}).items() ] # Load the jinja template and build the sls functions and layers. return Template(artifact).render( functions=build_section('functions'), layers=build_section('layers') )
659e28f97c76a386a20c85fadaa3d0bbd6d88a90
8,555
def _ParsePackageNode(package_node): """Parses a <package> node from the dexdump xml output. Returns: A dict in the format: { 'classes': { <class_1>: { 'methods': [<method_1>, <method_2>] }, <class_2>: { 'methods': [<method_1>, <method_2>] }, } } """ classes = {} for child in package_node: if child.tag == 'class': classes[child.attrib['name']] = _ParseClassNode(child) return {'classes': classes}
89eefebb82848acad23a9703b87177f626fbbdf5
8,556
def greet(lang):
    """Return a greeting in one of a few selected languages:
    Spanish, German, and Swedish."""
    if lang == 'es':
        return 'Hola'
    elif lang == 'ge':
        return 'Hallo'
    elif lang == 'sv':
        return 'Halla'
    else:
        return 'Hello'
dcbe0fb39e735666b36780ee8d06b457e0a9541e
8,557
def add_hook( show_original=False, show_transformed=False, predictable_names=False, verbose_finder=False, ): """Creates and adds the import hook in sys.meta_path""" callback_params = { "show_original": show_original, "show_transformed": show_transformed, "predictable_names": predictable_names, } hook = import_hook.create_hook( transform_source=transform_source, callback_params=callback_params, hook_name=__name__, verbose_finder=verbose_finder, ) return hook
efda58094ab2bb218dca8babcdbf0a74b97e0cd8
8,558
def correlate_two_dicts(xdict, ydict, subset_keys=None):
    """Find values with the same key in both dictionaries and return two arrays of corresponding values"""
    x, y, _ = correlate_two_dicts_verbose(xdict, ydict, subset_keys)
    return x, y
93287a57c7bf4e8cb03384531ffbca9c6d6e7cfc
8,559
def find_gateways(unicast_gateway, session, apic) -> tuple: """Search for ACI Gateways and get configurations""" get_gateway = get_subnets(session, apic) aps = [] epgs = [] l3Outs = [] gateways = [] location, bridge_domain, uni_route, scope, unkwn_uni, tenant, bd_vrf, iplearn = None, "DoesntExist", None, None, None, None, None, None try: # Locate subnet in ACI, get scope, map location for fvSubnet in get_gateway.iter("fvSubnet"): ip = fvSubnet.get("ip") gateways.append(ip) if unicast_gateway in ip: location = fvSubnet.get("dn") scope = fvSubnet.get("scope") break # Find BD, check to see if unicast routing is enable and unknown unicast setting is for fvBD in get_gateway.iter("fvBD"): bds = fvBD.get("name") iplearn = fvBD.get("ipLearning") mtu = fvBD.get("mtu") learn_limit = fvBD.get("limitIpLearnToSubnets") mac = fvBD.get("mac") if location.rfind(bds) != -1: bridge_domain = bds uni_route = fvBD.get("unicastRoute") unkwn_uni = fvBD.get("unkMacUcastAct") # Find vrf associated with BD for fvRsCtx in get_gateway.iter("fvRsCtx"): vrf = fvRsCtx.get("tnFvCtxName") location = fvRsCtx.get("dn") if location.rfind(bridge_domain) != -1: bd_vrf = vrf # Find tenant, ap, and epgs, save to list for fvRtBd in get_gateway.iter("fvRtBd"): dn = fvRtBd.get("dn") if dn.rfind(bridge_domain) != -1: tenant = dn.split("/")[1].strip("tn-") aps.append(dn.split("/")[5].strip("ap-")) epgs.append(dn.split("/")[6].strip("epg-").strip("]")) # Find L3outs, save to list for fvRsBDToOut in get_gateway.iter("fvRsBDToOut"): dn = fvRsBDToOut.get("dn") if dn.rfind(bridge_domain) != -1: l3Outs.append(dn.split("/")[3].strip("rsBDToOut-")) # Find L3outs, save to list for ipLearning in get_gateway.iter("ipLearning"): iplearn = ipLearning.get("ipLearning") except AttributeError: pass # Set variables from conditions if aps: join_aps = ', '.join(aps) else: join_aps = None if epgs: join_epgs = ', '.join(epgs) else: join_epgs = None if l3Outs: join_l3outs = ', '.join(l3Outs) else: join_l3outs = None if not bd_vrf: bd_vrf = None if not unicast_gateway: bridge_domain = 0 # Return to user input return bridge_domain, uni_route, scope, unkwn_uni, tenant, join_aps, join_epgs, join_l3outs, bd_vrf, iplearn, mtu, learn_limit, mac, gateways
7c2f841e9fd3822c03f8b4ea38581bcaba1b60d2
8,560
import torch


def hamming_dist(y_true, y_pred):
    """
    Calculate the Hamming distance between a given predicted label and the
    true label. Assumes inputs are torch Variables!

    Args:
        y_true (autograd.Variable): The true label
        y_pred (autograd.Variable): The predicted label
    Returns:
        (float): The Hamming distance between the two vectors
    """
    # Make sure y_pred is rounded to 0/1
    y_pred = torch.round(y_pred)
    result = torch.mean(torch.abs(y_true - y_pred), dim=1)
    result = torch.mean(result, dim=0)
    return float(result.data.cpu().numpy())
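A small sanity check with plain tensors (modern PyTorch tensors work in place of the Variables mentioned in the docstring):

import torch

y_true = torch.tensor([[1., 0., 1., 1.]])
y_pred = torch.tensor([[0.9, 0.2, 0.4, 0.8]])   # rounds to [1, 0, 0, 1]
print(hamming_dist(y_true, y_pred))             # -> 0.25 (one of four labels differs)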
0edda102820626b824861ac0f05d4d77f5def432
8,561
def task_mo():
    """Compile the gettext .po catalog into a binary .mo message catalog"""
    return {
        "actions": [
            """pybabel compile -D todo -i frontend/po/eng/LC_MESSAGES/todo.po -o frontend/po/eng/LC_MESSAGES/todo.mo"""
        ],
        "file_dep": ["frontend/po/eng/LC_MESSAGES/todo.po"],
        "targets": ["frontend/po/eng/LC_MESSAGES/todo.mo"],
    }
e6403c08973b9c4bdb20de954236fc2df8a4d2f5
8,562
from typing import Tuple


def tuple_action_to_int(
    action: Tuple[int, int], slot_based: bool, end_trial_action: bool
) -> int:
    """Converts tuple action to integer."""
    stone, potion = action
    num_special_actions = 2 if end_trial_action else 1
    if stone < 0:
        return stone + num_special_actions
    if slot_based:
        potions_and_cauldron = MAX_POTIONS + 1
    else:
        potions_and_cauldron = PerceivedPotion.num_types + 1
    return stone * potions_and_cauldron + potion + 1 + num_special_actions
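An illustrative call, assuming the slot-based layout and a hypothetical MAX_POTIONS value of 3 (the real constant lives in the surrounding module):

# With MAX_POTIONS == 3 (hypothetical), each stone spans 4 columns: 3 potion
# slots plus the cauldron, and two special actions occupy the lowest integers.
action = (2, 1)   # stone index 2, potion index 1
# -> 2 * (3 + 1) + 1 + 1 + 2 = 12
print(tuple_action_to_int(action, slot_based=True, end_trial_action=True))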
d1f616706910822670b0d14d6a19f3f9dbddf145
8,563
from datetime import datetime def PDM(signal=50, angle=0, n_points=1000, motion_slow=0, motion_size=75, box_size=8, point_size=0.05, point_speed=1, ITI=1000): """ Pattern Detection in Motion """ angle_rad = np.radians(angle) y_movement = np.sin(np.radians(angle))*point_speed x_movement = np.cos(np.radians(angle))*point_speed random_rad_angle = np.random.uniform(0, 360, int(n_points*(100-signal)/100)) random_y_movement = np.sin(np.radians(random_rad_angle))*point_speed random_x_movement = np.cos(np.radians(random_rad_angle))*point_speed # Generate points circle_r = n.Coordinates.to_pygame(distance_x=box_size/2) circle_x = n.Coordinates.to_pygame(x=0) circle_y = n.Coordinates.to_pygame(y=0) signal_x = [] signal_y = [] random_x = [] random_y = [] for point in range(int(n_points*signal/100)): alpha = 2 * np.pi * np.random.random() r = circle_r * np.random.random() x = r * np.cos(alpha) + circle_x y = r * np.sin(alpha) + circle_y signal_x.append(x) signal_y.append(y) for point in range(int(n_points*(100-signal)/100)): alpha = 2 * np.pi * np.random.random() r = circle_r * np.random.random() x = r * np.cos(alpha) + circle_x y = r * np.sin(alpha) + circle_y random_x.append(x) random_y.append(y) signal_x = np.array(signal_x) signal_y = np.array(signal_y) random_x = np.array(random_x) random_y = np.array(random_y) # Mask box_size = n.Coordinates.to_pygame(distance_y = box_size) x = n.screen_width/2-box_size/2 y = (n.screen_height-box_size)/2 # Preparation n.newpage("black", auto_refresh=False) # n.newpage("grey", auto_refresh=False) pygame.draw.circle(n.screen, n.color("grey"), (int(n.screen_width/2), int(n.screen_height/2)), int(abs(box_size)/2), 0) n.write("+", color="white", size=1.5) n.refresh() n.time.wait(ITI) # Movement time_start = datetime.datetime.now() for i in range(motion_size): n.newpage("black", auto_refresh=False) # n.newpage("grey", auto_refresh=False) pygame.draw.circle(n.screen, n.color("grey"), (int(n.screen_width/2), int(n.screen_height/2)), int(abs(box_size)/2), 0) for point in range(len(signal_x)): pygame.draw.circle(n.screen, n.color("black"), (int(signal_x[point]), int(signal_y[point])), 3, 0) # n.circle(x=half1_x[point], y=half1_y[point], size=point_size, fill_color="black") for point in range(len(random_x)): pygame.draw.circle(n.screen, n.color("black"), (int(random_x[point]), int(random_y[point])), 3, 0) # n.circle(x=half2_x[point], y=half2_y[point], size=point_size, fill_color="black") signal_x += x_movement signal_y -= y_movement random_x -= random_x_movement random_y += random_y_movement # TODO: ensure that points stay in the mask area (and transport them from one side to another if needed) n.refresh() if motion_slow > 0: n.time.wait(motion_slow) # Save duration = datetime.datetime.now()-time_start parameters = {"Angle": angle, "Angle_Radian": angle_rad, "Signal": signal, "n_Points": n_points, "Box_Size": box_size, "Motion_Size": motion_size, "Point_Size": point_size, "Point_Speed": point_speed, "Mask_Corrdinates": (int(n.screen_width/2), int(n.screen_height/2)), "Mask_Size": int(abs(box_size)/2), "ITI": ITI, "Movement_Duration": duration} return(parameters)
ea94312477326c7d08a44eeeeba3a39a7adc147b
8,564
import numpy as np


def warpImage(imIn, pointsIn, pointsOut, delaunayTri):
    """
    Warp an image.

    Parameters
    ----------
    imIn: input image
    pointsIn: input points
    pointsOut: output points
    delaunayTri: Delaunay triangles

    Returns
    -------
    imOut: the warped image
    """
    h, w, ch = imIn.shape
    imOut = np.zeros(imIn.shape, dtype=imIn.dtype)
    for j in range(0, len(delaunayTri)):
        tin = []
        tout = []
        for k in range(0, 3):
            pIn = pointsIn[delaunayTri[j][k]]
            pIn = constrainPoint(pIn, w, h)
            pOut = pointsOut[delaunayTri[j][k]]
            pOut = constrainPoint(pOut, w, h)
            tin.append(pIn)
            tout.append(pOut)
        warpTriangle(imIn, imOut, tin, tout)
    return imOut
f672cf4e6cad968c6f42747f128b436e9b00c466
8,565
import re def rmchars(value): """Remove special characters from alphanumeric values except for period (.) and negative (-) characters. :param value: Alphanumeric value :type value: string :returns: Alphanumeric value stripped of any special characters :rtype: string >>> import utils >>> utils.rmchars(value = "*6.5_") '6.5' >>> utils.rmchars(value = "ICE") 'ICE' >>> utils.rmchars(value = "-4.2") '-4.2' >>> utils.rmchars(value = "%&!@#8.32&#*;") '8.32' """ value = re.sub("[^A-Za-z0-9.-]+", "", value) return value
63428103f7da4184c6d9f33a9d05b02ce17f2448
8,566
def ema(x):
    """
    [Definition] Exponentially weighted moving average with window `period`
    [Category] Technical indicator
    """
    return 'ema(%s,%s)' % (x, pe.gen_param('ema', 'period'))
d5490340520f57c9083ae82d6fd1cadd2fc92208
8,567
from typing import Set


def tokenized(phrase: str) -> Set[str]:
    """Split a phrase into tokens and remove stopwords."""
    return set(normalize(phrase).split()) - STOPWORDS
3a01f5ea316de0f5b27506d1ff7f2358273616a2
8,568
def synthesize(pipeline_in, net, dev, res_alloc, output_dir, prefix="", override_ibits=0): """ Create an FPGA accelerator given a QNN and compute resource allocator. Returns an ExternalExecutionLayer wrapping the compiled simulation executable. pipeline_in : list of input layers res_alloc : function that takes in a pipeline and returns PE/SIMD annotated copy output_dir : where the generated code will be placed prefix : prefix for the generated files (unused) """ # before applying any transforms, pick up pipeline input precision # unless it is specified as override if override_ibits != 0: pipeline_ibits = override_ibits else: pipeline_ibits = pipeline_in[0].ibits # turn pipeline into a form synthesizable by the FPGA backend pipeline = convert(pipeline_in, net, dev, res_alloc, pipeline_ibits) # create output dir if it does not exist if not os.path.exists(output_dir): os.makedirs(output_dir) # collect parameters (side effect: file generation, no return values) map(lambda x: x.codegen_params(output_dir), pipeline) # collect globals (include statements etc.) glob = map(lambda x: x.codegen_globals(), pipeline) glob = "".join(i for i in glob) glob = indent(glob, 0) # collect variable declarations and other preparation decls = map(lambda x: x.codegen_declarations(), pipeline) decls = "".join(i for i in decls) decls = indent(decls, 1) # collect architecture instantiation code arch = map(lambda x: x.codegen_architecture(), pipeline) arch = "".join(i for i in arch) arch = indent(arch, 1) # get input/output stream declarations instream_decl = pipeline[0].getInStreamDecl() outstream_decl = pipeline[-1].getOutStreamDecl() # generate code for single i/o (useful for simulation) singleiodecls = "\n" + instream_decl.replace("&","") + ";" singleiodecls += "\n" + outstream_decl.replace("&","") + ";" singleiodecls = indent(singleiodecls, 1) single2instream = pipeline[0].codegen_single2instream("singleInStrm", "inStream") single2instream = indent(single2instream, 1) outstream2single = pipeline[-1].codegen_outstream2single("outStream", "singleOutStrm") outstream2single = indent(outstream2single, 1) memresources = determine_memory_resources(pipeline) memresources = indent(memresources,0) numInElems = pipeline[0].getNumInputElems() numOutElems = pipeline[-1].getNumOutputElems() # put generated text into template ret = docompute_template ret = ret.replace("$MEMRESOURCES$", memresources) ret = ret.replace("$GLOBALS$", glob) ret = ret.replace("$INSTREAM$", instream_decl) ret = ret.replace("$OUTSTREAM$", outstream_decl) ret = ret.replace("$DECLS$", decls) ret = ret.replace("$ARCH$", arch) # emit code with open(output_dir + "/docompute.cpp", "w") as f: f.write(ret) # emit wrapper ret = wrapper_template ret = ret.replace("$INSTREAM$", instream_decl) ret = ret.replace("$OUTSTREAM$", outstream_decl) ret = ret.replace("$SINGLEIODECLS$", singleiodecls) ret = ret.replace("$SINGLE2INSTREAM$", single2instream) ret = ret.replace("$OUTSTREAM2SINGLE$", outstream2single) ret = ret.replace("$IN_ELEMS$", str(numInElems)) ret = ret.replace("$OUT_ELEMS$", str(numOutElems)) with open(output_dir + "/wrapper.h", "w") as f: f.write(ret) # emit and run compile script for simulation sim_compile_script = sim_compile_script_template sim_compile_script = sim_compile_script.replace("$GENSRCDIR$", output_dir) script_fn = output_dir + "/simcompile.sh" with open(script_fn, "w") as f: f.write(sim_compile_script) # emit script for on-device emu with MLBP mlbp_script = ondevice_compile_script_template mlbp_script = mlbp_script.replace("$GENSRCDIR$", 
output_dir) script_fn = output_dir + "/mlbpcompile.sh" with open(script_fn, "w") as f: f.write(mlbp_script) # emit script for HLS synthesis hls_script = Template(open(finnroot + "/backend/fpga/scripts/hls-syn-template.tcl").read()) # TODO part and clkperiod should come from selected device hls_script = hls_script.substitute({ "config_proj_name" : "hls_syn", "config_hwsrcdir" : output_dir, "config_bnnlibdir" : finnroot + "/backend/fpga/hls", "config_proj_part" : dev.part, "config_clkperiod" : float(1000/dev.frequency), "config_toplevelfxn" : "BlackBoxJam" }) with open(output_dir + "/hls_syn.tcl", "w") as f: f.write(hls_script) # emit script for Verilator emu compilation after synthesis shutil.copy2(finnroot + "/backend/fpga/scripts/hwemu.sh", output_dir+"/hwemu.sh") # emit BNN-PYNQ bitfile and standalone executable scripts shutil.copy2(finnroot + "/backend/fpga/scripts/make_pynq_standalone_exe.sh", output_dir+"/make_pynq_standalone_exe.sh") shutil.copy2(finnroot + "/backend/fpga/scripts/make_pynq_bitfile.sh", output_dir+"/make_pynq_bitfile.sh") print "Outputting to: ", output_dir ret = backend_util.FPGABackendProduct(output_dir, pipeline, dev) return ret
6ff887a9d5698ec82c3f15b26c5742bc2f36e56d
8,569
async def server_error(request, exc):
    """
    Return an HTTP 500 page.
    """
    template = '500.html'
    context = {'request': request}
    return templates.TemplateResponse(template, context, status_code=500)
a11be57885b0f0f9107b190bafdebc6f13908f84
8,570
def return_post():
    """
    Returns the post-processing plugins.

    :param: None
    :return: POST_PROCESSING_PLUGINS
    """
    return POST_PROCESSING_PLUGINS
9c7469f8ec336217abdfdb46db8a0c511789a4bf
8,571
import typing
import os


def redis_uri() -> typing.Optional[str]:
    """Connection URI for Redis server."""
    value = os.environ.get("REDIS_URI")
    if not value:
        log.warning('Optional environment variable "REDIS_URI" is missing')
    return value
2b66db79232ce9f203bb4963b284af2b8878be6a
8,572
import base64

import cv2


def numpy_to_b64str(img):
    """
    Converts a numpy array into a base 64 string

    Args:
        img (np.array):

    Returns:
        str: base 64 representation of the numpy array/image.
    """
    img = img[..., ::-1]  # flip for cv conversion
    _, img = cv2.imencode('.jpg', img)  # strips header
    image_base64 = base64.b64encode(img)
    base64_string = image_base64.decode('utf-8')  # convert to string
    return base64_string
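A round-trip sketch (OpenCV and numpy assumed) showing how the string could be decoded back into an image:

import base64

import cv2
import numpy as np

rgb = np.zeros((8, 8, 3), dtype=np.uint8)        # dummy RGB image
b64 = numpy_to_b64str(rgb)
buf = np.frombuffer(base64.b64decode(b64), dtype=np.uint8)
bgr = cv2.imdecode(buf, cv2.IMREAD_COLOR)        # decoded back as a BGR array
print(bgr.shape)                                 # -> (8, 8, 3)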
a6af378a26dd3adac08568f49a5d8d74954feddc
8,573
def lennard_jones(r, epsilon, sigma, index=(12, 6)):
    """
    General pair potential resembling a Lennard Jones model.

    Default indexes values are for a typical LJ potential, also called
    12-6 potential.

    Parameters
    ----------
    r : float or np.ndarray
        Distance between interacting particles. It can be a float or a numpy
        array containing a set of particle-particle distances.
    epsilon : float
        Dispersion energy, i.e. depth of the potential well.
    sigma : float
        Distance at which the potential energy is zero.
    index : tuple, optional
        Power indexes for repulsive and attractive terms. The default is (12, 6).

    Returns
    -------
    float or np.ndarray
        Potential energies at the corresponding distances.
    """
    sig_r = sigma / r
    # Use the ** operator so the function works for both scalars and numpy
    # arrays, as documented (math.pow only accepts scalars).
    return 4 * epsilon * (sig_r ** index[0] - sig_r ** index[1])
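Two quick checks of the 12-6 form (numpy assumed): the potential is zero at r = sigma and reaches its minimum of -epsilon at r = 2**(1/6) * sigma.

import numpy as np

eps, sig = 1.5, 0.34
print(lennard_jones(sig, eps, sig))                  # -> 0.0
print(lennard_jones(2 ** (1 / 6) * sig, eps, sig))   # -> -1.5 (well depth, up to rounding)
r = np.linspace(0.3, 1.0, 8)                         # also works element-wise on arrays
print(lennard_jones(r, eps, sig))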
c16856d1960f1b2542305e4048d8e9fe5e866210
8,574
def get_unique_name(x, mult=0, extra=''):
    """
    Returns a unique key composed of inchikey and multiplicity

    >>> mol = get_mol('[O][O]')
    >>> get_unique_name(mol)
    'MYMOFIZGZYHOMD-UHFFFAOYSA-N3'
    """
    mol = get_mol(x, make3D=True)
    if mult == 0:
        mult = mol.spin
    return mol.write("inchikey").strip() + str(mult) + extra
a9a58078fb2af1c0542dcf77f522154dd2c3a374
8,575
def get_individual_user(user_id: int) -> JSONResponse:
    """
    Lists all information belonging to one user.

    :param user_id: the id of the user
    :return: status code and response data
    """
    user = _get_db()["users"].find_one({"user_id": user_id})
    return JSONResponse(status_code=status.HTTP_200_OK, content=dumps(user))
dfa8d5cdfa8dd8363c550c79d18924a0b5a5764b
8,576
from typing import Union from typing import List from typing import Optional from typing import Tuple def portfolio_averages( df: pd.DataFrame, groupvar: str, avgvars: Union[str, List[str]], ngroups: int = 10, byvars: Optional[Union[str, List[str]]] = None, cutdf: pd.DataFrame = None, wtvar: Optional[str] = None, count: Union[str, bool] = False, portvar: str = "portfolio", avgonly: bool = False, ) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]: """ Creates portfolios and calculates equal- and value-weighted averages of variables within portfolios. If ngroups=10, then will form 10 portfolios, with portfolio 1 having the bottom 10 percentile of groupvar, and portfolio 10 having the top 10 percentile of groupvar. :Notes: Resets index and drops in output data, so don't use if index is important (input data not affected) :param df: input data :param groupvar: name of variable in df to form portfolios on :param avgvars: variables to be averaged :param ngroups: number of portfolios to form :param byvars: name of variable(s) in df, finds portfolios within byvars. For example if byvars='Month', would take each month and form portfolios based on the percentiles of the groupvar during only that month :param cutdf: optionally determine percentiles using another dataset :param wtvar: name of variable in df to use for weighting in weighted average :param count: pass variable name to get count of non-missing of that variable within groups. :param portvar: name of portfolio variable in the output dataset :param avgonly: True to return only averages, False to return (averages, individual observations with portfolios) :return: """ ports = portfolio( df, groupvar, ngroups=ngroups, byvars=byvars, cutdf=cutdf, portvar=portvar ) if byvars: assert isinstance(byvars, (str, list)) if isinstance(byvars, str): byvars = [byvars] by = [portvar] + byvars avgs = averages(ports, avgvars, byvars=by, wtvar=wtvar, count=count) else: avgs = averages(ports, avgvars, byvars=portvar, wtvar=wtvar, count=count) if avgonly: return avgs else: return avgs, ports
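A minimal sketch of the intended call pattern, assuming pandas and the `portfolio` / `averages` helpers this function relies on are importable; the data are synthetic:

import numpy as np
import pandas as pd

np.random.seed(0)
df = pd.DataFrame({
    'Month': np.repeat(['2020-01', '2020-02'], 50),
    'signal': np.random.randn(100),
    'ret': np.random.randn(100) / 100,
    'mktcap': np.random.rand(100) * 1e9,
})
# Five signal-sorted portfolios per month, value-weighted average returns.
avgs, obs = portfolio_averages(df, groupvar='signal', avgvars='ret',
                               ngroups=5, byvars='Month', wtvar='mktcap')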
23c902aafd341a7bbd8e6fc8b005e3cdb5a10f82
8,577
import logging def TestQuery(): """Runs a test query against the measurement-lab BigQuery database. Returns: (string) The query results formatted as an HTML page. """ # Certify BigQuery access credentials. credentials = AppAssertionCredentials( scope='https://www.googleapis.com/auth/bigquery') http = credentials.authorize(httplib2.Http(memcache)) service = build('bigquery', 'v2', http=http) job_runner = service.jobs() # Run a query against the BigQuery database. logging.debug('Query: %s' % TEST_QUERY) jobdata = {'configuration': {'query': {'query': TEST_QUERY}}} insert = job_runner.insert(projectId=PROJECT_ID, body=jobdata).execute() logging.debug('Response: %s' % insert) currentRow = 0 queryReply = job_runner.getQueryResults( projectId=PROJECT_ID, jobId=insert['jobReference']['jobId'], startIndex=currentRow).execute() results = queryReply while 'rows' in queryReply and currentRow < queryReply['totalRows'] : currentRow += len(queryReply['rows']) queryReply = job_runner.getQueryResults( projectId=PROJECT_ID, jobId=queryReply['jobReference']['jobId'], startIndex=currentRow).execute() if 'schema' not in results or 'fields' not in results['schema']: if 'schema' in queryReply and 'fields' in queryReply['schema']: results['schema'] = queryReply['schema'] if 'rows' in queryReply: results['rows'].extend(queryReply['rows']) # Format the results as an HTML page. body = '<h2>The Query</h2><pre>%s</pre>\n<hr>\n' % TEST_QUERY tablerows = '<tr>' for field in results['schema']['fields']: tablerows += '<th>%s</th>' % field['name'] for row in results['rows']: tablerows += '</tr><tr>' for value in row['f']: tablerows += '<td>%s</td>' % value['v'] tablerows += '</tr>' body += '<table border=1>\n%s\n</table>\n' % tablerows return '<!DOCTYPE html><html><body>%s</body></html>' % body
fa278ab9a92990aa9f97d0db8bddaf89c5ee974a
8,578
def get_zero_crossing_rate(y, get_mean=True):
    """
    Compute the Zero Crossing Rate (ZCR)

    :param y: np.ndarray [shape=(n,)]
        Audio time series
    :param get_mean: bool
        Whether to instead return the mean of ZCR over all frames
    :return: np.ndarray [shape=(1,t)] or float
        ZCR for each frame, or the mean ZCR
    """
    zcrs = librosa.feature.zero_crossing_rate(y=y)
    if get_mean:
        return zcrs.mean()
    else:
        return zcrs
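A quick sketch with synthetic signals (librosa and numpy assumed): a pure tone should have a much lower mean ZCR than white noise.

import librosa
import numpy as np

sr = 22050
tone = librosa.tone(440, sr=sr, duration=1.0)    # 440 Hz sine wave
noise = np.random.randn(sr)
print(get_zero_crossing_rate(tone))              # small value
print(get_zero_crossing_rate(noise))             # close to 0.5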
782cd302acc69065d26837e45fb882714fa6b927
8,579
import argparse def parse_arguments(): """ Parse the command line arguments of the program. """ parser = argparse.ArgumentParser(description='Train or test the CRNN model.') parser.add_argument( "--train", action="store_true", help="Define if we train the model" ) parser.add_argument( "--test", action="store_true", help="Define if we test the model" ) parser.add_argument( "-ttr", "--train_test_ratio", type=float, nargs="?", help="How the data will be split between training and testing", default=0.70 ) parser.add_argument( "-m", "--model_path", type=str, nargs="?", help="The path where the pretrained model can be found or where the model will be saved", required=True ) parser.add_argument( "-ex", "--examples_path", type=str, nargs="?", help="The path to the file containing the examples (training samples)", required=True ) parser.add_argument( "-bs", "--batch_size", type=int, nargs="?", help="Size of a batch", default=64 ) parser.add_argument( "-it", "--iteration_count", type=int, nargs="?", help="How many iteration in training", default=10 ) parser.add_argument( "-miw", "--max_image_width", type=int, nargs="?", help="Maximum width of an example before truncating", default=2000 ) parser.add_argument( "-mtl", "--max_text_length", type=int, nargs="?", help="Max text length in character", default=200 ) return parser.parse_args()
17988797aacdb4608860b640e89191d71a2c98b0
8,580
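A hedged invocation example for the parser above; the script name and paths are placeholders, not taken from the original project:

# Hypothetical command line:
#   python train.py --train -m ./models/crnn -ex ./data/examples.txt -bs 32 -it 20
args = parse_arguments()
print(args.train, args.model_path, args.batch_size)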
import math

import numpy as np


def UF9(x):
    """
    adapted from https://github.com/Project-Platypus/Platypus/blob/master/platypus/problems.py
    """
    nvars = len(x)
    count1 = 0
    count2 = 0
    count3 = 0
    sum1 = 0.0
    sum2 = 0.0
    sum3 = 0.0
    E = 0.1

    for j in range(3, nvars + 1):
        yj = x[j - 1] - 2.0 * x[1] * math.sin(2.0 * math.pi * x[0] + j * math.pi / nvars)

        if j % 3 == 1:
            sum1 += yj**2
            count1 += 1
        elif j % 3 == 2:
            sum2 += yj**2
            count2 += 1
        else:
            sum3 += yj**2
            count3 += 1

    yj = (1.0 + E) * (1.0 - 4.0 * (2.0 * x[0] - 1.0)**2)
    yj = max(yj, 0.0)

    f1 = 0.5 * (yj + 2.0 * x[0]) * x[1] + 2.0 * sum1 / count1
    f2 = 0.5 * (yj - 2.0 * x[0] + 2.0) * x[1] + 2.0 * sum2 / count2
    f3 = 1.0 - x[1] + 2.0 * sum3 / count3

    return np.array([f1, f2, f3])
577b36653517e09cef764528920773ea51c5ed60
8,581
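A quick sanity check for the UF9 objective above; the decision vector length of 30 is the value commonly used for this benchmark, and drawing all variables in [0, 1] keeps them inside the UF9 domain:

import numpy as np

x = np.random.rand(30)   # UF9 needs at least 3 decision variables
f1, f2, f3 = UF9(x)
print(f1, f2, f3)        # three objective values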
import logging
import os
import shlex
import subprocess
import sys
import traceback

# Module-level logger (assumed here; the original module defines its own `logger`).
logger = logging.getLogger(__name__)


def run_command(cmd_str, stdin=None, stdout_devnull=False):
    """ run command """
    cmd = shlex.split(cmd_str)
    try:
        if stdout_devnull:
            # for pg_ctl command
            with open(os.devnull, 'w') as devnull:
                res = subprocess.run(cmd, stdout=devnull)
        else:
            res = subprocess.run(cmd, check=True, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, stdin=stdin)
    except subprocess.CalledProcessError as e:
        logger.critical(traceback.format_exc())
        logger.info('Command: {} '.format(cmd_str))
        logger.info('Stdout: {}'.format(e.stdout.decode("utf8")))
        logger.info('Stderr: {}'.format(e.stderr.decode("utf8")))
        sys.exit(1)
    return res
0f59d8020012b69d70183592e6f7a585220e0c0b
8,582
import numpy as np
import scipy.integrate as scp_int

# AxisError is assumed to be the custom exception imported from the surrounding package.


def antiderivate(values, ax_val, index, Nper, is_aper, is_phys, is_freqs):
    """Returns the anti-derivate of values along given axis
    values is assumed to be periodic and axis is assumed to be a linspace

    Parameters
    ----------
    values: ndarray
        array to derivate
    ax_val: ndarray
        axis values
    index: int
        index of axis along which to derivate
    Nper: int
        number of periods to replicate
    is_aper: bool
        True if values is anti-periodic along axis
    is_phys: bool
        True if physical quantity (time/angle/z)
    is_freqs: bool
        True if frequency axis

    Returns
    -------
    values: ndarray
        anti-derivate of values
    """
    if is_freqs:
        dim_array = np.ones((1, values.ndim), int).ravel()
        dim_array[index] = -1
        axis_reshaped = ax_val.reshape(dim_array)
        values = values / (axis_reshaped * 2 * 1j * np.pi)

    elif is_phys:
        if ax_val.size > 1:
            # Swap axis to always have integration axis on 1st position
            values = np.swapaxes(values, index, 0)
            if Nper is None:
                # Taking input values
                values_full = values
                ax_full = ax_val
            else:
                # Add last point to axis
                ax_full = np.concatenate(
                    (
                        ax_val,
                        np.array([ax_val[-1] + ax_val[1] - ax_val[0]]),
                    )
                )
                # Get values on a full (anti-)period
                shape = list(values.shape)
                shape[0] = shape[0] + 1
                values_full = np.zeros(shape, dtype=values.dtype)
                values_full[:-1, ...] = values
                # Add first sample at the end of values to integrate on last interval
                # Last value is the same as (respectively the opposite of) the first value
                # in case of periodicity (respectively anti-periodicity)
                values_full[-1, ...] = (-1) ** int(is_aper) * values[0, ...]
            # Anti-derivate along axis
            values = np.roll(
                scp_int.cumulative_trapezoid(values_full, x=ax_full, axis=0),
                shift=1,
                axis=0,
            )
            # Integration constant is given by removing average value
            values = values - np.mean(values, axis=0)
            # Get N first values and swap axes back to origin
            values = np.swapaxes(values, 0, index)
        else:
            raise Exception("Cannot anti-derivate along axis if axis size is 1")

    else:
        raise AxisError("Derivation only available for time/angle/z/freqs")

    return values
9280187e907e16f1b2b00a1e86acd43538adcbe4
8,583
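A small self-contained check of the antiderivate helper above, assuming it and numpy are importable. Integrating cos(2*pi*f*t) over one period should give sin(2*pi*f*t)/(2*pi*f) up to the removed mean and the trapezoid discretization error:

import numpy as np

f = 50.0                                        # arbitrary test frequency
t = np.linspace(0, 1 / f, 100, endpoint=False)  # one full period, as the function expects
signal = np.cos(2 * np.pi * f * t)

primitive = antiderivate(
    signal, t, index=0, Nper=1, is_aper=False, is_phys=True, is_freqs=False
)
expected = np.sin(2 * np.pi * f * t) / (2 * np.pi * f)
print(np.max(np.abs(primitive - expected)))     # small discretization error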
import numpy as np


def renumber_labels(label_img):
    """
    Re-number nuclei in a labeled image so the nuclei
    numbers are unique and consecutive.
    """
    new_label = 0
    for old_label in np.unique(label_img):
        if not old_label == new_label:
            label_img[label_img == old_label] = new_label
        new_label += 1
    return label_img
4a37f151ba5a4e3066ce3656903b587f38deafea
8,584
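A small illustration of the relabeling above; the array values are made up for the example, and note that the input is modified in place:

import numpy as np

labels = np.array([[0, 0, 5],
                   [5, 7, 7]])
print(renumber_labels(labels))
# [[0 0 1]
#  [1 2 2]]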
import os
from pathlib import Path
from typing import Any, List, Optional

from fastapi import File, HTTPException, UploadFile, status

# `deps` (upload-file helpers) and `DenseNet_XGBoost` are assumed to come from
# the surrounding project; they are not standard-library or FastAPI names.


async def densenet_xgboost_action_localization(
    files: List[UploadFile] = File(...),
    weights_densenet: Optional[str] = "denseXgB_model_mylayer",
    weights_xgboost: Optional[str] = "recognition_xgboost_prev_frames",
    classNames: Optional[str] = "classes",
    save_upload_to_file: bool = False,
) -> Any:
    """
    Get densenet_xgboost action localization result for the video file.
    """
    # Obtain the model paths
    model_path_densenet = Path(f"model_weights/densenet_xgboost/densenet/{weights_densenet}.h5")
    model_path_xgboost = Path(f"model_weights/densenet_xgboost/xgboost/{weights_xgboost}.joblib")
    model_path_classes = Path(f"model_weights/densenet_xgboost/classes/{classNames}.txt")

    if (
        not os.path.isfile(model_path_densenet)
        or not os.path.isfile(model_path_xgboost)
        or not os.path.isfile(model_path_classes)
    ):
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail="Model weights not available."
        )

    for file in files:
        try:
            # Obtain the video path
            if file.content_type in ['video/mp4']:
                if save_upload_to_file:
                    video_path = Path(f'uploads/video/{file.filename}')
                    video_path.parent.mkdir(parents=True, exist_ok=True)
                    deps.save_upload_file(upload_file=file, destination=video_path)
                else:
                    video_path = deps.save_upload_file_tmp(upload_file=file)
            else:
                raise HTTPException(
                    status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                    detail="Please upload only .mp4 files."
                )

            model = DenseNet_XGBoost(
                input_video_path=video_path,
                model_path_densenet=model_path_densenet,
                model_path_xgboost=model_path_xgboost,
                model_path_classes=model_path_classes,
                video_name=file.filename,
            )
            save_path = model.predict()
            print(video_path)
        finally:
            if not save_upload_to_file:
                Path.unlink(video_path)  # Delete the temp file

    return {
        "model_weights_rgb": weights_densenet,
        "model_weights_of": weights_xgboost,
        "classNames": classNames,
        "results_path": save_path
    }
fafa9cbfa8cde1a6b5b2723c59897dd76b162971
8,585
import logging

# `request` and `redirect` are assumed to be Flask's; `_get_smart` is a helper
# defined elsewhere in the app that returns the configured SMART client.
from flask import redirect, request


def callback():
    """ Extract the OAuth code from the callback and exchange it for an access token. """
    smart_client = _get_smart()
    try:
        smart_client.handle_callback(request.url)
    except Exception as e:
        return """<h1>Authorization Error</h1><p>{}</p><p><a href="/logout">Start over</a></p>""".format(e)
    logging.debug("Got an access token, returning home")
    return redirect('/')
3b54c66c726101c8b10ad85c4c5e211bb8a0ffc3
8,586
def __virtual__():
    """
    Return virtual name of the module.

    :return: The virtual name of the module.
    """
    return __virtualname__
3f1a19fab2561ae1fb464d76a13e7a0b75af5c93
8,587
def getsamplev3(qcode):
    """Get a sample object of a given identifier in API V3 style

    Returns:
        A sample (v3) object
    """
    scrit = SampleSearchCriteria()
    scrit.withCode().thatEquals(qcode)
    fetch_opt = SampleFetchOptions()
    fetch_opt.withProperties()
    fetch_opt.withSpace()
    result = api.searchSamples(sessionToken, scrit, fetch_opt)

    samples = []
    for sample in result.getObjects():
        samples.append(sample)

    if len(samples) > 1:
        raise mtbutils.MTBdropboxerror(
            'More than one sample found with identifier {}'.format(qcode))

    return samples[0]
513de42ffd13f6b9abe74753e568e8db2fa473e3
8,588
def k892_distribution(mass):
    """Calculate normalized relativistic Breit-Wigner distribution value
    for K(892) at given mass"""
    if k892_distribution.norm is None:
        k892_distribution.norm = _norm(_k892_distribution_unnormalized)
    return _k892_distribution_unnormalized(mass) / k892_distribution.norm


# Lazily computed normalization constant cached on the function object. The
# function expects this attribute to exist, so it is presumably initialized
# like this in the original module (the helpers `_norm` and
# `_k892_distribution_unnormalized` are defined there as well).
k892_distribution.norm = None
38175808a7f9acf178604bf64935f0beeb3f7631
8,589
def ProcessMoleculesUsingSingleProcess(Mols, PAINSPatternMols, Writer, WriterFiltered):
    """Process and filter molecules using a single process."""

    NegateMatch = OptionsInfo["NegateMatch"]
    OutfileFilteredMode = OptionsInfo["OutfileFilteredMode"]
    Compute2DCoords = OptionsInfo["OutfileParams"]["Compute2DCoords"]
    SetSMILESMolProps = OptionsInfo["OutfileParams"]["SetSMILESMolProps"]

    MiscUtil.PrintInfo("\nFiltering molecules...")

    (MolCount, ValidMolCount, RemainingMolCount) = [0] * 3
    FirstMol = True
    for Mol in Mols:
        MolCount += 1

        if Mol is None:
            continue

        if RDKitUtil.IsMolEmpty(Mol):
            MolName = RDKitUtil.GetMolName(Mol, MolCount)
            MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
            continue

        ValidMolCount += 1

        if FirstMol:
            FirstMol = False
            if SetSMILESMolProps:
                if Writer is not None:
                    RDKitUtil.SetWriterMolProps(Writer, Mol)
                if WriterFiltered is not None:
                    RDKitUtil.SetWriterMolProps(WriterFiltered, Mol)

        MolMatched = DoesMoleculeContainsPAINSPattern(Mol, PAINSPatternMols)
        if MolMatched == NegateMatch:
            RemainingMolCount += 1
            WriteMolecule(Writer, Mol, Compute2DCoords)
        else:
            if OutfileFilteredMode:
                WriteMolecule(WriterFiltered, Mol, Compute2DCoords)

    return (MolCount, ValidMolCount, RemainingMolCount)
fe81953ce311724005c27ea309aa238578c4fd1c
8,590
import z3

# `BitVec` and `_arithmetic_helper` are assumed to be defined in the
# surrounding SMT-wrapper module.


def UDiv(a: BitVec, b: BitVec) -> BitVec:
    """Create an unsigned division expression.

    :param a:
    :param b:
    :return:
    """
    return _arithmetic_helper(a, b, z3.UDiv)
fb3e300a96afdbf17fa7e6fff02379790b2dfd02
8,591
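For reference, the underlying solver call used above can be exercised directly with the z3 Python API, independent of the wrapper's own BitVec type:

import z3

a = z3.BitVec('a', 256)
b = z3.BitVec('b', 256)
expr = z3.UDiv(a, b)   # unsigned division, unlike the signed `a / b`

# Concrete check: 10 / 3 with unsigned semantics evaluates to 3.
print(z3.simplify(z3.UDiv(z3.BitVecVal(10, 8), z3.BitVecVal(3, 8))))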
def _pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0):
    """Create a cube with pressure level widths.

    This is done by taking a 2D surface pressure field as lower bound.

    Parameters
    ----------
    tro3_cube : iris.cube.Cube
        `Cube` containing `mole_fraction_of_ozone_in_air`.
    ps_cube : iris.cube.Cube
        `Cube` containing `surface_air_pressure`.
    top_limit : float
        Pressure in Pa.

    Returns
    -------
    iris.cube.Cube
        `Cube` of same shape as `tro3_cube` containing pressure level widths.
    """
    pressure_array = _create_pressure_array(tro3_cube, ps_cube, top_limit)

    data = _apply_pressure_level_widths(pressure_array)
    p_level_widths_cube = tro3_cube.copy(data=data)
    p_level_widths_cube.rename('pressure level widths')
    p_level_widths_cube.units = ps_cube.units

    return p_level_widths_cube
53dd14f6e0b1fda249ecd10d0ad30cfb4e076d5a
8,592
from django.apps import apps
from django.conf import settings


def load_model_configurations(sender):
    """
    Iterates through the MODELS_CRUD_EVENT setting searching for the sender model configuration.

    :param sender: Django Model
    :return: dict or None
    """
    for model_config in settings.MODELS_CRUD_EVENT:
        model = model_config['model']
        app, model = model.rsplit('.', 1)
        model = apps.get_app_config(app).get_model(model)
        if sender == model:
            return model_config
    return None
e32d441de47f9bb1a78f93854e1c0436819c148b
8,593
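A hedged sketch of what the corresponding MODELS_CRUD_EVENT setting might look like; the app and model names below are invented for illustration:

# settings.py (illustrative only)
MODELS_CRUD_EVENT = [
    {
        'model': 'shop.Product',   # "<app_label>.<ModelName>", split via rsplit('.', 1)
        # any extra per-model options the project defines would go here
    },
]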
from typing import Optional


def get_user_by_private_or_public_nickname(nickname: str) -> Optional[User]:
    """
    Gets the user by his (public) nickname, based on the option, whether his nickname is public or not

    :param nickname: Nickname of the user
    :return: Current user or None
    """
    user: User = get_user_by_case_insensitive_nickname(nickname)
    public_user: User = get_user_by_case_insensitive_public_nickname(nickname)

    if not user or not public_user:
        return None

    settings: Settings = user.settings
    if not settings:
        return None

    if settings.should_show_public_nickname and user:
        return user
    elif not settings.should_show_public_nickname and public_user:
        return public_user

    return None
1dc43337c8e1372a32ed471ef8285544107cd22b
8,594
def expose(window, context, name, monitor):
    """REST HTTP/HTTPS API to view tuples from a window on a stream.

    Embeds a Jetty web server to provide HTTP REST access to the collection of tuples in `window`
    at the time of the last eviction for tumbling windows, or last trigger for sliding windows.

    Example with a sliding window::

        import streamsx.endpoint as endpoint

        s = topo.source([{'a': 'Hello'}, {'a': 'World'}, {'a': '!'}]).as_json()
        endpoint.expose(window=s.last(3).trigger(1), context='sample', name='view', monitor='endpoint-out')

    The URL containing "**context**/**name**" for the sample above ends with: ``/sample/view/tuples``

    **URL mapping**

    The URL contains the following parts:

    ``https://<base-url>/<prefix>/<context>/<name>/<postfix>``

    For a web-server in a job its URLs are exposed with **prefix** path:

    * jobname/ - When a job name was explicitly set. Job names should map simply to a single path element.
    * streams/jobs/jobid/ - When a job name was not explicitly set.

    Example URLs within the cluster for application-name of "em" in project "myproject" are

    * with a web-server in job named "transit" with context "sample" and name "view": ``https://em.myproject.svc:8443/transit/sample/view/tuples``
    * with a web-server in job 7: ``https://em.myproject.svc:8443/streams/jobs/7/sample/view/tuples``
    * retrieve information for job named "transit" with context "sample" and name "view": ``https://em.myproject.svc:8443/transit/sample/view/ports/info``

    Args:
        window(Window): Windowed stream of tuples that will be viewable using a HTTP GET request.
        context(str): Defines a URL context path. URL contains ``context``/``name``.
        name(str): Sink name in the Streams context. This name is part of the URL.
        monitor(str): The name of the endpoint-monitor that provides the ssl configuration for this endpoint. If it is None, the connection uses plain HTTP.

    Returns:
        streamsx.topology.topology.Sink: Stream termination.
    """
    _add_toolkit_dependency(window.topology, '[4.3.0,5.0.0)')

    sslAppConfigName = None
    if monitor is not None:
        sslAppConfigName = monitor + '-streams-certs'

    _op = _HTTPTupleView(window, context=context, name=name, sslAppConfigName=sslAppConfigName)

    return streamsx.topology.topology.Sink(_op)
ca3cf81c91ee89210da6989fdecce727d44273a1
8,595
import re

# `request` is assumed to be Flask's request proxy.
from flask import request


def get_order_args():
    """
    Get order arguments, return a dictionary
    { <VIEW_NAME>: (ORDER_COL, ORDER_DIRECTION) }

    Arguments are passed like: _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
    """
    orders = {}
    for arg in request.args:
        re_match = re.findall('_oc_(.*)', arg)
        if re_match:
            order_direction = request.args.get('_od_' + re_match[0])
            if order_direction in ('asc', 'desc'):
                orders[re_match[0]] = (request.args.get(arg), order_direction)
    return orders
a5e57f95479e15c8167434ff34c51cc80fc43f45
8,596
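An illustrative round trip for the helper above using a Flask test request context; the view and column names are made up for the example:

from flask import Flask

app = Flask(__name__)

# Simulate a request such as  GET /?_oc_UserModelView=username&_od_UserModelView=asc
with app.test_request_context('/?_oc_UserModelView=username&_od_UserModelView=asc'):
    print(get_order_args())   # {'UserModelView': ('username', 'asc')}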
def version_info():  # pragma: no cover
    """ Get version of nameko_kafka package as tuple """
    return tuple(map(int, __version__.split('.')))
8fe39c50a43e40a589abb51f56e2c7c503026712
8,597
def StrokePathCommandAddCapType(builder, capType):
    """This method is deprecated. Please switch to AddCapType."""
    return AddCapType(builder, capType)
4e7f852cde4993994ab5f7cf3e1b57700eaff7d3
8,598
def process_images(dummy_request):
    """Downloads and processes all images uploaded before resize logic fix deployment"""
    global n_global_resized

    media_bucket = storage_client.bucket(MEDIA_BUCKET)

    process_global_images(db_pool, media_bucket)
    process_user_images(db_pool, media_bucket)

    return f"Done! \n\n resized, replaced: \nGlobal: {n_global_resized}\n User: {n_user_resized}"
ea3734ce797305f7305880b02d2696c3ca8a21c7
8,599