query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Computes voltage drop using abf object and epoch index. | def voltage_drop_abf(abf, epoch_start):
vmin = Vmin_abf(abf, epoch_start)
resting = Vrest_abf(abf, epoch_start)
return vmin - resting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def voltage_drop(V):\n vmin = Vmin(V)\n resting = Vrest(V)\n return vmin - resting",
"def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)",
"def deredden(EBV,filt):\n conversion_data = ascii.read(datapath+\"/stellar_param_data/sf11.txt\")\n assert filt in conversion_data[\"filter\"], (filt, conversion_data[\"filter\"])\n return EBV * float(conversion_data[\"AB_EBV\"][np.where(conversion_data[\"filter\"]==filt)[0]])",
"def _getBusVoltageLambdaSensor(self):\n muVmin = array([b.mu_vmin for b in self.market.case.connected_buses])\n muVmax = array([b.mu_vmax for b in self.market.case.connected_buses])\n muVmin = -1.0 * muVmin\n diff = muVmin + muVmax\n return diff",
"def Vrest_abf(abf, epoch_start):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n p1 = abf.sweepEpochs.p1s[epoch_start+1]\n V = abf.sweepY[p0:p1]\n return Vrest(V)",
"def extract_delta_Q_variance(batch,index,start_cycle,end_cycle):\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n Qd_100 = batch[cell_no]['cycles'][str(end_cycle-1)]['Qdlin']\n Qd_10 = batch[cell_no]['cycles'][str(start_cycle-1)]['Qdlin']\n #Calculte the log of variance of (Qd100 - Qd10)\n var_log = log(abs(variance(Qd_100-Qd_10)),10) # log base 10\n X.append(var_log)\n X = np.reshape(X,(-1,1))\n return X\n pass",
"def VACF(df,conversion = \"x\"):\n #conversion from pixels to micrometers\n if conversion == \"y\":\n df = df/1200*633\n else:\n df = df/1600*844\n #computes the velocity in one direction between the frames\n dif = pd.DataFrame()\n\n for i in range(1,len(df.T)):\n dif[i-1] = velocity(df[i-1],df[i])\n vel = []\n for i in range(len(dif)):\n vel.append(tidynamics.acf(dif.T[i]))\n\n #return the velocities in array\n return np.array(vel)",
"def _dvolume_dbsf(self):\n\n vol = ((self.I0 * self.V.omega *\n self._mu_0 / (self._mu_0 + self._mu_ex))\n * (1. - np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex)))\n * self.V.p(self.t_0, self.t_ex, self.p_0, self.p_ex,\n param_dict=self.param_dict))\n\n return - vol",
"def calc_out_voltage(self, input_photocurrent_file):\n pass",
"def test_drop_zero_variance_on_subset_columns_with_zv_removals(data):\n step = DropZVColumnsStep(['released', 'episodes'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns",
"def surface_runoff_flux(runoff, drain):\n return runoff - drain",
"def _get_vae_loss(self, x, x_bar):\n log_softmax_var = tf.nn.log_softmax(x_bar)\n self.neg_ll = -tf.reduce_mean(\n input_tensor=tf.reduce_sum(input_tensor=log_softmax_var * x, axis=-1)\n )\n a = tf.keras.backend.print_tensor(self.neg_ll) # noqa: F841\n # calculate positive Kullback–Leibler divergence divergence term\n kl_loss = K.mean(\n 0.5\n * K.sum(\n -1 - self.z_log_var + K.square(self.z_mean) + K.exp(self.z_log_var),\n axis=-1,\n )\n )\n\n # obtain negative ELBO\n neg_ELBO = self.neg_ll + self.beta * kl_loss\n\n return neg_ELBO",
"def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n p = dropout_param['p']\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx",
"def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = mask*dout\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx",
"def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx",
"def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx",
"def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor",
"def genfb(h, n, u, v, f, dt, dx, dy, du,dv,dn, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # generalized forward backward feedback timestep\n \n beta = np.float32(beta)\n eps = np.float32(eps)\n gamma = np.float32(gamma)\n mu = np.float32(mu)\n \n \n dn_m1,dn_m2,dn_m0 = dn # unpack\n dndt_x(h, n, u, v, dx, dy, dn_m0)\n \n# test_out = dn_m0.copy()\n# dndt(h, n, u, v, dx, dy, test_out)\n \n# test_dif = dn_m0-test_out\n# if np.max(np.abs(test_dif[1:-1,1:-1] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dn diff 2\")\n# print (test_dif[:,5])\n \n #dn_m0[:]=test_out \n\n # must do the following before the u and v !\n n1 = n + ((p32+beta)* dn_m0 - (p5+beta+beta)* dn_m1+ (beta)* dn_m2)*dt\n #del dn_m2\n du_m0,du_m1,du_m2,du_p1 = du # unpack\n dudt_x(h, n1, f, u, v, dx, dy, du_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n\n dv_m0,dv_m1,dv_m2,dv_p1 = dv # unpack \n dvdt_x(h, n1, f, u, v, dx, dy, dv_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n# test_out = du_p1.copy()\n# dudt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = du_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"du diff\")\n# print (test_dif[:,5])\n \n# #du_p1[:] = test_out\n\n# test_out = dv_p1.copy()\n# dvdt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = dv_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.max(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dv diff\")\n# print (test_dif[:,5])\n \n #dv_p1[:] = test_out\n \n u1 = u+ ((p5+gamma+eps+eps)*du_p1 +(p5-gamma-gamma-eps-eps-eps)*du_m0 +gamma*du_m1+eps*du_m2)*dt\n # del du_m2\n v1 = v+ ((p5+gamma+eps+eps)*dv_p1 +(p5-gamma-gamma-eps-eps-eps)*dv_m0 +gamma*dv_m1+eps*dv_m2)*dt\n # del dv_m2\n\n\n \n \n dv = [ dv_p1,dv_m0,dv_m1,dv_m2 ]\n du = [ du_p1,du_m0,du_m1,du_m2 ]\n dn = [ dn_m0,dn_m1,dn_m2 ]\n# n[:,:], u[:,:], v[:,:], = n1, u1, v1\n return n1, u1, v1, du,dv,dn",
"def wright2004_dVMag(BmV, VMag):\n return wright2004_main_sequence(BmV) - VMag",
"def dropout_backward(dout, cache):\r\n dropout_param, mask = cache\r\n mode = dropout_param['mode']\r\n\r\n if mode == 'train':\r\n dx = dout * mask\r\n elif mode == 'test':\r\n dx = dout\r\n return dx",
"def sweepUpDown(self):\r\n self._vna.makeSweepUnprocessed()\r\n print('Beginning Data Collection')\r\n\r\n vnaData = []\r\n # Start voltage at zero\r\n self._power.voltsSetpointSet(0)\r\n negStep = (-1)*self._power.voltageStep; time.sleep(1)\r\n\r\n # Step to 30V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(self._power.voltageStep); time.sleep(1)\r\n # Step back to 0V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(negStep); time.sleep(1) \r\n # Switch polarity on power supply\r\n assert self._power.voltsMeas() == 0; time.sleep(2)\r\n self._power.changePolarity(); time.sleep(2)\r\n # Step to -30V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(self._power.voltageStep); time.sleep(1)\r\n # Step back to 0V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(negStep); time.sleep(1)\r\n self._power.voltsSetpointSet(0)\r\n\r\n # Switch polarity on power supply\r\n assert self._power.voltsMeas() == 0; time.sleep(2)\r\n self._power.changePolarity(); time.sleep(2)\r\n\r\n self._processData(vnaData)",
"def _variational_recurrent_dropout_value(\n self, index, value, noise, keep_prob):\n # uniform [keep_prob, 1.0 + keep_prob)\n random_tensor = keep_prob + noise\n\n # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)\n binary_tensor = math_ops.floor(random_tensor)\n ret = math_ops.div(value, keep_prob) * binary_tensor\n ret.set_shape(value.get_shape())\n return ret",
"def cb_minus(event):\n delta_alpha = pm_rate\n # Decrease Alpha \n sAlpha0.set_val( np.clip(sAlpha0.val - delta_alpha, alpha_min[0], alpha_max[0]) )\n sAlpha1.set_val( np.clip(sAlpha1.val - delta_alpha, alpha_min[1], alpha_max[1]) )\n sAlpha2.set_val( np.clip(sAlpha2.val - delta_alpha, alpha_min[2], alpha_max[2]) )\n print(\"---\")",
"def velocity(df0, df1):\n velocity = df1 - df0\n return velocity",
"def test_negative_input(self):\n negative_data_down = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=-0.1\n )\n negative_uv_down = self.cube_uv_down.copy(data=negative_data_down)\n msg = (\n \"The radiation flux in UV downward contains data \"\n \"that is negative or NaN. Data should be >= 0.\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n calculate_uv_index(negative_uv_down)",
"def difference_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:\n return vectorops.mul(vectorops.sub(a,b),1.0/dt)",
"def compute_a_delta(xvf):\n diffs=xvf-xvf.shift(1)\n dt=xvf.iloc[1][\"t\"]-xvf.iloc[0][\"t\"]\n\n xva=pd.DataFrame({\"t\":xvf[\"t\"],\"x\":xvf[\"x\"],\"v\":xvf[\"v\"],\"a\":diffs[\"v\"]/(dt)},index=xvf.index)\n xva = xva[['t', 'x', 'v', 'a']]\n xva.index.name='#t'\n\n return xva.dropna()",
"def get_velocity(self, index: int, array: List[float]) -> float:\n velocity_sum = 0.0\n number_iterations = min(self.past_n_steps, index - 1)\n for i in range(number_iterations):\n velocity_sum += (array[index - 1 - i] - array[index - 2 - i])\n\n return velocity_sum / (self.time_step * number_iterations)",
"def dvdt(self, args: List[float]) -> float:\n v, h_nav, n_kvhh, h_kva, m_kvsi, s_ampar, _, s_nmdar, s_gabar, ca = args\n return ((-10.0*self.params.area \n * (self.leak.i(v)\n + self.nav.i(v, h=h_nav) \n + self.kvhh.i(v, n=n_kvhh)\n + self.kva.i(v, h=h_kva)\n + self.kvsi.i(v, m=m_kvsi)\n + self.cav.i(v)\n + self.kca.i(v, ca=ca)\n + self.nap.i(v)\n + self.kir.i(v))\n - (self.ampar.i(v, s=s_ampar)\n + self.nmdar.i(v, s=s_nmdar)\n + self.gabar.i(v, s=s_gabar))) \n / (10.0*self.params.cm*self.params.area))",
"def td0_error_from_episode(batch_episode, all_Vs, last_Vs, gamma):\n assert isinstance(batch_episode, BatchEpisode)\n\n Vs, last_Vs = V_to_numpy(all_Vs, last_Vs)\n # Mask out network output in the batch where its environment already terminated\n # this is episode specific, no problem with rolling segment\n Vs = Vs*batch_episode.numpy_validity_masks\n \n Vs = np.concatenate([Vs, np.zeros([batch_episode.N, 1])], axis=-1)\n Vs[range(batch_episode.N), batch_episode.Ts] = last_Vs\n \n out = batch_episode.numpy_rewards + gamma*Vs[:, 1:]*batch_episode.numpy_masks\n Vs[range(batch_episode.N), batch_episode.Ts] = 0.0 # clean-up last Vs, for correct subtraction\n out = out - Vs[:, :-1]\n \n return out"
]
| [
"0.60475296",
"0.5323616",
"0.5242114",
"0.5225883",
"0.5147038",
"0.5052979",
"0.5022212",
"0.50195706",
"0.50011486",
"0.4997545",
"0.4979149",
"0.49542353",
"0.49397612",
"0.49350744",
"0.49276263",
"0.49276263",
"0.4927134",
"0.49154943",
"0.4914897",
"0.49054533",
"0.4879688",
"0.4875485",
"0.48753914",
"0.48703086",
"0.48618287",
"0.48574105",
"0.48488444",
"0.4845375",
"0.48422933",
"0.483128"
]
| 0.7607646 | 0 |
Computes capacitance from the time constant tau and membrane resistance Rm. | def capacitance(tau, Rm):
return tau/Rm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vcapcharge(t, Vs, R, C):\n if t < 0:\n raise ValueError(\"Time must be greater than or equal to zero.\")\n if R * C == 0:\n raise ValueError(\"Resistance and Capacitance must be non-zero.\")\n Vc = Vs * (1 - _np.exp(-t / (R * C)))\n return Vc",
"def TCMB(rs):\n\n return 0.235e-3 * rs",
"def tau(self, R, m, z):\n result = self.ne2d(R, m, z)\n \n # multiply by Thompson cross section (physical)\n sigma_T = 6.6524e-29 # Thomson cross section in m^2\n mpc = 3.08567758e16*1.e6 # 1Mpc in m\n sigma_T *= (self.U.bg.h/mpc)**2 # in (Mpc/h)^2\n \n result *= sigma_T # dimensionless\n return result",
"def calc_recharge_time(self, upper_battery_capacity = 4):\n # coasting_velocity = self.coast_speed(lap_length, angle) # KWh, point when to swap back to battery power\n time = ((upper_battery_capacity - self.current_capacity) / \n (self.recharge_rate))\n return time",
"def get_multipath_constants(time0,time1,r_param,covariance_param):\n r = r_param.integral(time0,time1)\n # variance\n vars = covariance_param.diag_integral(time0,time1)\n # risk neutral movement position\n mu = r - 0.5*vars\n # discount to be applied due to time-value of money\n discount = np.exp(-r)\n return r,vars,mu,discount",
"def efftau_madau(rwl, z):\n\n from numpy import array, where, exp\n\n la = 1215.67 ## Lyman alpha. Angstroms. \n lb = 1026. ## Lyman beta. Angstroms. \n lg = 973.\n ld = 950.\n le = 938.\n ll = 912. ## Lyman limit.\n\n ## Redshifted from restframe to (definitely) observed. \n wl = rwl*(1. + z)\n \n n = len(wl)\n c = array([3.6e-3, 1.7e-3, 1.2e-3, 9.3e-4])\n l = array([ la, lb, lg, ld])\n\n tau = np.zeros_like(wl)\n xe = 1. + z\n\n ## Lyman series\n for i in range(len(l) ):\n indices = where(wl <= l[i]*xe) ## Note: no lower wavelength limit is correct as 'broadcasted' corrections. \n tau[indices] += c[i]*(wl[indices]/l[i])**3.46\n\n ## Photoelectric absorption\n xc = wl/ll\n xc3 = xc**3\n \n tau = where(wl <= ll*xe, tau + 0.25*xc3*(xe**.46 - xc**0.46) \\\n + 9.4*xc**1.5*(xe**0.18 - xc**0.18) \\\n - 0.7*xc3*(xc**(-1.32) - xe**(-1.32)) \\\n - 0.023*(xe**1.68-xc**1.68), tau)\n \n '''\n min_tau = tau.min()\n index = np.where(tau == min_tau)\n tau[:index] = min_tau \n \n tau = where(tau < 0.0, 0.0, tau)\n '''\n\n return where(tau > 700., 0., exp(-tau))",
"def compute_optimal_quantities(c, tau):\n \n mcm = McCallModel(alpha=alpha_q, \n beta=beta, \n gamma=gamma, \n c=c-tau, # post tax compensation\n sigma=sigma, \n w_vec=w_vec-tau, # post tax wages\n p_vec=p_vec)\n\n w_bar, V, U = compute_reservation_wage(mcm, return_values=True)\n lmda = gamma * np.sum(p_vec[w_vec-tau > w_bar])\n return w_bar, lmda, V, U",
"def _after_res_compute(abs_data):\n broad_const_limit = akg.lang.ascend.broadcast(akg.tvm.const(\n CONST_LIMIT, abs_data.dtype), abs_data.shape)\n data = divide(broad_const_limit, abs_data, target=utils.CCE)\n after_res = topi.multiply(data, ITR_AFTER[LEN_AFTER - 1])\n after_res = topi.add(after_res, ITR_AFTER[LEN_AFTER - 2])\n for iter_number in ITR_AFTER[LEN_AFTER-3::-1]:\n after_res = mul(after_res, data, target=utils.CCE)\n after_res = topi.add(after_res, iter_number)\n abs_data_rsqrt = rsqrt(abs_data, target=utils.CCE)\n after_res = mul(after_res, abs_data_rsqrt, target=utils.CCE)\n return after_res",
"def F_calctimescales(i, st, dm):\n mr = st.mn*dm.mxkg_v[i]/(st.mn+dm.mxkg_v[i]) # reduced mass, kg\n # containment time (orbits within Rs)\n t1 = c_yr2s( 2.7e-2 * m.pow(dm.mxkg_v[i]/st.mn,1.5) / (dm.sigx/1.e-55) ) # s \n # therm time\n tth1 = c_yr2s( 2.5e+5 * m.pow(dm.mxkg_v[i]/st.mn,2.) * m.pow(st.mn/mr,3.) / (dm.sigx/1.e-55) ) # s\n# tth2 = m.pow(dm.mxkg_v[i]/st.mn,2.) * pF / 6. / m.sqrt(2.) / st.Temp / st.nb / dm.sigx * m.pow(st.mn/mr,3.) \n tth2 = m.pow(dm.mxkg_v[i],2.)*st.mn*pF /4./m.sqrt(2.)/(st.nb*1.e+6)/dm.sigx_m/m.pow(st.mn,3.)/st.Eth\n tth = tth2\n print \"-- Time scales: t1=%.2e , tth=%.2e, tth1=%.2e, tth2=%.2e\" % (t1,tth,tth1,tth2)\n return t1, tth",
"def captransfer(t, Vs, R, Cs, Cd):\n if t < 0:\n raise ValueError(\"Time must be greater than zero.\")\n try:\n tau = (R * Cs * Cd) / (Cs + Cd)\n rvolt = Vs * _np.exp(-t / tau)\n except ZeroDivisionError:\n raise ZeroDivisionError(\"Sum of Source and Destination Capacitance must be non-zero.\")\n vfinal = Vs * Cs / (Cs + Cd)\n return rvolt, vfinal",
"def vcapdischarge(t, Vs, R, C):\n if t < 0:\n raise ValueError(\"Time must be greater than or equal to zero.\")\n if R * C == 0:\n raise ValueError(\"Resistance and Capacitance must be non-zero.\")\n Vc = Vs * (_np.exp(-t / (R * C)))\n return Vc",
"def calculate_part(self):\r\n\r\n from math import exp\r\n\r\n self.hazard_rate_model = {}\r\n\r\n if self.hazard_rate_type == 1:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ'\r\n elif self.hazard_rate_type == 2:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ * piE * piCV'\r\n\r\n # Base hazard rate.\r\n _stress = (self.operating_voltage + self.acvapplied) / \\\r\n self.rated_voltage\r\n try:\r\n self.hazard_rate_model['lambdab'] = \\\r\n 0.00069 * ((_stress / 0.4)**5 + 1) * \\\r\n exp(2.5 * ((self.temperature_active + 273) /\r\n self.reference_temperature)**18)\r\n except(OverflowError, ZeroDivisionError):\r\n # TODO: Handle overflow error.\r\n return True\r\n\r\n # Capacitance correction factor.\r\n self.piCV = 1.2 * (self.capacitance * 1000000.0)**0.092\r\n self.hazard_rate_model['piCV'] = self.piCV\r\n\r\n return Capacitor.calculate_part(self)",
"def calculate_part(self):\r\n\r\n from math import exp\r\n\r\n self.hazard_rate_model = {}\r\n\r\n if self.hazard_rate_type == 1:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ'\r\n elif self.hazard_rate_type == 2:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ * piE * piCV'\r\n\r\n # Base hazard rate.\r\n _stress = (self.operating_voltage + self.acvapplied) / \\\r\n self.rated_voltage\r\n try:\r\n self.hazard_rate_model['lambdab'] = \\\r\n 0.00115 * ((_stress / 0.4)**5 + 1) * \\\r\n exp(2.5 * ((self.temperature_active + 273) /\r\n self.reference_temperature)**18)\r\n except(OverflowError, ZeroDivisionError):\r\n # TODO: Handle overflow error.\r\n return True\r\n\r\n # Capacitance correction factor.\r\n self.piCV = 1.4 * (self.capacitance * 1000000.0)**0.12\r\n self.hazard_rate_model['piCV'] = self.piCV\r\n\r\n return Capacitor.calculate_part(self)",
"def changetau(self, tau):\n if tau == self.tau:\n return self\n elif tau < self.tau:\n return AsymptoticTimeInvariant(self.v[self.tau - tau: tau + self.tau - 1])\n else:\n v = np.zeros(2*tau-1)\n v[tau - self.tau: tau + self.tau - 1] = self.v\n return AsymptoticTimeInvariant(v)",
"def cost_mabr(blk):\n t0 = blk.flowsheet().time.first()\n\n # Get parameter dict from database\n parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters(\n blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype\n )\n\n # Get costing parameter sub-block for this technology\n A, B = blk.unit_model._get_tech_parameters(\n blk,\n parameter_dict,\n blk.unit_model.config.process_subtype,\n [\"reactor_cost\", \"blower_cost\"],\n )\n\n # Add cost variable and constraint\n blk.capital_cost = pyo.Var(\n initialize=1,\n units=blk.config.flowsheet_costing_block.base_currency,\n bounds=(0, None),\n doc=\"Capital cost of unit operation\",\n )\n\n DCC_reactor = pyo.units.convert(\n blk.unit_model.properties_treated[t0].flow_mass_comp[\"ammonium_as_nitrogen\"]\n / blk.unit_model.nitrogen_removal_rate\n * A,\n to_units=blk.config.flowsheet_costing_block.base_currency,\n )\n\n DCC_blower = pyo.units.convert(\n blk.unit_model.reactor_area * blk.unit_model.air_flow_rate[t0] * B,\n to_units=blk.config.flowsheet_costing_block.base_currency,\n )\n\n expr = DCC_reactor + DCC_blower\n\n blk.unit_model._add_cost_factor(\n blk, parameter_dict[\"capital_cost\"][\"cost_factor\"]\n )\n\n blk.capital_cost_constraint = pyo.Constraint(\n expr=blk.capital_cost == blk.cost_factor * expr\n )\n\n # Register flows\n blk.config.flowsheet_costing_block.cost_flow(\n blk.unit_model.electricity[t0], \"electricity\"\n )",
"def calcTcr(M, rh, G):\n return rh**1.5/np.sqrt(G*M)",
"def tau_RC(run = 0):\n\n\n tau = {}\n \n tau[0] = { 0 : 0.000528448223727 ,\n 1 : 0.000534286124735 ,\n 2 : 0.000531127097197 ,\n 3 : 0.000528590936727 ,\n 4 : 0.000523307118588 ,\n 5 : 0.000531484578773 ,\n 6 : 0.000522540813269 ,\n 7 : 0.000529130885772 ,\n 8 : 0.000527471895065 ,\n 9 : 0.000527267947332 ,\n 10 : 0.000529931735586 ,\n 11 : 0.000527613470536 }\n \n \n tau[8088] = { 0 : 5.31332e-04 ,\n 1 : 5.36915e-04 ,\n 2 : 5.32637e-04 ,\n 3 : 5.30414e-04 ,\n 4 : 5.32376e-04 ,\n 5 : 5.42936e-04 ,\n 6 : 5.30456e-04 ,\n 7 : 5.39265e-04 ,\n 8 : 5.29270e-04 ,\n 9 : 0.000529219 ,\n 10 : 5.29847e-04 ,\n 11 : 5.28952e-04 }\n \n tau[8089] = { 0 : 5.31416e-04 ,\n 1 : 5.37008e-04 ,\n 2 : 5.33042e-04 ,\n 3 : 0.000530479 ,\n 4 : 5.32518e-04 ,\n 5 : 5.42983e-04 ,\n 6 : 5.30310e-04 ,\n 7 : 5.39168e-04 ,\n 8 : 0.000529401 ,\n 9 : 0.000529229 ,\n 10 : 0.000530108 , \n 11 : 0.000529216 }\n \n \n tau[8678] = { 0 : 5.31343e-04 ,\n 1 : 5.36915e-04 ,\n 2 : 5.32922e-04 ,\n 3 : 0.000530248 ,\n 4 : 0.000532365 ,\n 5 : 0.000542388 ,\n 6 : 0.000530768 ,\n 7 : 0.000539662 ,\n 8 : 0.000529246 ,\n 9 : 0.00052921 ,\n 10 : 0.000529816 ,\n 11 : 0.000529137 }\n\n if run not in tau:\n print (' tau_RC, requested run ', run , ' not available, default used')\n run = 0 \n \n return tau[run]",
"def calculate_part(self):\r\n\r\n from math import exp\r\n\r\n self.hazard_rate_model = {}\r\n\r\n if self.hazard_rate_type == 1:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ'\r\n\r\n self._lambdab_count = self._lambdab_count[self.specification - 1]\r\n\r\n elif self.hazard_rate_type == 2:\r\n self.hazard_rate_model['equation'] = 'lambdab * piQ * piE * piCV'\r\n\r\n # Base hazard rate.\r\n _stress = (self.operating_voltage + self.acvapplied) / \\\r\n self.rated_voltage\r\n try:\r\n self.hazard_rate_model['lambdab'] = \\\r\n 0.00086 * ((_stress / 0.4)**5 + 1) * \\\r\n exp(2.5 * ((self.temperature_active + 273) /\r\n self.reference_temperature)**18)\r\n except(OverflowError, ZeroDivisionError):\r\n # TODO: Handle overflow and zero division errors.\r\n return True\r\n\r\n # Capacitance correction factor.\r\n self.piCV = 1.2 * (self.capacitance * 1000000.0)**0.095\r\n self.hazard_rate_model['piCV'] = self.piCV\r\n\r\n return Capacitor.calculate_part(self)",
"def test_tau(self):\n tau_values = [5.0, 15.0, 25.0]\n \n tmax = 50.0\n dt = 0.1\n N = 3\n\n ini_rate = 80.0\n\n nsteps = int_r(tmax/dt)\n\n # reproducible arbitrariness\n np.random.seed(34342)\n\n tutor_out_trace = ini_rate + 20.0*np.random.randn(nsteps, N)\n # have some correlation between reward trace and tutor.out trace\n rho = 0.2\n reward_trace = (rho*(tutor_out_trace[:, 0] - ini_rate)/20.0 +\n (1-rho)*np.random.randn(nsteps))\n \n scaling = None\n\n for crt_tau in tau_values:\n tutor = SimpleNeurons(N, out_fct=lambda i: tutor_out_trace[i])\n reward = MockReward(lambda t: reward_trace[int_r(t/dt)])\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=crt_tau,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim.run(tmax)\n\n drates = tutor_rule.rates - ini_rate\n\n # this should be a convolution of tutor_out_trace*reward_trace with an\n # exponential with time constant crt_tau\n # that means that tau*(d/dt)drates + drates must be proportional to it\n expected_rhs = (tutor_out_trace - ini_rate)*np.reshape(reward_trace,\n (-1, 1))\n\n lhs = np.vstack((float(crt_tau)*np.reshape(drates[0, :], (1, -1))/dt,\n (crt_tau/dt)*np.diff(drates, axis=0) + drates[:-1, :]))\n \n # allow scaling to be arbitrary, but *independent of tau*\n if scaling is None:\n mask = (expected_rhs != 0)\n scaling = np.mean(lhs[mask]/expected_rhs[mask])\n\n # scaling shouldn't be negative or zero!\n self.assertGreater(scaling, 1e-9)\n\n mag = np.mean(np.abs(expected_rhs))\n\n self.assertLess(np.max(np.abs(lhs - scaling*expected_rhs)), 1e-6*mag)",
"def conductivity(self, T):\n m = self.mass\n mu = self.viscosity(T)\n K = (15/4) * kB * mu / m\n return K",
"def viscous_timescale(r):\n t_viscous = (2*np.pi)*r**(3.0/2.0) / ((H/R_out)**(2.0) * alpha)\n return t_viscous",
"def _ect_qrs_tconst(pattern, qrs):\n beats = pattern.evidence[o.QRS]\n idx = beats.index(qrs)\n tnet = pattern.last_tnet\n hyp = pattern.hypothesis\n if idx > 0:\n prev = beats[idx - 1]\n # After the second couplet, every ectopic beat introduces a new temporal\n # network in the pattern to make it easier the minimization.\n if idx > 3:\n tnet.remove_constraint(hyp.end, prev.time)\n # We create a new temporal network for the cyclic observations\n tnet = ConstraintNetwork()\n pattern.temporal_constraints.append(tnet)\n # The duration of each couplet should not have high instantaneous\n # variations.\n refrr = beats[idx - 2].time.end - beats[idx - 3].time.start\n tnet.add_constraint(prev.time, qrs.time, Iv(refrr - C.RR_MAX_DIFF, refrr + C.RR_MAX_DIFF))\n # We guide the morphology search to be similar to the previous\n # ectopic QRS complex.\n qrs.shape = beats[idx - 2].shape\n # The reference RR varies from an upper limit to the last measurement,\n # through the contextual previous rhythm.\n refrr = C.BRADY_RR.end\n stdrr = 0.1 * refrr\n if pattern.evidence[o.Cardiac_Rhythm] and idx == 1:\n mrr, srr = pattern.evidence[o.Cardiac_Rhythm][0].meas.rr\n if mrr > 0:\n refrr, stdrr = mrr, srr\n elif idx > 1:\n refrr, stdrr = hyp.meas.rr\n # Ectopic beats must be advanced wrt the reference RR\n tnet.add_constraint(prev.time, qrs.time, Iv(C.TACHY_RR.start, max(C.TACHY_RR.start, refrr - stdrr)))\n # Beats cannot overlap\n tnet.add_constraint(prev.end, qrs.start, Iv(C.TQ_INTERVAL_MIN, np.Inf))\n BASIC_TCONST(pattern, qrs)\n tnet.add_constraint(qrs.start, qrs.end, C.QRS_DUR)\n tnet.set_before(qrs.time, hyp.end)\n # Constraints with the precedent T Wave\n _qrs_after_twave(pattern, qrs)",
"def tauBeam(self, R, m, z, fwhm):\n # comoving beam size at redshift z\n fwhm *= np.pi / (180.*60.) # radians\n sigmaBeam = fwhm / np.sqrt(8.*np.log(2.)) # radians\n sigmaBeam *= self.U.bg.comoving_distance(z) # in comoving Mpc/h\n # beam, function of comoving projected radius\n fbeam = lambda R: np.exp(-0.5*R**2/sigmaBeam**2) / (2.*np.pi*sigmaBeam**2)\n \n # do the smoothing\n f = lambda r: r * self.tau(r, m, z) * np.exp(-0.5*(r**2+R**2)/sigmaBeam**2) / sigmaBeam**2 * i0(r*R/sigmaBeam**2)\n result = integrate.quad(f, 0., np.inf, epsabs=0., epsrel=1.e-2)[0]\n print \"convolved with beam\"\n return result",
"def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c",
"def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}",
"def omega_c(B, m=m_star, q=q_e):\n return q * B / m # in 1/s",
"def rcm(t):\n x0 = 0 # initial position\n return np.array([\n 0,\n 0,\n x0 + self.v0x * t + 1 / 2 * self.ax * t ** 2\n ])",
"def cont_time_params(kappa, b, sigma, delta):\n alpha = np.exp(- kappa * delta)\n beta = b * (1 - np.exp(- kappa * delta))\n s2 = sigma * sigma * (1 - np.exp(- 2 * kappa * delta)) / (2 * kappa)\n s = np.sqrt(s2)\n return beta, alpha, s",
"def _reward(self):\n # Clock reward -----------------------------------------------------------------\n A, B = self.get_von_mises(0.0, self.ratio, self.kappa)\n phi = self.phase / self.cycle_len\n #print('Cycles completed = ', self.cycle_complete)\n\n #print('A, B = ', (A,B))\n\n phi_FL = self.wrap(phi + self.theta_FL)\n phi_FR = self.wrap(phi + self.theta_FR)\n phi_RL = self.wrap(phi + self.theta_RL)\n phi_RR = self.wrap(phi + self.theta_RR)\n\n #print(phi_FL)\n #print(phi_FR)\n #print(phi_RL)\n #print(phi_RR)\n\n FL_swing = self.in_swing(A, B, phi_FL)\n FR_swing = self.in_swing(A, B, phi_FR)\n RL_swing = self.in_swing(A, B, phi_RL)\n RR_swing = self.in_swing(A, B, phi_RR)\n\n #print('Time since reset = ', self.rex.GetTimeSinceReset())\n #print('phase phi = ', phi)\n #print('FL swing = ', FL_swing)\n #print('FR swing = ', FR_swing)\n #print('RL swing = ', RL_swing)\n #print('RR swing = ', RR_swing)\n\n if FL_swing:\n c_swing_frc_FL = 1\n c_swing_spd_FL = 0\n else:\n c_swing_frc_FL = 0\n c_swing_spd_FL = 1\n\n if FR_swing:\n c_swing_frc_FR = 1\n c_swing_spd_FR = 0\n else:\n c_swing_frc_FR = 0\n c_swing_spd_FR = 1\n\n if RL_swing:\n c_swing_frc_RL = 1\n c_swing_spd_RL = 0\n else:\n c_swing_frc_RL = 0\n c_swing_spd_RL = 1\n\n if RR_swing:\n c_swing_frc_RR = 1\n c_swing_spd_RR = 0\n else:\n c_swing_frc_RR = 0\n c_swing_spd_RR = 1\n\n FL_foot_force, FR_foot_force, RL_foot_force, RR_foot_force = self.get_contact_forces()\n FL_vel, FR_vel, RL_vel, RR_vel = self.get_foot_velocities()\n\n FL_penalty = c_swing_frc_FL*FL_foot_force + c_swing_spd_FL*FL_vel\n FR_penalty = c_swing_frc_FR*FR_foot_force + c_swing_spd_FR*FR_vel\n RL_penalty = c_swing_frc_RL*RL_foot_force + c_swing_spd_RL*RL_vel\n RR_penalty = c_swing_frc_RR*RR_foot_force + c_swing_spd_RR*RR_vel\n\n foot_penalties = FL_penalty + FR_penalty + RL_penalty + RR_penalty\n \n # Deviation Penalties ----------------------------------------------------------\n # Base height\n base_height = self.rex.GetBasePosition()[-1]\n height_err = np.abs(base_height - self.height_des)\n \n if height_err < 0.02:\n height_err = 0\n\n # Speed \n vx, vy, _ = p.getBaseVelocity(bodyUniqueId=self.rex.quadruped)[0]\n vx = -vx # in rex, forward is the negative x direction\n x_vel_err = 4*np.abs(vx - self.speed) # higher emphasis on x velocity error\n y_vel_err = np.abs(vy - self.side_speed)\n\n # Orientation\n orient_curr = self.rex.GetBaseOrientation()\n orient_des = [0, 0, 0, 1] # not exact, but shouldn't be too far from this\n orient_err = 6 * (1 - np.inner(orient_curr, orient_des)**2 )\n\n shoulder_orient_des = [0, 0, 0, 1]\n FL_sh, FR_sh, RL_sh, RR_sh = self.get_shoulder_orientation()\n\n # quaternion similarity: 1 - <q1, q2>**2 == 0 when 100% similar\n # good when error < 0.01 (individually)\n # put HUGE penalty on this\n shoulder_err = 20 * ((1 - np.inner(shoulder_orient_des, FL_sh)**2) + \n (1 - np.inner(shoulder_orient_des, FR_sh)**2) +\n (1 - np.inner(shoulder_orient_des, RL_sh)**2) + \n (1 - np.inner(shoulder_orient_des, RR_sh)**2))\n\n # Energy Penalties --------------------------------------------------------------\n energy_penalty = np.abs(np.dot(self.rex.GetMotorTorques(),\n self.rex.GetMotorVelocities())) * self._time_step\n\n # Acceleration\n a_trans, a_rot = self.get_base_accelerations()\n accel_penalty = 0.15 * np.abs(a_trans.sum() + a_rot.sum())\n\n # need to encourage exploration: current issue --> Rex is stuck at origin\n # because positive rewards all the time\n # need lim error --> 0, reward > 0 \n\n beta = -0.75\n\n reward = beta + \\\n 0.200 * 
np.exp(-orient_err - shoulder_err) + \\\n 0.275 * np.exp(-foot_penalties) + \\\n 0.075 * np.exp(-height_err) + \\\n 0.250 * np.exp(-x_vel_err) + \\\n 0.100 * np.exp(-y_vel_err) + \\\n 0.075 * np.exp(-accel_penalty) + \\\n 0.025 * np.exp(-energy_penalty)\n\n\n return reward",
"def get_taum(cm, rm):\n return cm * rm"
]
| [
"0.60113776",
"0.5732019",
"0.5719932",
"0.5657031",
"0.5627349",
"0.5524498",
"0.540581",
"0.53926945",
"0.53923887",
"0.5390512",
"0.5366869",
"0.5360185",
"0.5343438",
"0.5330854",
"0.5330817",
"0.5262412",
"0.52592707",
"0.52560985",
"0.52401096",
"0.5227372",
"0.5217925",
"0.5192165",
"0.5171202",
"0.51707935",
"0.5157069",
"0.5132043",
"0.51225895",
"0.5113053",
"0.51039064",
"0.510344"
]
| 0.79187113 | 0 |
Computes the time constant (usually called tau) using a fit to an exponential function. | def time_constant(t, V):
a_init = 1
b_init = -100
c_init = V[-1]
popt, pcov = curve_fit(func_exp, t, V, p0=[a_init, b_init, c_init],
bounds=(-np.inf, np.inf))
Vpred = np.zeros(len(t))
for i in range(len(t)):
Vpred[i] = func_exp(t[i], popt[0], popt[1], popt[2])
return -1/popt[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exp(t,tau):\n return np.exp(-t/tau)",
"def exp_func(x, initial, lifetime):\n return initial * np.exp(-x/lifetime)",
"def time_constant_abf(abf, epoch_start):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n p1 = abf.sweepEpochs.p1s[epoch_start + 1]\n\n t = abf.sweepX[p0:p1] - abf.sweepX[p0]\n V = abf.sweepY[p0:p1]\n\n return time_constant(t, V)",
"def exp_growth(t, log_b_0, log_tau):\n return np.exp(log_b_0) * np.exp(t/np.exp(log_tau))",
"def exp_decay(timeList, voltageList, ySS=160):\n\n parameters, _ = curve_fit(func, timeList, voltageList, p0=(1, 9, ySS), maxfev = 20000)\n # voltageFit = func(timeList, *parameters)\n return parameters",
"def exp_fit(timeList, voltageList, ySS):\n\n bList = [log(max(y-ySS,1e-6)) for y in voltageList]\n b = np.matrix(bList).T\n rows = [ [1,t] for t in timeList]\n A = np.matrix(rows)\n #w = (pinv(A)*b)\n (w,residuals,rank,sing_vals) = np.linalg.lstsq(A,b)\n tau = -1.0/w[1,0]\n amplitude = np.exp(w[0,0])\n return (amplitude,tau)",
"def exponentialLearningRate(base):\n def function(t):\n return base ** (t-1)\n return function",
"def model_growth_rate(t, a_0, omega):\n a = a_0 * np.exp(omega * t)\n return a",
"def fit_ar1_t(t, y):\n lntau0 = np.log(np.mean(np.diff(t)))\n sigma = np.std(y)\n yr = y - np.mean(y)\n nlnp = lambda lntau, sigma: -1.0 * ar1_t_like(t, yr, np.exp(lntau), sigma)\n res = minimize(nlnp, lntau0, args=(sigma,), method='Nelder-Mead')\n tau = np.exp(res.x.squeeze())\n return tau, sigma",
"def fit_exp_decay(x, y):\n def _func(z, z0):\n return np.exp(-z/z0)\n popt, pcov = curve_fit(_func, x, y)\n return popt[0]",
"def estimate_tau(t, y):\n dt = np.min(np.diff(t))\n tt = np.arange(t.min(), t.max(), dt)\n yy = np.interp(tt, t, y, 1)\n f = acor_fn(yy)\n fs = gaussian_filter(f, 50)\n w = dt * np.arange(len(f))\n m = np.arange(1, len(fs)-1)[(fs[1:-1] > fs[2:]) & (fs[1:-1] > fs[:-2])]\n if len(m):\n return w[m[np.argmax(fs[m])]]\n return w[-1]",
"def estimate_tau(t, y):\n dt = np.min(np.diff(t))\n tt = np.arange(t.min(), t.max(), dt)\n yy = np.interp(tt, t, y, 1)\n f = acor_fn(yy)\n fs = gaussian_filter(f, 50)\n w = dt * np.arange(len(f))\n m = np.arange(1, len(fs)-1)[(fs[1:-1] > fs[2:]) & (fs[1:-1] > fs[:-2])]\n if len(m):\n return w[m[np.argmax(fs[m])]]\n return w[-1]",
"def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)",
"def time_based(t, eta_init, last_eta, d = 0.01):\n return last_eta/(1+d*t)",
"def exponential(t, eta_init, last_eta, d = 0.01):\n return eta_init*np.exp(-d*t)",
"def time(self, t, s=1.0, complete=True):\n w = self.w0\n\n x = t / s\n\n output = np.exp(1j * w * x)\n\n if complete:\n output -= np.exp(-0.5 * (w ** 2))\n\n output *= np.exp(-0.5 * (x ** 2)) * np.pi ** (-0.25)\n\n return output",
"def explorentzian(mu, wid, timeconstant, x): \n g = lorentzian( mu, wid, x )\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb",
"def decay(time_, max_time, coeff):\n threshold = max_time - time_\n if threshold < 0:\n threshold = 0\n return 1 + threshold * coeff / max_time",
"def delayed_exp_sfh(time_bins, tau, T0, factor=1):\n sfh = [(t-T0)*np.exp(-(t - T0)/tau) if t > T0 else 0 for t in time_bins]\n return np.array(sfh)*factor",
"def exp_smooth(trace, time_constant = 2.6):\n alpha_test = (1 - np.exp(-1/ time_constant)) # exponential decay time constant from Migault et al., 2018, biorxiv paper supp. methods (vestibular)\n k = lambda tau: alpha_test*(1-alpha_test)**tau\n k_len = len(trace)//10\n kernel = np.hstack((np.zeros(k_len), k(np.arange(k_len))))\n conv_trace = np.convolve(trace, kernel, mode='same') / np.sum(kernel)\n return conv_trace",
"def tau_model(y, t, io, tau, p_nom):\n dydt = (p_nom * io - y) / tau\n return dydt",
"def calc_K(tau, delta_t, var_n):\n var_f = 1. - var_n\n rval = var_f * np.exp(-(delta_t)**2 / (2. * tau**2))\n if delta_t == 0:\n rval += var_n\n return rval",
"def next(self, dt):\n self.x = self.x + \\\n (self.rate-0.5*self.vola*self.vola)*dt + \\\n sqrt(dt)*self.vola*np.random.normal()\n return exp(self.x)",
"def exp_schedule(k=20, lam=0.005, limit=100):\n return lambda t: (k * math.exp(-lam * t) if t < limit else 0)",
"def runge_kutta(func, x0, time):\n dt = time[1] - time[0]\n x = np.array(x0)\n val = []\n\n for t in time:\n val.append(x)\n\n k1 = np.array([f(t, x) for f in func])\n k2 = np.array([f(t+dt/2, x+dt*k1/2) for f in func])\n k3 = np.array([f(t+dt/2, x+dt*k2/2) for f in func])\n k4 = np.array([f(t+dt, x+dt*k3) for f in func])\n\n x = x + dt*(k1 + 2*k2 + 2*k3 + k4)/6\n\n return val",
"def expgaussian(mu, wid, timeconstant, x): \n # Gaussian signal broadened by an exponetial signal\n g = gaussian(mu, wid, x)\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb",
"def test_call_function_ExponentialDecay():\n a = 0.4 # Decay constant\n u0 = 3.2 # Function value u(t) for some known time t\n der_u = -1.28 # Analytic value for the derivative of u at the known time t\n eps = 10**(-7)# Since we are dealing with floating point numbers,\n # we need a limit when checking that a difference is zero.\n decay_model = ExponentialDecay(a)\n assert(abs(decay_model(0, u0)-der_u) < eps)",
"def expdiff(x, a=a, n=5):\n return a**n * np.exp(a*x)",
"def xfunc(x, ts, eta):\n return x**2 - (1.0 - np.exp(-2*ts*x)) - eta",
"def singleexp(params, t):\n # 2011-05-18 20:58 IJMC: Created\n # 2011-06-03 11:49 IJMC: Normalized to unity.\n\n if len(params)==2:\n return 1. - params[0] * exp(-t/params[1]) \n else:\n return params[2] * (1. - params[0] * exp(-t/params[1]) )"
]
| [
"0.7259245",
"0.6025949",
"0.59778154",
"0.59671766",
"0.59623075",
"0.5949129",
"0.5861898",
"0.5844591",
"0.58408463",
"0.58236575",
"0.58123475",
"0.58123475",
"0.57859",
"0.5768751",
"0.5766582",
"0.57458115",
"0.57265747",
"0.56759924",
"0.56325513",
"0.5629962",
"0.5628227",
"0.55960673",
"0.55725986",
"0.55286056",
"0.552663",
"0.5525436",
"0.5476592",
"0.5413668",
"0.5397784",
"0.53929144"
]
| 0.69503814 | 1 |
Computes time constant using abf object and epoch index. | def time_constant_abf(abf, epoch_start):
p0 = abf.sweepEpochs.p1s[epoch_start]
p1 = abf.sweepEpochs.p1s[epoch_start + 1]
t = abf.sweepX[p0:p1] - abf.sweepX[p0]
V = abf.sweepY[p0:p1]
return time_constant(t, V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def epoch():\n\treturn time.time()",
"def scheduler(epoch):\n return alpha / (1 + decay_rate * epoch)",
"def schedule(epoch):\n return alpha / (1 + (decay_rate * epoch))",
"def to_timestamp(index, base_dt, fps=4):\n ts = time.mktime(base_dt.timetuple())\n return ts + index * (1.0 / fps)",
"def epochnow():\n return time.time()",
"def epoch_time_now():\n return int(time.time())",
"def time_based(t, eta_init, last_eta, d = 0.01):\n return last_eta/(1+d*t)",
"def get_mc_livetime(n_gen, γ=-1.6, f_crab=3.39*10**-11, r_max=35000, e_min=0.01, e_max=30):\n return n_gen*(γ+1) / (f_crab * r_max**2 * np.pi * (e_max**(γ+1) - e_min**(γ+1)))",
"def index_in_epoch(self):\n return self._index_in_epoch",
"def get_frame_time(self, f):\n return f * self.get_frame_duration()",
"def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]",
"def _get_delta_time(r0):\n\n s1 = random() # To pick time\n epsilon = 0.001 # To avoid division by zero\n lam = (1 / (r0 + epsilon))\n return lam * pow(e, -lam * s1)",
"def epoch():\n return datetime2epoch(datetime.now())",
"def EpochNano():\n return int(time.time() * 1000000000)",
"def calculate_time(ix, xi, wf):\n wf_len = len(wf)\n x_time = np.arange(ix, (ix + wf_len * xi), xi)\n return x_time",
"def elapsed_micros(start: int, /) -> int:",
"def lookback_time(self, z):\n\n # Calculate the integrand.\n def f(z1):\n return 1.0 / (self.H(z1) * (1 + z1))\n\n return _intf_0_z(f, z) / self._unit_time",
"def __call__(self, epoch):\n decay = (1 - (epoch / float(self.maxEpochs))) ** self.power\n alpha = self.initAlpha * decay\n \n # return alpha\n return float(alpha)",
"def _get_current_epoch_time() -> float:\n return time.time()",
"def spike_latency_abf(abf, epochstart):\n p0 = abf.sweepEpochs.p1s[epochstart]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n I = abf.sweepC[p0:-1]\n return spike_latency(t, I, V)",
"def __init__(self, time_constant: float, sampling_time: float):\n self.alpha = sampling_time / (time_constant + sampling_time)\n self.state = None",
"def update_epoch(self):\n raise NotImplementedError",
"def linear_decay(epoch: int, total_num_updates: int) -> float:\n return 1 - (epoch / float(total_num_updates))",
"def epoch_time(when):\n if not when: return 0\n epoch = datetime.utcfromtimestamp(0)\n delta = when - epoch\n return int(delta.total_seconds())",
"def lr_schedule(epoch):\n lr = 1e-3\n return lr",
"def calculate_timestamp(self):\n return ((self.calculate_record_number() - 1) * SAMPLE_RATE) + \\\n self.time_on",
"def mscb(t):\n\treturn int(np.log2(t ^ (t + 1)))",
"def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)",
"def scheduler(epoch_idx, lr):\n new_lr = lr\n if (epoch_idx == 60 or epoch_idx == 120 or epoch_idx == 160\n or epoch_idx == 260 or epoch_idx == 320 or epoch_idx == 360):\n new_lr *= 0.2\n \"\"\"\n if epoch_idx == 200:\n new_lr = 0.1\n \"\"\"\n return new_lr",
"def evaluate(self, time) -> float:\n ..."
]
| [
"0.60388803",
"0.5923182",
"0.5861852",
"0.5851152",
"0.5816836",
"0.5719287",
"0.56861186",
"0.56355155",
"0.5511518",
"0.5490061",
"0.54632735",
"0.5410102",
"0.539538",
"0.53734636",
"0.53531617",
"0.53530735",
"0.53180134",
"0.5299205",
"0.5292408",
"0.52918327",
"0.5264437",
"0.5255838",
"0.52406085",
"0.5230468",
"0.5218932",
"0.52175885",
"0.5209322",
"0.51970816",
"0.5174498",
"0.51702595"
]
| 0.7036398 | 0 |
Computes input membrane resistance Rm using current trace I and voltage trace V. | def input_membrane_resistance(I, V):
V1 = V[0]
V2 = V[-1]
I1 = I[0]
I2 = I[-1]
dV = V2 - V1
dI = I2 - I1
return dV / dI | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n badSweep = 0\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateVoltageRef(gen, indi)\n disfCount = 0\n \n vdd_sweep = np.array(results['vout_vdd']['nominal'], dtype=float) #This line changes Nones to np.nans\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep)):\n disfCount = disfCount + 1\n vdd_s = 0\n vdd_s_d = 0\n #print \"tukej!\", vdd_sweep_scale\n else:\n x = np.median(vdd_sweep)\n vdd_s = abs(x - VREF) #if x > VREF else 0\n vdd_s_d = np.max(vdd_sweep) - np.min(vdd_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n #print \"tukiii\", vdd_sweep_scale\n if (vdd_sweep_scale[-1]<20): #20V\n\tbadSweep = badSweep + 1\n \n rload_sweep = np.array(results['vout_rload']['nominal'], dtype=float)\n rload_sweep_scale = np.array(results['vout_rload_scale']['nominal'], dtype=float)\n # if measurement is empty\n if np.any(np.isnan(rload_sweep)):\n disfCount = disfCount + 1\n rload_s = 0\n rload_s_d = 0\n else:\n x = np.median(rload_sweep)\n rload_s = abs(x - VREF) #if x > VREF else 0\n rload_s_d = np.max(rload_sweep) - np.min(rload_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n if (rload_sweep_scale[-1]<100e3): #100kOhm\n\tbadSweep = badSweep + 1\n \n temp_sweep = np.array(results['vout_temp']['nominal'], dtype=float)\n temp_sweep_scale = np.array(results['vout_temp_scale']['nominal'], dtype=float)\n # if measurement is empty OR sweep did not finish completely - check last scale value in runme2!!\n if np.any(np.isnan(temp_sweep)):\n disfCount = disfCount + 1\n temp_s = 0\n temp_s_d = 0\n else:\n x = np.median(temp_sweep)\n temp_s = abs(x - VREF) #if x > VREF else 0\n temp_s_d = np.max(temp_sweep) - np.min(temp_sweep)\n if (temp_sweep_scale[-1]<120): #120 deg celsius\n\tbadSweep = badSweep + 1\n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = (vdd_s) + (vdd_s_d) + 5*(rload_s) + 5*(rload_s_d) + (100*temp_s) + (100*temp_s_d) + (100*powe) + badSweep*100\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, temp_s, temp_s_d, powe\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 100*temp_s, 100*temp_s_d, 100*powe\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results",
"def scoreCirc_CmosVoltageReference(circuit, gen, indi, makeRedundancyInMatrix): #TODO 6.9.2016 napisi cost function ki se sklada z evaluateCmosVoltageRef\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n disfCount = 0\n \n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = 
np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = vdd_s_t1 + vdd_s_t1_d + \\\n\t vdd_s_t2 + vdd_s_t2_d + \\\n\t vdd_s_t3 + vdd_s_t3_d + \\\n\t vdd_s_r1 + vdd_s_r1_d + \\\n\t vdd_s_r2 + vdd_s_r2_d + \\\n\t vdd_s_r3 + vdd_s_r3_d + \\\n\t (100*powe)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results",
"def test_jam_axi_rms():\n np.random.seed(123)\n xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T\n\n inc = 60. # Assumed galaxy inclination\n r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk\n a = 40 # Scale length in arcsec\n vr = 2000*np.sqrt(r)/(r+a) # Assumed velocity profile\n vel = vr * np.sin(np.radians(inc))*xbin/r # Projected velocity field\n sig = 8700/(r+a) # Assumed velocity dispersion profile\n rms = np.sqrt(vel**2 + sig**2) # Vrms field in km/s\n\n surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])\n sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])\n qObs = np.full_like(sigma, 0.57)\n\n distance = 16.5 # Assume Virgo distance in Mpc (Mei et al. 2007)\n mbh = 1e8 # Black hole mass in solar masses\n beta = np.full_like(surf, 0.3)\n\n surf_lum = surf # Assume self-consistency\n sigma_lum = sigma\n qobs_lum = qObs\n surf_pot = surf\n sigma_pot = sigma\n qobs_pot = qObs\n\n sigmapsf = 0.6\n pixsize = 0.8\n goodbins = r > 10 # Arbitrarily exclude the center to illustrate how to use goodbins\n\n # The model is similar but not identical to the adopted kinematics!\n rmsModel, ml, chi2, flux = jam_axi_rms(\n surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, plot=True, rms=rms, sigmapsf=sigmapsf,\n beta=beta, pixsize=pixsize, tensor='zz', goodbins=goodbins)\n plt.pause(0.01)",
"def _compute_linear_magnitude_term(index, M):\r\n if M <= c1:\r\n # this is the second term in eq. (2a), p. 20\r\n return a2 * (M - c1)\r\n else:\r\n # this is the second term in eq. (2b), p. 20\r\n return a7 * (M - c1)",
"def _compute_linear_magnitude_term(index, M):\r\n if M <= c1:\r\n # this is the second term in eq. (2a), p. 20\r\n return a2 * (M - c1)\r\n else:\r\n # this is the second term in eq. (2b), p. 20\r\n return a7 * (M - c1)",
"def mt(P_1,V0_1,meanF_1,rho): \n psi = np.arctan2(V0_1[2],-V0_1[0])\n \n # Find swept ares\n idx_zmax = np.argmax(P_1[:,-1,2])\n idx_ymax = np.argmax(P_1[:,-1,1])\n idx_zmin = np.argmin(P_1[:,-1,2])\n \n Ad = np.linalg.norm(P_1[idx_zmax,-1,2]-P_1[idx_zmin,-1,2])*P_1[idx_ymax,-1,1]\n print P_1[idx_zmax,-1,2]\n V0 = np.linalg.norm(V0_1)\n \n Vi_1new = np.zeros_like(V0_1,dtype=float)\n\n while True:\n Vi_1 = Vi_1new\n \n Vi_1new[0] = meanF_1[0] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n Vi_1new[2] = meanF_1[2] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n \n if np.linalg.norm(Vi_1-Vi_1new) < 0.001:\n break\n\n return -Vi_1",
"def mass_flow_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]\n return residual",
"def mass_flow_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]\n return residual",
"def modelFattiRpRs(interface,vpvs=0.5):\n interface[:,8]=vpvs*(interface[:,6]-interface[:,7])",
"def inductorcharge(t, Vs, R, L):\n Vl = Vs * _np.exp(-R * t / L)\n Il = Vs / R * (1 - _np.exp(-R * t / L))\n return Vl, Il",
"def scoreCirc_CmosVoltageReference_2(circuit, gen, indi, MOEAMODE):\n \n if debug > 2:\n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi)\n #----------#\n VREF = 1.5\n #----------#\n\n #---------------------------------------------------------BigMatrix stuff, check short-circuits, matrix density, matrix identifier (obsolete) \n FullBigCircuitMatrix = copy(circuit.fullRedundancyMatrix)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #--------------------------------------------------------- \n \n score = np.array([0,0,0], dtype=\"float64\") if MOEAMODE == 1 else 0\n \n score += 2e4*np.exp(OcSc)\n results = None\n if OcSc > 1:\n score += 1e4*np.exp(OcSc)\n else:\n #----------------------------------------------------------Try to make netlist and evaluate the individual\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n #----------------------------------------------------------Start of results analysis and objectives creation\n disfCount = 0\n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n 
else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n psrr = results['psrr']['nominal']\n# if np.isnan(np.array(psrr, dtype=float)):\n# disfCount = disfCount + 1\n# psr = 0\n# else:\n# psr = 1.0/psrr #abs(90 - psrr) if psrr < 90 else 0 #tole kot objective ni ok. ker je opravljena meritev samo pri vdd=15 je to precej stala.\n\n\n #----------------------------------------------------------Score function SINGLE-OBJECTIVE\n if MOEAMODE == 0:\n score =(vdd_s_t1 + 5*vdd_s_t1_d +\n\t 2*vdd_s_t2 + 2*vdd_s_t2_d +\n\t vdd_s_t3 + 5*vdd_s_t3_d +\n\t #vdd_s_r1 + 2*vdd_s_r1_d +\n\t #vdd_s_r2 + 2*vdd_s_r2_d + \n\t #vdd_s_r3 + 2*vdd_s_r3_d + \n\t (100*powe)\n )\n if disfCount > 0:\n\tscore = 0 + np.exp(disfCount) * 1e3\n\t\n #----------------------------------------------------------Score function MULTI-OBJECTIVE\t\n else: #MOEAMODE == 1:\n oMediana = vdd_s_t1 + vdd_s_t2 + vdd_s_t3\n oPsrr = vdd_s_t1_d + vdd_s_t2_d + vdd_s_t3_d\t#DC rejection\n #oPsrr = psr\n oP = powe\n\t\t\t\t\t #add constraints\n score = (np.array([oMediana, oPsrr, oP]) \t+ (oMediana if oMediana > 4 else 0) + \n\t\t\t\t\t\t#+ (oPsrr*1000 if oPsrr > 1.0/40 else 0) +\n\t\t\t\t\t\t+ (oPsrr if oPsrr > 3 else 0) +\n\t\t\t\t\t\t+ (oP if oP > 1e-1 else 0)\n )\n if disfCount > 0:\n\tscore = (np.array([0,0,0])+np.exp(disfCount) * 1e3) + random.randint(0, 200)\n\n #-------------------------------------------------------------------\n if debug > 2: \n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n \n \n # TRIGGER STOP SIGNAL if:\n if (vdd_s_t2 <= 0.001 and \n\tpsrr >= 80 and \n\tpowe <= 1e-5):\n globalVars.DONE = 1 # End evolution, feasible solution evolved.\n \n\n return score, results",
"def vel(self,M):\n v_peri = np.sqrt((HohmannTransfer.G*M)*((2/self.r1)-(2/(self.r1+self.r2))))\n \n v_aphe = np.sqrt((HohmannTransfer.G*M)*((2/self.r2)-(2/(self.r1+self.r2))))\n \n return v_peri, v_aphe",
"def calc_output(line, react_cap=None, gen_res_high=225, gen_res_low=50):\n # unpack\n t, v, i = line\n t_diff = t[1] - t[0]\n # assert t_diff == 1e-9 # time scale should be 1ns.\n # values based on current measurment. Assuming voltage waveform is aligned.\n\n # validation on the real maxima/minima of current\n assert i.argmax() < i.argmin(), 'Current valley before peak, signal is inverted!'\n\n v_min = min(v)\n v_max = max(v)\n v_max_time = np.where(v == v_max)[0][0] # first value where voltage has maximum\n # v_min_time = np.where(v == v_min)[0][-1] # last value where voltage has minimum\n # assert v_max_time < v_min_time, 'Voltage valley before peak, signal inverted!'\n c_peak_time = i[0:v_max_time].argmax() # current peak is before voltage maximum\n c_max = i[c_peak_time]\n\n c_valley_time = i.argmin()\n c_min = min(i)\n assert i[c_valley_time] == c_min\n\n # some validation\n assert c_peak_time < c_valley_time, 'Current valley before peak, signal is inverted!'\n assert MAX_VOLTAGE_MIN <= v_max < MAX_VOLTAGE_MAX, 'Max voltage error (%r)' % v_max\n assert MAX_CURRENT_MIN <= c_max < MAX_CURRENT_MAX, 'Max current error (%r)' % c_max\n\n # Find the settling time of the current. Than use the time where the current is stable\n # to calculate the final pulse voltage. This pulse final voltage is then used to calculate\n # the settling time and risetime of the voltage.\n\n # all parts of current inside 10% of maximum, till end of pulse\n i_time_settling_options = [abs(x) < 0.1 * c_max for x in i[0:c_valley_time]]\n ranges = count_ranges(i_time_settling_options)\n range_before, range_pulse = find_longest_ranges(ranges, 2) # [end, length]\n end_pulse = range_pulse[0]\n i_time_settling = range_pulse[0] - range_pulse[1]\n # average of voltage during pulse when current is < 5% of max current\n v_pulse = np.mean(v[i_time_settling:end_pulse])\n # all parts of current inside 10% of maximum, till end of pulse\n v_time_settling_options = [abs(x - v_pulse) < (0.1 * v_pulse) for x in v]\n ranges = count_ranges(v_time_settling_options)\n if ranges == []: # if too much oscillations, a range cannot be found. Increase the bounds:\n # all parts of current inside 10% of maximum, till end of pulse\n v_time_settling_options = [abs(x - v_pulse) < (0.3 * v_pulse) for x in v]\n ranges = count_ranges(v_time_settling_options)\n print('Warning, voltage settling options increased from 10% to 30%!')\n assert ranges != [], \"Error! Line is too unstable.\"\n pulse = find_longest_ranges(ranges, 1) # pulse=[end,length] of voltage pulse stable\n settling_end = pulse[0] - pulse[1] # voltage pulse stable start\n # recalculate pulse voltage\n v_pulse_new = np.mean(v[settling_end:pulse[0]])\n if v_pulse > 13e3: # pulses for highest voltages have to be stable. 
Lower voltages are always less stable.\n assert abs(v_pulse-v_pulse_new)/v_pulse_new < 0.01, 'Pulse voltage unstable.'\n t_settling_end = t[settling_end] # voltage pulse stable start time\n v05 = 0.05 * v_pulse\n settling_start = np.where(v > v05)[0][0]\n t_settling_start = t[settling_start] # when v first rises above 0.05 of final\n t_settling = t_settling_end - t_settling_start\n v10 = 0.1 * v_pulse\n v90 = 0.9 * v_pulse\n t_rise_start = t[np.where(v > v10)[0][0]]\n t_rise_end = t[np.where(v > v90)[0][0]]\n t_rise = t_rise_end - t_rise_start\n rise_rate = (v90 - v10) / (t_rise)\n v_overshoot = v_max / v_pulse\n pulse_stable = int((settling_end + end_pulse) / 2) # point where the pulse is very stable\n # energy\n p = (v * i) # for this to be correct, make sure lines are aligned in b_correct_lines using offset 'v_div'\n e = integrate.cumtrapz(p, t, initial=0)\n p_rise = p[settling_start:pulse_stable]\n e_rise = e[settling_start:pulse_stable][-1]\n p_res = np.append(i[0:pulse_stable] ** 2 * gen_res_high, i[pulse_stable:] ** 2 * gen_res_low)\n # 1/2*C*V^2 is energy stored in capacitor, which is lost after discharging pulse.\n # e_cap = 1 / 2 * react_cap * v_pulse ** 2\n e_res = integrate.cumtrapz(p_res, t, initial=0)\n e_res_total = e_res[-1]\n e_plasma = e[-1] # energy to plasma is energy in positive pulse except charge on capacitor.\n\n # Correct the time axis to have 0 at the start of the pulse\n start = t[settling_start]\n t = t - start\n\n # all these values are added to the pickle and xlsx with 'output_' prepend in calc_run.py\n data = {\n 't': t,\n 'v': v,\n 'c': i,\n 'c_min': c_min,\n 'c_max': c_max,\n 'v_min': v_min,\n 'v_max': v_max,\n 'v_pulse': v_pulse,\n 't_settling': t_settling,\n 't_rise': t_rise,\n 'rise_rate': rise_rate,\n 'v_overshoot': v_overshoot,\n 'p': p,\n 'e': e,\n 'p_rise': p_rise,\n 'e_rise': e_rise,\n\n 'p_res': p_res,\n 'e_res': e_res,\n 'e_res_total': e_res_total,\n # 'e_cap': e_cap,\n 'e_plasma': e_plasma,\n\n 'start': start,\n 'end': t[end_pulse],\n # 'start_index': settling_start,\n # 'end_index': end_pulse,\n # 'test': i_time_settling\n }\n return data",
"def get_vrms(self, ch: int) -> float:\n cmd = \":measure:vrms? cycle,ac,channel{0}\".format(ch)\n return float(self.query(cmd))",
"def calc_rV(A):\n return np.sqrt(calc_rVsq(A))",
"def REXI(self, T=2.5, h=None, M=None):\n if h:\n M = 1.1*T*self.mu_max/h\n elif M:\n h = 1.1*T*self.mu_max/M\n else:\n raise Exception(\"ERROR: At least ONE of h or M needs to be assigned!\")\n\n alpha, beta = rexi_coefficients.RexiCoefficients(h, M)\n J = alpha.shape[0]\n\n # Initialising variables V, W:\n V = np.zeros((self.N, J), dtype=np.complex128)\n W = np.zeros((self.N, J), dtype=np.complex128)\n\n # Pre-computing terms outside of the loop:\n LHS_pre = T**2 * self.K\n RHS_pre = self.W0*T\n for j in range(J):\n LHS = LHS_pre + scipy.sparse.diags([alpha[j]**2],\n offsets=0,\n shape=[self.N, self.N],\n dtype=np.complex128)\n RHS = alpha[j]*self.V0 - RHS_pre\n L, U = question2.bandedLU(LHS, 1, 1)\n RHS2 = scipy.sparse.linalg.spsolve_triangular(L, RHS, lower=True)\n # Can't use spsolve_triangular again as U isn't completely upper-triangular due to roundoff\n V[:, j] = scipy.sparse.linalg.spsolve(U, RHS2)\n W[:, j] = (self.V0 - alpha[j]*V[:, j])/T\n\n # End loop:\n Uj_mat = np.concatenate((V, W), axis=0)\n U = np.sum(np.multiply(Uj_mat, beta[None, :]), axis=1)\n return U, (h, M)",
"def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values",
"def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr",
"def set_rm(self, Rm, theta0=None, showInfos=None):\n if theta0 is None: theta0 = self.rc.theta0\n self.rc.Rm = Rm\n self.rc.set_theta0(theta0, showInfos=showInfos) #to refresh positions\n self.rs0 = self.rc.Rs #store Rs for updated Rm/theta0\n self.sb = self.get_sb() #self.rc.Rs is updated in set_theta0\n print('INFO: bender motor at {0:.3f}'.format(self.sb))",
"def delta_v_calc(mass_initial,\n mass_final,\n v_exhaust,\n ):\n\n return v_exhaust * math.log(mass_initial / mass_final)",
"def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * 
s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** 
values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')",
"def jam_axi_rms(surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, ml=None, normpsf=1., pixang=0.,\n pixsize=0., plot=True, rms=None, erms=None, sigmapsf=0.,\n goodbins=None, quiet=False, beta=None, step=0., nrad=20,\n nang=10, rbh=0.01, tensor='zz', vmin=None, vmax=None, **kwargs):\n if beta is None:\n beta = np.zeros_like(surf_lum) # Anisotropy parameter beta = 1 - (sig_z/sig_R)**2\n if not (surf_lum.size == sigma_lum.size == qobs_lum.size == beta.size):\n raise ValueError(\"The luminous MGE components do not match\")\n if not (surf_pot.size == sigma_pot.size == qobs_pot.size):\n raise ValueError(\"The total mass MGE components do not match\")\n if xbin.size != ybin.size:\n raise ValueError(\"xbin and ybin do not match\")\n if rms is not None:\n if erms is None:\n erms = np.full_like(rms, np.median(rms)*0.05) # Constant ~5% errors\n if goodbins is None:\n goodbins = np.ones_like(rms, dtype=bool)\n elif goodbins.dtype != bool:\n raise ValueError(\"goodbins must be a boolean vector\")\n if not (xbin.size == rms.size == erms.size == goodbins.size):\n raise ValueError(\"(rms, erms, goodbins) and (xbin, ybin) do not match\")\n\n sigmapsf = np.atleast_1d(sigmapsf)\n normpsf = np.atleast_1d(normpsf)\n if sigmapsf.size != normpsf.size:\n raise ValueError(\"sigmaPSF and normPSF do not match\")\n\n pc = distance*np.pi/0.648 # Constant factor to convert arcsec --> pc\n\n surf_lum_pc = surf_lum\n surf_pot_pc = surf_pot\n sigma_lum_pc = sigma_lum*pc # Convert from arcsec to pc\n sigma_pot_pc = sigma_pot*pc # Convert from arcsec to pc\n xbin_pc = xbin*pc # Convert all distances to pc\n ybin_pc = ybin*pc\n pixSize_pc = pixsize*pc\n sigmaPsf_pc = sigmapsf*pc\n step_pc = step*pc\n\n # Add a Gaussian with small sigma and the same total mass as the BH.\n # The Gaussian provides an excellent representation of the second moments\n # of a point-like mass, to 1% accuracy out to a radius 2*sigmaBH.\n # The error increses to 14% at 1*sigmaBH, independently of the BH mass.\n #\n if mbh > 0:\n sigmaBH_pc = rbh*pc # Adopt for the BH just a very small size\n surfBH_pc = mbh/(2*np.pi*sigmaBH_pc**2)\n surf_pot_pc = np.append(surfBH_pc, surf_pot_pc) # Add Gaussian to potential only!\n sigma_pot_pc = np.append(sigmaBH_pc, sigma_pot_pc)\n qobs_pot = np.append(1., qobs_pot) # Make sure vectors do not have extra dimensions\n\n qobs_lum = qobs_lum.clip(0, 0.999)\n qobs_pot = qobs_pot.clip(0, 0.999)\n\n t = clock()\n rmsModel = _vrms2(xbin_pc, ybin_pc, inc, surf_lum_pc, sigma_lum_pc,\n qobs_lum, surf_pot_pc, sigma_pot_pc, qobs_pot, beta,\n tensor, sigmaPsf_pc, normpsf, pixSize_pc, pixang,\n step_pc, nrad, nang)\n if not quiet:\n print('jam_axi_rms elapsed time sec: %.2f' % (clock() - t))\n\n if tensor in ('xx', 'yy', 'zz'):\n rmsModel = np.sqrt(rmsModel.clip(0)) # Return SQRT and fix possible rounding errors\n if tensor in ('xy', 'xz'):\n rmsModel *= np.sign(xbin*ybin) # Calculation was done in positive quadrant\n\n # Analytic convolution of the MGE model with an MGE circular PSF\n # using Equations (4,5) of Cappellari (2002, MNRAS, 333, 400)\n #\n lum = surf_lum_pc*qobs_lum*sigma_lum**2 # Luminosity/(2np.pi) of each Gaussian\n flux = np.zeros_like(xbin) # Total MGE surface brightness for plotting\n for sigp, norp in zip(sigmapsf, normpsf): # loop over the PSF Gaussians\n sigmaX = np.sqrt(sigma_lum**2 + sigp**2)\n sigmaY = np.sqrt((sigma_lum*qobs_lum)**2 + sigp**2)\n surfConv = lum / (sigmaX*sigmaY) # PSF-convolved in Lsun/pc**2\n for srf, sx, sy in zip(surfConv, sigmaX, sigmaY): 
# loop over the galaxy MGE Gaussians\n flux += norp*srf*np.exp(-0.5*((xbin/sx)**2 + (ybin/sy)**2))\n\n if rms is None:\n\n chi2 = None\n if ml is None:\n ml = 1.\n else:\n rmsModel *= np.sqrt(ml)\n\n else:\n\n if (ml is None) or (ml <= 0):\n\n # y1, dy1 = rms, erms # (y1 are the data, y2 the model)\n # scale = sum(y1*y2/dy1**2)/sum(y2**2/dy1**2) # (equation 51)\n #\n ml = (np.sum(rms[goodbins]*rmsModel[goodbins]/erms[goodbins]**2)\n / np.sum((rmsModel[goodbins]/erms[goodbins])**2))**2\n\n rmsModel *= np.sqrt(ml)\n chi2 = np.sum(((rms[goodbins]-rmsModel[goodbins])/erms[goodbins])**2) / goodbins.sum()\n\n if not quiet:\n print('inc=%.1f beta_z=%.2f M/L=%.3g BH=%.2e chi2/DOF=%.3g' % (inc, beta[0], ml, mbh*ml, chi2))\n mass = 2*np.pi*surf_pot_pc*qobs_pot*sigma_pot_pc**2\n print('Total mass MGE: %.4g' % np.sum(mass*ml))\n\n if plot:\n\n rms1 = rms.copy() # Only symmetrize good bins\n rms1[goodbins] = symmetrize_velfield(xbin[goodbins], ybin[goodbins], rms[goodbins])\n\n if (vmin is None) or (vmax is None):\n vmin, vmax = stats.scoreatpercentile(rms1[goodbins], [0.5, 99.5]) # Could use np.percentile in Numpy 1.10\n\n plt.clf()\n plt.subplot(121)\n plot_velfield(xbin, ybin, rms1, vmin=vmin, vmax=vmax, flux=flux, **kwargs)\n plt.title(r\"Input $V_{\\rm rms}$\")\n\n plt.subplot(122)\n plot_velfield(xbin, ybin, rmsModel, vmin=vmin, vmax=vmax, flux=flux, **kwargs)\n plt.plot(xbin[~goodbins], ybin[~goodbins], 'ok', mec='white')\n plt.title(r\"Model $V_{\\rm rms}$\")\n plt.tick_params(labelleft='off')\n plt.subplots_adjust(wspace=0.03)\n\n return rmsModel, ml, chi2, flux",
"def calibV(self):\n # clear buffer in case of errors\n self.flushInput()\n \n if (self.model == 'GDS'):\n self.write(':CHAN'+str(ch)+':SCAL?\\n')\n # returns V/div, turn it into multiplicative factor\n # between digitizer and actual volts\n vmult = float(self.readline()) * 10./255.\n # GDS includes vertical offset in the data returned.\n voff = 0.\n elif (self.model == 'TDS'):\n self.write('WFMPre:YMUlt?\\n')\n # formula I am using later is from TDS manual, so this\n # is straightforward.\n vmult = float(self.readline())\n self.write('WFMPre:YOFf?\\n')\n voff = float(self.readline())\n \n # clear buffer in case of errors\n self.flushInput()\n\n return (vmult, voff)",
"def calc_emi_dif(tgt_pt, src_pt, src_dir, coef=1):\r\n emi_params = [\r\n numpy.zeros(tgt_pt.shape[-1], tgt_pt.dtype),\r\n numpy.zeros((tgt_pt.shape[-1], tgt_pt.shape[-1]), tgt_pt.dtype)\r\n ]\r\n\r\n # 'r' vector\r\n r = tgt_pt - src_pt\r\n\r\n src_dir_len2 = src_dir.dot(src_dir)\r\n if not src_dir_len2:\r\n return emi_params # Zero length, return zero EMI params\r\n\r\n # Vector projections of \"r\" in the direction of \"src_dir\"\r\n l = src_dir.dot(src_dir.dot(r) / src_dir_len2)\r\n R = r - l\r\n\r\n r_len = numpy.sqrt(r.dot(r))\r\n if not r_len:\r\n return None # Target point coincides with \"src_pt\"\r\n\r\n # Calculate the differential Biot–Savart law (https://en.wikipedia.org/wiki/Biot–Savart_law):\r\n # dl x r / r^3\r\n B = numpy.cross(src_dir, r) / r_len ** 3\r\n\r\n # Scale by a coefficient, like current, magnetic constant and 1/(4*pi)\r\n B *= coef\r\n\r\n emi_params[0] = B\r\n\r\n # Calculate the partial derivatives from Biot–Savart law \"R/sqrt(l^2 + R^2)^3\" (see calc_emi())\r\n # along \"l\" and \"R\" axes.\r\n\r\n # Gradient component along 'l':\r\n # Use derivative calculator https://www.derivative-calculator.net/ (substitute l with x):\r\n # input: R / sqrt(x^2 + R^2)^3, result: -3Rx / (x^2 + R^2)^(5/2)\r\n # Substitute back x to l, then sqrt(l^2 + R^2) to r:\r\n # result: -3 * R * l / r^5\r\n R_len2 = R.dot(R)\r\n l_len2 = l.dot(l)\r\n R_len = numpy.sqrt(R_len2)\r\n l_len = numpy.sqrt(l_len2)\r\n if l.dot(src_dir) < 0:\r\n l_len = -l_len\r\n\r\n l_comp = -3 * R_len * l_len / r_len ** 5\r\n\r\n # Gradient component along 'R':\r\n # Use derivative calculator https://www.derivative-calculator.net/ (substitute R with x):\r\n # input: x / sqrt(x^2 + l^2)^3, result: - (2x^2 - l^2) / (x^2 + l^2)^(5/2)\r\n # Substitute back x to R, then sqrt(l^2 + R^2) to r:\r\n # result: (l^2 - 2R^2) / r^5\r\n\r\n R_comp = (l_len2 - 2 * R_len2) / r_len ** 5\r\n\r\n l_comp *= coef\r\n R_comp *= coef\r\n\r\n # Combine l_comp and R_comp into a Jacobian matrix\r\n emi_params[1] = build_jacobian(l_comp, R_comp, src_dir, R, B)\r\n\r\n return emi_params",
"def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second",
"def calc_V(A):\n return 1. / calc_rV(A)",
"def abs_units_old(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = 
kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n print 'one2one selected'\n \n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file=map_file+'.map' \n reducer.map_file = map_file\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integratiOn range to ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n \n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n hardmask = kwargs.get('hardmaskOnly')\n print 'Use hardmask from ', hardmask\n masking=hardmask\n else:\n \n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2\n \n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n else:\n reducer.spectra_masks=total_mask\n \n fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra'\n \n \n #Run the conversion\n deltaE_wkspace = reducer.convert_to_energy(sample_run, ei_guess, wb_run, mono_van,ei_guess,wb_mono)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace,OutputWorkspace=results_name)\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n \n return mtd[wksp_out]",
"def __init__(self, M, rat):\n self.M = M\n xc0, _ = np.polynomial.chebyshev.chebgauss(M-0)\n xc1, _ = np.polynomial.chebyshev.chebgauss(M-1)\n xc2, _ = np.polynomial.chebyshev.chebgauss(M-2)\n # vandermonde and inverse vandermonde matrices\n self.V0 = np.polynomial.chebyshev.chebvander(xc0, M-1)\n self.V1 = np.polynomial.chebyshev.chebvander(xc1, M-2)\n self.V2 = np.polynomial.chebyshev.chebvander(xc2, M-3)\n self.VI0 = np.linalg.inv(self.V0)\n self.VI1 = np.linalg.inv(self.V1)\n self.VI2 = np.linalg.inv(self.V2)\n # differentiation matrices\n DC01 = np.polynomial.chebyshev.chebder(np.eye(M-0)) / rat\n DC12 = np.polynomial.chebyshev.chebder(np.eye(M-1)) / rat\n DC00 = np.row_stack([DC01, np.zeros(M)])\n self.D00 = self.V0.dot(DC00.dot(self.VI0))\n self.D01 = self.V1.dot(DC01.dot(self.VI0))\n self.D12 = self.V2.dot(DC12.dot(self.VI1))\n # boundary condition operators\n self.ibc_dirichlet = np.polynomial.chebyshev.chebvander(1, M-1).dot(self.VI0)\n self.obc_dirichlet = np.polynomial.chebyshev.chebvander(-1, M-1).dot(self.VI0)\n self.ibc_neumann = self.ibc_dirichlet.dot(self.D00)\n self.obc_neumann = self.obc_dirichlet.dot(self.D00)\n # rank reduction operators\n temp = np.zeros([M-1, M-0], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R01 = self.V1.dot(temp.dot(self.VI0))\n temp = np.zeros([M-2, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R12 = self.V2.dot(temp.dot(self.VI1))\n self.R02 = self.R12.dot(self.R01)\n # get poof operator from M-1 --> M\n temp = np.zeros([M, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.P10 = self.V0.dot(temp.dot(self.VI1))",
"def dRdE_magnetic(E, m_x, mu_x, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n A = Avals[target]\n \n #See Eq. 62 of https://arxiv.org/pdf/1307.5955.pdf, but note\n #that we're using some different normalisations for the operators\n #so there are some extra factors of m_x and m_p lurking around...\n \n amu = 931.5e3 # keV\n q1 = np.sqrt(2*A*amu*E) #Recoil momentum in keV\n \n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n m_p = 0.9315\n \n #Proton and neutron g-factors\n gp = 5.59\n gn = -3.83\n \n #Bohr Magneton\n #Tesla = 194.6*eV**2 # Tesla in natural units (with e = sqrt(4 pi alpha))\n #muB = 5.7883818e-5*eV/Tesla # Bohr magneton\n mu_B = 297.45 #GeV^-1 (in natural units (with e = sqrt(4 pi alpha)))\n\n cp = [E*0.0 for i in range(11)]\n cn = [E*0.0 for i in range(11)]\n \n #Operator 1\n cp[0] = e*(mu_x*mu_B)/(2.0*m_x)\n \n #Operator 5\n cp[4] = 2*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n \n #Operator 4\n cp[3] = gp*e*(mu_x*mu_B)/m_p\n cn[3] = gn*e*(mu_x*mu_B)/m_p\n \n #Operator 6\n cp[5] = -gp*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n cn[5] = -gn*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n\n return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)",
"def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number"
]
| [
"0.560289",
"0.5515143",
"0.54086477",
"0.53610516",
"0.53610516",
"0.5335618",
"0.5252791",
"0.5252791",
"0.5249713",
"0.5220318",
"0.51931876",
"0.51924676",
"0.51635695",
"0.5152857",
"0.5121018",
"0.5117766",
"0.5107696",
"0.5075379",
"0.50642395",
"0.505149",
"0.5047355",
"0.50360537",
"0.50284326",
"0.49896875",
"0.49883544",
"0.4981773",
"0.49607882",
"0.49524757",
"0.4937097",
"0.49348602"
]
| 0.7239529 | 0 |
Computes input membrane resistance Rm using abf object and epoch index. | def input_membrane_resistance_abf(abf, epoch_start):
p0 = abf.sweepEpochs.p1s[epoch_start]
p1 = abf.sweepEpochs.p1s[epoch_start + 1]
V = abf.sweepY[p0:p1]
I = abf.sweepC[p0-1:p1]
return input_membrane_resistance(I, V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n\n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n print 'Using vanadium mass: ',van_mass\n print ' sample mass: ',samp_mass \n print ' sample_rmm : ',samp_rmm \n # check if mono-vanadium is provided as multiple files list or just put in brackets ocasionally\n if isinstance(mono_van,list):\n if len(mono_van)>1:\n raise IOError(' Can currently work only with single monovan file but list supplied')\n else:\n mono_van = mono_van[0];\n\n \n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', 
kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n map_file = \"\"\n print 'one2one selected'\n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file = map_file+'.map'\n reducer.map_file = map_file;\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integration range to: ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n\n \n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n if (kwargs.get('hardmaskOnly')): \n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n else:\n specs=\"\"\n \n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking =mtd['mask_wksp']\n else:\n print '########### Run diagnose for sample run ##############'\n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking) \n print 'first Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n fail_list=get_failed_spectra_list(masking) \n else:\n print '########### Run diagnose for monochromatic vanadium run ##############'\n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2 \n reducer.spectra_masks=total_mask \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(total_mask)\n #fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n \n \n #Run the conversion first on the sample\n deltaE_wkspace_sample = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n\n \n if kwargs.has_key('mono_correction_factor'):\n absnorm_factor=kwargs.get('mono_correction_factor')\n print 'Using supplied correction factor for absolute units'\n else:\n print '##### Evaluate the integral from the monovan run and calculate the correction factor ######'\n print ' Using absolute units vanadion integration range : ', reducer.monovan_integr_range \n #now on the mono_vanadium run swap the mapping file\n reducer.map_file = monovan_mapfile \n deltaE_wkspace_monovan = reducer.convert_to_energy(mono_van, ei_guess, wb_mono)\n \n (absnorm_factorL,absnorm_factorSS,absnorm_factorP,absnorm_factTGP) = getAbsNormalizationFactor(deltaE_wkspace_monovan.getName(),str(reducer.monovan_integr_range[0]),str(reducer.monovan_integr_range[1])) \n \n print 'Absolute correction factor S^2 =',absnorm_factorSS,' Libisis: ',absnorm_factorL,' Puasonian: ',absnorm_factorP, ' TGP : ',absnorm_factTGP\n 
CreateSingleValuedWorkspace(OutputWorkspace='AbsFactor',DataValue=absnorm_factTGP)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace_sample.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found for sample run ',ei,' meV'\n print 'Incident energy found for mono vanadium run ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace_sample,OutputWorkspace=results_name)\n if results_name != wksp_out:\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n Divide(LHSWorkspace=wksp_out,RHSWorkspace='AbsFactor',OutputWorkspace=wksp_out)\n DeleteWorkspace(Workspace='AbsFactor')\n return mtd[wksp_out]",
"def abs_units_old(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = 
kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n print 'one2one selected'\n \n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file=map_file+'.map' \n reducer.map_file = map_file\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integratiOn range to ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n \n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n hardmask = kwargs.get('hardmaskOnly')\n print 'Use hardmask from ', hardmask\n masking=hardmask\n else:\n \n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2\n \n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n else:\n reducer.spectra_masks=total_mask\n \n fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra'\n \n \n #Run the conversion\n deltaE_wkspace = reducer.convert_to_energy(sample_run, ei_guess, wb_run, mono_van,ei_guess,wb_mono)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace,OutputWorkspace=results_name)\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n \n return mtd[wksp_out]",
"def input_membrane_resistance(I, V):\n V1 = V[0]\n V2 = V[-1]\n I1 = I[0]\n I2 = I[-1]\n\n dV = V2 - V1\n dI = I2 - I1\n\n return dV / dI",
"def arb_units(wb_run,sample_run,ei_guess,rebin,map_file,**kwargs):\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=funcreturns.lhs_info('both')\n #n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n #repopulate defualts\n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n #-------------DIAG------------------------\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if 
kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #---------------END of DIAG--------------------\n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n print 'one2one selected'\n \n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file=map_file+'.map' \n reducer.map_file = map_file\n\n reducer.energy_bins = rebin\n \n if float(str.split(rebin,',')[2])>=float(ei_guess):\n print 'error rebin range exceeds ei'\n return\n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n if kwargs.has_key('hardmaskOnly'):\n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking=mtd['mask_wksp']\n else:\n \n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n reducer.spectra_masks=masking\n #fail_list=get_failed_spectra_list(masking)\n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking)\n \n print 'Diag found ', len(fail_list),'bad spectra'\n \n #Run the conversion\n deltaE_wkspace = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n \n ei= (deltaE_wkspace.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace,OutputWorkspace=results_name)\n \n print 'Incident energy found ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n \n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n \n return mtd[wksp_out]",
"def modelmag(teff,band,distance=10,AV=0.0,RV=3):\n if band not in PASSBANDS:\n raise ValueError('%s is unrecognized bandpass.' % band)\n\n distance = atleast_1d(distance)\n AV = atleast_1d(AV)\n #AV = AV * distance/1000.\n \n\n if RV==5:\n A = AV*EXTINCTION5[band]\n else:\n A = AV*EXTINCTION[band]\n\n if size(distance) > 1 or size(AV) > 1:\n teff = atleast_1d(teff)\n dm = distancemodulus(distance)\n M = MAGFN[band](teff)\n D = dm[:,newaxis]\n A = A[:,newaxis,newaxis]\n #A = resize(A,(M.shape[1],M.shape[0])).T\n #A = A[:,newaxis]\n else:\n M = MAGFN[band](teff)\n D = distancemodulus(distance)\n\n \n res = M+D+A\n if size(res) == 1:\n return res[0]\n else:\n return res",
"def run_RL_sync(mapname,n_trials = int, seed = int,alpha = 0.15, beta = 0.2, tau = 5, gamma = 0.9, max_steps = 1000, reward_size = 100):\n\n # Softmax can't be from external file, because multiprocessing messes up the seed values\n np.random.seed(seed)\n def softmax_action(action_weights = [], tau = int):\n action_indices = list(range(len(action_weights)))\n f = np.exp((action_weights - np.max(action_weights))/tau) # shift values\n action_prob = f / f.sum(axis=0)\n action_index = np.random.choice(action_indices, 1, p=action_prob)\n return action_index[0]\n\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n time0 = time.perf_counter()\n\n print(\"Running the RL model but with sync !\")\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n # Learning Parameters\n parameters = {\"alpha\": alpha\n ,\"beta\": beta\n ,\"gamma\": gamma\n ,\"tau\": tau}\n n_steps = max_steps\n n_trials = n_trials\n \n sub_reward_size = 0 # no subgoals!\n # # # # # # # # # # # # # #\n # # Setting up the map # #\n # # # # # # # # # # # # # #\n \"\"\" The agent begins in a walled grid and has to find \n the goal to obtain a reward.\"\"\"\n # Grid #\n states = create_grid_from_file(map_file=mapname,goal_location = [10,3],reward_size=reward_size,sub_reward_size=sub_reward_size)\n state_set = list(range(int(states.shape[0]*states.shape[1]))) #index of states\n\n #set of actions\n move_name=[\"UP\", \"R-UP\", \"RIGHT\",\"R-DOWN\",\"DOWN\",\"L-DOWN\", \"LEFT\" ,\"LEFT-UP\"] \n moves = [[-1, 0],[-1, 1], [0, 1], [1, 1], [1, 0],[1, -1], [0, -1], [-1, -1]]\n action_set = list(range(len(moves))) #index list\n\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # Setting up the synchronization modules # #\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Processing module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # Initial variables #\n\n r2_max = 1 #maximum amplitude of nodes\n drift = .8 #rate of drift between coupling parameters\n\n cg_1 = (30/srate)*np.pi #gamma band coupling parameter for input information\n cg_2 = cg_1 + (drift/srate)*2*np.pi #gamma band coupling parameter for actions\n \n damp = 0.3 #damping parameter\n decay = 0.9 #decay parameter\n noise = 0.5 #noise parameter\n\n # Initial matrices #\n\n n_states = len(state_set)\n n_actions= len(action_set)\n\n #Setting up phase code neurons across entire task\n S_Phase = np.zeros((2,states.shape[0],states.shape[1],total_time)) #State phase code units\n A_Phase = np.zeros((2,n_actions,total_time)) #Action phase code units\n\n #Setting up rate code neurons across entire task\n S_Rate = np.zeros((states.shape[0],states.shape[1],total_time)) #State rate code units\n A_Rate = np.zeros((n_actions,total_time)) #Action rate code units\n #State-Action Weight Matrix\n W = np.zeros((states.shape[0],states.shape[1],n_actions))#*0.001 #initial state-action weights\n V = np.zeros((states.shape[0],states.shape[1]))#*0.001 #initial state weights\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Control module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # MFC #\n # Initial variables \n r2_MFC = 0.7 #maximum amplitude MFC node\n damp_MFC = 0.03 # damping parameter MFC\n acc_slope = 10 # MFC slope parameter ---> steepness of burst probability distribution\n ct = (5/srate)*2*np.pi #theta band coupling parameter for MFC\n\n #Setting up phase code nodes for the MFC\n MFC = 
np.zeros((2,total_time))\n #Setting up phase code neuron for MFC -> Bernoulli rate code\n Be = 0 \n \"\"\"When the be value as the rate code of MFC\n reaches certain threshold the MFC will send a burst to coupled neurons\"\"\"\n\n # LFC #\n #Module indicating which states should be initiate action-state synchronization\n LFC = np.zeros((states.shape[0],states.shape[1],n_steps))\n\n #Module that gives the right indices to synchronize\n LFC_sync = 0\n\n\n\n # # # # # # # # # # # # # #\n # # Simulation # #\n # # # # # # # # # # # # # #\n\n # Logging dependent variables\n Hit = np.zeros((total_time,n_steps,n_trials)) #log when there is a burst from the MFC\n # Goal_reach = np.zeros((n_steps,n_trials)) #record if goal is reached \n # Move = np.zeros((n_steps,n_trials)) #record move\n # Bernoulli = np.zeros((total_time,n_steps,n_trials)) #Logging the bernoulli process variables (should be in between -.8 and .8)\n # pred_err = np.zeros((states.shape[0],states.shape[1],n_steps,n_trials)) #logging the prediction error\n trial_length = np.zeros((n_trials))\n\n # Recording sync\n sync = np.zeros((n_states,n_actions,n_steps,n_trials)) \n\n \"\"\" L O O P \"\"\"\n\n exploration = 0\n exploration_intent =0\n sync_fail=0\n greedy=0\n for trial in range(n_trials):\n \"\"\"A trial is considered as each journey the actor makes until the goal\n or until it runs out of steps\"\"\"\n at_goal = False\n start_loc = [1,int(states.shape[1]-2)] #start in the top left\n step = 0 \n S_Phase[:,:,:,0] = (2*np.random.random_sample((2,states.shape[0],states.shape[1])))-1 # random starting points processing module\n A_Phase[:,:,0] = (2*np.random.random_sample((2,n_actions)))-1 # idem\n while not at_goal:\n #starting location at first trial\n if step == 0:\n current_loc = start_loc\n else:\n S_Phase[:,:,:,0] = S_Phase[:,:,:,total_time-1] # random starting points processing module\n A_Phase[:,:,0] = A_Phase[:,:,total_time-1] # idem\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Synchronization\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \n #phase reset\n MFC[:,0]=np.ones((2))*r2_MFC \n\n\n # LFC setting instruction per step: each state is an input\n LFC[current_loc[0],current_loc[1],step] = 1\n\n # What we want is the lfc to indicate the state and then have the LFC sync pro actively select an action based on state action value maps\n \n action_to_sync = softmax_action(action_weights=W[current_loc[0],current_loc[1],:],tau=10)\n if action_to_sync in np.where(W[current_loc[0],current_loc[1],:]== max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=0\n else:\n exploration_intent+=1\n \n \n #Which action does LFC sync to current state\n LFC_sync = int(action_to_sync)\n LFC_desync = list(range(len(moves)))\n LFC_desync.pop(LFC_sync) \n\n # The actor makes the move #\n for t in range(total_time-1):\n\n \n #Update phase code neurons for actions and states in processing module\n #State phase code neurons \n S_Phase[:,:,:,t+1] = update_phase(nodes=S_Phase[:,:,:,t], grid = True, radius=r2_max, damp = damp, coupling = cg_1,multiple=True )\n \n #Action phase code neurons\n A_Phase[:,:,t+1] = update_phase(nodes=A_Phase[:,:,t], grid = False, radius=r2_max, damp = damp, coupling = cg_2,multiple=True )\n\n #Update phase code untis of MFC\n MFC[:,t+1] = update_phase(nodes=MFC[:,t], grid = False, radius=r2_MFC, damp=damp_MFC, coupling=ct,multiple=False)\n\n #MFC rate code neuron-> Bernoulli process\n\n Be = 1/(1 + np.exp(-acc_slope*(MFC[0,t]-1))) # Bernoulli process \n #Bernoulli[time,step,trial] = Be # logging Be value\n\n p = 
random.random()\n\n if p < Be:\n\n Gaussian = np.random.normal(size = [1,2]) #noise factor as normal distribution\n #Hit[tijd,step,trial] = 1\n \n \n x, y = current_loc[1], current_loc[0]\n\n #the LFC decides which state is paired with which actions\n\n if LFC[y,x,step]:\n #The state the actor is in receives a burst because it is the only input\n S_Phase[:,y,x,t+1] = decay*S_Phase[:,y,x,t] + Gaussian\n\n # and all the actions that are to be synchronized to that state receive a burst\n if type(LFC_sync) is int:\n A_Phase[:,LFC_sync,t+1] = decay*A_Phase[:,LFC_sync,t] + Gaussian\n \n # Desynchronize all other actions !\n for node in LFC_desync:\n A_Phase[:,int(node),t+1] = decay*A_Phase[:,int(node),t] - Gaussian*noise\n\n #Updating rate code units\n #Only the rate code neuron of a single state is updated because the actor can only be in one place at the same time\n S_Rate[current_loc[0],current_loc[1],t]= (1/(1+np.exp(-5*S_Phase[0,current_loc[0],current_loc[1],t]-0.6)))\n A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t]*(W[current_loc[0],current_loc[1],:]+1))*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n #A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t])*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n \n # select action\n action_index = int(np.argmax(np.sum(A_Rate[:,:],axis=1)))\n if action_index in np.where(W[current_loc[0],current_loc[1],:] == max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=1\n else:\n exploration+=1\n\n if action_index != LFC_sync:\n sync_fail+=1\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Learning\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n #update location\n new_loc= update_location(grid = states, loc=current_loc,move = moves[action_index])\n\n #log coordinates for weight matrices\n coordinates = [current_loc[0], current_loc[1], new_loc[0], new_loc[1], action_index] #location coordinates\n\n #update weights according to TD-learning\n V, W, delta, at_goal = update_weights(param=parameters, index=coordinates, V=V, W=W, states=states, reward_size = reward_size)\n\n\n #update_location\n current_loc = new_loc\n step+=1\n if step ==n_steps:\n #print(\"Agent did not reach goal\")\n break\n \n trial_length[trial] = step \n \n print(\"I took {0} exploratory steps and {1} greedy steps this simulation\".format(exploration,greedy))\n print(\"I intended to explore {} times\".format(exploration_intent))\n print(\"Sync of correct action failed {} times\".format(sync_fail))\n print(\"In this sim I took a total {} steps\".format(np.sum(trial_length)))\n \n time1 = time.perf_counter()\n print(\"For the second model I took {} minutes\".format((time1-time0)/60))\n return trial_length, V",
"def run_maf(dbFile, ra, dec):\n\n # establish connection to sqllite database file.\n opsimdb = db.OpsimDatabase(dbFile)\n \n # While we're in transition between opsim v3 and v4, this may be helpful: print(\"{dbFile} is an opsim version {version} database\".format(dbFile=dbFile, version=opsimdb.opsimVersion))\n if opsimdb.opsimVersion == \"V3\":\n # For v3 databases:\n mjdcol = 'expMJD'\n degrees = False\n cols = ['filter', 'fiveSigmaDepth', mjdcol, 'expDate']\n stackerList = []\n else:\n # For v4 and alternate scheduler databases.\n mjdcol = 'observationStartMJD'\n degrees = True\n cols = ['filter', 'fiveSigmaDepth', mjdcol]\n stackerList = [expDateStacker()]\n \n # IntraNightGapsMetric returns the gap (in days) between observations within the same night custom reduceFunc to find min gaps \n metric = metrics.cadenceMetrics.IntraNightGapsMetric(reduceFunc=np.amin, mjdCol=mjdcol)\n # PassMetric just pass all values\n metric_pass = metrics.simpleMetrics.PassMetric(cols=cols)\n # slicer for slicing pointing history\n slicer = slicers.UserPointsSlicer(ra, dec, lonCol='fieldRA', latCol='fieldDec', latLonDeg=degrees)\n # sql constrains, 3 for baseline2018a, 1 for rolling m2045\n sql = ''\n \n # bundles to combine metric, slicer and sql constrain together\n bundle = metricBundles.MetricBundle(metric, slicer, sql)\n date_bundle = metricBundles.MetricBundle(metric_pass, slicer, sql, stackerList=stackerList)\n \n # create metric bundle group and returns\n bg = metricBundles.MetricBundleGroup({'sep': bundle, 'cadence': date_bundle}, opsimdb, outDir=outDir, resultsDb=resultsDb)\n bg.runAll()\n opsimdb.close()\n return bg",
"def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n badSweep = 0\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateVoltageRef(gen, indi)\n disfCount = 0\n \n vdd_sweep = np.array(results['vout_vdd']['nominal'], dtype=float) #This line changes Nones to np.nans\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep)):\n disfCount = disfCount + 1\n vdd_s = 0\n vdd_s_d = 0\n #print \"tukej!\", vdd_sweep_scale\n else:\n x = np.median(vdd_sweep)\n vdd_s = abs(x - VREF) #if x > VREF else 0\n vdd_s_d = np.max(vdd_sweep) - np.min(vdd_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n #print \"tukiii\", vdd_sweep_scale\n if (vdd_sweep_scale[-1]<20): #20V\n\tbadSweep = badSweep + 1\n \n rload_sweep = np.array(results['vout_rload']['nominal'], dtype=float)\n rload_sweep_scale = np.array(results['vout_rload_scale']['nominal'], dtype=float)\n # if measurement is empty\n if np.any(np.isnan(rload_sweep)):\n disfCount = disfCount + 1\n rload_s = 0\n rload_s_d = 0\n else:\n x = np.median(rload_sweep)\n rload_s = abs(x - VREF) #if x > VREF else 0\n rload_s_d = np.max(rload_sweep) - np.min(rload_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n if (rload_sweep_scale[-1]<100e3): #100kOhm\n\tbadSweep = badSweep + 1\n \n temp_sweep = np.array(results['vout_temp']['nominal'], dtype=float)\n temp_sweep_scale = np.array(results['vout_temp_scale']['nominal'], dtype=float)\n # if measurement is empty OR sweep did not finish completely - check last scale value in runme2!!\n if np.any(np.isnan(temp_sweep)):\n disfCount = disfCount + 1\n temp_s = 0\n temp_s_d = 0\n else:\n x = np.median(temp_sweep)\n temp_s = abs(x - VREF) #if x > VREF else 0\n temp_s_d = np.max(temp_sweep) - np.min(temp_sweep)\n if (temp_sweep_scale[-1]<120): #120 deg celsius\n\tbadSweep = badSweep + 1\n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = (vdd_s) + (vdd_s_d) + 5*(rload_s) + 5*(rload_s_d) + (100*temp_s) + (100*temp_s_d) + (100*powe) + badSweep*100\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, temp_s, temp_s_d, powe\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 100*temp_s, 100*temp_s_d, 100*powe\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results",
"def main():\n parser = argparse.ArgumentParser(description='Memory Reallocation')\n parser.add_argument('-i', dest='file_path', help='path to input data file', required=True)\n\n args = parser.parse_args()\n with open(args.file_path, 'r') as file:\n input_data = [int(val) for val in file.read().split('\\t')]\n\n cycles, cycles_to_previous = reallocation(input_data)\n print(cycles)\n print(cycles_to_previous)",
"def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)",
"def process_rixs(save_folder,states_index,temperature,absorption_pol,emission_pol,\n final_state_lifetime, intermediate_state_lifetime,\n ein_min = 1E10, ein_max = -1E10, ein_step = 0.1,\n eloss_min = 1E10, eloss_max = -1E10, eloss_step = 0.1,\n verbose = False):\n\n\n states = get_ground_states(save_folder,states_index,prefix='abs_')\n states['boltz'] = boltz_dist(states['energy'],temperature,verbose=verbose)\n\n absorption = collect_transition_matrix(save_folder,states['label'],absorption_pol,\n states_energy = states['energy'], prefix='abs_')\n\n absorption = cleanup_transition_matrix(absorption,absorption_pol)\n\n emission_label = [label.split()[0] for label in absorption['label']]\n emission = collect_transition_matrix(save_folder,emission_label,emission_pol,\n prefix='emi_')\n\n\n # Apply Bolzmann factor\n for i in range(len(absorption['matrix'])):\n for j in range(absorption['matrix'][i].shape[1]):\n for energy,state,boltz in zip(states['energy'],states['label'],states['boltz']):\n if absorption['label'][i].split()[-1] == state:\n if np.array(absorption['ket_energy'][i][j]-energy) < 0.0002:\n absorption['matrix'][i][:,j] *= boltz\n\n # Construct rixs matrix\n rixs_matrix = OrderedDict({})\n rixs_matrix['label'] = []\n rixs_matrix['energy_inc'] = []\n rixs_matrix['energy_loss'] = []\n rixs_matrix['matrix'] = []\n\n for i in range(len(absorption['label'])):\n for j in range(len(emission['label'])):\n if absorption['label'][i].split()[0] == emission['label'][j].split()[-1]:\n for k in range(len(absorption['ket_energy'][i])):\n emi, inc = np.meshgrid([np.abs(x - states['gs0']) for x in emission['bra_energy'][j]], absorption['bra_energy'][i]- absorption['ket_energy'][i][k])\n rixs_matrix['energy_loss'].append(emi)\n rixs_matrix['energy_inc'].append(inc)\n #rixs_matrix['label'].append(absorption['label'][i] + ' {:s} {:s}'.format(emission['label'][j].split()[1],emission['label'][j].split()[2]))\n rixs_matrix['label'].append(emission['label'][j] + ' {:s} {:s}'.format(absorption['label'][i].split()[1],absorption['label'][i].split()[2]))\n rixs_matrix['matrix'].append(np.zeros((len(rixs_matrix['energy_inc'][-1]), len(rixs_matrix['energy_loss'][-1][0,:])), dtype = complex))\n for l in range(len(rixs_matrix['matrix'][-1])):\n rixs_matrix['matrix'][-1][l,:] = [x*absorption['matrix'][i][l,k] for x in emission['matrix'][j][:,l]]\n\n # Finds energy limits\n if ein_max == -1E10:\n for line in absorption['bra_energy']:\n if ein_max < np.max(line):\n ein_max = np.max(line)\n ein_max += 2.0\n if ein_min == 1E10:\n for line in absorption['bra_energy']:\n if ein_min > np.min(line):\n ein_min = np.min(line)\n ein_min -= 2.0\n\n if eloss_max == -1E10:\n for line in emission['bra_energy']:\n if eloss_max < (np.max(line) - states['gs0']):\n eloss_max = np.max(line) - states['gs0']\n eloss_max += 1.0\n if eloss_min == 1E10:\n for line in emission['bra_energy']:\n if eloss_min > (np.min(line) - states['gs0']):\n eloss_min = np.min(line) - states['gs0']\n eloss_min -= 1.0\n\n energy_loss = np.linspace(eloss_min , eloss_max, int((eloss_max - eloss_min)/eloss_step + 0.5))\n energy_inc = np.linspace(ein_min, ein_max, int((ein_max - ein_min)/ein_step + 0.5))\n\n if verbose is True:\n time0 = datetime.now()\n\n if verbose is True:\n print('\\nStarting RIXS calculation...')\n print('Transitions used:')\n\n for l in rixs_matrix['label']:\n print(l)\n\n print('\\nBuilding RIXS matrix...')\n\n #Finding transitions that will interfere\n rixs_interference = []\n rixs_interference_label = []\n for i in 
range(len(rixs_matrix['label'])):\n\n init = rixs_matrix['label'][i].split()[0]\n final = rixs_matrix['label'][i].split()[-1]\n\n if len(rixs_interference) == 0:\n rixs_interference_label.append('{:s},{:s}'.format(init,final))\n rixs_interference.append([i])\n else:\n\n if '{:s},{:s}'.format(init,final) in rixs_interference_label:\n rixs_interference[rixs_interference_label.index('{:s},{:s}'.format(init,final))].append(i)\n else:\n rixs_interference_label.append('{:s},{:s}'.format(init,final))\n rixs_interference.append([i])\n\n if verbose is True:\n print('Interference pairs')\n for i,j in zip(rixs_interference_label, rixs_interference):\n print(i,j)\n\n print('')\n\n rixs = pd.DataFrame(np.zeros((len(energy_loss),len(energy_inc))),index=energy_loss,columns=energy_inc)\n\n for eloss in energy_loss:\n if verbose is True:\n if np.abs(eloss - int(eloss+0.5)) < eloss_step/2:\n print('Eloss = {:0.2f} eV... '.format(eloss))\n for ein in energy_inc:\n aux = np.array([])\n for l in range(len(rixs_interference)):\n M = []\n for k in rixs_interference[l]:\n M.append(np.sum(rixs_matrix['matrix'][k]/(ein-rixs_matrix['energy_inc'][k] + intermediate_state_lifetime*1j),axis=0))\n aux = np.append(aux, np.abs(np.sum(M,axis=0))**2*final_state_lifetime/2/np.pi/((eloss-rixs_matrix['energy_loss'][rixs_interference[l][0]][0,:])**2 + final_state_lifetime**2/4))\n rixs[ein][eloss] = np.sum(aux)\n\n if verbose is True:\n print('Done!')\n\n if verbose is True:\n timef = datetime.now()\n print('Time to create rixs matrixes: ', timef-time0)\n\n return rixs",
"def task_metalearn(inp, reuse=True):\n # Seperate inp to different variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record losses\n lossa_list = [] # Base train loss list\n lossb_list = [] # Base test loss list\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse) # Embed episode train \n emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True) # Embed episode test \n\n # Run the first epoch of the base learning\n # Forward fc layer for episode train \n outputa = self.forward_fc(emb_outputa, fc_weights)\n # Calculate base train loss\n lossa = self.loss_func(outputa, labela)\n # Record base train loss\n lossa_list.append(lossa)\n # Forward fc layer for episode test\n outputb = self.forward_fc(emb_outputb, fc_weights)\n # Calculate base test loss\n lossb = self.loss_func(outputb, labelb)\n # Record base test loss\n lossb_list.append(lossb) \n # Calculate the gradients for the fc layer \n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n # Use graient descent to update the fc layer\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n \n for j in range(num_updates - 1):\n # Run the following base epochs, these are similar to the first base epoch\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n lossa_list.append(lossa)\n lossb = self.loss_func(self.forward_fc(emb_outputb, fast_fc_weights), labelb)\n lossb_list.append(lossb) \n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n\n # Calculate final episode test predictions\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n # Calculate the final episode test loss, it is the loss for the episode on meta-train \n final_lossb = self.loss_func(outputb, labelb)\n # Calculate the final episode test accuarcy\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n\n # Reorganize all the outputs to a list\n task_output = [final_lossb, lossb_list, lossa_list, accb]\n\n return task_output",
"def __init__(self, cell):\n self._cell = cell\n self._residual_fn = gnmt_residual_fn",
"def reduce_run():",
"def main(cliArgs):\n\n log.info(\"START RAMAN TENSOR CONVERSION\")\n\n # Read tensor file as matrices\n tensorlist = util.readFileAsMatrices(cliArgs.tensorfile, (3,3))\n\n# PREPARE SIMULATION\n\n log.info(\"Prepare simulation\")\n\n # Copy the structure of tensorlist with empty arrays. This copy will be filled with the result of the simulation\n convertedTensorlist = [{\"head\": tensor[\"head\"],\n \"muellerMatrix\": np.diag([0, 0, 0, 0]).astype(np.float),\n \"ramanTensor\": np.diag([0, 0, 0]).astype(np.float)\n } for tensor in tensorlist]\n\n # Set a flag to signal the while loop below wether or not to rerun the simulation if validation fails\n runMonteCarlo = True\n\n # Total number of iterations\n # This number will increase if the simulation is not validated and run again\n totalIterations = cliArgs.iterationLimit\n\n# RUN MONTE-CARLO SIMULATION\n# The steps 1. and 2. will be performed by the function __monteCarlo(). Step 3. will be performed by this function.\n# Calculation: 1. Rotate all raman tensors randomly via matrix multiplication\n# Uniformly distributed random rotations are generated with James Arvo's Algorithm \"Fast Random Rotation Matrices\". See pdf file jamesArvoAlgorithm.pdf for the math.\n# 2. Compute the mueller matrix of the rotated raman tensor. For the math, see pdf file ramanMuellerMatrix.pdf.\n# 3. Compute the mean of all rotated mueller matrices and raman tensors. The mean will be computed by the main function.\n# The while loop gives the opportunity to run the simulatio again, if the validation of the simulation fails.\n while( runMonteCarlo == True ):\n log.info(\"START MONTE CARLO SIMULATION\")\n\n # !!!!! LOGGING IS OMITTED DURING THE SIMULATION DUE TO SEVERE PERFORMANCE ISSUES !!!!!!\n\n # Build a generator that returns the tensorlist that will be passed to every iteration of the monte-carlo-simulation\n processArgs = ( tensorlist for i in range(cliArgs.iterationLimit) )\n\n # Create a pool of workers sharing the computation task\n with multiprocessing.Pool(processes = cliArgs.processCount) as pool:\n\n # Start child processes which run __monteCarlo()\n # Each subprocess will be given a list of size chunksize. Each element of the list contains the list of all raman tensor.\n # Each subprocess will therefore run the function __monteCarlo() cunksize times and passes the tensorlist to every function call.\n # The computation will be slow if the chunksize is to big or to small\n process = pool.imap_unordered(__monteCarlo, processArgs, chunksize = cliArgs.chunksize)\n\n # Loop over all ready results, while the processes are still running\n # process contains all rotated matrices\n # tqdm prints a lovely progress bar\n for result in tqdm( process, total = cliArgs.iterationLimit,\n desc = \"Processes \" + str(cliArgs.processCount) ):\n # Tally the results of all processes up and divide by the iteration limit to get the mean of all computations\n convertedTensorlist = [ {\"head\" : tensor[\"head\"],\n \"muellerMatrix\": np.add(convertedTensorlist[index][\"muellerMatrix\"], tensor[\"muellerMatrix\"]/totalIterations),\n \"ramanTensor\" : np.add(convertedTensorlist[index][\"ramanTensor\"] , tensor[\"ramanTensor\"] /totalIterations)\n } for (index, tensor) in enumerate(result) ]\n\n log.info(\"STOPPED MONTE CARLO SIMULATION SUCCESSFULLY\")\n\n #\n # VALIDATE THE SIMULATION\n # by comparing the depolarisation ratio of the molecular tensor and the labratory matrix\n # Source: Richard N. 
Zare: Angular Momentum, p.129\n #\n\n log.info(\"Validating monte-carlo-simulation via the depolarisation ratio.\")\n\n # Check every matrix\n for initial, final in zip(tensorlist, convertedTensorlist):\n\n log.debug(\"Check matrix '\" + initial[\"head\"] + \"'.\")\n\n # Check if loop is comparing the right matrices\n if initial[\"head\"] != final[\"head\"]:\n log.critical(\"INTERNAL ERROR: The header of input and output matrices don't match! Error in input tensor '\" + initial[\"head\"] + \"' and output matrix '\" + final[\"head\"] + \"'.\" )\n log.critical(\"TERMINATE EXECUTION.\")\n sys.exit(-1)\n\n # Compute eigenvalues of molecular tensor\n try:\n eigenvalues = np.linalg.eigvals(initial[\"matrix\"])\n\n except LinAlgError as e:\n # Eigenvalues do not converge. Log this issue and exit execution.\n log.critical(\"The eigenvalue computation of the input raman tensor '\" + initial[\"head\"] + \"' does not converge. Unable to validate monte-carlo-simulation!\")\n log.critical(\"TERMINATE EXECUTION.\")\n sys.exit(-1)\n\n # Compute depolarisation ratio of the inital tensor via the eigenvalues. See Richard N. Zare: \"Angluar Momentum\", p.129.\n isotropicPolarisability = sum(eigenvalues)/3\n anisotropicPolarisability_squared = ( (eigenvalues[0]-eigenvalues[1])**2 + (eigenvalues[1]-eigenvalues[2])**2 + (eigenvalues[2]-eigenvalues[0])**2 )/2\n initialDepolarisationRatio = 3*anisotropicPolarisability_squared / ( 45*isotropicPolarisability**2 + 4*anisotropicPolarisability_squared )\n\n log.debug(\"Initial Depolarisation Ratio: \" + str(initialDepolarisationRatio))\n\n # Compute the depolarisation ratio of the final mueller matrix via raman scattering in Mueller-Formalism. See Richard N. Zare: \"Angluar Momentum\", p.129.\n # Compute light intensities along x- and y-axis via stokes parameter:\n # I_x = S_0 + S_1\n # I_y = S_0 - S_1\n # depolarisationRatio = I_y / I_x ; if the incoming light is polarised along the x-axis.\n incomingLight = np.array([1,1,0,0])\n scatteredLight = final[\"muellerMatrix\"] @ incomingLight\n finalDepolarisationRatio = (scatteredLight[0]-scatteredLight[1])/(scatteredLight[0]+scatteredLight[1])\n\n log.debug(\"Final Depolarisation Ratio: \" + str(finalDepolarisationRatio))\n\n #\n # CHECK RESULTS\n #\n # Give the user the opportunity to run the simulation\n # again and use the computation time that's been spent so far\n #\n if round(initialDepolarisationRatio, cliArgs.threshold) != round(finalDepolarisationRatio, cliArgs.threshold):\n success = False\n break\n else:\n success = True\n\n #\n # DECIDE TO CONTINUE OR END THE PROGRAM\n #\n if success == True:\n # Simulation is valid exit while loop\n runMonteCarlo = False\n log.info(\"Validation done.\")\n\n else:\n # The validation failed\n log.critical(\"Validation failed for matrix '\" + final[\"head\"] + \"'!\")\n log.critical(\"Input: \" + str(round(initialDepolarisationRatio, cliArgs.threshold)) + \" Simulation: \" + str(round(finalDepolarisationRatio, cliArgs.threshold)))\n log.critical(\"Ask for user input. Should the simulation run again?\")\n # Ask user if he/she wants to run more iterations and try the validation again\n response = input(\"The simulation did \" + str(totalIterations) + \" iterations. Do you wish to compute another \"\n + str(cliArgs.iterationLimit) + \" iterations and try the validation again? 
[Y/n] \").lower()\n log.critical(\"Users response: \" + response)\n if response == \"n\":\n # User wants to exit\n log.critical(\"The user does not want to continue the computation.\")\n log.critical(\"TERMINATE EXECUTION.\")\n sys.exit(-1)\n else:\n # User wants to continue\n runMonteCarlo = True\n log.info(\"Run Monte-Carlo-Simulation again.\")\n # Save the number of computed iterations done so far\n iterationsSoFar = totalIterations\n # Compute new number of total iterations\n totalIterations = iterationsSoFar + cliArgs.iterationLimit\n # Rescale the calculated matrices.\n # There is following problem: The programm does not save a list of all computed matrices.\n # It only saves the mean value. In order to use the current mean\n # value of the matrices to compute the mean you get when doing more\n # iterations, you have to multiply the matrices by the number of\n # iterations done so far and divide it by the total number of\n # iterations that will be done after rerunning the simulation.\n log.info(\"Prepare rerun of simulation by rescaling the mueller matrices mean.\")\n scalingFactor = iterationsSoFar / totalIterations\n convertedTensorlist = [ {\"head\" : entry[\"head\"],\n \"muellerMatrix\": entry[\"muellerMatrix\"] * scalingFactor,\n \"ramanTensor\" : entry[\"ramanTensor\"] * scalingFactor\n } for entry in convertedTensorlist ]\n\n##### END OF MONTE-CARLO-SIMULATIONS WHILE LOOP\n\n\n# CONVERT RESULTS TO TEXT\n\n # Write the commandline parameters and the execution time in a string\n output_text = \"# polaram convert \" + str(cliArgs.tensorfile.resolve())\n output_text += \" --output \" + str(cliArgs.outputfile.resolve())\n output_text += \" --log \" + str(cliArgs.logfile.resolve())\n output_text += \" --iterations \" + str(totalIterations)\n output_text += \" --threshold \" + str(cliArgs.threshold)\n output_text += \"\\n# Execution time: \" + str(datetime.now())\n\n # Add user comment to string\n # Given via command line interface\n if cliArgs.comment != \"\":\n output_text += \"\\n\\n# \" + str(cliArgs.comment)\n\n # Add the calculated matrices to the string. The matrices are formated like the tensor input file\n for dict in convertedTensorlist:\n # Print mean of mueller matrices\n output_text += \"\\n\\n! \" + dict[\"head\"] + \"\\n\" + np.array2string(dict[\"muellerMatrix\"], sign = None).replace(\"[[\", \"\").replace(\" [\", \"\").replace(\"]\", \"\")\n # Print mean of raman tensors as comments\n output_text += \"\\n\\n#! \" + dict[\"head\"] + \" (Mean Of Rotated Raman Tensors)\\n\" + np.array2string(dict[\"ramanTensor\"], sign = None).replace(\"[[\", \"#\").replace(\" [\", \"#\").replace(\"]\", \"\")\n\n # Log and write text to file\n log.debug(\"Writing results to '\" + str(cliArgs.outputfile.resolve()) + \"':\\n\\n\" + output_text + \"\\n\")\n print(output_text)\n cliArgs.outputfile.write_text(output_text)\n\n log.info(\"STOPPED RAMAN TENSOR CONVERSION SUCCESSFULLY\")",
"def gpuMBIR(tomo,angles,center,input_params):\n print('Starting GPU MBIR recon')\n #allocate space for final answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n num_iter = input_params['num_iter']\n mrf_sigma = input_params['smoothness']\n mrf_p = input_params['p']\n print('MRF params p=%f sigma=%f' %(mrf_p,mrf_sigma))\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n \n #Initialize NUFFT parameters\n print('Initialize NUFFT params')\n nufft_params = init_nufft_params(sino,geom)\n\n temp_y = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n temp_x = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n x_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n \n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n\n #allocate output array\n rec_mbir_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n \n #Move all data to GPU\n print('Moving data to GPU')\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n gradient = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']), dtype=afnp.complex64)#temp array to store the derivative of cost func\n z_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)#Nesterov method variables\n t_nes = 1\n \n #Compute Lipschitz of gradient\n print('Computing Lipschitz of gradient')\n x_ones= afnp.ones((1,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_x[pad_idx,pad_idx]=x_ones[0]\n temp_proj=forward_project(temp_x,nufft_params)\n temp_backproj=(back_project(temp_proj,nufft_params))[pad_idx,pad_idx]\n print('Adding Hessian of regularizer')\n temp_backproj2=afnp.zeros((1,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_backproj2[0]=temp_backproj\n add_hessian(mrf_sigma,x_ones, temp_backproj2)\n L = np.max([temp_backproj2.real.max(),temp_backproj2.imag.max()])\n print('Lipschitz constant = %f' %(L))\n del x_ones,temp_proj,temp_backproj,temp_backproj2\n\n #loop over all slices\n for iter_num in range(num_iter):\n print('Iteration %d of %d'%(iter_num,num_iter))\n #Derivative of the data fitting term\n for i in range(num_slice/2):\n temp_x[pad_idx,pad_idx]=x_recon[i]\n Ax = forward_project(temp_x,nufft_params)\n temp_y[pad_idx]=gdata[i]\n gradient[i] =(back_project((Ax-temp_y),nufft_params))[pad_idx,pad_idx] #nufft_scaling\n #Derivative of regularization term\n tvd_update(mrf_p,mrf_sigma,x_recon, gradient) \n #x_recon-=gradient/L\n x_recon,z_recon,t_nes=nesterovOGM2update(x_recon,z_recon,t_nes,gradient,L)\n \n #Move to CPU\n #Rescale result to match tomopy\n rec_mbir=np.array(x_recon,dtype=np.complex64)\n rec_mbir_final[slice_1]=np.array(rec_mbir.real,dtype=np.float32)\n rec_mbir_final[slice_2]=np.array(rec_mbir.imag,dtype=np.float32)\n return rec_mbir_final",
"def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]",
"def task_metalearn(inp, reuse=True):\n # Seperate inp to different variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record accuracies\n accb_list = []\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse)\n emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True)\n\n # This part is similar to the meta-train function, you may refer to the comments above\n outputa = self.forward_fc(emb_outputa, fc_weights)\n lossa = self.loss_func(outputa, labela) \n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n \n for j in range(num_updates - 1):\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n\n lossb = self.loss_func(outputb, labelb)\n\n task_output = [lossb, accb, accb_list]\n\n return task_output",
"def test_jam_axi_rms():\n np.random.seed(123)\n xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T\n\n inc = 60. # Assumed galaxy inclination\n r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk\n a = 40 # Scale length in arcsec\n vr = 2000*np.sqrt(r)/(r+a) # Assumed velocity profile\n vel = vr * np.sin(np.radians(inc))*xbin/r # Projected velocity field\n sig = 8700/(r+a) # Assumed velocity dispersion profile\n rms = np.sqrt(vel**2 + sig**2) # Vrms field in km/s\n\n surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])\n sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])\n qObs = np.full_like(sigma, 0.57)\n\n distance = 16.5 # Assume Virgo distance in Mpc (Mei et al. 2007)\n mbh = 1e8 # Black hole mass in solar masses\n beta = np.full_like(surf, 0.3)\n\n surf_lum = surf # Assume self-consistency\n sigma_lum = sigma\n qobs_lum = qObs\n surf_pot = surf\n sigma_pot = sigma\n qobs_pot = qObs\n\n sigmapsf = 0.6\n pixsize = 0.8\n goodbins = r > 10 # Arbitrarily exclude the center to illustrate how to use goodbins\n\n # The model is similar but not identical to the adopted kinematics!\n rmsModel, ml, chi2, flux = jam_axi_rms(\n surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, plot=True, rms=rms, sigmapsf=sigmapsf,\n beta=beta, pixsize=pixsize, tensor='zz', goodbins=goodbins)\n plt.pause(0.01)",
"def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)",
"def _compute_linear_magnitude_term(index, M):\r\n if M <= c1:\r\n # this is the second term in eq. (2a), p. 20\r\n return a2 * (M - c1)\r\n else:\r\n # this is the second term in eq. (2b), p. 20\r\n return a7 * (M - c1)",
"def _compute_linear_magnitude_term(index, M):\r\n if M <= c1:\r\n # this is the second term in eq. (2a), p. 20\r\n return a2 * (M - c1)\r\n else:\r\n # this is the second term in eq. (2b), p. 20\r\n return a7 * (M - c1)",
"def offlineEvaluate(mab, arms, rewards, contexts, nrounds=None): \n \n assert isinstance(nrounds, int), \"'nrounds' argument should be an integer.\"\n assert nrounds > 0, \"'nrounds' argument should be a positive integer.\"\n\n assert (not (np.isscalar(arms) or isinstance(arms, list))) and\\\n all([isinstance(np.asscalar(element), int) for element in arms]),\\\n \"'arms' argument should be an array of integers.\" \n assert (not (np.isscalar(rewards) or isinstance(rewards, list))) and\\\n all([isinstance(element, float) for element in rewards]),\\\n \"'rewards' argument should be an array of float.\" \n assert (not (np.isscalar(contexts) or isinstance(contexts, list))) and\\\n all([[isinstance(element, float) for element in elements] for elements in contexts]),\\\n \"'contexts' argument should be an array of integer.\" \n\n assert np.size(contexts, 0) == len(rewards) == len(arms), \\\n \"'contexts', 'rewards', and 'arms' arguments should have the same number of events.\"\n assert (np.size(contexts, 1) / mab.narms).is_integer() ,\\\n \"'contexts' argument should have same dimensions for each of the arms.\" \n \n arm_history = []\n per_round_rewards = []\n tround = 1\n for i in range(len(arms)):\n # Play arm\n played_arm = mab.play(tround, contexts[i]) \n # If played arm equals arm played by a uniformly-random policy\n if played_arm == arms[i]: \n reward = rewards[i]\n # Update MAB state\n mab.update(played_arm-1, reward, contexts[i]) \n # Store arm and rewards history\n arm_history.append([played_arm, reward])\n # Store observed reward per round \n per_round_rewards.append(reward) \n # if the desired number of matching arms are found, stop and \n # return per round rewards \n if tround == nrounds: \n return(per_round_rewards)\n # Increment tround (only if arm playe by bandit equals arm in the \n # dataset)\n tround += 1 \n \n return(per_round_rewards)",
"def event_based_r_factor(self):\n # assign variables\n rain_energy = 'rain_energy'\n rain_volume = 'rain_volume'\n erosivity = 'erosivity'\n r_factor = 'r_factor'\n\n # derive rainfall energy (MJ ha^-1 mm^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_energy}\"\n \"=0.29*(1.-(0.72*exp(-0.05*{rain_intensity})))\".format(\n rain_energy=rain_energy,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive rainfall volume\n \"\"\"\n rainfall volume (mm)\n = rainfall intensity (mm/hr)\n * (rainfall interval (min)\n * (1 hr / 60 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_volume}\"\n \"= {rain_intensity}\"\n \"*({rain_interval}\"\n \"/60.)\".format(\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive event erosivity index (MJ mm ha^-1 hr^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosivity}\"\n \"=({rain_energy}\"\n \"*{rain_volume})\"\n \"*{rain_intensity}\"\n \"*1.\".format(\n erosivity=erosivity,\n rain_energy=rain_energy,\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive R factor (MJ mm ha^-1 hr^-1 yr^1)\n \"\"\"\n R factor (MJ mm ha^-1 hr^-1 yr^1)\n = EI (MJ mm ha^-1 hr^-1)\n / (rainfall interval (min)\n * (1 yr / 525600 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{r_factor}\"\n \"={erosivity}\"\n \"/({rain_interval}\"\n \"/525600.)\".format(\n r_factor=r_factor,\n erosivity=erosivity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_energy',\n 'rain_volume',\n 'erosivity'],\n flags='f')\n\n return r_factor",
"def train_by_episode(self):\n # only REINFORCE and REINFORCE with baseline\n # use the ff code\n # convert the rewards to returns\n rewards = []\n gamma = 0.99\n for item in self.memory:\n [_, _, _, reward, _] = item\n rewards.append(reward)\n # rewards = np.array(self.memory)[:,3].tolist()\n\n # compute return per step\n # return is the sum of rewards from t til end of episode\n # return replaces reward in the list\n for i in range(len(rewards)):\n reward = rewards[i:]\n horizon = len(reward)\n discount = [math.pow(gamma, t) for t in range(horizon)]\n return_ = np.dot(reward, discount)\n self.memory[i][3] = return_\n\n # train every step\n for item in self.memory:\n self.train(item, gamma=gamma)",
"def CalculateRoc2(dataArray,prefix,readsize,uniquehits,mappedreads,filename):\r\n starttime= time.time()\r\n uniquehits = float(uniquehits)\r\n readsize = float(readsize)\r\n \r\n \r\n entries = len(dataArray)\r\n \r\n\r\n resultmatrix = np.arange(entries*2)\r\n resultmatrix = resultmatrix.reshape(2,entries)\r\n \r\n maxrq = max(x.rq for x in dataArray)\r\n maxnm = max(x.nm[0] for x in dataArray)\r\n maxGaps= max(x.gaps[0] for x in dataArray)\r\n maxMism= max(x.mism[0] for x in dataArray)\r\n \r\n \r\n minrq = min(x.rq for x in dataArray)\r\n minnm = min(x.nm[0] for x in dataArray)\r\n minmq= min(x.mq[0] for x in dataArray)\r\n minGaps= min(x.gaps[0] for x in dataArray) \r\n minMism= min(x.mism[0] for x in dataArray) \r\n \r\n \r\n # adjust stepsize for rq since the score behaves the other way\r\n quants = [1,2,3,4,5]\r\n tempa = maxrq-minrq\r\n stepsize = tempa/5\r\n \r\n rqQuants = [round(minrq+(i-1)*stepsize,3) for i in quants]\r\n rqQuants.reverse()\r\n rqQuants[-1] =0 # last entry is rounded bigger than the smallest in the dataset\r\n \r\n nmQuants = [i*maxnm/5 for i in quants]\r\n GapsQuants = [i*maxGaps/5 for i in quants]\r\n MismQuants = [i*maxMism/5 for i in quants]\r\n\r\n rocvector = []\r\n \r\n # i = NM,l = RQ, k = MQ\r\n for l in quants: # RQ\r\n for k in quants: # GAPS\r\n for j in quants: # MISMATCH\r\n temparray = [m for m in dataArray if m.gaps[0] <= GapsQuants[k-1] and m.mism[0] <= MismQuants[j-1] and m.rq >=rqQuants[l-1]]\r\n \r\n\r\n tempids = [m.id for m in temparray]\r\n uniquereads = {}\r\n for i in xrange(0,len(tempids)):\r\n uniquereads[tempids[i]] = \"\"\r\n\r\n mappedreads = len(uniquereads)\r\n \r\n \r\n \r\n templength = len(temparray)\r\n \r\n if templength == 0:\r\n continue\r\n else:\r\n tempTP = sum(x.mr[0] for x in temparray)\r\n tempFP =templength-tempTP\r\n F = round((float(mappedreads)/ readsize) ,3)\r\n sens = round((tempTP/ uniquehits) * F,3)\r\n if tempFP == 0:\r\n spec = 0\r\n else:\r\n spec = round((tempFP / uniquehits) * F,3) \r\n \r\n rocvector.append([rqQuants[l-1],GapsQuants[k-1],MismQuants[j-1],tempTP,tempFP,templength,sens,spec,F])\r\n \r\n #print (\"%d\\t%d\\t%d\\t\" % (templength,tempTP,tempFP))\r\n\r\n #0 = NM 4 = TP 7 = sens\r\n #1 = RQ 5 = FP 8 = 1-spec\r\n #2 = GAPS 6 = P 9 = F\r\n #append needed for last entry in AUC calculation\r\n rocvector.append([0,0,0,0,0,0,0,0,0]) \r\n nproc = np.array(rocvector)\r\n \r\n #write the sens and specificity values from nproc according to the enumeration in line 149. \r\n #specificity is in cell -2\r\n # sensitivity is in cell -3\r\n sens = [i[-3] for i in nproc]\r\n spez = [i[-2] for i in nproc]\r\n \r\n # adjust ROC curve. 
It is necessary that it the 1-specificity ends in 1.\r\n # for the last record copy the predecessor in sens to it\r\n # and write 1 to specificity \r\n spez[-1] = 1\r\n sens[-1] = sens[-2]\r\n \r\n\r\n rocarray1 = np.array([sens,spez])\r\n rocarray1 = rocarray1.flatten('F')\r\n rocarray1= rocarray1.reshape((len(spez),2))\r\n \r\n rocarray = np.array([sens,spez])\r\n rocarray = rocarray.flatten('F')\r\n rocarray = rocarray.reshape((len(spez),2))\r\n rocarray = np.sort(rocarray.view('float,float'), order=['f0','f1'], axis=0).view(np.float)\r\n \r\n rocarrayCorrected = rocarray\r\n \r\n #print rocarrayCorrected\r\n # project points where...\r\n for m in range(len(rocarrayCorrected)-2,-1,-1):\r\n if (rocarrayCorrected[m,1] >= rocarrayCorrected[m+1,1]):\r\n rocarrayCorrected[m,1] = rocarrayCorrected[m+1,1]\r\n\r\n \r\n #print rocarrayCorrected \r\n plt.hold(True)\r\n plt.figure()\r\n plt.subplot(111)\r\n #plt.scatter(spez, sens, c='b', marker='o', facecolor='red')\r\n #plt.plot(rocarray[:,1], rocarray[:,0]\r\n plt.plot(rocarrayCorrected[:,1],rocarrayCorrected[:,0], marker='o', markersize=7,linestyle='--', color='r', label='projected')\r\n plt.plot(rocarray1[:,1], rocarray1[:,0], linestyle=\"None\",label='real',marker='.',color='g')\r\n plt.xlabel('1-specificity')\r\n plt.ylabel('sensitivity')\r\n plt.title(r'ROC:'+filename)\r\n plt.axis([-0.1,1.1,-0.1,1.1])\r\n plt.grid(True)\r\n plt.legend(loc='lower right')\r\n plt.tight_layout()\r\n plt.savefig(prefix + \"_ROC.pdf\",format='pdf')\r\n plt.clf \r\n \r\n \r\n AUC = trapezoidal_rule(rocarrayCorrected[:,1], rocarrayCorrected[:,0])\r\n \r\n fobj = open(prefix+\"_roctable.txt\",\"w\")\r\n fobj.write(\"RQ\\tGAPS\\tMM\\tPTP\\tFP\\tP\\tSn\\t1-Sp\\tF\\r\\n\")\r\n for i in xrange(0,len(rocvector),1):\r\n temp = [str(k) for k in rocvector[i]]\r\n tempstr = \"\\t\".join(temp)\r\n fobj.write(tempstr+\"\\r\\n\")\r\n\r\n endtime= time.time()\r\n return(round(AUC,3))",
"def calc_mrr(kb, test_file, delimiter=',', header=None, size=None):\n with open(test_file) as f:\n reader = csv.reader(f, delimiter=delimiter)\n if header:\n next(reader, None)\n i = 0\n test_triples = []\n for row in reader:\n pred = cleanse(pred)\n sub = cleanse(sub)\n ob = cleanse(ob)\n if not (sub.replace('_','').isalnum() and ob.replace('_','').isalnum()):\n continue\n test_triples.append((sub, pred, ob))\n mrr = 0\n print('Test set size: {}'.format(len(test_triples)))\n if not size:\n size = len(test_triples)\n for t in tqdm(test_triples[:size]):\n # TODO, this is inefficient, not batched.\n try:\n ob = t[2]\n ranks = kb.get_most_likely(t[0], t[1], '?', k=100)\n ranked_ents = [x['triple'][2] for x in ranks]\n if ob not in ranked_ents:\n continue\n mrr += 1 / (ranked_ents.index(ob) + 1)\n except:\n continue\n return mrr / size",
"def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0",
"def run_recurrent_dynamics(self,record_mean_max=True,activation='tanh',r_max=100):\n \n print '\\nRunning recurrent dynamics'\n\n activation_fun=get_activation_fun(activation,r_max)\n\n \n r_e=np.zeros((self.N_e,self.NX))\n r_i=np.zeros((self.N_i,self.NX))\n r=np.vstack([r_e,r_i])\n\n num_steps=int(self.recdyn_time/self.dt)\n\n if record_mean_max is True: \n self.rec_input_mean_vect=np.zeros((self.N,num_steps))\n self.rec_input_max_vect=np.zeros((self.N,num_steps))\n self.r_mean_vect=np.zeros((self.N,num_steps))\n self.r_max_vect=np.zeros((self.N,num_steps))\n\n self.r_evo=np.zeros((self.N,self.NX,self.recdyn_num_snaps))\n self.rec_input_evo=np.zeros((self.N,self.NX,self.recdyn_num_snaps))\n \n delta_snap=num_steps/self.recdyn_num_snaps\n \n snap_idx=0\n \n rec_input=np.zeros_like(r)\n start_clock=time.time()\n \n for t in xrange(num_steps):\n \n if np.remainder(t,delta_snap)==0:\n \n sl.print_progress(snap_idx,self.recdyn_num_snaps,start_clock=start_clock,step=1)\n\n self.rec_input_evo[:,:,snap_idx]=rec_input\n self.r_evo[:,:,snap_idx]=r\n snap_idx+=1\n\n if record_mean_max:\n self.rec_input_mean_vect[:,t]=np.mean(rec_input,axis=1)\n self.rec_input_max_vect[:,t]=np.max(rec_input,axis=1)\n self.r_mean_vect[:,t]=np.mean(r,axis=1)\n self.r_max_vect[:,t]=np.max(r,axis=1)\n \n # recurrent input \n rec_input=np.dot(self.W,r)\n \n # total input, add feed-forward inhibition if recurrent inhibition is not explicitely modeled\n tot_input=self.h+rec_input \n if self.N_i==0:\n tot_input+=self.r0\n \n tot_activation = activation_fun(tot_input)\n \n r=r+(self.dt/self.tau)*(-r+tot_activation)\n \n\n self.r=r",
"def run(self):\n qubit = self.qubit\n\n routine = self.routine\n\n # Saving some typing for parameters that are only read ;)\n allowed_delta_f = self.get_param_value(\"allowed_delta_f\")\n f_unit = self.get_param_value(\"f_unit\")\n f_factor = self.get_param_value(\"f_factor\")\n delta_f_unit = self.get_param_value(\"delta_f_unit\")\n delta_f_factor = self.get_param_value(\"delta_f_factor\")\n max_iterations = self.get_param_value(\"max_iterations\")\n transition = self.get_param_value(\"transition_name\")\n\n # Finding the ramsey experiment in the pipulse calibration\n pipulse_calib = routine.routine_steps[-1]\n ramsey = pipulse_calib.routine_steps[-1]\n\n # Transition frequency from last Ramsey\n freq = qubit[f\"{transition}_freq\"]()\n\n # Retrieving the frequency difference\n max_waiting_seconds = self.get_param_value(\"max_waiting_seconds\")\n for i in range(max_waiting_seconds):\n try:\n routine.delta_f = (\n ramsey.analysis.proc_data_dict[\n \"analysis_params_dict\"][\n qubit.name][\"exp_decay\"][\"new_qb_freq\"] -\n ramsey.analysis.proc_data_dict[\n \"analysis_params_dict\"][\n qubit.name][\"exp_decay\"][\"old_qb_freq\"])\n break\n except KeyError:\n log.warning(\n \"Could not find frequency difference between current \"\n \"and last Ramsey measurement, delta_f not updated\")\n break\n except AttributeError:\n # FIXME: Unsure if this can also happen on real set-up\n log.warning(\n \"Analysis not yet run on last Ramsey measurement, \"\n \"frequency difference not updated\")\n time.sleep(1)\n\n # Progress update\n if self.get_param_value('verbose'):\n print(f\"Iteration {routine.iteration}, {transition}-freq \"\n f\"{freq / f_factor} {f_unit}, frequency \"\n f\"difference = {routine.delta_f / delta_f_factor} \"\n f\"{delta_f_unit}\")\n\n # Check if the absolute frequency difference is small enough\n if np.abs(routine.delta_f) < allowed_delta_f:\n # Success\n if self.get_param_value('verbose'):\n print(f\"{transition}-frequency found to be \"\n f\"{freq / f_factor} {f_unit} within \"\n f\"{allowed_delta_f / delta_f_factor} \"\n f\"{delta_f_unit} of previous value.\")\n\n elif routine.iteration < max_iterations:\n # No success yet, adding a new rabi-ramsey and decision step\n if self.get_param_value('verbose'):\n print(f\"Allowed error (\"\n f\"{allowed_delta_f / delta_f_factor} \"\n f\"{delta_f_unit}) not yet achieved, adding new\"\n \" round of PiPulse calibration...\")\n\n routine.add_next_pipulse_step()\n\n step_settings = {'qubits': self.qubits}\n routine.add_step(\n FindFrequency.Decision,\n 'decision',\n step_settings,\n )\n\n routine.iteration += 1\n return\n\n else:\n # No success yet, reached max iterations\n msg = (f\"{self.step_label} routine finished for {qubit.name}, \"\n \"desired precision not necessarily achieved within the \"\n f\"maximum number of iterations ({max_iterations}).\")\n log.warning(msg)\n\n if self.get_param_value('verbose'):\n print(msg)\n\n if self.get_param_value('verbose'):\n # Printing termination update\n print(f\"FindFrequency routine finished: \"\n f\"{transition}-frequencies for {qubit.name} \"\n f\"is {freq / f_factor} {f_unit}.\")"
]
| [
"0.5661596",
"0.5583767",
"0.5244712",
"0.52179146",
"0.50538725",
"0.5035523",
"0.50179875",
"0.49651486",
"0.49581146",
"0.4953757",
"0.4952349",
"0.4940061",
"0.49252868",
"0.4917301",
"0.4902398",
"0.48844683",
"0.4883962",
"0.48734564",
"0.48699412",
"0.48697597",
"0.4863017",
"0.4863017",
"0.4847229",
"0.48343268",
"0.48321345",
"0.48156974",
"0.48125836",
"0.47925735",
"0.47918713",
"0.47877863"
]
| 0.72315216 | 0 |
Computes spike amplitude from voltage trace V and spike index t_spike. | def spike_amplitude(V, t_spike):
# handle no spike found
if t_spike is None:
return None
Vmax = V[t_spike]
Vmin = np.min(V[t_spike+1:t_spike+500])
return Vmax - Vmin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spike_amplitude_abf(abf, t_spike, epoch_start=3):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n\n return spike_amplitude(V, t_spike)",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def spike_width(t, V, t_spike, spike_amp):\n # handle no spike found\n if t_spike is None:\n return None\n\n Vmin = np.min(V[t_spike+1:t_spike+500])\n minval = np.max([t_spike - 100, 0])\n if len(V) > t_spike+500:\n maxval = -1\n else:\n maxval = t_spike+500\n id1 = find_nearest_idx(V[minval:t_spike], spike_amp/2 + Vmin) \\\n + t_spike - 100\n id2 = find_nearest_idx(V[t_spike+1:maxval], spike_amp/2 + Vmin) \\\n + t_spike + 1\n return t[id2] - t[id1]",
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]",
"def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int",
"def qvib(v):\n T = s.Symbol(\"T\")\n return 1.0 / (1.0 - s.exp(-1.0 * (h * v) / (k * T)))",
"def min_spike_frequency_tV(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n max_int = np.amax(intervals)\n return 1/max_int",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def get_spike_template_amplitudes(self, spike_ids, **kwargs):\n if self.model.amplitudes is None:\n return np.zeros(len(spike_ids))\n amplitudes = self.model.amplitudes[spike_ids]\n return amplitudes",
"def interspike_intervals(t, V):\n # first pass -- get number of spikes and locations\n spike_inds = all_spike_ind(t, V)\n n_spikes = len(spike_inds)\n\n if n_spikes == 0:\n return []\n\n # generate array to hold time intervals\n intervals = np.zeros((n_spikes-1), dtype=float)\n for ti in range(1, n_spikes):\n intervals[ti-1] = t[spike_inds[ti]] - t[spike_inds[ti-1]]\n\n return intervals",
"def time_offset(time_v, trace):\n # start by finding the max of the derivative\n signal = trace\n dt = np.diff(time_v)[0]\n\n # derivative of the signal, soothed\n d_signal = savgol_filter(np.diff(signal), 301, 3)\n\n # find peaks in the derivative to find the steepest point\n idx = peakutils.indexes(d_signal, .5, 3000)\n if len(idx) == 0:\n return [np.nan for _ in time_v]\n idx_s = np.flipud(idx[d_signal[idx].argsort()])[0]\n\n try:\n time_p = peakutils.interpolate(time_v[:-1], d_signal, [idx_s])[0]\n except (RuntimeError, ValueError) as e:\n time_p = time_v[idx_s]\n\n n_shift = int(time_p / dt)\n return shift(trace, - n_shift)",
"def evap(self, lai, tl, ta, qa):#average ta and tl!!\n\t fsev = max(self.TAI*self.GA*1000.*RHO_A/RHO_W*((.622*esat((tl+ta)/2)/P_ATM)-qa), 0.)\n\t\t#if the amount of water in tank is less than amount that will evaporate in timestep dt, then what's left will evaporate \n\t if self.tx*self.ZT*self.TAI*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*self.TAI*10**6 <= fsev*dt:\n\t return (self.tx*self.ZT*self.TAI*10**6/dt)\n\t else:\n\t return fsev",
"def analyzeIV(t, V, I, tw, thr):\n ntraces = numpy.shape(V)[0]\n vss = []\n vmin = []\n vm = []\n ic = []\n nspikes = []\n ispikes = []\n tmin = []\n fsl = []\n fisi = []\n for j in range(0, ntraces):\n ts = tw[0]\n te = tw[1]\n td = tw[2]\n ssv = measure('mean', t, V[j,:], te-td, te)\n ssi = measure('mean', t, I[j,:], te-td, te)\n rvm = measure('mean', t, V[j,:], 0.0, ts-1.0)\n minv = measure('min', t, V[j,:], ts, te)\n spk = findspikes(t, V[j,:], thr, t0=ts, t1=te)\n nspikes.append(count_spikes(spk)) # build spike list\n ispikes.append(ssi[0])\n if nspikes[-1] >= 1:\n fsl.append(spk[0])\n else:\n fsl.append(None)\n if nspikes[-1] >= 2:\n fisi.append(spk[1]-spk[0])\n else:\n fisi.append(None)\n vm.append(rvm[0])\n if ssi[0] < 0.0: # just for hyperpolarizing pulses...\n ic.append(ssi[0])\n vss.append(ssv[0]) # get steady state voltage\n vmin.append(minv[0]) # and min voltage\n tmin.append(minv[1]) # and min time\n\n return({'I': numpy.array(ic), 'Vmin': numpy.array(vmin), 'Vss': numpy.array(vss),\n 'Vm': numpy.array(vm), 'Tmin': numpy.array(tmin), \n 'Ispike': numpy.array(ispikes), 'Nspike': numpy.array(nspikes), \n 'FSL': numpy.array(fsl), 'FISI': numpy.array(fisi)})",
"def true_vert_amp(self, index):\n index = ct.c_int(index)\n amp = ct.c_int()\n self.lib.GetVSAmplitudeValue(index, ct.pointer(amp))\n return amp.value",
"def tke(self):\n if len(self._obj.t) < 2:\n raise ValueError('TKE is not defined for a single vector field, use .piv.ke()')\n\n self._obj['w'] = (self._obj['u'] - self._obj['u'].mean(dim='t'))**2 + \\\n (self._obj['v'] - self._obj['v'].mean(dim='t'))**2\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'({vel_units})^2')\n return self._obj",
"def tot_KE(V):\n KE = 0.0\n for i in range(len(V)):\n for j in range(3):\n KE += (V[i, j] * V[i, j]) / 2.0\n return KE",
"def amplitude_diff(i, k):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, [i, (i + k) % N]], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)",
"def kA_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n return i[0] * (o[2] - i[2]) + self.kA.val * td_log",
"def _get_alpha(self, m_t, v_t):\n return max(0, ((-m_t * self._psi \n + math.sqrt((m_t ** 2 * self._phi ** 4) \n / 4 + v_t * self._phi ** 2 * self._xi)) \n / (v_t * self._xi)))",
"def set_vert_clock(self, index):\n self.lib.SetVSAmplitude(ct.c_int(index))",
"def forward_propagate(vnx, spikes, spikes_delayed, vpx, indices, tau):\n for j, k in enumerate(indices):\n\n if k < 0:\n vnx[j] = 0\n else:\n vnx[j] = vpx[k] * np.exp(-(spikes[j] - spikes_delayed[k]) / tau)",
"def svpice(t):\n A0=0.7859063157e0\n A1=0.357924232e-1\n A2=-0.1292820828e-3\n A3=0.5937519208e-6\n A4=0.4482949133e-9\n A5=0.2176664827e-10\n T = t - 273.16\n e = pow(10.0,A0+T*(A1 + T*(A2 + T*(A3 + T*(A4 + T*A5)))))\n return e",
"def compute_spike_moving_average(self, tau=0.005):\n rho = 1 - self.DT / tau\n rav = np.zeros_like(self.R)\n\n rav[:, 0] = self.R[:, 0] * (1 - rho)\n for i in range(1, self.N_T):\n rav[:, i] = rho * rav[:, i - 1] + (1 - rho) * self.R[:, i]\n\n self.rav = rav / self.DT",
"def wind_chill(T_a, v):\r\n return 13.12 + 0.6215*(T_a) - 11.37*(v)**0.16 + 0.3965*(T_a)*(v)**0.16",
"def compute_a(xvf):\n diffs = xvf.shift(-1) - xvf.shift(1)\n dt = xvf.iloc[1][\"t\"] - xvf.iloc[0][\"t\"]\n xva = pd.DataFrame({\n \"t\": xvf[\"t\"],\n \"x\": xvf[\"x\"],\n \"v\": xvf[\"v\"],\n \"a\": diffs[\"v\"] / (2. * dt)\n },\n index=xvf.index)\n xva = xva[['t', 'x', 'v', 'a']]\n xva.index.name = '#t'\n\n return xva.dropna()",
"def compute_a(xvf):\n diffs=xvf.shift(-1)-xvf.shift(1)\n dt=xvf.iloc[1][\"t\"]-xvf.iloc[0][\"t\"]\n xva=pd.DataFrame({\"t\":xvf[\"t\"],\"x\":xvf[\"x\"],\"v\":xvf[\"v\"],\"a\":diffs[\"v\"]/(2.*dt)},index=xvf.index)\n xva = xva[['t', 'x', 'v', 'a']]\n xva.index.name='#t'\n\n return xva.dropna()",
"def vi1(t):\n u_t = 1*(t>0)\n return (np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t)) * u_t",
"def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V"
]
| [
"0.6659725",
"0.65162295",
"0.64531356",
"0.64009815",
"0.63487196",
"0.60847944",
"0.59346",
"0.5815772",
"0.5731569",
"0.56815606",
"0.5331351",
"0.5330158",
"0.53281474",
"0.5322849",
"0.5302969",
"0.52671605",
"0.52634954",
"0.52188766",
"0.5206136",
"0.51838505",
"0.5182256",
"0.5172093",
"0.5160012",
"0.5146488",
"0.50988966",
"0.50975746",
"0.50680155",
"0.50641537",
"0.50501066",
"0.5036822"
]
| 0.8268467 | 0 |
Computes spike amplitude from abf object with epoch index and the index of the spike time. Note that t_spike should be found within the same epoch, otherwise there will be an index mismatch. | def spike_amplitude_abf(abf, t_spike, epoch_start=3):
p0 = abf.sweepEpochs.p1s[epoch_start]
V = abf.sweepY[p0:-1]
return spike_amplitude(V, t_spike) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)",
"def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n return None\n Vmax = V[t_spike]\n Vmin = np.min(V[t_spike+1:t_spike+500])\n\n return Vmax - Vmin",
"def spike_width_abf(abf, t_spike, spike_amp, epoch_start=3):\n # handle no spike found\n if t_spike is None:\n return None\n p0 = abf.sweepEpochs.p1s[epoch_start]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n return spike_width(t, V, t_spike, spike_amp)",
"def spike_latency_abf(abf, epochstart):\n p0 = abf.sweepEpochs.p1s[epochstart]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n I = abf.sweepC[p0:-1]\n return spike_latency(t, I, V)",
"def first_spike_tind_abf(abf, epoch_start, startind=0):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n return first_spike_tind(V, startind=startind)",
"def kA_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n return i[0] * (o[2] - i[2]) + self.kA.val * td_log",
"def eta(self):\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n if self._is_ts:\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n data = self.data[i]\r\n u = np.unique(self.events[i])\r\n event_types = u[np.unique(self.events[i]) != 0]\r\n h[i] = np.empty((event_types.shape[0], self.len_et),\r\n dtype=complex)\r\n\r\n # This offset is used to pull the event indices below, but we\r\n # have to broadcast it so the shape of the resulting idx+offset\r\n # operation below gives us the (nevents, len_et) array we want,\r\n # per channel.\r\n offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n # Loop over event types\r\n for e_idx in range(event_types.shape[0]):\r\n idx = np.where(self.events[i] == event_types[e_idx])[0]\r\n event_trig = data[idx + offset]\r\n #Correct baseline by removing the first point in the series\r\n #for each channel:\r\n if self._correct_baseline:\r\n event_trig -= event_trig[0]\r\n\r\n h[i][e_idx] = np.mean(event_trig, -1)\r\n\r\n #In case the input events are an Events:\r\n else:\r\n #Get the indices necessary for extraction of the eta:\r\n add_offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n\r\n idx = (self.events.time / self.sampling_interval).astype(int)\r\n\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n #If this is a list with one element:\r\n if self._len_h == 1:\r\n event_trig = self.data[0][idx + add_offset]\r\n #Otherwise, you need to index straight into the underlying data\r\n #array:\r\n else:\r\n event_trig = self.data.data[i][idx + add_offset]\r\n\r\n h[i] = np.mean(event_trig, -1)\r\n\r\n h = np.array(h).squeeze()\r\n return ts.TimeSeries(data=h,\r\n sampling_interval=self.sampling_interval,\r\n t0=self.offset * self.sampling_interval,\r\n time_unit=self.time_unit)",
"def fit_average_event(self, tb, average_event, debug=False, label='', inittaus=[0.001, 0.005], initdelay=None):\n #tsel = np.argwhere(self.avgeventtb > self.tpre)[0] # only fit data in event, not baseline\n tsel = 0 # use whole averaged trace\n self.tsel = tsel\n self.tau1 = inittaus[0]\n self.tau2 = inittaus[1]\n self.tau2_range = 10.\n self.tau1_minimum_factor = 5.\n time_past_peak = 2.5e-4\n self.fitted_tau1 = np.nan\n self.fitted_tau2 = np.nan\n self.Amplitude = np.nan\n # peak_pos = np.argmax(self.sign*self.avgevent[self.tsel:])\n # decay_fit_start = peak_pos + int(time_past_peak/self.dt)\n # init_vals = [self.sign*10., 1.0, 4., 0.]\n # init_vals_exp = [20., 5.0]\n # bounds_exp = [(0., 0.5), (10000., 50.)]\n \n res, rdelay = self.event_fitter(tb, average_event, time_past_peak=time_past_peak, initdelay=initdelay, debug=debug, label=label)\n # print('rdelay: ', rdelay)\n self.fitresult = res.x\n self.Amplitude = self.fitresult[0]\n self.fitted_tau1 = self.fitresult[1]\n self.fitted_tau2 = self.fitresult[2]\n self.bfdelay = rdelay\n self.DC = 0. # best_vals[3]\n self.avg_best_fit = self.doubleexp(self.fitresult, tb[self.tsel:],\n np.zeros_like(tb[self.tsel:]), risepower=self.risepower, mode=0, fixed_delay=self.bfdelay)\n self.avg_best_fit = self.sign*self.avg_best_fit\n fiterr = np.linalg.norm(self.avg_best_fit-average_event[self.tsel:])\n self.avg_fiterr = fiterr\n ave = self.sign*average_event\n ipk = np.argmax(ave)\n pk = ave[ipk]\n p10 = 0.1*pk\n p90 = 0.9*pk\n p37 = 0.37*pk\n try:\n i10 = np.argmin(np.fabs(ave[:ipk]-p10))\n except:\n self.fitted = False\n return\n i90 = np.argmin(np.fabs(ave[:ipk]-p90))\n i37 = np.argmin(np.fabs(ave[ipk:]-p37))\n self.risetenninety = self.dt*(i90-i10)\n self.decaythirtyseven = self.dt*(i37-ipk)\n self.Qtotal = self.dt*np.sum(average_event[self.tsel:])\n self.fitted = True",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes",
"def __call__(self, epoch):\n decay = (1 - (epoch / float(self.maxEpochs))) ** self.power\n alpha = self.initAlpha * decay\n \n # return alpha\n return float(alpha)",
"def amplitude_diff(i, k):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, [i, (i + k) % N]], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)",
"def Heston_fft(self,alpha,n,B,K):\r\n bt = time.time()\r\n r = self.r\r\n T = self.T\r\n S0 = self.S0\r\n N = 2**n\r\n Eta = B / N\r\n Lambda_Eta = 2 * math.pi / N\r\n Lambda = Lambda_Eta / Eta\r\n \r\n J = np.arange(1,N+1,dtype = complex)\r\n vj = (J-1) * Eta\r\n m = np.arange(1,N+1,dtype = complex)\r\n Beta = np.log(S0) - Lambda * N / 2\r\n km = Beta + (m-1) * Lambda\r\n \r\n ii = complex(0,1)\r\n \r\n Psi_vj = np.zeros(len(J),dtype = complex)\r\n \r\n for zz in range(0,N):\r\n u = vj[zz] - (alpha + 1) * ii\r\n numer = self.Heston_cf(u)\r\n denom = (alpha + vj[zz] * ii) * (alpha + 1 + vj[zz] * ii)\r\n \r\n Psi_vj [zz] = numer / denom\r\n \r\n # Compute FTT\r\n xx = (Eta/2) * Psi_vj * np.exp(-ii * Beta * vj) * (2 - self.dirac(J-1))\r\n zz = np.fft.fft(xx)\r\n \r\n # Option price\r\n Mul = np.exp(-alpha * np.array(km)) / np.pi\r\n zz2 = Mul * np.array(zz).real\r\n k_List = list(Beta + (np.cumsum(np.ones((N, 1))) - 1) * Lambda)\r\n Kt = np.exp(np.array(k_List))\r\n \r\n Kz = []\r\n Z = []\r\n for i in range(len(Kt)):\r\n if( Kt[i]>1e-16 )&(Kt[i] < 1e16)& ( Kt[i] != float(\"inf\"))&( Kt[i] != float(\"-inf\")) &( zz2[i] != float(\"inf\"))&(zz2[i] != float(\"-inf\")) & (zz2[i] is not float(\"nan\")):\r\n Kz += [Kt[i]]\r\n Z += [zz2[i]]\r\n tck = interpolate.splrep(Kz , np.real(Z))\r\n price = np.exp(-r*T)*interpolate.splev(K, tck).real\r\n et = time.time()\r\n \r\n runt = et-bt\r\n\r\n return(price,runt)",
"def test_aft_equals1(self, test_peak_idx):\n test_data = self.get_test_peaks(self.n_top)\n test_data[test_peak_idx]['area_per_channel'][:self.n_top] = 1\n test_data[test_peak_idx]['area'] = np.sum(test_data[test_peak_idx]['area_per_channel'])\n peaks = self.peaks_basics_compute(test_data)\n assert peaks[test_peak_idx]['area_fraction_top'] == 1",
"def getKlauder(f,t,T=5.0):\n assert len(f) == 2, 'Klauder wavelet needs 2 frequencies as input'\n\n k = np.diff(f)/T\n f0 = np.sum(f)/2.0\n wav = np.real(np.sin(pi*k*t*(T-t))/(pi*k*t)*np.exp(2*pi*1j*f0*t))\n return wav",
"def kA_func(self):\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_mix_ph(i1, T0=self.inl[0].T.val_SI)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2:\n T_i1 = T_o2 + 0.01\n if T_i1 <= T_o2:\n T_o2 = T_i1 - 0.01\n if T_i1 <= T_o2:\n T_o1 = T_i2 + 0.02\n if T_o1 <= T_i2:\n T_i2 = T_o1 - 0.02\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def test_spike_realdata(self):\n suspect_threshold = 0.5\n fail_threshold = 1\n\n arr = [-0.189, -0.0792, -0.0122, 0.0457, 0.0671, 0.0213, -0.0488, -0.1463, -0.2438, -0.3261, -0.3871, -0.4054,\n -0.3932, -0.3383, -0.2804, -0.2347, -0.2134, -0.2347, -0.2926, -0.3597, -0.442, -0.509, 0, -0.5944,\n -0.57, -0.4267, -0.2926, -0.1585, -0.0945, -0.0762]\n\n expected = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=suspect_threshold,\n fail_threshold=fail_threshold\n ),\n expected\n )",
"def compute_spike_moving_average(self, tau=0.005):\n rho = 1 - self.DT / tau\n rav = np.zeros_like(self.R)\n\n rav[:, 0] = self.R[:, 0] * (1 - rho)\n for i in range(1, self.N_T):\n rav[:, i] = rho * rav[:, i - 1] + (1 - rho) * self.R[:, i]\n\n self.rav = rav / self.DT",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def spike_width(t, V, t_spike, spike_amp):\n # handle no spike found\n if t_spike is None:\n return None\n\n Vmin = np.min(V[t_spike+1:t_spike+500])\n minval = np.max([t_spike - 100, 0])\n if len(V) > t_spike+500:\n maxval = -1\n else:\n maxval = t_spike+500\n id1 = find_nearest_idx(V[minval:t_spike], spike_amp/2 + Vmin) \\\n + t_spike - 100\n id2 = find_nearest_idx(V[t_spike+1:maxval], spike_amp/2 + Vmin) \\\n + t_spike + 1\n return t[id2] - t[id1]",
"def time_window_eigs(self, t0, tend):\n indexes = self.time_window_bins(t0, tend)\n return np.concatenate([self.operator.eigenvalues[idx]\n for idx in indexes])",
"def asf_to_amplitude(self, asf):\n return 20 * np.log10(self._ensure_number(asf) / 2 ** self.ASF_WIDTH)",
"def evap(self, lai, tl, ta, qa):#average ta and tl!!\n\t fsev = max(self.TAI*self.GA*1000.*RHO_A/RHO_W*((.622*esat((tl+ta)/2)/P_ATM)-qa), 0.)\n\t\t#if the amount of water in tank is less than amount that will evaporate in timestep dt, then what's left will evaporate \n\t if self.tx*self.ZT*self.TAI*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*self.TAI*10**6 <= fsev*dt:\n\t return (self.tx*self.ZT*self.TAI*10**6/dt)\n\t else:\n\t return fsev",
"def time_constant_abf(abf, epoch_start):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n p1 = abf.sweepEpochs.p1s[epoch_start + 1]\n\n t = abf.sweepX[p0:p1] - abf.sweepX[p0]\n V = abf.sweepY[p0:p1]\n\n return time_constant(t, V)",
"def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms):\n # Build array of spike counts (one per spike train)\n spike_counts = np.array([len(st) for st in spiketrains])\n\n # Compute FF\n if all(count == 0 for count in spike_counts):\n # empty list of spiketrains reaches this branch, and NaN is returned\n return np.nan\n\n if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):\n if not is_time_quantity(warn_tolerance):\n raise TypeError(\"'warn_tolerance' must be a time quantity.\")\n durations = [(st.t_stop - st.t_start).simplified.item()\n for st in spiketrains]\n durations_min = min(durations)\n durations_max = max(durations)\n if durations_max - durations_min > warn_tolerance.simplified.item():\n warnings.warn(\"Fano factor calculated for spike trains of \"\n \"different duration (minimum: {_min}s, maximum \"\n \"{_max}s).\".format(_min=durations_min,\n _max=durations_max))\n\n fano = spike_counts.var() / spike_counts.mean()\n return fano",
"def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)",
"def kA_func(self):\n\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_bp_p(i1)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2 and not self.inl[0].T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not self.outl[1].T.val_set:\n T_o2 = T_i1 - 0.5\n\n if T_o1 <= T_i2 and not self.outl[0].T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not self.inl[1].T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]",
"def sinegauss_FT_a(f, t0, f0, a):\n return (np.sqrt(np.pi / a)\n * np.exp(-2j * np.pi * f * t0)\n * np.exp(-np.pi ** 2 * (f - f0) ** 2 / a))",
"def input_f(t,decay=0.5,freq=1.5):\n u_t = 1*(t>0)\n return np.cos(freq*t)*np.exp(-decay*t) * u_t"
]
| [
"0.71000993",
"0.66413033",
"0.59593457",
"0.57064515",
"0.560591",
"0.5349984",
"0.52664065",
"0.5201257",
"0.51858026",
"0.5102255",
"0.50289434",
"0.5015051",
"0.501375",
"0.5008975",
"0.49718475",
"0.49624377",
"0.49535725",
"0.4944559",
"0.49286938",
"0.49224788",
"0.49128255",
"0.4897496",
"0.48657098",
"0.48534518",
"0.483654",
"0.48335496",
"0.4832121",
"0.4827458",
"0.48017347",
"0.47594595"
]
| 0.8097871 | 0 |
Finds index in an array `arr` closest to value `val`. | def find_nearest_idx(arr, val):
arr = np.asarray(arr)
idx = (np.abs(arr - val)).argmin()
return idx | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_nearest(arr, val):\n\n arr = np.asarray(arr)\n idx = (np.abs(arr - val)).argmin()\n return idx, arr[idx]",
"def find_nearest(arr, val):\n arr = np.asarray(arr)\n idx = (np.abs(arr - val)).argmin()\n return idx, arr[idx]",
"def find_index(arr, val):\n index = 0\n min_differ = abs(arr[0] - val)\n for i in range(1, len(arr)):\n if abs(arr[i] - val) < min_differ:\n min_differ = abs(arr[i] - val)\n index = i\n return index",
"def findNearestIndex(arr,value):\n arr = np.array(arr)\n index=(np.abs(arr-value)).argmin()\n return index",
"def _get_index_closest_val(list, val):\n\n return min(range(len(list)), key=lambda i: abs(list[i]-val))",
"def find_nearest_idx(array: np.array, value: float) -> int:\n return (np.abs(array - value)).argmin()",
"def find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def nearestIndex(array, value):\n idx = np.searchsorted(array, value, side='left')\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return idx-1\n return idx",
"def __find_index(arr, val):\n if val is not None:\n return numpy.searchsorted(arr, val)\n else:\n return val",
"def closest(array, value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return idx",
"def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return idx",
"def find_nearest(array, value):\n if isinstance(array, list):\n array = np.array(array)\n idx = (np.abs(array-value)).argmin()\n return idx",
"def find_idx(array, value):\n\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (\n idx == len(array)\n or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])\n ):\n return idx - 1\n else:\n return idx",
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx], idx",
"def find_nearest_idx_tol(array, value, tol=dt.timedelta(days=1.)):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n if abs(array[idx] - value) <= tol:\n ret_val = idx\n else:\n ret_val = np.nan\n return ret_val",
"def find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx]",
"def find_closest(array, value):\n array = np.asarray(array)\n index = (np.abs(array - value)).argmin()\n return index, array[index]",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]",
"def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx], idx",
"def find_nearest(value,array):\n idx = numpy.abs(value-array).argmin()\n return idx,array[idx]",
"def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1",
"def binary_search_find_first_ge_than_value(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n\n if mid_value >= value:\n if mid == 0 or (arr[mid - 1] < value):\n return mid\n else:\n end = mid - 1\n elif mid_value < value:\n start = mid + 1\n return -1",
"def find_nearest_point_1D(array_to_search, value, print_nearest_val = True): \n array_to_search = np.asarray(array_to_search)\n idx = (np.abs(array_to_search - value)).argmin()\n \n if(print_nearest_val):\n print(\"Nearest Value:\",array_to_search[idx])\n \n return idx",
"def find_nearest(array, value):\n # handle pandas Series case\n if isinstance(array, pd.Series):\n array = array.values\n # add units if none\n value = value * u.dimensionless_unscaled\n array = array * u.dimensionless_unscaled\n value = value.to(array.unit)\n value = value.value\n array = array.value\n ds = []\n for i in range(len(array)):\n ds.append(array[i] - value)\n idx = (np.abs(ds)).argmin()\n return idx",
"def find_nearest(array, value, index=False):\n \n idx = (numpy.abs(numpy.array(array) - value)).argmin()\n error = array[idx] - value\n if index:\n return idx, error\n else:\n return array[idx], error",
"def binary_search_find_first_le_than_value(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n\n if mid_value <= value:\n if mid == len(arr) - 1 or (arr[mid + 1] > value):\n return mid\n else:\n start = mid + 1\n elif mid_value > value:\n end = mid - 1\n return -1",
"def index_equals_value_search2(arr):\n start = 0\n end = len(arr) - 1\n while start <= end:\n mid = int((end + start)/2)\n print('mid = {}'.format(mid))\n if arr[mid] == mid:\n return mid\n elif arr[mid] < mid:\n start = mid + 1\n elif arr[mid] > mid:\n end = mid - 1\n return -1"
]
| [
"0.85350364",
"0.85226977",
"0.8322245",
"0.78922063",
"0.77532303",
"0.76765335",
"0.7673342",
"0.76364684",
"0.7635272",
"0.76115376",
"0.75738764",
"0.7554509",
"0.7554509",
"0.7552452",
"0.73447585",
"0.73183924",
"0.72771406",
"0.72477376",
"0.7222891",
"0.721653",
"0.721653",
"0.715731",
"0.7124785",
"0.70766383",
"0.69327694",
"0.6920377",
"0.6883827",
"0.68264306",
"0.6714148",
"0.66676044"
]
| 0.8975662 | 0 |
Computes spike width for time t, voltage trace V, index t_spike, and voltage amplitude `spike_amp`. | def spike_width(t, V, t_spike, spike_amp):
# handle no spike found
if t_spike is None:
return None
Vmin = np.min(V[t_spike+1:t_spike+500])
minval = np.max([t_spike - 100, 0])
if len(V) > t_spike+500:
maxval = -1
else:
maxval = t_spike+500
id1 = find_nearest_idx(V[minval:t_spike], spike_amp/2 + Vmin) \
+ t_spike - 100
id2 = find_nearest_idx(V[t_spike+1:maxval], spike_amp/2 + Vmin) \
+ t_spike + 1
return t[id2] - t[id1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spike_width_abf(abf, t_spike, spike_amp, epoch_start=3):\n # handle no spike found\n if t_spike is None:\n return None\n p0 = abf.sweepEpochs.p1s[epoch_start]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n return spike_width(t, V, t_spike, spike_amp)",
"def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n return None\n Vmax = V[t_spike]\n Vmin = np.min(V[t_spike+1:t_spike+500])\n\n return Vmax - Vmin",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def min_spike_frequency_tV(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n max_int = np.amax(intervals)\n return 1/max_int",
"def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int",
"def spike_amplitude_abf(abf, t_spike, epoch_start=3):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n\n return spike_amplitude(V, t_spike)",
"def model_wave(time, period, width) -> float:\n cur_time = time % period\n half_width = width//2\n if cur_time < half_width:\n return float(cur_time) / half_width\n elif cur_time < width:\n return 1 - float(cur_time - half_width) / half_width\n else:\n return 0",
"def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes",
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def half_space_cooling_waermefluss(k, T0, T1, kappa, t):\n return k * (T1 - T0) / (numpy.sqrt(math.pi * kappa * t))",
"def preprocess_spikes(data, dt, nlag=0):\n for d in data:\n ntrials = len(d[\"spikes\"])\n nframes = d[\"duration\"]\n nbins = nframes * int(d[\"stim_dt\"] / dt) + nlag\n spike_v = np.zeros((nbins, ntrials), dtype='i')\n for i, trial in enumerate(d[\"spikes\"]):\n idx = (trial / dt).astype('i')\n # make sure all spikes are in bounds\n idx = idx[(idx >= 0) & (idx < nbins)]\n spike_v[idx, i] = 1\n d[\"spike_v\"] = spike_v\n d[\"spike_dt\"] = dt\n d[\"psth\"] = np.sum(spike_v, axis=1)\n return data",
"def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')",
"def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)",
"def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return (SpikeCount, splitInterval)",
"def test_temporal_smoothing_reduce_length(PM_ds_control_3d_full):\r\n smooth = 10\r\n tsmooth_kws = {\"time\": smooth}\r\n actual = temporal_smoothing(\r\n PM_ds_control_3d_full, tsmooth_kws=tsmooth_kws\r\n ).time.size\r\n expected = PM_ds_control_3d_full.time.size - smooth + 1\r\n assert actual == expected",
"def time_filter(self, width, kernel='t', bgwindow=4, show=0):\n\n print 'Applying fft time filter. Assumes no missing data in time.'\n\n if not isinstance(width, types.ListType):\n width = [width] * len(self.chans)\n\n # time filter by convolution. functions have different normlizations. m has central peak integral=1 and total is 0. others integrate to 1, so they don't do bg subtraction.\n kernelset = {} # optionally could make set of kernels. one per width needed. (used only by 'w' for now).\n\n if kernel == 'm':\n from scipy import signal\n print 'Applying mexican hat filter. Note that effective width is somewhat larger than equivalent tophat width.'\n for w in n.unique(width):\n kernel = signal.wavelets.ricker(len(self.data), w) # mexican hat (ricker) function can have given width and integral=0, so good for smoothing in time and doing bg-subtraction at same time! width of averaging is tied to width of bgsub though...\n kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize to have peak integral=1, thus outside integral=-1.\n elif kernel == 't':\n import math\n print 'Applying tophat filter.'\n for w in n.unique(width):\n kernel = n.zeros(len(self.data)) # tophat.\n onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(math.ceil(w/2.)))\n kernel[onrange] = 1.\n kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize to have peak integral=1, thus outside integral=-1.\n elif kernel == 'b':\n import math\n print 'Applying tophat filter with bg subtraction (square mexican hat).'\n for w in n.unique(width):\n kernel = n.zeros(len(self.data)) # tophat.\n onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(math.ceil(w/2.)))\n kernel[onrange] = 1.\n offrange = range(len(kernel)/2 - (bgwindow/2+w)+1, len(kernel)/2-w+1) + range(len(kernel)/2 + w, len(kernel)/2 + (w+bgwindow/2))\n offrange = range(len(kernel)/2 - (bgwindow+w)/2, len(kernel)/2-w/2) + range(len(kernel)/2 + int(math.ceil(w/2.)), len(kernel)/2 + int(math.ceil((w+bgwindow)/2.)))\n kernel[offrange] = -1.\n posnorm = n.where(kernel>0, kernel, 0).sum() # find normalization of positive\n negnorm = n.abs(n.where(kernel<0, kernel, 0).sum()) # find normalization of negative\n kernelset[w] = n.where(kernel>0, kernel/posnorm, kernel/negnorm) # pos and neg both sum to 1/-1, so total integral=0\n elif kernel == 'g':\n from scipy import signal\n print 'Applying gaussian filter. Note that effective width is much larger than equivalent tophat width.'\n for w in n.unique(width):\n kernel = signal.gaussian(len(self.data), w) # gaussian. 
peak not quite at 1 for widths less than 3, so it is later renormalized.\n kernelset[w] = kernel / (w * n.sqrt(2*n.pi)) # normalize to pdf, not peak of 1.\n elif kernel == 'w':\n import math\n print 'Applying tophat filter that varies with channel.'\n for w in n.unique(width):\n kernel = n.zeros(len(self.data)) # tophat.\n onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(math.ceil(w/2.)))\n kernel[onrange] = 1.\n kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize to have peak integral=1, thus outside integral=-1.\n\n if show:\n for kernel in kernelset.values():\n p.plot(kernel,'.')\n p.title('Time filter kernel')\n p.show()\n\n # take ffts (in time)\n datafft = n.fft.fft(self.data, axis=0)\n kernelsetfft = {}\n for w in n.unique(width):\n kernelsetfft[w] = n.fft.fft(n.roll(kernelset[w], len(self.data)/2)) # seemingly need to shift kernel to have peak centered near first bin if convolving complex array (but not for real array?)\n\n # filter by product in fourier space\n for i in range(self.nbl): # **can't find matrix product I need, so iterating over nbl, chans, npol**\n for j in range(len(self.chans)):\n for k in range(self.npol):\n datafft[:,i,j,k] = datafft[:,i,j,k]*kernelsetfft[width[j]] # index fft kernel by twidth\n\n # ifft to restore time series\n self.data = n.ma.masked_array(n.fft.ifft(datafft, axis=0), self.flags[:self.nints,:, self.chans,:] == 0)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real",
"def extract_spike_features(time, current, voltage, start=0.1, end=0.7, fil=10):\n\n df = pd.DataFrame()\n df_related_features = pd.DataFrame()\n for c, curr in enumerate(current):\n current_array = curr * np.ones_like(time)\n start_index = (np.abs(time - start)).argmin() # Find closest index where the injection current starts\n end_index = (np.abs(time - end)).argmin() # Find closest index where the injection current ends\n current_array[:start_index] = 0\n current_array[end_index:len(current_array)] = 0\n EphysObject = efex.EphysSweepFeatureExtractor(t=time, v=voltage[:, c], i=current_array, start=start, \\\n end=end, filter=fil)\n EphysObject.process_spikes()\n\n # Adding peak_height (mV) + code for maximum frequency determination (see further)\n spike_count = 0\n if EphysObject._spikes_df.size:\n EphysObject._spikes_df['peak_height'] = EphysObject._spikes_df['peak_v'].values - \\\n EphysObject._spikes_df['threshold_v'].values\n spike_count = EphysObject._spikes_df['threshold_i'].values.size\n df = pd.concat([df, EphysObject._spikes_df], sort=True)\n\n # Some easily found extra features\n df_features = EphysObject._sweep_features\n\n # Adding spike count\n df_features.update({'spike_count': spike_count})\n\n # Adding spike frequency adaptation (ratio of spike frequency of second half to first half)\n SFA = np.nan\n half_stim_index = ft.find_time_index(time, np.float(start + (end - start) / 2))\n if spike_count > 5: # We only consider traces with more than 8.333 Hz = 5/600 ms spikes here\n # but in the end we only take the trace with the max amount of spikes\n\n if np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index) != 0:\n SFA = np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] > half_stim_index) / \\\n np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index)\n\n df_features.update({'SFA': SFA})\n\n # Adding current (pA)\n df_features.update({'current': curr})\n\n # Adding membrane voltage (mV)\n df_features.update({'resting_membrane_potential': EphysObject._get_baseline_voltage()})\n\n # Adding voltage deflection to steady state (mV)\n voltage_deflection_SS = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n # voltage_deflection_v, voltage_deflection_i = EphysObject.voltage_deflection() # = old way: max deflection\n df_features.update({'voltage_deflection': voltage_deflection_SS})\n\n # Adding input resistance (MOhm)\n input_resistance = np.nan\n if not ('peak_i' in EphysObject._spikes_df.keys()) and not curr == 0: # We only calculate input resistances\n # from traces without APs\n input_resistance = (np.abs(voltage_deflection_SS - EphysObject._get_baseline_voltage()) * 1000) / np.abs(\n curr)\n if input_resistance == np.inf:\n input_resistance = np.nan\n df_features.update({'input_resistance': input_resistance})\n\n # Adding membrane time constant (s) and voltage plateau level for hyperpolarisation paradigms\n # after stimulus onset\n tau = np.nan\n E_plat = np.nan\n sag_ratio = np.nan\n if curr < 0: # We use hyperpolarising steps as required in the object function to estimate the\n # membrane time constant and E_plateau\n while True:\n try:\n tau = EphysObject.estimate_time_constant() # Result in seconds!\n break\n except TypeError: # Probably a noisy bump for this trace, just keep it to be np.nan\n break\n E_plat = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n sag, sag_ratio = EphysObject.estimate_sag()\n df_features.update({'tau': tau})\n df_features.update({'E_plat': 
E_plat})\n df_features.update({'sag_ratio': sag_ratio})\n\n # For the rebound and sag time we only are interested in the lowest (-200 pA (usually)) hyperpolarisation trace\n rebound = np.nan\n sag_time = np.nan\n sag_area = np.nan\n\n if c == 0:\n baseline_interval = 0.1 # To calculate the SS voltage\n v_baseline = EphysObject._get_baseline_voltage()\n\n end_index = ft.find_time_index(time, 0.7)\n if np.flatnonzero(voltage[end_index:, c] > v_baseline).size == 0: # So perfectly zero here means\n # it did not reach it\n rebound = 0\n else:\n index_rebound = end_index + np.flatnonzero(voltage[end_index:, c] > v_baseline)[0]\n if not (time[index_rebound] > (end + 0.15)): # We definitely have 150 ms left to calculate the rebound\n rebound = ft.average_voltage(\n voltage[index_rebound:index_rebound + ft.find_time_index(time, 0.15), c], \\\n time[index_rebound:index_rebound + ft.find_time_index(time, 0.15)]) - v_baseline\n else: # Work with whatever time is left\n if time[-1] == time[index_rebound]:\n rebound = 0\n else:\n rebound = ft.average_voltage(voltage[index_rebound:, c], \\\n time[index_rebound:]) - v_baseline\n\n v_peak, peak_index = EphysObject.voltage_deflection(\"min\")\n v_steady = ft.average_voltage(voltage[:, c], time, start=end - baseline_interval, end=end)\n\n if v_steady - v_peak < 4: # The sag should have a minimum depth of 4 mV\n # otherwise we set sag time and sag area to 0\n sag_time = 0\n sag_area = 0\n else:\n # First time SS is reached after stimulus onset\n first_index = start_index + np.flatnonzero(voltage[start_index:peak_index, c] < v_steady)[0]\n # First time SS is reached after the max voltage deflection downwards in the sag\n if np.flatnonzero(voltage[peak_index:end_index, c] > v_steady).size == 0:\n second_index = end_index\n else:\n second_index = peak_index + np.flatnonzero(voltage[peak_index:end_index, c] > v_steady)[0]\n sag_time = time[second_index] - time[first_index]\n sag_area = -integrate.cumtrapz(voltage[first_index:second_index, c], time[first_index:second_index])[-1]\n\n burst_metric = np.nan\n # print(c)\n if spike_count > 5:\n burst = EphysObject._process_bursts()\n if len(burst) != 0:\n burst_metric = burst[0][0]\n\n df_features.update({'rebound': rebound})\n df_features.update({'sag_time': sag_time})\n df_features.update({'sag_area': sag_area})\n df_features.update({'burstiness': burst_metric})\n\n df_related_features = pd.concat([df_related_features, pd.DataFrame([df_features])], sort=True)\n\n return df, df_related_features",
"def waveform_width(waveform, cutoff=0.75):\n waveform = np.squeeze(waveform)\n if np.ndim(waveform) != 1:\n raise ValueError('Expected 1-dimensional waveform.')\n if len(waveform) < 2:\n raise ValueError('Too short waveform.')\n if not (0 <= cutoff < 1):\n raise ValueError('Cuttoff must be in range [0, 1).')\n\n min_border = max(1, int(len(waveform) * cutoff))\n idx_min = np.argmin(waveform[:min_border])\n idx_max = np.argmax(waveform[idx_min:]) + idx_min\n width = idx_max - idx_min\n\n return width",
"def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur",
"def _compute_spike_delays(sim_params, spikes, induced_spike_times):\n ind_wins = spt.ExclusiveWindows.build_between(induced_spike_times, sim_params.tstart, sim_params.tend)\n spikes_delay_from_induced = ind_wins.classify_spikes(spikes).delay\n return spikes_delay_from_induced",
"def tke_wavenumber_spectrum(u, v, x_range=None, axis=0):\n u_k = np.fft.fftn(u)\n v_k = np.fft.fftn(v)\n E_k = np.mean(np.abs(v_k)**2 + np.abs(u_k)**2, axis=axis)\n if x_range is None:\n k = np.arange(len(E_k))\n else:\n k = np.linspace(x_range[0], x_range[1], len(E_k))\n n_steps = len(k) // 2\n E_k = k**2 * E_k\n E_k_a = E_k[1:n_steps + 1]\n E_k_b = E_k[-n_steps:][::-1]\n E_k = E_k_a + E_k_b\n return k[:n_steps], E_k",
"def width_calc(sigma_v):\n\t\t\n\t\tsigma_lambda = sigma_v/c*(lambda0*(1 + z)) #in observing frame\n\t\n\t\treturn np.sqrt(sigma_lambda**2 + sigma_slit**2)",
"def get_spike_template_amplitudes(self, spike_ids, **kwargs):\n if self.model.amplitudes is None:\n return np.zeros(len(spike_ids))\n amplitudes = self.model.amplitudes[spike_ids]\n return amplitudes",
"def CorrectStokes(self, t, fluid):\n\n\t\tself.SetTime(t + fluid.dt)\n\t\t\n\t\tself.F = (self.Tether - self.X)\n\t\tf = self.F.sum(axis=0) / self.Nb\n\n\t\tself.X += f\n\n\t\tfluid.u += f / fluid.dt",
"def plotPSTH(self, stimpath,\n stimdata,\n spikesdict,\n simtime,\n offset=0,\n binsize=10e-3,\n legendSuffix='',\n rate=False,\n normcells=True\n ):\n if not spikesdict:\n return 0\n stimdata = stimdata[:]\n times = []\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in stimpath:\n times = stimdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in stimpath:\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[False, numpy.diff(stimdata) < 0].nonzero()[0]]\n else:\n stimdata = analyzer.smooth(stimdata)\n mid = numpy.mean(stimdata)\n stimdata = stimdata[stimdata > mid] # Threshold at midpoint\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[True, stimdata[1:] > stimdata[:-1]] & numpy.r_[stimdata[:-1] > stimdata[1:], True]]\n if (times is None) or (len(times) == 0):\n return 0\n start = times + offset\n end = numpy.zeros(times.shape)\n end[:-1] = start[1:]\n end[-1] = simtime + offset # We assume\n accumulated_data = []\n for spikedata in spikesdict.values():\n tpoints = spikedata[:]\n for ii in range(len(times)):\n ix = numpy.nonzero((tpoints >= start[ii]) & (tpoints < end[ii]))[0]\n accumulated_data = numpy.r_[accumulated_data, tpoints[ix] - times[ii]]\n if len(accumulated_data) == 0:\n return 0\n # set the bins by splitting interstimulus interval\n interval = numpy.mean(numpy.diff(times))\n bins = numpy.arange(offset, interval+offset, binsize)\n bins = numpy.r_[bins, bins[-1] + binsize]\n hist = numpy.histogram(accumulated_data, bins=bins)\n xx = (hist[1][:-1] + hist[1][1:])/2.0\n if rate:\n yy = hist[0] / binsize\n else:\n yy = hist[0]\n if normcells:\n yy /= len(spikesdict)\n path = stimpath + '_psth' + legendSuffix\n new_curve = Qwt.QwtPlotCurve(path)\n new_curve.setData(xx, yy)\n pen = Qt.QPen(Qt.Qt.blue, 1, Qt.Qt.DashDotLine)\n new_curve.setStyle(Qwt.QwtPlotCurve.Lines)\n new_curve.setPen(pen)\n pen = Qt.QPen(Qt.Qt.red, 1)\n new_curve.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.XCross,\n Qt.QBrush(),\n pen,\n Qt.QSize(3,3))) \n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n path = stimpath + '_bins' + legendSuffix\n histmarkers = Qwt.QwtPlotCurve(path)\n height = int(max(yy) + 0.5)\n yy = numpy.ones(hist[1].shape) * height\n histmarkers.setData(hist[1], yy)\n pen = Qt.QPen(Qt.Qt.black, 1, Qt.Qt.DotLine)\n histmarkers.setPen(pen)\n histmarkers.setStyle(Qwt.QwtPlotCurve.Sticks)\n histmarkers.attach(self)\n self.curve_path_dict[histmarkers] = path\n self.path_curve_dict[path].append(new_curve)\n self.clearZoomStack()\n self.replot()\n return 1",
"def interspike_intervals(t, V):\n # first pass -- get number of spikes and locations\n spike_inds = all_spike_ind(t, V)\n n_spikes = len(spike_inds)\n\n if n_spikes == 0:\n return []\n\n # generate array to hold time intervals\n intervals = np.zeros((n_spikes-1), dtype=float)\n for ti in range(1, n_spikes):\n intervals[ti-1] = t[spike_inds[ti]] - t[spike_inds[ti-1]]\n\n return intervals",
"def physicond(amp, t_phi, t_th):\n # no negative amplitude\n if amp <= 0 :\n amp = 0\n\n # no negative char. time\n if t_th <= 0:\n t_th = 1e-20\n\n # rising time shorter than decay time\n if t_phi <= t_th:\n t_phi = t_th\n\n return amp, t_phi, t_th"
]
| [
"0.7286549",
"0.6501221",
"0.6024024",
"0.5812456",
"0.5557657",
"0.55041677",
"0.5400036",
"0.53080493",
"0.5215794",
"0.52152115",
"0.51359975",
"0.5117697",
"0.5102243",
"0.5066071",
"0.5011091",
"0.50041133",
"0.4983569",
"0.4959171",
"0.49430996",
"0.49130043",
"0.49118742",
"0.4891758",
"0.4876571",
"0.48635292",
"0.47776863",
"0.47716376",
"0.476636",
"0.47638333",
"0.47507474",
"0.47106344"
]
| 0.8307181 | 0 |
Computes spike width for an abf object, given the t_spike index and spike amplitude `spike_amp`. Note that t_spike should be found within the same epoch, otherwise there will be an index mismatch. | def spike_width_abf(abf, t_spike, spike_amp, epoch_start=3):
# handle no spike found
if t_spike is None:
return None
p0 = abf.sweepEpochs.p1s[epoch_start]
t = abf.sweepX[p0:-1]
V = abf.sweepY[p0:-1]
return spike_width(t, V, t_spike, spike_amp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spike_width(t, V, t_spike, spike_amp):\n # handle no spike found\n if t_spike is None:\n return None\n\n Vmin = np.min(V[t_spike+1:t_spike+500])\n minval = np.max([t_spike - 100, 0])\n if len(V) > t_spike+500:\n maxval = -1\n else:\n maxval = t_spike+500\n id1 = find_nearest_idx(V[minval:t_spike], spike_amp/2 + Vmin) \\\n + t_spike - 100\n id2 = find_nearest_idx(V[t_spike+1:maxval], spike_amp/2 + Vmin) \\\n + t_spike + 1\n return t[id2] - t[id1]",
"def spike_amplitude_abf(abf, t_spike, epoch_start=3):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n\n return spike_amplitude(V, t_spike)",
"def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)",
"def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n return None\n Vmax = V[t_spike]\n Vmin = np.min(V[t_spike+1:t_spike+500])\n\n return Vmax - Vmin",
"def waveform_width(waveform, cutoff=0.75):\n waveform = np.squeeze(waveform)\n if np.ndim(waveform) != 1:\n raise ValueError('Expected 1-dimensional waveform.')\n if len(waveform) < 2:\n raise ValueError('Too short waveform.')\n if not (0 <= cutoff < 1):\n raise ValueError('Cuttoff must be in range [0, 1).')\n\n min_border = max(1, int(len(waveform) * cutoff))\n idx_min = np.argmin(waveform[:min_border])\n idx_max = np.argmax(waveform[idx_min:]) + idx_min\n width = idx_max - idx_min\n\n return width",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def spike_latency_abf(abf, epochstart):\n p0 = abf.sweepEpochs.p1s[epochstart]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n I = abf.sweepC[p0:-1]\n return spike_latency(t, I, V)",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def meanSpikeCount(self, gather=True):\n raise NotImplementedError",
"def array_to_spiketrains(array, bin_size):\n stList = []\n for trial in range(len(array)):\n trialList = []\n for channel in range(array.shape[2]):\n times = np.nonzero(array[trial, :, channel])[0]\n counts = array[trial, times, channel].astype(int)\n times = np.repeat(times, counts)\n st = neo.SpikeTrain(times*bin_size*pq.ms, t_stop=array.shape[1]*bin_size*pq.ms)\n trialList.append(st)\n stList.append(trialList)\n return stList",
"def compute_profiling_time(key, expected_num_spikes, rate, t_stop, n,\n winlen, binsize, num_rep=10):\n\n time_fast_fca = 0.\n time_fpgrowth = 0.\n for rep in range(num_rep):\n # Generating artificial data\n data = []\n for i in range(n):\n np.random.seed(0)\n data.append(stg.homogeneous_poisson_process(\n rate=rate, t_start=0*pq.s, t_stop=t_stop))\n\n # Extracting Closed Frequent Itemset with FP-Growth\n t0 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = spade._build_context(binary_matrix,\n winlen)\n # Applying FP-Growth\n fim_results = [i for i in spade._fpgrowth(\n transactions,\n rel_matrix=rel_matrix,\n winlen=winlen)]\n time_fpgrowth += time.time() - t0\n\n # Extracting Closed Frequent Itemset with Fast_fca\n t1 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = \\\n spade._build_context(binary_matrix, winlen)\n # Applying FP-Growth\n fim_results = spade._fast_fca(context, winlen=winlen)\n time_fast_fca += time.time() - t1\n\n time_profiles = {'fp_growth': time_fpgrowth/num_rep,\n 'fast_fca': time_fast_fca/num_rep}\n\n # Storing data\n res_path = '../results/{}/{}/'.format(key, expected_num_spikes)\n # Create path is not already existing\n path_temp = './'\n for folder in split_path(res_path):\n path_temp = path_temp + '/' + folder\n mkdirp(path_temp)\n\n np.save(res_path + '/profiling_results.npy', {'results': time_profiles,\n 'parameters': {'rate': rate, 't_stop': t_stop, 'n': n,\n 'winlen': winlen, 'binsize': binsize}})",
"def spikesperburst(tdf, bstart, bstop):\n tms = list(tdf.times.dropna().values)\n bursts = [[tms[u] for u in range(len(tms)) if bstart[k]<(tms[u]/1000.)<bstop[k] ]\n for k in range(len(bstart))]\n bursts = [len(i) for i in bursts]\n return np.mean(bursts), np.std(bursts)/np.mean(bursts)",
"def test_temporal_smoothing_reduce_length(PM_ds_control_3d_full):\r\n smooth = 10\r\n tsmooth_kws = {\"time\": smooth}\r\n actual = temporal_smoothing(\r\n PM_ds_control_3d_full, tsmooth_kws=tsmooth_kws\r\n ).time.size\r\n expected = PM_ds_control_3d_full.time.size - smooth + 1\r\n assert actual == expected",
"def compute_auto(tr, freq_width):\n \n ## whitening and autocorrelating\n spectrum = spectral_whitening(tr.data, tr.stats.delta, freq_width)\n ## autocorrelating\n tr.data = np.fft.irfft(np.abs(spectrum)**2)[0:tr.stats.npts]\n \n # post processing: tapering and filtering\n taper_width = 0.5 / (tr.stats.npts * tr.stats.delta)\n tr.taper(type='cosine', max_percentage=taper_width)\n tr.filter('bandpass', freqmin=1.0, freqmax=5.0, corners=4, zerophase=True)\n tr.taper(type='cosine', max_percentage=taper_width)\n\n return tr",
"def align_spikes(spike_data, spt_dict, sp_win, type=\"max\", resample=1,\n contact=0, remove=True):\n\n tol = 0.1\n\n if (sp_win[0] > -tol) or (sp_win[1] < tol):\n warn('You are using very short sp_win. '\n 'This may lead to alignment problems.')\n\n spt = spt_dict['data'].copy()\n\n idx_align = np.arange(len(spt))\n\n #go in a loop until all spikes are correctly aligned\n iter_id = 0\n while len(idx_align) > 0:\n spt_align = {'data': spt[idx_align]}\n spt_inbound = filter_spt(spike_data, spt_align, sp_win)\n idx_align = idx_align[spt_inbound]\n sp_waves_dict = extract_spikes(spike_data, spt_align, sp_win,\n resample=resample, contacts=contact)\n\n sp_waves = sp_waves_dict['data'][:, spt_inbound, 0]\n time = sp_waves_dict['time']\n\n if type == \"max\":\n i = sp_waves.argmax(0)\n elif type == \"min\":\n i = sp_waves.argmin(0)\n\n #move spike markers\n shift = time[i]\n spt[idx_align] += shift\n\n #if spike maximum/minimum was at the edge we have to extract it at the\n # new marker and repeat the alignment\n\n idx_align = idx_align[(shift < (sp_win[0] + tol)) |\n (shift > (sp_win[1] - tol))]\n iter_id += 1\n\n ret_dict = {'data': spt}\n\n if remove:\n #remove double spikes\n FS = spike_data['FS']\n ret_dict = remove_doubles(ret_dict, 1000.0 / FS)\n\n return ret_dict",
"def extract_spikes(spike_data, spt_dict, sp_win,\n resample=1, contacts='all'):\n sp_data = spike_data['data']\n n_contacts = spike_data['n_contacts']\n\n if contacts == \"all\":\n contacts = np.arange(n_contacts)\n elif isinstance(contacts, int):\n contacts = np.array([contacts])\n else:\n contacts = np.asarray(contacts)\n\n FS = spike_data['FS']\n spt = spt_dict['data']\n idx = np.arange(len(spt))\n inner_idx = filter_spt(spike_data, spt_dict, sp_win)\n outer_idx = idx[~np.in1d(idx, inner_idx)]\n\n indices = (spt / 1000.0 * FS).astype(np.int32)\n win = (np.asarray(sp_win) / 1000.0 * FS).astype(np.int32)\n time = np.arange(win[1] - win[0]) * 1000.0 / FS + sp_win[0]\n n_contacts, n_pts = sp_data.shape\n\n # auxiliary function to find a valid spike window within data range\n minmax = lambda x: np.max([np.min([n_pts, x]), 0])\n spWave = np.zeros((len(time), len(spt), len(contacts)),\n dtype=np.float32)\n\n for i in inner_idx:\n sp = indices[i]\n spWave[:, i, :] = np.atleast_2d(sp_data[contacts,\n sp + win[0]:sp + win[1]]).T\n\n for i in outer_idx:\n sp = indices[i]\n l, r = map(minmax, sp + win)\n if l != r:\n spWave[(l - sp) - win[0]:(r - sp) - win[0], i, :] = \\\n sp_data[contacts, l:r].T\n\n wavedict = {\"data\": spWave, \"time\": time, \"FS\": FS}\n\n if len(idx) != len(inner_idx):\n is_valid = np.zeros(len(spt), dtype=np.bool)\n is_valid[inner_idx] = True\n wavedict['is_valid'] = is_valid\n\n if resample != 1:\n warn(\"resample argument is deprecated.\"\n \"Please update your code to use function\"\n \"resample_spikes\", DeprecationWarning)\n wavedict = resample_spikes(wavedict, FS * resample)\n return wavedict",
"def preprocess_spikes(data, dt, nlag=0):\n for d in data:\n ntrials = len(d[\"spikes\"])\n nframes = d[\"duration\"]\n nbins = nframes * int(d[\"stim_dt\"] / dt) + nlag\n spike_v = np.zeros((nbins, ntrials), dtype='i')\n for i, trial in enumerate(d[\"spikes\"]):\n idx = (trial / dt).astype('i')\n # make sure all spikes are in bounds\n idx = idx[(idx >= 0) & (idx < nbins)]\n spike_v[idx, i] = 1\n d[\"spike_v\"] = spike_v\n d[\"spike_dt\"] = dt\n d[\"psth\"] = np.sum(spike_v, axis=1)\n return data",
"def _iter_spike_waveforms(\n interval=None, traces_interval=None, model=None, supervisor=None,\n n_samples_waveforms=None, get_best_channels=None, show_all_spikes=False):\n m = model\n p = supervisor\n sr = m.sample_rate\n a, b = m.spike_times.searchsorted(interval)\n s0, s1 = int(round(interval[0] * sr)), int(round(interval[1] * sr))\n ns = n_samples_waveforms\n k = ns // 2\n for show_selected in (False, True):\n for i in range(a, b):\n t = m.spike_times[i]\n c = m.spike_clusters[i]\n is_selected = c in p.selected\n # Show non selected spikes first, then selected spikes so that they appear on top.\n if is_selected is not show_selected:\n continue\n # Skip non-selected spikes if requested.\n if (not show_all_spikes and c not in supervisor.selected):\n continue\n # cg = p.cluster_meta.get('group', c)\n channel_ids, channel_amps = get_best_channels(c)\n s = int(round(t * sr)) - s0\n # Skip partial spikes.\n if s - k < 0 or s + k >= (s1 - s0): # pragma: no cover\n continue\n # Extract the waveform.\n wave = Bunch(\n data=traces_interval[s - k:s + ns - k, channel_ids],\n channel_ids=channel_ids,\n start_time=(s + s0 - k) / sr,\n spike_id=i,\n spike_time=t,\n spike_cluster=c,\n channel_amps=channel_amps, # for each of the channel_ids, the relative amp\n select_index=p.selected.index(c) if c in p.selected else None,\n )\n assert wave.data.shape == (ns, len(channel_ids))\n yield wave",
"def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms):\n # Build array of spike counts (one per spike train)\n spike_counts = np.array([len(st) for st in spiketrains])\n\n # Compute FF\n if all(count == 0 for count in spike_counts):\n # empty list of spiketrains reaches this branch, and NaN is returned\n return np.nan\n\n if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):\n if not is_time_quantity(warn_tolerance):\n raise TypeError(\"'warn_tolerance' must be a time quantity.\")\n durations = [(st.t_stop - st.t_start).simplified.item()\n for st in spiketrains]\n durations_min = min(durations)\n durations_max = max(durations)\n if durations_max - durations_min > warn_tolerance.simplified.item():\n warnings.warn(\"Fano factor calculated for spike trains of \"\n \"different duration (minimum: {_min}s, maximum \"\n \"{_max}s).\".format(_min=durations_min,\n _max=durations_max))\n\n fano = spike_counts.var() / spike_counts.mean()\n return fano",
"def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur",
"def instantaneous_rate(spiketrains, sampling_period, kernel='auto',\n cutoff=5.0, t_start=None, t_stop=None, trim=False,\n center_kernel=True, border_correction=False):\n def optimal_kernel(st):\n width_sigma = None\n if len(st) > 0:\n width_sigma = optimal_kernel_bandwidth(\n st.magnitude, times=None, bootstrap=False)['optw']\n if width_sigma is None:\n raise ValueError(\"Unable to calculate optimal kernel width for \"\n \"instantaneous rate from input data.\")\n return kernels.GaussianKernel(width_sigma * st.units)\n\n if border_correction and not \\\n (kernel == 'auto' or isinstance(kernel, kernels.GaussianKernel)):\n raise ValueError(\n 'The border correction is only implemented'\n ' for Gaussian kernels.')\n\n if isinstance(spiketrains, neo.SpikeTrain):\n if kernel == 'auto':\n kernel = optimal_kernel(spiketrains)\n spiketrains = [spiketrains]\n\n if not all([isinstance(elem, neo.SpikeTrain) for elem in spiketrains]):\n raise TypeError(f\"'spiketrains' must be a list of neo.SpikeTrain's or \"\n f\"a single neo.SpikeTrain. Found: {type(spiketrains)}\")\n\n if not is_time_quantity(sampling_period):\n raise TypeError(f\"The 'sampling_period' must be a time Quantity.\"\n f\"Found: {type(sampling_period)}\")\n\n if sampling_period.magnitude < 0:\n raise ValueError(f\"The 'sampling_period' ({sampling_period}) \"\n f\"must be non-negative.\")\n\n if not (isinstance(kernel, kernels.Kernel) or kernel == 'auto'):\n raise TypeError(f\"'kernel' must be instance of class \"\n f\"elephant.kernels.Kernel or string 'auto'. Found: \"\n f\"{type(kernel)}, value {str(kernel)}\")\n\n if not isinstance(cutoff, (float, int)):\n raise TypeError(\"'cutoff' must be float or integer\")\n\n if not is_time_quantity(t_start, allow_none=True):\n raise TypeError(\"'t_start' must be a time Quantity\")\n if not is_time_quantity(t_stop, allow_none=True):\n raise TypeError(\"'t_stop' must be a time Quantity\")\n\n if not isinstance(trim, bool):\n raise TypeError(\"'trim' must be bool\")\n\n check_neo_consistency(spiketrains,\n object_type=neo.SpikeTrain,\n t_start=t_start, t_stop=t_stop)\n\n if kernel == 'auto':\n if len(spiketrains) == 1:\n kernel = optimal_kernel(spiketrains[0])\n else:\n raise ValueError(\"Cannot estimate a kernel for a list of spike \"\n \"trains. 
Please provide a kernel explicitly \"\n \"rather than 'auto'.\")\n\n if t_start is None:\n t_start = spiketrains[0].t_start\n if t_stop is None:\n t_stop = spiketrains[0].t_stop\n\n # Rescale units for consistent calculation\n t_start = t_start.rescale(spiketrains[0].units)\n t_stop = t_stop.rescale(spiketrains[0].units)\n\n # Calculate parameters for np.histogram\n n_bins = int(((t_stop - t_start) / sampling_period).simplified)\n hist_range_end = t_start + n_bins * \\\n sampling_period.rescale(spiketrains[0].units)\n\n hist_range = (t_start.item(), hist_range_end.item())\n\n # Preallocation\n histogram_arr = np.zeros((len(spiketrains), n_bins), dtype=np.float64)\n for i, st in enumerate(spiketrains):\n histogram_arr[i], _ = np.histogram(st.magnitude, bins=n_bins,\n range=hist_range)\n\n histogram_arr = histogram_arr.T # make it (time, units)\n\n # Kernel\n if cutoff < kernel.min_cutoff:\n cutoff = kernel.min_cutoff\n warnings.warn(\"The width of the kernel was adjusted to a minimally \"\n \"allowed width.\")\n\n scaling_unit = pq.CompoundUnit(f\"{sampling_period.rescale('s').item()}*s\")\n cutoff_sigma = cutoff * kernel.sigma.rescale(scaling_unit).magnitude\n if center_kernel: # t_arr is centered on the kernel median.\n median = kernel.icdf(0.5).rescale(scaling_unit).item()\n else:\n median = 0\n\n # An odd number of points correctly resolves the median index of the\n # kernel. This avoids a timeshift in the rate estimate for symmetric\n # kernels. A number x given by 'x = 2 * n + 1' with n being an integer is\n # always odd. Using `math.ceil` to calculate `t_arr_kernel_half` ensures an\n # integer value, hence the number of points for the kernel (num) given by\n # `num=2 * t_arr_kernel_half + 1` is always odd.\n # (See Issue #360, https://github.com/NeuralEnsemble/elephant/issues/360)\n t_arr_kernel_half = math.ceil(\n cutoff * (kernel.sigma / sampling_period).simplified.item())\n t_arr_kernel_length = 2 * t_arr_kernel_half + 1\n\n # Shift kernel using the calculated median\n t_arr_kernel = np.linspace(start=-cutoff_sigma + median,\n stop=cutoff_sigma + median,\n num=t_arr_kernel_length,\n endpoint=True) * scaling_unit\n\n # Calculate the kernel values with t_arr\n kernel_arr = np.expand_dims(\n kernel(t_arr_kernel).rescale(pq.Hz).magnitude, axis=1)\n\n # Define mode for scipy.signal.fftconvolve\n if trim:\n fft_mode = 'valid'\n else:\n fft_mode = 'same'\n\n rate = scipy.signal.fftconvolve(histogram_arr,\n kernel_arr,\n mode=fft_mode)\n # The convolution of non-negative vectors is non-negative\n rate = np.clip(rate, a_min=0, a_max=None, out=rate)\n\n # Adjust t_start and t_stop\n if fft_mode == 'valid':\n median_id = kernel.median_index(t_arr_kernel)\n kernel_array_size = len(kernel_arr)\n t_start = t_start + median_id * scaling_unit\n t_stop = t_stop - (kernel_array_size - median_id) * scaling_unit\n\n kernel_annotation = dict(type=type(kernel).__name__,\n sigma=str(kernel.sigma),\n invert=kernel.invert)\n\n rate = neo.AnalogSignal(signal=rate,\n sampling_period=sampling_period,\n units=pq.Hz, t_start=t_start, t_stop=t_stop,\n kernel=kernel_annotation)\n\n if border_correction:\n sigma = kernel.sigma.simplified.magnitude\n times = rate.times.simplified.magnitude\n correction_factor = 2 / (\n erf((t_stop.simplified.magnitude - times) / (\n np.sqrt(2.) * sigma))\n - erf((t_start.simplified.magnitude - times) / (\n np.sqrt(2.) 
* sigma)))\n\n rate *= correction_factor[:, None]\n\n duration = t_stop.simplified.magnitude - t_start.simplified.magnitude\n # ensure integral over firing rate yield the exact number of spikes\n for i, spiketrain in enumerate(spiketrains):\n if len(spiketrain) > 0:\n rate[:, i] *= len(spiketrain) /\\\n (np.mean(rate[:, i]).magnitude * duration)\n\n return rate",
"def half_space_cooling_waermefluss(k, T0, T1, kappa, t):\n return k * (T1 - T0) / (numpy.sqrt(math.pi * kappa * t))",
"def get_auto_slit_width(self, index):\n assert (1 <= index <= 4)\n width_um = c_float()\n self._dll.ShamrockGetAutoSlitWidth(self._device, index, byref(width_um))\n return width_um.value",
"def test_spike(self):\n\n arr = [10, 12, 999.99, 13, 15, 40, 9, 9]\n\n # First and last elements should always be good data, unless someone\n # has set a threshold to zero.\n expected = [1, 4, 4, 4, 1, 3, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=self.suspect_threshold,\n fail_threshold=self.fail_threshold\n ),\n expected\n )",
"def model_wave(time, period, width) -> float:\n cur_time = time % period\n half_width = width//2\n if cur_time < half_width:\n return float(cur_time) / half_width\n elif cur_time < width:\n return 1 - float(cur_time - half_width) / half_width\n else:\n return 0",
"def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int",
"def amplogwidth(arr, factor=2):\n\n log = np.ma.log10(np.sqrt(np.sum(arr**2, axis=-1))) # logarithms of amplitudes\n mean = log.mean() # means of logarithms of amplitudes\n std = log.std() # standard deviation of logarithms of amplitudes\n\n return mean - factor*std, mean + factor*std",
"def time_histogram(spiketrains, bin_size, t_start=None, t_stop=None,\n output='counts', binary=False):\n # Bin the spike trains and sum across columns\n bs = BinnedSpikeTrain(spiketrains, t_start=t_start, t_stop=t_stop,\n bin_size=bin_size)\n\n if binary:\n bs = bs.binarize(copy=False)\n bin_hist = bs.get_num_of_spikes(axis=0)\n # Flatten array\n bin_hist = np.ravel(bin_hist)\n # Renormalise the histogram\n if output == 'counts':\n # Raw\n bin_hist = pq.Quantity(bin_hist, units=pq.dimensionless, copy=False)\n elif output == 'mean':\n # Divide by number of input spike trains\n bin_hist = pq.Quantity(bin_hist / len(spiketrains),\n units=pq.dimensionless, copy=False)\n elif output == 'rate':\n # Divide by number of input spike trains and bin width\n bin_hist = bin_hist / (len(spiketrains) * bin_size)\n else:\n raise ValueError(f'Parameter output ({output}) is not valid.')\n\n return neo.AnalogSignal(signal=np.expand_dims(bin_hist, axis=1),\n sampling_period=bin_size, units=bin_hist.units,\n t_start=bs.t_start, normalization=output,\n copy=False)",
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]"
]
| [
"0.7473685",
"0.6299853",
"0.59371275",
"0.5677367",
"0.5461012",
"0.53023696",
"0.52721286",
"0.5254281",
"0.5241991",
"0.487418",
"0.48584986",
"0.4848857",
"0.48387563",
"0.48139584",
"0.4812244",
"0.48033372",
"0.4773211",
"0.47599214",
"0.4744546",
"0.4734407",
"0.47342744",
"0.47324625",
"0.47274515",
"0.472538",
"0.47114906",
"0.4704369",
"0.46932244",
"0.46931362",
"0.46799576",
"0.4672518"
]
| 0.8146022 | 0 |
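A minimal usage sketch for `spike_width_abf` above. It assumes pyABF is installed and that the companion helpers `first_spike_tind_abf` and `spike_amplitude_abf` (shown elsewhere in this dump) are importable from the same module; the file path and epoch index are placeholders, not values taken from the dataset.

```python
import pyabf

abf = pyabf.ABF("example.abf")   # placeholder path
epoch_start = 3                  # epoch at which the current step begins (assumed)

# first_spike_tind_abf raises NoSpikeFoundException when the sweep has no spike
t_spike = first_spike_tind_abf(abf, epoch_start)          # index within the epoch slice
amp = spike_amplitude_abf(abf, t_spike, epoch_start)      # peak minus post-spike minimum (mV)
width = spike_width_abf(abf, t_spike, amp, epoch_start)   # width at half amplitude (s)
print(f"spike width: {width * 1e3:.2f} ms")
```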
Finds the index of the first spike. The value of startind can be used as an offset when t and V are slices of a larger array but the index into the original array is wanted. | def first_spike_tind(V, startind=0):
spikes, _ = find_peaks(V, [1, 1000])
if len(spikes) == 0:
found_spike = False
else:
found_spike = True
if found_spike is False:
raise NoSpikeFoundException
else:
        return spikes[0] + startind  # apply the offset so the index refers to the original (un-sliced) array
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_spike_tind_abf(abf, epoch_start, startind=0):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n return first_spike_tind(V, startind=startind)",
"def get_start(i,v):\n return i-v[i]-1",
"def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])",
"def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes",
"def matching_function_startpoint(self, idx):\n real_idx = idx + 1\n path = dtw.best_path(self.paths, col=real_idx)\n start_idx = path[0][1]\n return start_idx",
"def find_start_index():\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def find_min_index(array, start, end):\n\n min_val = array[start]\n min_index = start\n\n end = end + 1 if end < len(array) - 1 else len(array)\n\n for i in range(start, end):\n if array[i] < min_val:\n min_index = i\n min_val = array[i]\n\n return min_index",
"def search_linear(xs, target):\n for (i, v) in enumerate(xs):\n if v == target: # Is referred to as a probe.\n return i\n return -1",
"def __findStartValue(self):\n\t\tself.minEvidence = 1e120*np.ones(self.nStartValues)\n\t\tfor idx1 in range(self.nStartValues):\n\t\t\tself.minEvidence[self.nStartValues-1-idx1] = self.__minBayesianEvidence(self.logLamStart[self.nStartValues-1-idx1])\n\t\t\t# If minEvidence > 1e100 a negative value has been found for wMP which\n\t\t\t# implies that lower values of lambda do not need to be considered.\n\t\t\tif self.minEvidence[self.nStartValues-1-idx1] > 1e100:\n\t\t\t\tbreak\n\t\tself.startIdx = np.argmin(self.minEvidence)\n\t\t\t\t\n\t\treturn self.logLamStart[self.startIdx]",
"def __find_start(puzzle):\n for i in range(len(puzzle)):\n for j in range(len(puzzle[0])):\n if puzzle[i][j] == 0:\n return i\n return 0",
"def min_spike_frequency_tV(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n max_int = np.amax(intervals)\n return 1/max_int",
"def find_idx_of_fit_limit(self, idx_start, idx_end):\n \n if idx_start != 0:\n self.idx_start = np.searchsorted(self.time_axis, idx_start)\n else:\n self.idx_start = 0\n if idx_end != -1:\n self.idx_end = np.searchsorted(self.time_axis, idx_end)\n else:\n self.idx_end = len(self.time_axis)\n return self.idx_start, self.idx_end",
"def start(self):\n try:\n return self.index[0]\n except:\n pass",
"def _sind(v):\n return math.sin(math.radians(v))",
"def Find_the_first_index(A, target):\n if len(A)==0:\n return -1\n begin = 0\n end = len(A) - 1\n while end - begin > 1:\n mid = begin + (end - begin >> 1)\n if target > A[mid]:\n begin = mid\n else:\n end = mid\n if A[begin] == target:\n return begin\n elif A[end] == target:\n return end\n else:\n return -1",
"def _get_start(self, variant, reference_start, cigar, ignore_softclip=False):\n indels = get_indel_from_cigar(cigar, ignore_softclip)\n start = variant.POS - reference_start - 1\n # for pos, val in indels.iteritems(): # python2\n for pos, val in indels.items():\n if pos > start:\n break\n if val[0] == 'I':\n start += val[1]\n elif val[0] == 'D':\n start -= val[1]\n return start",
"def first_peak_detect(beam, start_point):\n logging.debug('running first_peak_detect function')\n for i in range(start_point, len(beam)):\n logging.debug('current value of i is %d', i)\n if beam[i-1] < beam[i] > beam[i+1]:\n logging.debug('value determined to be the center of the values %d, %d, %d', beam[i-1], beam[i], beam[i+1])\n return i\n\n logging.error(\"no peak was found. will try working with the length of the beam\")\n return len(beam)",
"def findidx(X, v, tol=1e-3):\n\tloc = -1\n\tdiff = 1e15 # Take a big difference\n\tn = len(X)\n\n\tfor i in xrange(n):\n\t\tndiff = abs(X[i]-v)\n\t\tif ndiff <= tol and ndiff < diff:\n\t\t\tloc = i\n\t\t\tdiff = ndiff\n\t\n\treturn loc",
"def Ni_find(t):\r\n return ep(t) - 1",
"def index_before_slice(s, index):\n\n start = s.start or 0\n step = s.step or 1\n\n assert start >= 0\n assert step >= 0\n assert s.stop is None or s.stop >= 0\n\n new_index = start + index * step\n if s.stop is not None:\n new_index = new_index[np.where(new_index < s.stop)]\n\n return new_index",
"def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)",
"def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1",
"def getObservationStart(vis, obsid=-1, verbose=False):\n if (os.path.exists(vis) == False):\n print \"vis does not exist = %s\" % (vis)\n return\n if (os.path.exists(vis+'/table.dat') == False):\n print \"No table.dat. This does not appear to be an ms.\"\n print \"Use au.getObservationStartDateFromASDM().\"\n return\n mytb = createCasaTool(tbtool)\n try:\n mytb.open(vis+'/OBSERVATION')\n except:\n print \"ERROR: failed to open OBSERVATION table on file \"+vis\n return(3)\n time_range = mytb.getcol('TIME_RANGE')\n mytb.close()\n if verbose: print \"time_range: \", str(time_range)\n # the first index is whether it is starttime(0) or stoptime(1) \n time_range = time_range[0]\n if verbose: print \"time_range[0]: \", str(time_range)\n if (obsid >= len(time_range)):\n print \"Invalid obsid\"\n return\n if obsid >= 0:\n time_range = time_range[obsid]\n elif (type(time_range) == np.ndarray):\n time_range = np.min(time_range)\n return(time_range)",
"def find_zero(self, t, y):\n return y[0]",
"def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx",
"def find_x_for_T(self, T_0=1.e9):\n\n # our strategy here assumes that the hot ash is in the early\n # part of the profile. We then find the index of the first\n # point where T drops below T_0\n idx = np.where(self.T < T_0)[0][0]\n\n T1 = self.T[idx-1]\n x1 = self.x[idx-1]\n\n T2 = self.T[idx]\n x2 = self.x[idx]\n\n slope = (x2 - x1)/(T2 - T1)\n\n return x1 + slope*(T_0 - T1)",
"def return_pos_index(self, index, tpos, window_size):\r\n minimum = max(0, index-(tpos//window_size))\r\n maximum = min(len(self.data), index+(tpos//window_size)+1) # since non-inclusive\r\n return np.random.randint(minimum, maximum)",
"def find_nearest_idx(arr, val):\n arr = np.asarray(arr)\n idx = (np.abs(arr - val)).argmin()\n return idx",
"def find_first(item, vec):\n for i in range(len(vec)):\n if item == vec[i]:\n return i\n return -1"
]
| [
"0.6851553",
"0.66663456",
"0.6480114",
"0.5943271",
"0.5854851",
"0.58466977",
"0.57779187",
"0.5618756",
"0.550382",
"0.5497214",
"0.5495332",
"0.5477422",
"0.5467759",
"0.54675514",
"0.5465566",
"0.543921",
"0.54121536",
"0.5388304",
"0.5374073",
"0.53687054",
"0.53608406",
"0.5359516",
"0.5332319",
"0.5293704",
"0.5274141",
"0.52589536",
"0.5254499",
"0.5250098",
"0.52392894",
"0.5207115"
]
| 0.8042713 | 0 |
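For illustration only: a synthetic trace run through the same `find_peaks` criterion that `first_spike_tind` uses above. The waveform and sampling are made up; only numpy and scipy are assumed.

```python
import numpy as np
from scipy.signal import find_peaks

t = np.linspace(0.0, 1.0, 10000)                        # ~0.1 ms sampling (illustrative)
V = -65.0 + 100.0 * np.exp(-((t - 0.3) / 0.001) ** 2)   # one artificial spike peaking near +35 mV
spikes, _ = find_peaks(V, height=[1, 1000])             # peaks between 1 and 1000 mV, as in first_spike_tind
print(spikes[0], t[spikes[0]])                          # index and time of the first spike
```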
Computes spike latency, i.e. the time from the start of the trace to the first spike. Warns (without raising) if the injected current step is not +100 pA. | def spike_latency(t, I, V):
    # warn if the current step is not +100 pA (I is expected in nA, so 0.1 nA)
if abs(I[5] - 0.1) > 1e-7:
sign = ""
if I[5] > 0:
sign = "+"
print(f"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \
pA current")
spike_tind = first_spike_tind(V)
return t[spike_tind] - t[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _latency(self):\n\n return\n time.sleep(0.005 + random.random() / 30.)",
"def spike_latency_abf(abf, epochstart):\n p0 = abf.sweepEpochs.p1s[epochstart]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n I = abf.sweepC[p0:-1]\n return spike_latency(t, I, V)",
"def get_latency(a, low, high):\n\n if random.random() > 0.69:\n dist = random.randint(1000000,20000000)\n else:\n q = random.uniform(low,high)\n dist = int(stats.pareto.ppf(q, a))\n\n return dist/(1000*66) + 5",
"def inc_latency(self, *_, **__): # pylint: disable=arguments-differ\n pass",
"def __get_speed(self):\n if self.speed_method == 'average_gap':\n total_gap = 0\n for i in range(1, len(self.__spike_buffer)):\n total_gap += self.__spike_buffer[i] - self.__spike_buffer[i-1]\n\n average_gap = total_gap / len(self.__spike_buffer)\n\n\n if self.__spike_buffer[-1] > timeit.default_timer() - self.cooldown:\n speed = self.tick_length/average_gap\n else:\n speed = 0.00\n\n return speed",
"def offset_sleep(self, sleep_time):\n ran_time = time.perf_counter() - self._lap\n while sleep_time - (time.perf_counter() - self._lap) > 0:\n if sleep_time - (time.perf_counter() - self._lap) > 0.002:\n time.sleep(0.00001)\n total_time = time.perf_counter() - self._lap\n self.lap()\n return ran_time, total_time",
"def delayToNextPacket(self):\n delay = -(1.0 / (self.mPacketsPerSecond)) * np.log(1 - np.random.uniform())\n # exponential distribution in seconds\n return round(delay * Constants.TICKS_PER_SECOND)\n #return (Math.round(delay * Main.TICKS_PER_SECOND))",
"def speed(self, s=0):",
"def slower(self):\n self._prepare()\n rate = self._eng.getProperty(\"rate\")\n newrate = rate - 50\n logging.debug(\"slower %d => %d\" %(rate, newrate))\n self._eng.setProperty(\"rate\", newrate)\n self._eng.runAndWait()\n self.say(\"slower\")",
"def increase_speed(self):\n self.ship_speed*=self.speedup_scale\n self.bullet_speed*=self.speedup_scale\n self.alien_speed*=self.speedup_scale\n self.alien_points=int(self.alien_points*self.score_scale)\n print(self.alien_points)",
"def _calibrate_time(self):\n time_overhead = 0\n for i in range(1000):\n start = self._adjusted_time()\n end = self._adjusted_time()\n time_overhead += end - start\n return time_overhead / 1000",
"def speed(self, speed: int, time: int = 0, /) -> None:",
"def get_latency(self):\n raise NotImplementedError()",
"def packet_arrival():\r\n return 1.0",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def lap(self):\n current_time = time.perf_counter()\n ret = current_time - self._lap\n if abs(ret) != ret:\n ret = self._time_corruption\n self._lap = current_time\n return ret",
"def soundspeed(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_tt = liq_g(2,0,temp,pres)\n g_tp = liq_g(1,1,temp,pres)\n g_pp = liq_g(0,2,temp,pres)\n csqinv = (g_tp**2/g_tt - g_pp) / g_p**2\n c = csqinv**(-.5)\n return c",
"def increase_speed(self):\n self.ship_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.alien_speed_factor *= self.speedup_scale\n self.alien_points = int(self.alien_points * self.score_scale)",
"def calculate_speed_of_sound(t, h, p):\n\n # using crude approximation for now\n return 331.4 + 0.6 * t + 0.0124 * h",
"def increase_speed(self):\n self.ship_speed_factor *= self.speed_up_scale\n self.bullet_speed_factor *= self.speed_up_scale\n self.alien_speed_factor *= self.speed_up_scale\n\n self.alien_points = int(self.alien_points * self.score_scale)",
"def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r",
"def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10",
"def fpct(self):\n # 1 is probably the best number in most cases because the game is often CPU-bound.\n # the following number could be chosen instead someday\n tps = self.real_speed * 1000 / VIRTUAL_TIME_INTERVAL\n # Avoid unrealistic ping values.\n ping = min(self.max_ping, self.ping)\n result = int(tps * ping * config.fpct_coef) + 1\n return min(config.fpct_max, result)",
"def increase_speed(self):\n self.ship_speed += self.speedup_scale\n self.bullet_speed += self.speedup_scale\n self.alien_speed += self.speedup_scale\n\n self.alien_points = int(self.alien_points * self.score_scale)",
"def increase_speed(self):\n self.target_speed *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale",
"def sleep_approx(self, seconds):\n upperbound = (seconds+0.2)*10000\n if (seconds >= 1):\n lowerbound = (seconds-0.2)*10000\n else:\n lowerbound = seconds*10000\n\n sleeptime = random.randint(lowerbound, upperbound)\n sleeptime = sleeptime/10000\n sleeptime = sleeptime*.8\n\n if (self.botspeed == 1.25):\n sleeptime = sleeptime*.75\n elif (self.botspeed == 1.5):\n sleeptime = sleeptime*.5\n sleep(sleeptime)",
"def faster(self):\n self._prepare()\n rate = self._eng.getProperty(\"rate\")\n newrate = rate + 50\n logging.debug(\"faster %d => %d\" %(rate, newrate))\n self._eng.setProperty(\"rate\", newrate)\n self._eng.runAndWait()\n self.say(\"faster\")",
"def penalisePathTime(route):\n for edge in route:\n # Getting the current adapted time for that edge\n currentAdaptedTime = adjustedEdgeSpeedGlobal[edge]\n # Penalise the travel time by PENALISATION\n adjustedEdgeSpeedGlobal[edge] = currentAdaptedTime * 2\n traci.edge.adaptTraveltime(edge, adjustedEdgeSpeedGlobal[edge])",
"def speed(self, value: int, /) -> None:"
]
| [
"0.6667742",
"0.6639285",
"0.63193643",
"0.6291087",
"0.62277293",
"0.6076571",
"0.6061371",
"0.59558076",
"0.59556544",
"0.5908918",
"0.57772434",
"0.5773665",
"0.5744893",
"0.5719568",
"0.56907696",
"0.56907696",
"0.5681695",
"0.56708837",
"0.5628368",
"0.56033325",
"0.5596657",
"0.5593546",
"0.5563045",
"0.55494344",
"0.5523033",
"0.54893166",
"0.54823637",
"0.54814446",
"0.5471393",
"0.54585534"
]
| 0.7749325 | 0 |
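A small synthetic check of `spike_latency`, assuming it and its helper `first_spike_tind` are importable from this module. The warning message above implies the current trace is in nA (0.1 nA = +100 pA); all numbers here are illustrative.

```python
import numpy as np

t = np.linspace(0.0, 0.5, 5000)                          # ~0.1 ms sampling (illustrative)
I = np.full_like(t, 0.1)                                 # +100 pA step expressed in nA
V = -65.0 + 100.0 * np.exp(-((t - 0.05) / 0.001) ** 2)   # artificial spike ~50 ms after trace start
print(spike_latency(t, I, V))                            # ~0.05 s, no warning printed
```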
Computes spike latency using an abf object and epoch index. | def spike_latency_abf(abf, epochstart):
p0 = abf.sweepEpochs.p1s[epochstart]
t = abf.sweepX[p0:-1]
V = abf.sweepY[p0:-1]
I = abf.sweepC[p0:-1]
return spike_latency(t, I, V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]",
"def time_constant_abf(abf, epoch_start):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n p1 = abf.sweepEpochs.p1s[epoch_start + 1]\n\n t = abf.sweepX[p0:p1] - abf.sweepX[p0]\n V = abf.sweepY[p0:p1]\n\n return time_constant(t, V)",
"def train_one_epoch(sess, tr_model, i_epoch, run_metadata):\n tr_loss, i = 0, 0\n stime = time.time()\n while True:\n try:\n if NNET_PARAM.time_line:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size],\n options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n run_metadata=run_metadata)\n else:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size])\n tr_loss += loss\n if (i+1) % NNET_PARAM.minibatch_size == 0:\n if NNET_PARAM.time_line and NNET_PARAM.timeline_type == 'minibatch':\n tl = timeline.Timeline(run_metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n with open('_timeline/%03dtimeline%04d.json' % (i_epoch, i+1), 'w') as f:\n f.write(ctf)\n lr = sess.run(tr_model.lr)\n costtime = time.time()-stime\n stime = time.time()\n print(\"MINIBATCH %05d: TRAIN AVG.LOSS %04.6f, \"\n \"(learning rate %02.6f)\" % (\n i + 1, tr_loss / (i*NNET_PARAM.batch_size+current_batchsize), lr), 'cost time: %06dS' % costtime)\n sys.stdout.flush()\n i += 1\n except tf.errors.OutOfRangeError:\n break\n tr_loss /= ((i-1)*NNET_PARAM.batch_size+current_batchsize)\n return tr_loss",
"def scheduler(epoch):\n return alpha / (1 + decay_rate * epoch)",
"def getmeanspikerateonepoch(seg, epoch_name):\n epoch_names = [epoch.name for epoch in seg.epochs]\n if epoch_name not in epoch_names:\n print('Cannot find epoch named {} in segment {}'.format(epoch_name, seg.name))\n return\n if not seg.spiketrains:\n print('No spiketrain for segment {}'.format(seg.name))\n return\n elif len(seg.spiketrains) > 1:\n print('More than one spiketrain in segment {}. Only consider the first one.'.format(seg.name))\n epoch_pos = np.where(np.array(epoch_names) == epoch_name)[0][0]\n epoch = seg.epochs[epoch_pos]\n epoch_t_starts, epoch_durations = epoch.times, epoch.durations\n _, spktimes = spiketraintimesel(seg.spiketrains[0], np.array(epoch_t_starts), np.array(epoch_t_starts +\n epoch_durations))\n n_spk_per_epoch = np.array([len(times) for times in spktimes])\n spkrate_per_epoch = n_spk_per_epoch / epoch_durations\n return spkrate_per_epoch",
"def run_epoch_test(session, model, verbose=False):\n # fetches = {\"ms\": model.dynamic_eval.global_ms()}\n # vals = session.run(fetches)\n # ms = vals[\"ms\"]\n # s = np.sum(np.sqrt([x for x in ms]))\n # print(s)\n\n\n\n start_time = time.time()\n losses = 0.0\n iters = 0\n\n # zeros initial state for all devices\n state = []\n for k in range(model.gpu_num):\n state.append(session.run(model.initial_state(k)))\n\n # evaluate loss and final state for all devices\n fetches = {\"loss\": model.loss}\n\n if config.dynamic_eval:\n fetches[\"update_op\"] = model.dynamic_eval.update_op()\n\n\n for k in range(model.gpu_num):\n fetches[\"final_state%d\" % k] = model.final_state(k)\n\n for step in range(model.input.epoch_size):\n # pass states between time batches\n feed_dict = {}\n for i in range(model.gpu_num):\n gpu_state = model.initial_state(i)\n for j, (c, h) in enumerate(gpu_state):\n feed_dict[c] = state[i][j].c\n feed_dict[h] = state[i][j].h\n\n vals = session.run(fetches, feed_dict)\n\n loss = vals[\"loss\"]\n\n for k in range(model.gpu_num):\n state[k] = vals[\"final_state%d\" % k]\n\n losses += loss\n iters += model.input.time_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 0:\n print(\"%.3f perplexity: %.3f bits: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(losses / iters), np.log2(np.exp(losses / iters)),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(losses / iters)",
"def linear_decay(epoch: int, total_num_updates: int) -> float:\n return 1 - (epoch / float(total_num_updates))",
"def flann_index_time_experiment():\n import vtool as vt\n import pyflann\n import itertools\n\n class TestDataPool(object):\n \"\"\"\n Perform only a few allocations of test data\n \"\"\"\n def __init__(self):\n self.num = 10000\n self.data_pool = None\n self.alloc_pool(1000000)\n\n def alloc_pool(self, num):\n print('[alloc] num = %r' % (num,))\n self.num = num\n self.data_pool = vt.tests.dummy.testdata_dummy_sift(num)\n print('[alloc] object size ' + ut.get_object_size_str(self.data_pool, 'data_pool'))\n\n def get_testdata(self, num):\n if len(self.data_pool) < num:\n self.alloc_pool(2 * self.num)\n return self.data_pool[0:num]\n\n pool = TestDataPool()\n\n def get_buildtime_data(**kwargs):\n flann_params = vt.get_flann_params(**kwargs)\n print('flann_params = %r' % (ut.dict_str(flann_params),))\n data_list = []\n num = 1000\n print('-----')\n for count in ut.ProgressIter(itertools.count(), nTotal=-1, freq=1, autoadjust=False):\n num = int(num * 1.2)\n print('num = %r' % (num,))\n #if num > 1E6:\n # break\n data = pool.get_testdata(num)\n print('object size ' + ut.get_object_size_str(data, 'data'))\n flann = pyflann.FLANN(**flann_params)\n with ut.Timer(verbose=False) as t:\n flann.build_index(data)\n print('t.ellapsed = %r' % (t.ellapsed,))\n if t.ellapsed > 5 or count > 1000:\n break\n data_list.append((count, num, t.ellapsed))\n print('-----')\n return data_list, flann_params\n\n data_list1, params1 = get_buildtime_data(trees=1)\n\n data_list2, params2 = get_buildtime_data(trees=2)\n\n data_list4, params4 = get_buildtime_data(trees=4)\n\n data_list8, params8 = get_buildtime_data(trees=8)\n\n data_list16, params16 = get_buildtime_data(trees=16)\n\n import plottool as pt\n\n def plotdata(data_list):\n count_arr = ut.get_list_column(data_list, 1)\n time_arr = ut.get_list_column(data_list, 2)\n pt.plot2(count_arr, time_arr, marker='-o', equal_aspect=False,\n x_label='num_vectors', y_label='FLANN build time')\n\n plotdata(data_list1)\n plotdata(data_list2)\n plotdata(data_list4)\n plotdata(data_list8)\n plotdata(data_list16)\n\n pt.iup()",
"def inc_latency(self, *_, **__): # pylint: disable=arguments-differ\n pass",
"def __init__(self, reference, time_start=500.0 * pq.ms,\n time_stop=2000.0 * pq.ms, time_buffer=250 * pq.ms):\n super(SpikeTimesObjective, self).__init__(time_start, time_stop)\n if time_stop - time_start - time_buffer * 2 <= 0:\n raise Exception(\"Buffer time ({}) exceeds half of spike train \"\n \"time ({}) and therefore the inner window is \"\n \"empty\".format(buffer, (time_stop - time_start)))\n if isinstance(reference, neo.core.SpikeTrain):\n self.ref_spikes = reference\n elif isinstance(reference, neo.core.AnalogSignal):\n self.ref_spikes = AnalysedSignal(reference).spikes()\n else:\n raise Exception(\"Spikes must be a neo.core.SpikeTrain object not \"\n \"{}\".format(type(reference)))\n self.time_buffer = time_buffer\n self.ref_inner = reference[numpy.where(\n (reference >= (time_start + time_buffer)) &\n (reference <= (time_stop - time_buffer)))]\n if not len(self.ref_inner):\n raise Exception(\"Inner window does not contain any spikes\")",
"def compute_time_train(model, loss_fun):\n # Use train mode\n model.train()\n # Generate a dummy mini-batch and copy data to GPU\n im_size, batch_size = cfg.TRAIN.IM_SIZE, int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS)\n inputs = torch.rand(batch_size, 3, im_size, im_size).cuda(non_blocking=False)\n labels = torch.zeros(batch_size, dtype=torch.int64).cuda(non_blocking=False)\n # Cache BatchNorm2D running stats\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n bn_stats = [[bn.running_mean.clone(), bn.running_var.clone()] for bn in bns]\n # Compute precise forward backward pass time\n fw_timer, bw_timer = Timer(), Timer()\n total_iter = cfg.PREC_TIME.NUM_ITER + cfg.PREC_TIME.WARMUP_ITER\n for cur_iter in range(total_iter):\n # Reset the timers after the warmup phase\n if cur_iter == cfg.PREC_TIME.WARMUP_ITER:\n fw_timer.reset()\n bw_timer.reset()\n # Forward\n fw_timer.tic()\n preds = model(inputs)\n loss = loss_fun(preds, labels)\n torch.cuda.synchronize()\n fw_timer.toc()\n # Backward\n bw_timer.tic()\n loss.backward()\n torch.cuda.synchronize()\n bw_timer.toc()\n # Restore BatchNorm2D running stats\n for bn, (mean, var) in zip(bns, bn_stats):\n bn.running_mean, bn.running_var = mean, var\n return fw_timer.average_time, bw_timer.average_time",
"def compute_profiling_time(key, expected_num_spikes, rate, t_stop, n,\n winlen, binsize, num_rep=10):\n\n time_fast_fca = 0.\n time_fpgrowth = 0.\n for rep in range(num_rep):\n # Generating artificial data\n data = []\n for i in range(n):\n np.random.seed(0)\n data.append(stg.homogeneous_poisson_process(\n rate=rate, t_start=0*pq.s, t_stop=t_stop))\n\n # Extracting Closed Frequent Itemset with FP-Growth\n t0 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = spade._build_context(binary_matrix,\n winlen)\n # Applying FP-Growth\n fim_results = [i for i in spade._fpgrowth(\n transactions,\n rel_matrix=rel_matrix,\n winlen=winlen)]\n time_fpgrowth += time.time() - t0\n\n # Extracting Closed Frequent Itemset with Fast_fca\n t1 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = \\\n spade._build_context(binary_matrix, winlen)\n # Applying FP-Growth\n fim_results = spade._fast_fca(context, winlen=winlen)\n time_fast_fca += time.time() - t1\n\n time_profiles = {'fp_growth': time_fpgrowth/num_rep,\n 'fast_fca': time_fast_fca/num_rep}\n\n # Storing data\n res_path = '../results/{}/{}/'.format(key, expected_num_spikes)\n # Create path is not already existing\n path_temp = './'\n for folder in split_path(res_path):\n path_temp = path_temp + '/' + folder\n mkdirp(path_temp)\n\n np.save(res_path + '/profiling_results.npy', {'results': time_profiles,\n 'parameters': {'rate': rate, 't_stop': t_stop, 'n': n,\n 'winlen': winlen, 'binsize': binsize}})",
"def test020_speed(self, b_size=64, dim=1024,\n alpha_fwd=0.999, alpha_bkw=0.99, eps=1e-05, epoch=100):\n input = torch.randn(b_size, dim)\n\n # instantiate Linearized Online Norm class\n onlin = OnlineNorm1D(dim, alpha_fwd=alpha_fwd, alpha_bkw=alpha_bkw,\n eps=eps, b_size=b_size)\n\n # time lin algo\n forward = 0\n backward = 0\n for _ in range(epoch):\n start = time.time()\n # fprop through lin algo\n out = onlin(input)\n forward += time.time() - start\n\n start = time.time()\n # bprop through lin algo\n out.sum().backward()\n backward += time.time() - start\n\n self.logger.info(f'Linearized Control Normalization Speed Test: '\n f'Forward {forward * 1e6/1e5:.3f} us | '\n f'Backward {backward * 1e6/1e5:.3f} us | '\n f'Total {(forward + backward) * 1e6/1e5:.3f} us')\n\n # Speed test online norm\n # instantiate Looping Online Norm class\n onloop = OnlineNorm1D(dim, eps=eps,\n ctrl_norm=ControlNorm1DLoop(dim,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n eps=eps))\n\n # time loop algo\n forward = 0\n backward = 0\n for _ in range(epoch):\n start = time.time()\n # fprop through loop algo\n out = onloop(input)\n forward += time.time() - start\n\n start = time.time()\n # bprop through loop algo\n out.sum().backward()\n backward += time.time() - start\n\n self.logger.info(f'Loop Control Normalization Speed Test: '\n f'Forward {forward * 1e6/1e5:.3f} us | '\n f'Backward {backward * 1e6/1e5:.3f} us | '\n f'Total {(forward + backward) * 1e6/1e5:.3f} us')\n\n self.logger.info('Make input tensors representative of size you will '\n 'use and then use the correct algorithm based on '\n 'speed of execution.')",
"def get_latency(self):\n raise NotImplementedError()",
"def schedule(epoch):\n return alpha / (1 + (decay_rate * epoch))",
"def test_time_optimize(args, model, optim, imgs, poses, hwf, bound):\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(args.tto_steps):\n indices = torch.randint(num_rays, size=[args.tto_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()",
"def index(self):\n return self._epochs_completed * self._size + self._index_in_epoch",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def time_speed(self):\n time_speed = []\n for i in range (len(np.unique(self.pd.objid))):\n trajec = self.dataset.trajec(self.dataset.keys[i])\n times = trajec.time_epoch_secs + trajec.time_epoch_nsecs / 1e9\n time_speedy = np.vstack([times, trajec.speed])\n time_speed.append(time_speedy)\n return time_speed",
"def first_spike_tind_abf(abf, epoch_start, startind=0):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n return first_spike_tind(V, startind=startind)",
"def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10",
"def timer(trainX, trainY, testX, k, condensed=False):\n \n gc.disable() # disable garbage collector for uninterrupted timing \n initial = clock()\n if condensed:\n cnn = condenseData(trainX, trainY)\n testY = testknn(trainX[cnn], trainY[cnn], testX, k)\n else:\n testY = testknn(trainX, trainY, testX, k)\n final = clock()\n \n gc.enable() # turn garbage collector back on\n return ((final - initial), testY)",
"def scheduler(epoch_idx, lr):\n new_lr = lr\n if (epoch_idx == 60 or epoch_idx == 120 or epoch_idx == 160\n or epoch_idx == 260 or epoch_idx == 320 or epoch_idx == 360):\n new_lr *= 0.2\n \"\"\"\n if epoch_idx == 200:\n new_lr = 0.1\n \"\"\"\n return new_lr",
"def time_run(fnk):\n xval = []\n yval = []\n for n in range(10, 1000, 10):\n xval.append(n)\n graph = gdc.make_upa_graph(n, 5)\n c_time = time.time()\n fnk(graph)\n time_passed = time.time() - c_time\n yval.append(time_passed)\n return xval, yval",
"def train_epoch(model, training_data, optimizer, pred_loss_func, opt):\n\n model.train()\n\n total_event_ll = 0 # cumulative event log-likelihood\n total_time_se = 0 # cumulative time prediction squared-error\n total_time_error = 0 # cumulative time prediction squared-error\n total_time_latitude = 0 # cumulative latitude prediction squared-error\n total_time_longitude = 0 # cumulative longitude prediction squared-error\n total_event_rate = 0 # cumulative number of correct prediction\n total_num_event = 0 # number of total events\n total_num_pred = 0 # number of predictions\n for batch in tqdm(training_data, mininterval=2,\n desc=' - (Training) ', leave=False):\n \"\"\" prepare data \"\"\"\n event_time, time_gap, event_type, latitude, longitude = map(lambda x: x.to(opt.device), batch)\n\n \"\"\" forward \"\"\"\n optimizer.zero_grad()\n\n enc_out, prediction = model(event_type, event_time, latitude, longitude) # change the event_time to time gap\n\n \"\"\" backward \"\"\"\n # negative log-likelihood\n event_ll, non_event_ll = Utils.log_likelihood(model, enc_out, event_time, event_type, latitude, longitude) # change the event_time to time gap\n event_loss = -torch.sum(event_ll - non_event_ll)\n\n # type prediction\n pred_loss, pred_num_event = Utils.type_loss(prediction[0], event_type, pred_loss_func)\n\n # time prediction\n se = Utils.time_loss(prediction[1], event_time) # change the event_time to time gap\n\n # latitude prediction\n le = Utils.time_loss(prediction[2], latitude)\n\n # longitude prediction\n ge = Utils.time_loss(prediction[3], longitude)\n\n # SE is usually large, scale it to stabilize training\n scale_time_loss = 100\n loss = event_loss + pred_loss + se / scale_time_loss + le / scale_time_loss + ge / scale_time_loss\n loss.backward()\n\n \"\"\" update parameters \"\"\"\n optimizer.step()\n\n \"\"\" note keeping \"\"\"\n total_event_ll += -event_loss.item()\n total_time_se += se.item() + le.item() + ge.item()\n total_time_error += se.item()\n total_time_latitude += le.item()\n total_time_longitude += ge.item()\n\n total_event_rate += pred_num_event.item()\n total_num_event += event_type.ne(Constants.PAD).sum().item()\n # we do not predict the first event\n total_num_pred += event_type.ne(Constants.PAD).sum().item() - event_time.shape[0]\n\n total_rmse = np.sqrt(total_time_se / total_num_pred)\n time_rmse = np.sqrt(total_time_error / total_num_pred)\n latitude_rmse = np.sqrt(total_time_latitude / total_num_pred)\n longitude_rmse = np.sqrt(total_time_longitude / total_num_pred)\n print('Time: {:5f} Latitude: {:5f} Longitude: {:5f} Overall: {:5f} '.format(time_rmse, latitude_rmse, longitude_rmse, total_rmse))",
"def CalculateSpeedIndex(self):\n time_completeness_list = self.GetTimeCompletenessList()\n prev_completeness = 0.0\n speed_index = 0.0\n prev_time = time_completeness_list[0][0]\n for time, completeness in time_completeness_list:\n # Add the incemental value for the interval just before this event.\n elapsed_time = time - prev_time\n incompleteness = (1.0 - prev_completeness)\n speed_index += elapsed_time * incompleteness\n\n # Update variables for next iteration.\n prev_completeness = completeness\n prev_time = time\n return speed_index",
"def runtime(self):\n return self.tmax_epochs - self.tmin_epochs",
"def calculate_latency(session_id, limit):\n from .wsgi_aux import app\n with app.app_context():\n head_key = \"head-frames\"\n scene_key = \"scene-frames\"\n\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n head_frames_raw = get_frames_from_redis(r=r, key=head_key, limit=limit)\n scene_frames_raw = get_frames_from_redis(r=r, key=scene_key, limit=limit)\n\n head_stream = []\n scene_stream = []\n\n for data in head_frames_raw:\n head_stream.append(decode_base64(data))\n\n for data in scene_frames_raw:\n scene_stream.append(decode_base64(data))\n\n _, phf_pitch, phf_yaw, phf_roll = cssi.latency.calculate_head_pose(frame=head_stream[0])\n _, chf_pitch, chf_yaw, chf_roll = cssi.latency.calculate_head_pose(frame=head_stream[1])\n _, _, ff_angles, sf_angles = cssi.latency.calculate_camera_pose(first_frame=scene_stream[0],\n second_frame=scene_stream[1], crop=True,\n crop_direction='horizontal')\n\n head_angles = [[phf_pitch, phf_yaw, phf_roll], [chf_pitch, chf_yaw, chf_roll]]\n camera_angles = [ff_angles, sf_angles]\n\n latency_score = cssi.latency.generate_rotation_latency_score(head_angles=head_angles,\n camera_angles=camera_angles)\n\n # head_movement = cssi.latency.check_for_head_movement(head_stream)\n # logger.debug(\"Head movement detected: {0}\".format(head_movement))\n\n # pst = cssi.latency.calculate_pst(scene_stream, 10)\n # logger.debug(\"Pixel switching time: {0}\".format(pst))\n\n session = Session.query.filter_by(id=session_id).first()\n if session is not None:\n new_score = {'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'score': latency_score}\n session.latency_scores.append(new_score)\n db.session.commit()",
"def elapseTime(self, idx):\n newBeliefs = util.Counter()\n for oldPos in self.legalPositions:\n if self.beliefs[idx][oldPos] <= 0:\n continue\n newPosDist = self.getPositionDistribution(oldPos)\n for newPos, prob in newPosDist.items():\n newBeliefs[newPos] += prob * self.beliefs[idx][oldPos]\n newBeliefs.normalize()\n self.beliefs[idx] = newBeliefs"
]
| [
"0.6028425",
"0.5491465",
"0.530693",
"0.5300092",
"0.52771044",
"0.52674377",
"0.5228959",
"0.5220311",
"0.52153283",
"0.52088284",
"0.5191576",
"0.5187746",
"0.51213366",
"0.511883",
"0.51113784",
"0.5102528",
"0.5069571",
"0.50503147",
"0.50503147",
"0.50397193",
"0.50395787",
"0.50389785",
"0.5022178",
"0.50200903",
"0.4984694",
"0.49842593",
"0.49760103",
"0.4968743",
"0.49679825",
"0.49455684"
]
| 0.71531516 | 0 |
Gets all spike indices from time t and voltage trace V. | def all_spike_ind(t, V):
spikes, _ = find_peaks(V, [1, 1000])
return spikes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interspike_intervals(t, V):\n # first pass -- get number of spikes and locations\n spike_inds = all_spike_ind(t, V)\n n_spikes = len(spike_inds)\n\n if n_spikes == 0:\n return []\n\n # generate array to hold time intervals\n intervals = np.zeros((n_spikes-1), dtype=float)\n for ti in range(1, n_spikes):\n intervals[ti-1] = t[spike_inds[ti]] - t[spike_inds[ti-1]]\n\n return intervals",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)",
"def analyzeIV(t, V, I, tw, thr):\n ntraces = numpy.shape(V)[0]\n vss = []\n vmin = []\n vm = []\n ic = []\n nspikes = []\n ispikes = []\n tmin = []\n fsl = []\n fisi = []\n for j in range(0, ntraces):\n ts = tw[0]\n te = tw[1]\n td = tw[2]\n ssv = measure('mean', t, V[j,:], te-td, te)\n ssi = measure('mean', t, I[j,:], te-td, te)\n rvm = measure('mean', t, V[j,:], 0.0, ts-1.0)\n minv = measure('min', t, V[j,:], ts, te)\n spk = findspikes(t, V[j,:], thr, t0=ts, t1=te)\n nspikes.append(count_spikes(spk)) # build spike list\n ispikes.append(ssi[0])\n if nspikes[-1] >= 1:\n fsl.append(spk[0])\n else:\n fsl.append(None)\n if nspikes[-1] >= 2:\n fisi.append(spk[1]-spk[0])\n else:\n fisi.append(None)\n vm.append(rvm[0])\n if ssi[0] < 0.0: # just for hyperpolarizing pulses...\n ic.append(ssi[0])\n vss.append(ssv[0]) # get steady state voltage\n vmin.append(minv[0]) # and min voltage\n tmin.append(minv[1]) # and min time\n\n return({'I': numpy.array(ic), 'Vmin': numpy.array(vmin), 'Vss': numpy.array(vss),\n 'Vm': numpy.array(vm), 'Tmin': numpy.array(tmin), \n 'Ispike': numpy.array(ispikes), 'Nspike': numpy.array(nspikes), \n 'FSL': numpy.array(fsl), 'FISI': numpy.array(fisi)})",
"def spiketraintimesel(spiketrain, t_start, t_stop):\n if not np.isscalar(t_start):\n if len(t_start) != len(t_stop):\n print('Argments t_start and t_stop must have the same size')\n return [], []\n else:\n spiketrainsel_list, spiketimesel_list = [], []\n for i in range(len(t_start)):\n spiketrainsel, spiketimesel = spiketraintimesel(spiketrain, t_start[i], t_stop[i])\n spiketrainsel_list.append(spiketrainsel)\n spiketimesel_list.append(spiketimesel)\n return spiketrainsel_list, spiketimesel_list\n else:\n spiketimes = np.array(spiketrain).ravel()\n spiketimesel = spiketimes[np.logical_and(spiketimes > t_start, spiketimes < t_stop)]\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel, units=spiketrain.units, t_start=spiketrain.t_start,\n t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel",
"def get_spikes(spiketimes=None, t1=None, t2=None):\n indices = np.where((spiketimes[:,1] > t1) & (spiketimes[:,1] < t2))\n timed_spikes = spiketimes[indices]\n return timed_spikes",
"def get_ser_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_ser_neurons()):\n spktimes_singlesweep.append(\n np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def getSpikes(self, compatible_output=False, gather=True):\n global controller\n timer = None\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer = Timer()\n timer.start_timing()\n spikes = self.vertex.getSpikes(controller, controller.dao.run_time, compatible_output)\n\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer.take_sample()\n return spikes",
"def get_spikes(self):\n\t\treturn spiketrain.netconvecs_to_listoflists(self.t_vec, self.id_vec)",
"def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')",
"def at(self, t, tol=None):\r\n return self.data[..., self.time.index_at(t)]",
"def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n return None\n Vmax = V[t_spike]\n Vmin = np.min(V[t_spike+1:t_spike+500])\n\n return Vmax - Vmin",
"def AllSpikeTimes(self):\n blah = []\n for neur in self.neurons:\n blah.append(np.array(neur.spikes))\n\n return blah",
"def spiketraintimerejection(spiketrain, t_start, t_stop):\n t_start, t_stop = np.array(t_start), np.array(t_stop)\n if not t_start.size == 1:\n if t_start.size != t_stop.size:\n print('Argments t_start and t_stop must have the same size')\n return [], []\n else:\n spikerejected_ind = np.zeros(len(spiketrain), dtype=bool)\n for i in range(len(t_start)):\n _, _, spikerejected_ind_i = spiketraintimerejection(spiketrain, t_start[i], t_stop[i])\n spikerejected_ind = spikerejected_ind | spikerejected_ind_i\n spiketimesel = np.array(spiketrain).ravel()[~spikerejected_ind]\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel, units=spiketrain.units, t_start=spiketrain.t_start,\n t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel, spikerejected_ind\n else:\n spiketimes = np.array(spiketrain).ravel()\n spikerejected_ind = np.logical_and(spiketimes >= t_start, spiketimes <= t_stop)\n spiketimesel = spiketimes[np.logical_or(spiketimes < t_start, spiketimes > t_stop)]\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel, units=spiketrain.units, t_start=spiketrain.t_start,\n t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel, spikerejected_ind",
"def extract_spike_features(time, current, voltage, start=0.1, end=0.7, fil=10):\n\n df = pd.DataFrame()\n df_related_features = pd.DataFrame()\n for c, curr in enumerate(current):\n current_array = curr * np.ones_like(time)\n start_index = (np.abs(time - start)).argmin() # Find closest index where the injection current starts\n end_index = (np.abs(time - end)).argmin() # Find closest index where the injection current ends\n current_array[:start_index] = 0\n current_array[end_index:len(current_array)] = 0\n EphysObject = efex.EphysSweepFeatureExtractor(t=time, v=voltage[:, c], i=current_array, start=start, \\\n end=end, filter=fil)\n EphysObject.process_spikes()\n\n # Adding peak_height (mV) + code for maximum frequency determination (see further)\n spike_count = 0\n if EphysObject._spikes_df.size:\n EphysObject._spikes_df['peak_height'] = EphysObject._spikes_df['peak_v'].values - \\\n EphysObject._spikes_df['threshold_v'].values\n spike_count = EphysObject._spikes_df['threshold_i'].values.size\n df = pd.concat([df, EphysObject._spikes_df], sort=True)\n\n # Some easily found extra features\n df_features = EphysObject._sweep_features\n\n # Adding spike count\n df_features.update({'spike_count': spike_count})\n\n # Adding spike frequency adaptation (ratio of spike frequency of second half to first half)\n SFA = np.nan\n half_stim_index = ft.find_time_index(time, np.float(start + (end - start) / 2))\n if spike_count > 5: # We only consider traces with more than 8.333 Hz = 5/600 ms spikes here\n # but in the end we only take the trace with the max amount of spikes\n\n if np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index) != 0:\n SFA = np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] > half_stim_index) / \\\n np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index)\n\n df_features.update({'SFA': SFA})\n\n # Adding current (pA)\n df_features.update({'current': curr})\n\n # Adding membrane voltage (mV)\n df_features.update({'resting_membrane_potential': EphysObject._get_baseline_voltage()})\n\n # Adding voltage deflection to steady state (mV)\n voltage_deflection_SS = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n # voltage_deflection_v, voltage_deflection_i = EphysObject.voltage_deflection() # = old way: max deflection\n df_features.update({'voltage_deflection': voltage_deflection_SS})\n\n # Adding input resistance (MOhm)\n input_resistance = np.nan\n if not ('peak_i' in EphysObject._spikes_df.keys()) and not curr == 0: # We only calculate input resistances\n # from traces without APs\n input_resistance = (np.abs(voltage_deflection_SS - EphysObject._get_baseline_voltage()) * 1000) / np.abs(\n curr)\n if input_resistance == np.inf:\n input_resistance = np.nan\n df_features.update({'input_resistance': input_resistance})\n\n # Adding membrane time constant (s) and voltage plateau level for hyperpolarisation paradigms\n # after stimulus onset\n tau = np.nan\n E_plat = np.nan\n sag_ratio = np.nan\n if curr < 0: # We use hyperpolarising steps as required in the object function to estimate the\n # membrane time constant and E_plateau\n while True:\n try:\n tau = EphysObject.estimate_time_constant() # Result in seconds!\n break\n except TypeError: # Probably a noisy bump for this trace, just keep it to be np.nan\n break\n E_plat = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n sag, sag_ratio = EphysObject.estimate_sag()\n df_features.update({'tau': tau})\n df_features.update({'E_plat': 
E_plat})\n df_features.update({'sag_ratio': sag_ratio})\n\n # For the rebound and sag time we only are interested in the lowest (-200 pA (usually)) hyperpolarisation trace\n rebound = np.nan\n sag_time = np.nan\n sag_area = np.nan\n\n if c == 0:\n baseline_interval = 0.1 # To calculate the SS voltage\n v_baseline = EphysObject._get_baseline_voltage()\n\n end_index = ft.find_time_index(time, 0.7)\n if np.flatnonzero(voltage[end_index:, c] > v_baseline).size == 0: # So perfectly zero here means\n # it did not reach it\n rebound = 0\n else:\n index_rebound = end_index + np.flatnonzero(voltage[end_index:, c] > v_baseline)[0]\n if not (time[index_rebound] > (end + 0.15)): # We definitely have 150 ms left to calculate the rebound\n rebound = ft.average_voltage(\n voltage[index_rebound:index_rebound + ft.find_time_index(time, 0.15), c], \\\n time[index_rebound:index_rebound + ft.find_time_index(time, 0.15)]) - v_baseline\n else: # Work with whatever time is left\n if time[-1] == time[index_rebound]:\n rebound = 0\n else:\n rebound = ft.average_voltage(voltage[index_rebound:, c], \\\n time[index_rebound:]) - v_baseline\n\n v_peak, peak_index = EphysObject.voltage_deflection(\"min\")\n v_steady = ft.average_voltage(voltage[:, c], time, start=end - baseline_interval, end=end)\n\n if v_steady - v_peak < 4: # The sag should have a minimum depth of 4 mV\n # otherwise we set sag time and sag area to 0\n sag_time = 0\n sag_area = 0\n else:\n # First time SS is reached after stimulus onset\n first_index = start_index + np.flatnonzero(voltage[start_index:peak_index, c] < v_steady)[0]\n # First time SS is reached after the max voltage deflection downwards in the sag\n if np.flatnonzero(voltage[peak_index:end_index, c] > v_steady).size == 0:\n second_index = end_index\n else:\n second_index = peak_index + np.flatnonzero(voltage[peak_index:end_index, c] > v_steady)[0]\n sag_time = time[second_index] - time[first_index]\n sag_area = -integrate.cumtrapz(voltage[first_index:second_index, c], time[first_index:second_index])[-1]\n\n burst_metric = np.nan\n # print(c)\n if spike_count > 5:\n burst = EphysObject._process_bursts()\n if len(burst) != 0:\n burst_metric = burst[0][0]\n\n df_features.update({'rebound': rebound})\n df_features.update({'sag_time': sag_time})\n df_features.update({'sag_area': sag_area})\n df_features.update({'burstiness': burst_metric})\n\n df_related_features = pd.concat([df_related_features, pd.DataFrame([df_features])], sort=True)\n\n return df, df_related_features",
"def getDataWithTimeIndex(self, t):\n\n return self.sensorDf.iloc[t,:self.sensorChannels].values",
"def gt_voltages(self, key):\r\n # If we already have the (model, G, T) key, we're done\r\n if key in self.cell_keys:\r\n return self.cell_keys[key]\r\n model, insolation, temperature = key # unpack the key\r\n index = len(self.cell_voltages)\r\n self.cell_keys[key] = index\r\n self.cell_voltages.append(\r\n model.voltage([(i, insolation, temperature) for i in self.currents]))\r\n logger.debug(f'[{index:04d}] CV {insolation:.1f} {temperature:.1f}{DEG}C')\r\n return index",
"def get_spike_trains(self, current=None):\n\n # For compability with sciunit as many spike trains are generated as there exists ground truth observations\n spike_trains = []\n if current:\n self.set_external_current(current)\n self.simulate(T_max=TMAX)\n voltage_trial = self.v\n vm_trial = AnalogSignal(voltage_trial, self.dt)\n spike_train = vm_trial.threshold_detection(0)\n spike_trains = [spike_train for _ in range(0,3)]\n return spike_trains",
"def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])",
"def get_gaba_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_gaba_neurons()):\n spktimes_singlesweep.append(\n np.where(self.gaba_spktrains[sweep_no, cell_no, :] > 0.5)[\n 0\n ]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes",
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]",
"def get_spike(env, a, x = 0, y = 0):\n\n\tmedian = cal_median(a)\n\tmad = get_mad(a, median)\n\tspike_list = np.sum(ct_bis(env, a, median, mad), axis = 0)\n##peut etre ignore\n\tspike_list[np.where(spike_list > 0)[0]] = 1\n\treturn (spike_list)",
"def get_licks(dlc, dlc_t):\r\n lick_times = get_feature_event_times(dlc, dlc_t, ['tongue_end_l_x', 'tongue_end_l_y',\r\n 'tongue_end_r_x', 'tongue_end_r_y'])\r\n return lick_times",
"def test_correct_spiking(self):\n n = 10\n t_max = 25.0\n dt = 0.2\n p = 0.05\n\n # some reproducible arbitrariness\n np.random.seed(622312)\n n_steps = int_r(t_max/dt)\n table = np.random.rand(n_steps, n) < p\n\n G = TableSpikers(n)\n G.spike_table = copy.copy(table)\n\n class SimpleMonitor(object):\n def __init__(self, target):\n self.target = target;\n self.results = []\n self.order = 1\n\n def evolve(self, t, dt):\n idxs = self.target.spike.nonzero()[0]\n self.results.extend([(int_r(t/dt), i) for i in idxs])\n\n M = SimpleMonitor(G)\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n expected = zip(*table.nonzero())\n\n self.assertSequenceEqual(expected, M.results)",
"def first_spike_tind(V, startind=0):\n spikes, _ = find_peaks(V, [1, 1000])\n\n if len(spikes) == 0:\n found_spike = False\n else:\n found_spike = True\n\n if found_spike is False:\n raise NoSpikeFoundException\n else:\n return spikes[0]",
"def get_t_vec(self):\n t_vec = np.arange(0, self.get_T(), self.get_dt())\n\n # Shape checks.\n if 'ser' in self.keys() and 'spktrains' in self['ser'].keys():\n assert self.ser_spktrains.shape[2] == len(\n t_vec\n ), 'Bad t_vec length ({})'.format(len(t_vec))\n if 'gaba' in self.keys() and 'spktrains' in self['gaba'].keys():\n assert self.gaba_spktrains.shape[2] == len(\n t_vec\n ), 'Bad t_vec length ({})'.format(len(t_vec))\n\n return t_vec",
"def time_window_eigs(self, t0, tend):\n indexes = self.time_window_bins(t0, tend)\n return np.concatenate([self.operator.eigenvalues[idx]\n for idx in indexes])",
"def get_spike_template_amplitudes(self, spike_ids, **kwargs):\n if self.model.amplitudes is None:\n return np.zeros(len(spike_ids))\n amplitudes = self.model.amplitudes[spike_ids]\n return amplitudes",
"def spike_width(t, V, t_spike, spike_amp):\n # handle no spike found\n if t_spike is None:\n return None\n\n Vmin = np.min(V[t_spike+1:t_spike+500])\n minval = np.max([t_spike - 100, 0])\n if len(V) > t_spike+500:\n maxval = -1\n else:\n maxval = t_spike+500\n id1 = find_nearest_idx(V[minval:t_spike], spike_amp/2 + Vmin) \\\n + t_spike - 100\n id2 = find_nearest_idx(V[t_spike+1:maxval], spike_amp/2 + Vmin) \\\n + t_spike + 1\n return t[id2] - t[id1]"
]
| [
"0.7304651",
"0.6784216",
"0.62212384",
"0.6000979",
"0.59682405",
"0.5941399",
"0.58900493",
"0.5643796",
"0.5640157",
"0.5621029",
"0.5612397",
"0.560505",
"0.54741955",
"0.54578584",
"0.5405408",
"0.538798",
"0.53744054",
"0.5362125",
"0.53530985",
"0.5315595",
"0.5313094",
"0.5302655",
"0.5289811",
"0.5254021",
"0.5247707",
"0.5244414",
"0.5190822",
"0.5147635",
"0.51119566",
"0.50961024"
]
| 0.8067253 | 0 |
Computes interspike intervals for time t and voltage trace V. If there are N spikes, then there will be N-1 intervals. | def interspike_intervals(t, V):
# first pass -- get number of spikes and locations
spike_inds = all_spike_ind(t, V)
n_spikes = len(spike_inds)
if n_spikes == 0:
return []
# generate array to hold time intervals
intervals = np.zeros((n_spikes-1), dtype=float)
for ti in range(1, n_spikes):
intervals[ti-1] = t[spike_inds[ti]] - t[spike_inds[ti-1]]
return intervals | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes",
"def analyzeIV(t, V, I, tw, thr):\n ntraces = numpy.shape(V)[0]\n vss = []\n vmin = []\n vm = []\n ic = []\n nspikes = []\n ispikes = []\n tmin = []\n fsl = []\n fisi = []\n for j in range(0, ntraces):\n ts = tw[0]\n te = tw[1]\n td = tw[2]\n ssv = measure('mean', t, V[j,:], te-td, te)\n ssi = measure('mean', t, I[j,:], te-td, te)\n rvm = measure('mean', t, V[j,:], 0.0, ts-1.0)\n minv = measure('min', t, V[j,:], ts, te)\n spk = findspikes(t, V[j,:], thr, t0=ts, t1=te)\n nspikes.append(count_spikes(spk)) # build spike list\n ispikes.append(ssi[0])\n if nspikes[-1] >= 1:\n fsl.append(spk[0])\n else:\n fsl.append(None)\n if nspikes[-1] >= 2:\n fisi.append(spk[1]-spk[0])\n else:\n fisi.append(None)\n vm.append(rvm[0])\n if ssi[0] < 0.0: # just for hyperpolarizing pulses...\n ic.append(ssi[0])\n vss.append(ssv[0]) # get steady state voltage\n vmin.append(minv[0]) # and min voltage\n tmin.append(minv[1]) # and min time\n\n return({'I': numpy.array(ic), 'Vmin': numpy.array(vmin), 'Vss': numpy.array(vss),\n 'Vm': numpy.array(vm), 'Tmin': numpy.array(tmin), \n 'Ispike': numpy.array(ispikes), 'Nspike': numpy.array(nspikes), \n 'FSL': numpy.array(fsl), 'FISI': numpy.array(fisi)})",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return (SpikeCount, splitInterval)",
"def min_spike_frequency_tV(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n max_int = np.amax(intervals)\n return 1/max_int",
"def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)",
"def multivariate(spiketrains, start, end, nsamples):\n t = np.linspace(start+(end-start)/nsamples, end, nsamples)\n N = len(spiketrains)\n\n strains_se = []\n for idx in range(N):\n newst = np.insert(spiketrains[idx], 0, start)\n strains_se.append(np.append(newst, end))\n\n # different between t and previous and next spikes for each t\n prev_spikes = np.zeros((nsamples, N))\n next_spikes = np.zeros((nsamples, N))\n dprev_spikes = np.zeros((nsamples, N))\n dnext_spikes = np.zeros((nsamples, N))\n for idx, ti in enumerate(t):\n prev_spikes[idx] = _find_prev_spikes(ti, strains_se)\n next_spikes[idx] = _find_next_spikes(ti, strains_se)\n dprev_spikes[idx] = ti-prev_spikes[idx]\n dnext_spikes[idx] = next_spikes[idx]-ti\n\n # mean interval from t to previous/next spike on each spiketrain (over t)\n meanp = np.mean(dprev_spikes, axis=1)\n meanf = np.mean(dnext_spikes, axis=1)\n # stdev interval from t to previous/next spike on each spiketrain (over t)\n sigmap = np.std(prev_spikes, axis=1)\n sigmaf = np.std(next_spikes, axis=1)\n # mean inter-spike interval around each t\n xisi = meanp+meanf\n\n mvdist = ((sigmap*meanf)+(sigmaf*meanp))/(xisi**2)\n return t, mvdist",
"def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int",
"def get_envelops(x, t=None):\n if t is None:\n t = np.arange(x.shape[0])\n maxima = argrelmax(x)[0]\n minima = argrelmin(x)[0]\n\n # consider the start and end to be extrema\n\n ext_maxima = np.zeros((maxima.shape[0] + 2,), dtype=int)\n ext_maxima[1:-1] = maxima\n ext_maxima[0] = 0\n ext_maxima[-1] = t.shape[0] - 1\n\n ext_minima = np.zeros((minima.shape[0] + 2,), dtype=int)\n ext_minima[1:-1] = minima\n ext_minima[0] = 0\n ext_minima[-1] = t.shape[0] - 1\n\n tck = interpolate.splrep(t[ext_maxima], x[ext_maxima])\n upper = interpolate.splev(t, tck)\n tck = interpolate.splrep(t[ext_minima], x[ext_minima])\n lower = interpolate.splev(t, tck)\n return upper, lower",
"def findsubintervals (t ,x):\n k, m = len(t), len(x)\n if k<2:\n return zeros(m,1)\n else:\n j = concatenate([t, x]).argsort()\n i = nonzero(j >= k)\n arr = arange(0,m)\n arr = i - arr - 1\n arr = arr[0]\n return arr",
"def calcualte_inte_vn(pT_low, pT_high, data):\n npT = 50\n pT_inte_array = linspace(pT_low, pT_high, npT)\n dpT = pT_inte_array[1] - pT_inte_array[0]\n dN_event = data[:, 2]\n pT_event = data[:, 0]\n dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))\n N_event = data[:, -1]\n N_interp = exp(interp(pT_inte_array, pT_event, log(N_event+1e-30)))\n N = sum(N_interp)*dpT/0.1\n temp_vn_array = [N,]\n for iorder in range(1, n_order):\n vn_real_event = data[:, 4*iorder]\n vn_imag_event = data[:, 4*iorder+2]\n vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)\n vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)\n vn_real_inte = (\n sum(vn_real_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_imag_inte = (\n sum(vn_imag_interp*dN_interp*pT_inte_array)\n /sum(dN_interp*pT_inte_array))\n vn_inte = vn_real_inte + 1j*vn_imag_inte\n temp_vn_array.append(vn_inte)\n return(temp_vn_array)",
"def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spike_tind(V)\n return t[spike_tind] - t[0]",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def get_spike_currents(U, t_post_spike, n):\n current = 0.0\n if 0.0 <= t_post_spike < n['t_rise']:\n current += -n['g_Na'] * (U - n['E_Na'])\n if n['t_rise'] <= t_post_spike < n['t_fall']:\n current += -n['g_K'] * (U - n['E_K'])\n return current",
"def calculate_intervals(tick_times: List[float]) -> List[float]:\n return [tick_times[i] - tick_times[i - 1] for i in range(1, len(tick_times))]",
"def VIntervals(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_VIntervals(self, *args)",
"def KS_Periodic(x, tmax, ntime, u):\n N = x.size\n v = np.fft.fft(u)\n\n # Precompute various ETDRK4 scalar quantities:\n h = 0.025\n k = (2 * np.pi / (x[-1] - 2 * x[0] + x[1])) * np.fft.fftfreq(N, d=1 / N)\n L = k**2 - k**4\n E = np.exp(h * L)\n E2 = np.exp(h * L / 2)\n M = 64\n r = np.exp(1j * np.pi * (np.arange(1, M + 1) - .5) / M)\n LR = h * np.tile(np.reshape(L, (N, 1)), (1, M)) + np.tile(r, (N, 1))\n Q = h * np.real(np.mean((np.exp(LR / 2) - 1) / LR, axis=1))\n f1 = h * np.real(np.mean(\n (-4 - LR + np.exp(LR) * (4 - 3 * LR + LR**2)) / LR**3, axis=1))\n f2 = h * np.real(np.mean(\n (2 + LR + np.exp(LR) * (-2 + LR)) / LR**3, axis=1))\n f3 = h * np.real(np.mean(\n (-4 - 3 * LR - LR**2 + np.exp(LR) * (4 - LR)) / LR**3, axis=1))\n\n # Main time-stepping loop:\n uu = u\n nmax = int(np.round(tmax / h))\n nplt = int(np.floor((tmax / (ntime - 1)) / h))\n g = -0.5j * k\n for n in range(nmax):\n Nv = g * np.fft.fft(np.real(np.fft.ifft(v))**2)\n a = E2 * v + Q * Nv\n Na = g * np.fft.fft(np.real(np.fft.ifft(a))**2)\n b = E2 * v + Q * Na\n Nb = g * np.fft.fft(np.real(np.fft.ifft(b))**2)\n c = E2 * a + Q * (2 * Nb - Nv)\n Nc = g * np.fft.fft(np.real(np.fft.ifft(c))**2)\n v = E * v + Nv * f1 + 2 * (Na + Nb) * f2 + Nc * f3\n if np.mod(n + 1, nplt) == 0:\n u = np.real(np.fft.ifft(v))\n uu = np.vstack((uu, u))\n\n return uu",
"def scipy_integrate(func, X0, args, IRK_times, N=0):\n V0 = 0.7 # we fix the volatge initial condition\n t_span = [0.0, args.h * N]\n t_sim = np.array([t_span[0]])\n for k in range(1, N +1):\n temp = (k - 1) * args.h + IRK_times * args.h\n t_sim = np.vstack((t_sim, temp))\n t_next = np.array([k * args.h])\n t_sim = np.vstack((t_sim, t_next))\n del temp, t_next\n sol = solve_ivp(func, t_span, [X0[0], X0[1], X0[2], X0[3], V0], method=args.method, t_eval=t_sim.reshape(-1,))\n y_test = sol.y\n return t_sim[1:,:], y_test[:, 1:]",
"def get_continous_time_periods(binary_array):\n binary_array = np.copy(binary_array).astype(\"int8\")\n n_times = len(binary_array)\n d_times = np.diff(binary_array)\n # show the +1 and -1 edges\n pos = np.where(d_times == 1)[0] + 1\n neg = np.where(d_times == -1)[0] + 1\n\n if (pos.size == 0) and (neg.size == 0):\n if len(np.nonzero(binary_array)[0]) > 0:\n return [(0, n_times-1)]\n else:\n return []\n elif pos.size == 0:\n # i.e., starts on an spike, then stops\n return [(0, neg[0])]\n elif neg.size == 0:\n # starts, then ends on a spike.\n return [(pos[0], n_times-1)]\n else:\n if pos[0] > neg[0]:\n # we start with a spike\n pos = np.insert(pos, 0, 0)\n if neg[-1] < pos[-1]:\n # we end with aspike\n neg = np.append(neg, n_times - 1)\n # NOTE: by this time, length(pos)==length(neg), necessarily\n h = np.matrix([pos, neg])\n # print(f\"len(h[1][0]) {len(h[1][0])} h[1][0] {h[1][0]} h.size {h.size}\")\n if np.any(h):\n result = []\n for i in np.arange(h.shape[1]):\n if h[1, i] == n_times-1:\n result.append((h[0, i], h[1, i]))\n else:\n result.append((h[0, i], h[1, i]-1))\n return result\n return []",
"def vi1(t):\n u_t = 1*(t>0)\n return (np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t)) * u_t",
"def spike_width(t, V, t_spike, spike_amp):\n # handle no spike found\n if t_spike is None:\n return None\n\n Vmin = np.min(V[t_spike+1:t_spike+500])\n minval = np.max([t_spike - 100, 0])\n if len(V) > t_spike+500:\n maxval = -1\n else:\n maxval = t_spike+500\n id1 = find_nearest_idx(V[minval:t_spike], spike_amp/2 + Vmin) \\\n + t_spike - 100\n id2 = find_nearest_idx(V[t_spike+1:maxval], spike_amp/2 + Vmin) \\\n + t_spike + 1\n return t[id2] - t[id1]",
"def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals",
"def interpolateCubic( t):\n curframe = []\n frame = np.searchsorted( keytime, t, side='right') - 1\n\n for i in range(11):\n poly = S[i]\n res = poly[frame](t)\n curframe.append(res)\n\n return curframe",
"def NbVIntervals(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_NbVIntervals(self, *args)",
"def get_spikes(spiketimes=None, t1=None, t2=None):\n indices = np.where((spiketimes[:,1] > t1) & (spiketimes[:,1] < t2))\n timed_spikes = spiketimes[indices]\n return timed_spikes",
"def spiketraintimesel(spiketrain, t_start, t_stop):\n if not np.isscalar(t_start):\n if len(t_start) != len(t_stop):\n print('Argments t_start and t_stop must have the same size')\n return [], []\n else:\n spiketrainsel_list, spiketimesel_list = [], []\n for i in range(len(t_start)):\n spiketrainsel, spiketimesel = spiketraintimesel(spiketrain, t_start[i], t_stop[i])\n spiketrainsel_list.append(spiketrainsel)\n spiketimesel_list.append(spiketimesel)\n return spiketrainsel_list, spiketimesel_list\n else:\n spiketimes = np.array(spiketrain).ravel()\n spiketimesel = spiketimes[np.logical_and(spiketimes > t_start, spiketimes < t_stop)]\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel, units=spiketrain.units, t_start=spiketrain.t_start,\n t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel",
"def measure_t_interval(self):\n self.write(\"MEAS:TINT? (@1),(@2)\")",
"def interval_multivariate(inputspikes, outputspikes, samples=1):\n times = []\n krdists = []\n for prv, nxt in zip(outputspikes[:-1], outputspikes[1:]):\n krd = multivariate(inputspikes, prv, nxt, samples)\n times.append(krd[0])\n krdists.append(krd[1])\n return times, krdists",
"def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')"
]
| [
"0.6780872",
"0.66757673",
"0.6342778",
"0.61360574",
"0.6082667",
"0.6070609",
"0.597286",
"0.58500314",
"0.57331735",
"0.5723254",
"0.5691698",
"0.5659111",
"0.5644936",
"0.56319046",
"0.55685604",
"0.55277026",
"0.54763794",
"0.5443054",
"0.53831714",
"0.53791595",
"0.5372703",
"0.53589445",
"0.5339593",
"0.53335136",
"0.5328859",
"0.532869",
"0.5325912",
"0.5315179",
"0.5303762",
"0.5303433"
]
| 0.8620648 | 0 |
Computes the interspike intervals between consecutive spikes, then averages them and returns the reciprocal of that average as the average spike frequency, in Hz. | def avg_spike_frequency(t, V):
intervals = interspike_intervals(t, V)
try:
raise_if_not_multiple_spikes(intervals)
except NoMultipleSpikesException:
return None
avg_int = np.average(intervals)
return 1/avg_int | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)",
"def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int",
"def sinc_mean_function(x):\n return np.sin(24*x ) / (12*x) + 2",
"def ave(values):\n return float(sum(values))/len(values)",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))",
"def mean_firing_rate(spiketrain, t_start=None, t_stop=None, axis=None):\n if isinstance(spiketrain, neo.SpikeTrain) and t_start is None \\\n and t_stop is None and axis is None:\n # a faster approach for a typical use case\n n_spikes = len(spiketrain)\n time_interval = spiketrain.t_stop - spiketrain.t_start\n time_interval = time_interval.rescale(spiketrain.units)\n rate = n_spikes / time_interval\n return rate\n\n if isinstance(spiketrain, pq.Quantity):\n # Quantity or neo.SpikeTrain\n if not is_time_quantity(t_start, allow_none=True):\n raise TypeError(\"'t_start' must be a Quantity or None\")\n if not is_time_quantity(t_stop, allow_none=True):\n raise TypeError(\"'t_stop' must be a Quantity or None\")\n\n units = spiketrain.units\n if t_start is None:\n t_start = getattr(spiketrain, 't_start', 0 * units)\n t_start = t_start.rescale(units).magnitude\n if t_stop is None:\n t_stop = getattr(spiketrain, 't_stop',\n np.max(spiketrain, axis=axis))\n t_stop = t_stop.rescale(units).magnitude\n\n # calculate as a numpy array\n rates = mean_firing_rate(spiketrain.magnitude, t_start=t_start,\n t_stop=t_stop, axis=axis)\n\n rates = pq.Quantity(rates, units=1. / units)\n elif isinstance(spiketrain, (np.ndarray, list, tuple)):\n if isinstance(t_start, pq.Quantity) or isinstance(t_stop, pq.Quantity):\n raise TypeError(\"'t_start' and 't_stop' cannot be quantities if \"\n \"'spiketrain' is not a Quantity.\")\n spiketrain = np.asarray(spiketrain)\n if len(spiketrain) == 0:\n raise ValueError(\"Empty input spiketrain.\")\n if t_start is None:\n t_start = 0\n if t_stop is None:\n t_stop = np.max(spiketrain, axis=axis)\n time_interval = t_stop - t_start\n if axis and isinstance(t_stop, np.ndarray):\n t_stop = np.expand_dims(t_stop, axis)\n rates = np.sum((spiketrain >= t_start) & (spiketrain <= t_stop),\n axis=axis) / time_interval\n else:\n raise TypeError(\"Invalid input spiketrain type: '{}'. Allowed: \"\n \"neo.SpikeTrain, Quantity, ndarray\".\n format(type(spiketrain)))\n return rates",
"def average_over_interval(raw_rate, weight_function, intervals):\n\n def averaging_function(t):\n return raw_rate(t) * weight_function(t)\n\n results = np.zeros(len(intervals), dtype=np.float)\n\n for interval_idx in range(len(intervals)):\n start = intervals.start[interval_idx]\n finish = intervals.finish[interval_idx]\n results[interval_idx] = quad(averaging_function, start, finish)[0]\n\n return results",
"def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm",
"def get_average_energy(audio, beats, begin, end):\n buffer = np.square(audio[int(beats[int(begin)]):int(beats[int(end)])])\n average = np.mean(buffer)\n return average",
"def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440",
"def min_spike_frequency_tV(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n max_int = np.amax(intervals)\n return 1/max_int",
"def compute_spike_moving_average(self, tau=0.005):\n rho = 1 - self.DT / tau\n rav = np.zeros_like(self.R)\n\n rav[:, 0] = self.R[:, 0] * (1 - rho)\n for i in range(1, self.N_T):\n rav[:, i] = rho * rav[:, i - 1] + (1 - rho) * self.R[:, i]\n\n self.rav = rav / self.DT",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def mean_wave_period(F, f, df):\n return np.sum(F * df) / np.sum(F * f * df)",
"def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n return None\n Vmax = V[t_spike]\n Vmin = np.min(V[t_spike+1:t_spike+500])\n\n return Vmax - Vmin",
"def harmonic_mean(self):\n return self.count() / sum(1/number for number in self.numbers)",
"def calcAverage(dat):\n return sum(dat)/len(dat)",
"def mean_function(x):\n return np.sin(12*x) + 0.66*np.cos(25*x) # original frequencies: 12, 25",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))",
"def fharmonicmean(items):\n if len(items) == 0:\n return 0.\n\n # create a list with 1/xi values\n s = 0.\n for item in items:\n s += 1./item\n\n return float(len(items)) / s",
"def dishlist_avg(n:list)->float:\r\n all_prices = dishlist_prices(n)\r\n return sum(all_prices)/len(all_prices)",
"def mean(series):\n return fsum(series) / len(series)",
"def start(self):\n lastbeat = time.time()\n averages = []\n for x in range(self.keep_amount):\n averages.append(1)\n while self.run:\n cur_amount = len(averages)\n if self.keep_amount != cur_amount:\n if self.keep_amount < cur_amount:\n while self.keep_amount != len(averages):\n averages.pop(0)\n elif self.keep_amount < cur_amount:\n avg = sum(averages)/cur_amount\n while self.keep_amount != len(averages):\n averages.append(avg)\n self.do_fft()\n mag = self.get_average(self.s_freq, self.e_freq)\n avg = sum(averages)/self.keep_amount\n averages.pop(0)\n if (mag/avg)-1 > self.percent:\n now = time.time()\n if ((now-lastbeat) > self.wait_time) and self.relative_range() > .009:\n lastbeat = now\n self.detect_func()\n averages.append(avg)\n else:\n averages.append(mag)\n else:\n averages.append(mag)",
"def mean(self) -> float:\n return self._interval_sum / len(self.intervals)",
"def zenith_gain(freq):\n parfile = open(project_path\n + \"DSS-28_technical/efficiency_vs_freq_pars.pkl\",\"r\")\n pars = cPickle.load(parfile)\n parfile.close()\n effic = {}\n avg_effic = 0\n for key in list(pars.keys()):\n effic[key] = pars[key](freq)/100.\n avg_effic += effic[key]\n # right now I don't know what Pol A and Pol B are\n avg_effic /= len(list(pars.keys()))\n return avg_effic"
]
| [
"0.6792057",
"0.64459485",
"0.63366777",
"0.60239524",
"0.5911255",
"0.5820491",
"0.5770946",
"0.56408936",
"0.5640427",
"0.5621468",
"0.5606927",
"0.5601544",
"0.55967706",
"0.5577748",
"0.55715287",
"0.55707043",
"0.55707043",
"0.55707043",
"0.5566058",
"0.55575603",
"0.5552227",
"0.5528741",
"0.55050296",
"0.550161",
"0.5475114",
"0.5467665",
"0.5461006",
"0.54595333",
"0.54442984",
"0.5437472"
]
| 0.73140913 | 0 |
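
A minimal sketch of the reciprocal-of-mean-ISI relationship used by avg_spike_frequency above. The spike times and the np.diff call are hypothetical stand-ins for interspike_intervals(t, V), which would detect the spikes from a voltage trace:

import numpy as np

# Hypothetical spike times in seconds; interspike_intervals(t, V) would derive
# these from the voltage trace instead.
spike_times = np.array([0.10, 0.35, 0.61, 0.88, 1.14])
intervals = np.diff(spike_times)               # interspike intervals, in seconds
mean_frequency = 1.0 / np.average(intervals)   # reciprocal of the mean ISI, in Hz
print(round(mean_frequency, 2))                # -> 3.85
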
Computes average spike frequency for abf object and epoch index. | def avg_spike_frequency_abf(abf, epoch):
p0 = abf.sweepEpochs.p1s[epoch]
p1 = abf.sweepEpochs.p1s[epoch+1]
t = abf.sweepX[p0:p1]
V = abf.sweepY[p0:p1]
return avg_spike_frequency(t, V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spike_amplitude_abf(abf, t_spike, epoch_start=3):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n\n return spike_amplitude(V, t_spike)",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def averaging_factor(self):\n af = ct.c_uint()\n self.lib.Filter_GetAveragingFactor(ct.pointer(af))\n return af.value",
"def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg",
"def meanSpikeCount(self, gather=True):\n raise NotImplementedError",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def spike_latency_abf(abf, epochstart):\n p0 = abf.sweepEpochs.p1s[epochstart]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n I = abf.sweepC[p0:-1]\n return spike_latency(t, I, V)",
"def _foi_average(conn, foi_idx):\n # get the number of foi\n n_foi = foi_idx.shape[0]\n\n # get input shape and replace n_freqs with the number of foi\n sh = list(conn.shape)\n sh[-2] = n_foi\n\n # compute average\n conn_f = np.zeros(sh, dtype=conn.dtype)\n for n_f, (f_s, f_e) in enumerate(foi_idx):\n conn_f[..., n_f, :] = conn[..., f_s:f_e, :].mean(-2)\n return conn_f",
"def fft_frequency(fft, index):\n\treturn index * AUDIO_RATE / len(fft) / 2 # Same as in fft_index, see above",
"def spike_width_abf(abf, t_spike, spike_amp, epoch_start=3):\n # handle no spike found\n if t_spike is None:\n return None\n p0 = abf.sweepEpochs.p1s[epoch_start]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n return spike_width(t, V, t_spike, spike_amp)",
"def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn",
"def start(self):\n lastbeat = time.time()\n averages = []\n for x in range(self.keep_amount):\n averages.append(1)\n while self.run:\n cur_amount = len(averages)\n if self.keep_amount != cur_amount:\n if self.keep_amount < cur_amount:\n while self.keep_amount != len(averages):\n averages.pop(0)\n elif self.keep_amount < cur_amount:\n avg = sum(averages)/cur_amount\n while self.keep_amount != len(averages):\n averages.append(avg)\n self.do_fft()\n mag = self.get_average(self.s_freq, self.e_freq)\n avg = sum(averages)/self.keep_amount\n averages.pop(0)\n if (mag/avg)-1 > self.percent:\n now = time.time()\n if ((now-lastbeat) > self.wait_time) and self.relative_range() > .009:\n lastbeat = now\n self.detect_func()\n averages.append(avg)\n else:\n averages.append(mag)\n else:\n averages.append(mag)",
"def next_epoch(self):\n update_table = [None for _ in range(2 ** self.bit_count)]\n for w in self.frequency_map:\n frequency = self.frequency_map[w]\n for h in self.hash_functions:\n c = get_hash(h(), repr(w)) % 2 ** self.bit_count\n if not update_table[c] or frequency > update_table[c]:\n update_table[c] = frequency\n # Update statistics table\n debug_hit_something = False\n for c in range(len(update_table)):\n if update_table[c] is not None:\n debug_hit_something = True\n freq = update_table[c] / self.tweet_count\n self._update_bucket(freq, self.buckets[c])\n else:\n pass\n assert debug_hit_something is True\n self.epoch += 1\n self.index[self.epoch] = dict()\n self.trending_topics = []",
"def metric_iaf(self, x):\n data = np.asarray(x['data'])\n iaf = [10.0] * data.shape[0]\n for ch, ch_data in enumerate(data):\n pxx, freqs = mlab.psd(ch_data, Fs=128.0, NFFT=256)\n alpha_mask = np.abs(freqs - 10) <= 2.0\n alpha_pxx = 10*np.log10(pxx[alpha_mask])\n alpha_pxx = scipy.signal.detrend(alpha_pxx)\n # iaf[ch] = alpha_pxx.shape\n iaf[ch] = freqs[alpha_mask][np.argmax(alpha_pxx)]\n return iaf",
"def zenith_gain(freq):\n parfile = open(project_path\n + \"DSS-28_technical/efficiency_vs_freq_pars.pkl\",\"r\")\n pars = cPickle.load(parfile)\n parfile.close()\n effic = {}\n avg_effic = 0\n for key in list(pars.keys()):\n effic[key] = pars[key](freq)/100.\n avg_effic += effic[key]\n # right now I don't know what Pol A and Pol B are\n avg_effic /= len(list(pars.keys()))\n return avg_effic",
"def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg",
"def averageTime(self):\n \n pass",
"def mean_wave_period(F, f, df):\n return np.sum(F * df) / np.sum(F * f * df)",
"def runningAvg(f,nDays):\n disi,disb = _runningAvgWgts(nDays)\n ka = nDays // 2\n npts = f.shape[0]\n favg = np.empty_like(f)\n # interior\n for i in range(ka,favg.shape[0]-ka):\n favg[i] = np.sum(disi*f[i-ka:i+ka+1])\n # boundaries\n for i in range(ka):\n fwidth = len(disb[i])\n favg[i] = np.sum(disb[i]*f[0:fwidth])\n favg[npts-1-i] = np.sum(disb[i]*f[npts-1:npts-fwidth-1:-1])\n return favg",
"def get_average_energy(audio, beats, begin, end):\n buffer = np.square(audio[int(beats[int(begin)]):int(beats[int(end)])])\n average = np.mean(buffer)\n return average",
"def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms):\n # Build array of spike counts (one per spike train)\n spike_counts = np.array([len(st) for st in spiketrains])\n\n # Compute FF\n if all(count == 0 for count in spike_counts):\n # empty list of spiketrains reaches this branch, and NaN is returned\n return np.nan\n\n if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):\n if not is_time_quantity(warn_tolerance):\n raise TypeError(\"'warn_tolerance' must be a time quantity.\")\n durations = [(st.t_stop - st.t_start).simplified.item()\n for st in spiketrains]\n durations_min = min(durations)\n durations_max = max(durations)\n if durations_max - durations_min > warn_tolerance.simplified.item():\n warnings.warn(\"Fano factor calculated for spike trains of \"\n \"different duration (minimum: {_min}s, maximum \"\n \"{_max}s).\".format(_min=durations_min,\n _max=durations_max))\n\n fano = spike_counts.var() / spike_counts.mean()\n return fano",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores",
"def update_average(self,result):\n a = 1/self.iters\n b = 1 - a\n self.average = a * result + b * self.average\n self.iters += 1",
"def freq():",
"def _test_get_a_freq(self):\n for i, energy_test in enumerate(self.energy_list):\n if i+1 < len(self.energy_list):\n energy = (self.energy_list[i+1]+self.energy_list[i])/2.\n else:\n energy = (-self.energy_list[i-1]+2.*self.energy_list[i])\n energy_test_2 = energy_test-1e-2\n print i, energy_test_2, self._get_a_freq(energy_test_2)\n print i, energy_test, self._get_a_freq(energy_test)\n energy_test_2 = energy_test+1e-2\n print i, energy_test_2, self._get_a_freq(energy_test_2)\n print i, energy_test, 1./self.closed_orbits_t[energy_test]\n print i, energy, self._get_a_freq(energy)\n print",
"def eta_scan_averages(self):\n return self._eta_scan_averages",
"def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)",
"def fasper_py(x,y,ofac,hifac, MACC=4):\n #Check dimensions of input arrays\n n = long(len(x))\n if n != len(y):\n print 'Incompatible arrays.'\n return\n\n nout = 0.5*ofac*hifac*n\n nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power\n nfreq = 64L # of 2 above nfreqt.\n\n while nfreq < nfreqt: \n nfreq = 2*nfreq\n\n ndim = long(2*nfreq)\n \n #Compute the mean, variance\n ave = y.mean()\n ##sample variance because the divisor is N-1\n var = ((y-y.mean())**2).sum()/(len(y)-1) \n # and range of the data.\n xmin = x.min()\n xmax = x.max()\n xdif = xmax-xmin\n\n #extirpolate the data into the workspaces\n wk1 = np.zeros(ndim, dtype='complex')\n wk2 = np.zeros(ndim, dtype='complex')\n\n fac = ndim/(xdif*ofac)\n fndim = ndim\n ck = ((x-xmin)*fac) % fndim\n ckk = (2.0*ck) % fndim\n\n for j in range(0L, n):\n __spread__(y[j]-ave,wk1,ndim,ck[j],MACC)\n __spread__(1.0,wk2,ndim,ckk[j],MACC)\n\n #Take the Fast Fourier Transforms\n wk1 = np.fft.ifft( wk1 )*len(wk1)\n wk2 = np.fft.ifft( wk2 )*len(wk1)\n\n wk1 = wk1[1:nout+1]\n wk2 = wk2[1:nout+1]\n rwk1 = wk1.real\n iwk1 = wk1.imag\n rwk2 = wk2.real\n iwk2 = wk2.imag\n \n df = 1.0/(xdif*ofac)\n \n #Compute the Lomb value for each frequency\n hypo2 = 2.0 * abs( wk2 )\n hc2wt = rwk2/hypo2\n hs2wt = iwk2/hypo2\n\n cwt = np.sqrt(0.5+hc2wt)\n swt = np.sign(hs2wt)*(np.sqrt(0.5-hc2wt))\n den = 0.5*n+hc2wt*rwk2+hs2wt*iwk2\n cterm = (cwt*rwk1+swt*iwk1)**2./den\n sterm = (cwt*iwk1-swt*rwk1)**2./(n-den)\n\n wk1 = df*(np.arange(nout, dtype='float')+1.)\n wk2 = (cterm+sterm)/(2.0*var)\n pmax = wk2.max()\n jmax = wk2.argmax()\n\n\n #Significance estimation\n #expy = exp(-wk2) \n #effm = 2.0*(nout)/ofac \n #sig = effm*expy\n #ind = (sig > 0.01).nonzero()\n #sig[ind] = 1.0-(1.0-expy[ind])**effm\n\n #Estimate significance of largest peak value\n expy = np.exp(-pmax) \n effm = 2.0*(nout)/ofac \n prob = effm*expy\n\n if prob > 0.01: \n prob = 1.0-(1.0-expy)**effm\n\n return wk1,wk2,nout,jmax,prob",
"def growth_rate(filenames, time_model=arai_time_model):\n # file ID\n\n print(\"storing casename and Reynolds number\\n\\n\")\n casename, Re, We = file_id(filenames[1])\n\n print(\"\\nNow calculating FFTs\\n\\n\")\n # calculating ffts\n\n t, freqs, loc0_diameter_fft, loc0_centroid_fft = fft_output(filenames[0])\n t, freqs, loc1_diameter_fft, loc1_centroid_fft = fft_output(filenames[1])\n t, freqs, loc2_diameter_fft, loc2_centroid_fft = fft_output(filenames[2])\n t, freqs, loc3_diameter_fft, loc3_centroid_fft = fft_output(filenames[3])\n t, freqs, loc4_diameter_fft, loc4_centroid_fft = fft_output(filenames[4])\n t, freqs, loc5_diameter_fft, loc5_centroid_fft = fft_output(filenames[5])\n t, freqs, loc6_diameter_fft, loc6_centroid_fft = fft_output(filenames[6])\n t, freqs, loc7_diameter_fft, loc7_centroid_fft = fft_output(filenames[7])\n t, freqs, loc8_diameter_fft, loc8_centroid_fft = fft_output(filenames[8])\n t, freqs, loc9_diameter_fft, loc9_centroid_fft = fft_output(filenames[9])\n\n # real amplitudes from morozumi equation\n\n loc0_diameter_amp = np.sqrt((4/t)*loc0_diameter_fft)\n loc0_centroid_amp = np.sqrt((4/t)*loc0_centroid_fft)\n\n loc1_diameter_amp = np.sqrt((4/t)*loc1_diameter_fft)\n loc1_centroid_amp = np.sqrt((4/t)*loc1_centroid_fft)\n\n loc2_diameter_amp = np.sqrt((4/t)*loc2_diameter_fft)\n loc2_centroid_amp = np.sqrt((4/t)*loc2_centroid_fft)\n\n loc3_diameter_amp = np.sqrt((4/t)*loc3_diameter_fft)\n loc3_centroid_amp = np.sqrt((4/t)*loc3_centroid_fft)\n\n loc4_diameter_amp = np.sqrt((4/t)*loc4_diameter_fft)\n loc4_centroid_amp = np.sqrt((4/t)*loc4_centroid_fft)\n\n loc5_diameter_amp = np.sqrt((4/t)*loc5_diameter_fft)\n loc5_centroid_amp = np.sqrt((4/t)*loc5_centroid_fft)\n\n loc6_diameter_amp = np.sqrt((4/t)*loc6_diameter_fft)\n loc6_centroid_amp = np.sqrt((4/t)*loc6_centroid_fft)\n\n loc7_diameter_amp = np.sqrt((4/t)*loc7_diameter_fft)\n loc7_centroid_amp = np.sqrt((4/t)*loc7_centroid_fft)\n\n loc8_diameter_amp = np.sqrt((4/t)*loc8_diameter_fft)\n loc8_centroid_amp = np.sqrt((4/t)*loc8_centroid_fft)\n\n loc9_diameter_amp = np.sqrt((4/t)*loc9_diameter_fft)\n loc9_centroid_amp = np.sqrt((4/t)*loc9_centroid_fft)\n\n # setting up storage array for the z_locations\n z_locations = np.zeros(10)\n\n # using filenames to ID z locations\n for i in range(len(filenames)):\n # separate into the paramaters\n underscore_split = filenames[i].split('_')\n # identify the last parameter, split by the . 
and then take the first\n # value as this will be the z_location\n z_loc = underscore_split[-1].split('.')[0]\n z_locations[i] = int(z_loc)\n\n # calculating velocity\n u = velocity_calculator(int(Re))\n\n # converting z_locations into real distances\n zs_metres = 0.02*z_locations/1000\n\n # time model can be changed as needed\n z_times = time_model(u, zs_metres, float(We))\n\n # initialising storage arrays for growth rates\n diameter_growth_rates = np.zeros((len(loc0_diameter_amp)))\n diameter_a0 = np.zeros((len(loc0_diameter_amp)))\n diameter_errs = np.zeros((len(loc0_diameter_amp)))\n\n centroid_growth_rates = np.zeros((len(loc0_centroid_amp)))\n centroid_a0 = np.zeros((len(loc0_centroid_amp)))\n centroid_errs = np.zeros((len(loc0_centroid_amp)))\n\n # performing loop to work out growth rates of diameter from curve fitting\n # various z locations (z times)\n\n print(\"\\n\\nNow calculating the diameter growth rates:\\n\\n\")\n # i is an indexer for the length of the array, equal to the frame number\n for i in range(len(loc0_diameter_amp)):\n # progress calculator\n if (i % 1000) == 0:\n print(\"Progress: {:.1f}%\".format(i*100/len(loc0_diameter_amp)))\n # assign a local array which takes the diameter amp at the current\n # index across the 10 z locations\n local_amps = np.array((loc0_diameter_amp[i], loc1_diameter_amp[i],\n loc2_diameter_amp[i], loc3_diameter_amp[i],\n loc4_diameter_amp[i], loc5_diameter_amp[i],\n loc6_diameter_amp[i], loc7_diameter_amp[i],\n loc8_diameter_amp[i], loc9_diameter_amp[i]))\n # work out the local a_0, growth rate, and error in curve fit\n # using the curve fit function defined earlier\n loc_a_0, loc_omega, loc_err = param_extractor(z_times, local_amps)\n # assign local variables to global array\n diameter_a0[i] = loc_a_0\n diameter_growth_rates[i] = loc_omega\n diameter_errs[i] = loc_err\n\n print('diameter growth rate calculation complete')\n\n print(\"\\n\\nNow calculating the centroid growth rates:\\n\\n\")\n for i in range(len(loc0_centroid_amp)):\n # progress calculator\n if (i % 1000) == 0:\n print(\"Progress: {:.1f}%\".format(i*100/len(loc0_centroid_amp)))\n # assign a local array which takes the centroid amp at the current\n # index across the 10 z locations\n local_amps = np.array((loc0_centroid_amp[i], loc1_centroid_amp[i],\n loc2_centroid_amp[i], loc3_centroid_amp[i],\n loc4_centroid_amp[i], loc5_centroid_amp[i],\n loc6_centroid_amp[i], loc7_centroid_amp[i],\n loc8_centroid_amp[i], loc9_centroid_amp[i]))\n # work out the local a_0, growth rate, and error in curve fit\n # using the curve fit function defined earlier\n loc_a_0, loc_omega, loc_err = param_extractor(z_times, local_amps)\n # assign local variables to global array\n centroid_a0[i] = loc_a_0\n centroid_growth_rates[i] = loc_omega\n centroid_errs[i] = loc_err\n\n # create filename by taking the first portion of the input filename\n output_filename = casename[0:-12] + '_fft.csv'\n\n # stack the arrays together so they can be saved as a single file along\n # the first axis\n output_arr = np.stack((freqs, diameter_a0, diameter_growth_rates,\n diameter_errs, centroid_a0, centroid_growth_rates,\n centroid_errs), axis=1)\n\n # save the array with a header that is for user experience, this is\n # ignored by numpy.loadtxt\n np.savetxt(output_filename, output_arr,\n fmt='%f', delimiter=',',\n header='freqs, diameter_a0, diameter_growth_rates,\\\n diameter_errs, centroid_a0, centroid_growth_rates,\\\n centroid_errs')\n\n # POST PROCESSING TESTING, NOT FOR DEPLOYMENT\n\n fig, ax = plt.subplots()\n 
ax.plot(freqs, diameter_growth_rates, '.', color='yellow')\n ax.set_xlim(0, 1000)\n ax.set_ylim(0, 150)\n ax.set_title(\"Growth rates vs frequencies\")\n ax.set_xlabel(\"Frequencies\")\n ax.set_ylabel(\"Growth rates\")\n\n print(\"minimum error is:\", diameter_errs.min())\n\n minimum_location = diameter_errs.argmin()\n print(minimum_location)\n print(\"minimum error frequency:\", freqs[minimum_location])\n\n # 1253 is the location of 290.04 Hz\n amps_reg = np.array([loc0_diameter_amp[minimum_location],\n loc1_diameter_amp[minimum_location],\n loc3_diameter_amp[minimum_location],\n loc2_diameter_amp[minimum_location],\n loc4_diameter_amp[minimum_location],\n loc5_diameter_amp[minimum_location],\n loc6_diameter_amp[minimum_location],\n loc7_diameter_amp[minimum_location],\n loc8_diameter_amp[minimum_location],\n loc9_diameter_amp[minimum_location]])\n\n amps = amps_reg/diameter_a0[minimum_location]\n\n fig1, ax1 = plt.subplots()\n ax1.plot(z_times, amps, 'o', label='Experimental amplitudes')\n\n modelling_ts = np.linspace(0, 0.02, 1000)\n modelamps_r = (model_growth_rate(modelling_ts,\n diameter_a0[minimum_location],\n diameter_growth_rates[minimum_location]))\n model_amps = modelamps_r/diameter_a0[minimum_location]\n\n ax1.plot(modelling_ts, model_amps,\n label='Curve fit ($\\\\zeta = \\\\zeta_0e^{\\\\omega t}$)')\n ax1.set_xlabel(\"Modelled time (seconds)\", fontsize=12)\n ax1.set_ylabel('$\\\\frac{\\\\zeta}{\\\\zeta_0}$', fontsize=16)\n ax1.set_xlim(0, 0.0125)\n ax1.set_ylim(1, 3)\n ax1.grid()\n ax1.legend()\n ax1.tick_params(axis='both', labelsize=8)\n fig1.set_size_inches(5.5, 4)\n fig1.savefig(fname='curve_fit_example.pgf', bbox_inches='tight')\n\n fig2, ax2 = plt.subplots()\n ax2.plot(freqs, diameter_errs, '.')\n ax2.set_xlim(0, 1000)\n ax2.set_title('Errors')\n ax2.set_xlabel(\"Frequencies\")\n ax2.set_ylabel(\"Standard deviation of curve fit\")\n\n print(freqs[600])\n\n w = savgol_filter(diameter_growth_rates, 1001, 2)\n fig5, ax5 = plt.subplots()\n ax5.plot(freqs, w)\n ax5.set_title('Savitzky-Golay filter')\n ax5.set_xlim(0, 5000)\n ax5.set_xlabel('Frequencies')\n ax5.set_ylabel('Growth rate')\n\n ax.plot(freqs, w, label='Savitzky-Golay', color='red')\n ax.legend()\n\n zero_crossings_w = np.where(np.diff(np.signbit(w)))[0]\n\n print(\"Zeros savgol\", freqs[zero_crossings_w])\n\n Ks = []\n delx = 1/27000\n for i in range(len(loc0_diameter_amp)):\n k = i*(2*np.pi)/(delx*116495)\n Ks.append(k*1e-3)"
]
| [
"0.643844",
"0.6115807",
"0.5816971",
"0.5777478",
"0.56836677",
"0.56770694",
"0.5666032",
"0.56568635",
"0.5650543",
"0.5620046",
"0.55462974",
"0.5540028",
"0.5525934",
"0.5521558",
"0.5508961",
"0.54813564",
"0.5462477",
"0.54483956",
"0.5408029",
"0.5349789",
"0.5333669",
"0.53316134",
"0.53144646",
"0.52851945",
"0.5284023",
"0.5269456",
"0.5268479",
"0.525642",
"0.52296233",
"0.5225452"
]
| 0.7959858 | 0 |
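
A minimal sketch of the epoch-slicing pattern used by avg_spike_frequency_abf above. A hand-built stand-in object replaces the real ABF reader; only the three attributes the function reads (sweepEpochs.p1s, sweepX, sweepY) are faked, so this is illustrative rather than the actual file-loading API:

import numpy as np
from types import SimpleNamespace

# Fake ABF-like object: a 1 s sweep at 1 kHz with epoch boundaries given as sample indices.
t = np.linspace(0.0, 1.0, 1000, endpoint=False)
fake_abf = SimpleNamespace(
    sweepX=t,
    sweepY=np.zeros_like(t),
    sweepEpochs=SimpleNamespace(p1s=[0, 250, 750, 1000]),
)

epoch = 1
p0 = fake_abf.sweepEpochs.p1s[epoch]
p1 = fake_abf.sweepEpochs.p1s[epoch + 1]
t_epoch = fake_abf.sweepX[p0:p1]   # samples 250..749, i.e. roughly 0.25 s to 0.75 s
V_epoch = fake_abf.sweepY[p0:p1]
print(len(t_epoch), round(t_epoch[0], 3), round(t_epoch[-1], 3))   # -> 500 0.25 0.749
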
Computes maximum interspike frequency (the reciprocal of the minimum interspike interval). | def max_spike_frequency(t, V):
intervals = interspike_intervals(t, V)
raise_if_not_multiple_spikes(intervals)
min_int = np.amin(intervals)
return 1/min_int | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_frequency(sig, FS):\n\n f, fs = plotfft(sig, FS)\n t = np.cumsum(fs)\n\n try:\n ind_mag = np.where(t > t[-1]*0.95)[0][0]\n except:\n ind_mag = np.argmax(t)\n f_max = f[ind_mag]\n\n return f_max",
"def findMaximal(freqSet):",
"def min_spike_frequency_tV(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n max_int = np.amax(intervals)\n return 1/max_int",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def calcUpperFrequencyLimit(fls, noct, max_idx):\n # floats required due to integer division in Python 2.7\n f_upper = fls[0:max_idx] * (2.0 ** (1.0 / (2.0 * noct)))\n step_size = fls[1] - fls[0]\n approx_idx = f_upper / float(step_size)\n f_upper = np.round(approx_idx).astype(int)\n return f_upper",
"def FindFixedSineInputFreq(timevector,inputsignal,minfreq=0.05,maxfreq=100):\n f=makefreqvect(timevector)\n fminind=thresh_py(f,minfreq)\n fmaxind=thresh_py(f,maxfreq)\n N=len(timevector)\n inputfft=fft(inputsignal,None,0)*2/N\n inputmag=abs(inputfft)\n inputslice=inputmag[fminind:fmaxind]\n maxtemp=argmax(inputslice)\n maxind=fminind+maxtemp\n fatmax=f[maxind]\n return fatmax,maxind",
"def spectral_maxpeaks(sign, FS):\n f, ff = plotfft(sign, FS)\n diff_sig = np.diff(ff)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])",
"def get_scale_freq():\n return sf / 2 / (num_freq-1)",
"def freq(self) -> int:",
"def freq_from_autocorr(x):\n corr = autocorr(x)\n\n\n # Find the first low point\n d = np.diff(corr)\n start = np.where(d>0)[0]\n\n if len(start)>0 :\n return np.argmax(corr[start[0]:]) + start[0]\n return 0\n\n # Find the next peak after the low point (other than 0 lag). This bit is\n # not reliable for long signals, due to the desired peak occurring between\n # samples, and other peaks appearing higher.\n # Should use a weighting function to de-emphasize the peaks at longer lags.\n # Also could zero-pad before doing circular autocorrelation.\n #peak = np.argmax(corr[start:])\n #px, py = parabolic(corr, peak)\n\n #print(\"minimum at {}, peak at {}\".format(start, peak + start))\n\n\n #return fs / px\n #return peak + start",
"def get_tuning_freq(self, freq):\n if freq < self.freq_lo:\n return\n\n while freq < self.NOTES.min() or freq > self.NOTES.max():\n while freq > self.NOTES.max():\n self.NOTES *= 2\n while freq < self.NOTES.min():\n self.NOTES /= 2\n tuning_freq = min(self.NOTES, key=lambda x: abs(x-freq))\n return tuning_freq",
"def freq():",
"def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440",
"def get_frequency(time_series):\n if len(time_series.index) == 0:\n return 0\n ft = np.fft.rfft(time_series)\n return np.fft.fftfreq(len(time_series))[np.argmax(abs(ft))]",
"def infer_interval(self) -> (pd.Timedelta, float):\n occurrences_dict = {}\n top_count = 0\n top_frequent_deltas = []\n\n dates = self.index\n\n if len(dates) <= 1:\n raise ValueError(\"Index is too short. It must contain at least 2 values for automatic frequency setting.\")\n\n time_deltas = [dates[i] - dates[i - 1] for i in range(1, len(dates))]\n\n for item in time_deltas:\n item_count = occurrences_dict.get(item, 0) + 1\n occurrences_dict[item] = item_count\n if item_count == top_count:\n top_frequent_deltas.append(item)\n elif item_count > top_count:\n top_frequent_deltas = [item]\n top_count = item_count\n\n relative_frequency = top_count / len(time_deltas)\n\n # if there is more than one delta of top frequency then combine them by calculating the mean\n # \"top frequency delta\" and assigning the combined_relative_frequency as the combined number of occurrences\n # of all \"top frequency deltas\".\n top_frequent_delta = pd.Series(data=top_frequent_deltas).mean()\n combined_relative_frequency = len(top_frequent_deltas) * relative_frequency\n\n return top_frequent_delta, combined_relative_frequency",
"def optimal_nffts(arr):\n\n return int(8 * 2 ** np.ceil(np.log2(len(arr))))",
"def maxpeaks(sig):\n diff_sig = np.diff(sig)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])",
"def best_coupling_frequency(self):\n\n idx_best = self.coupling().argmax()\n\n return self.freq.f[idx_best]",
"def getUpperFrequencyBound(self) -> int:\n return self.upper_frequency_bound",
"def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency",
"def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)",
"def max_power_spectrum(sig, FS):\n\n if np.std(sig) == 0:\n return float(max(signal.welch(sig, int(FS), nperseg=len(sig))[1]))\n else:\n return float(max(signal.welch(sig/np.std(sig), int(FS), nperseg=len(sig))[1]))",
"def find_largest_freq():\n words_list = {word for line in lines for word in line} # all words possible\n word_freqs = [(find_freq(word), word) for word in words_list] # list of tuples of words and their frequencies\n max_freq = max(word_freqs)\n return max_freq[0], max_freq[1]",
"def freqvals(t):\n N = len(t)\n T = t[-1] - t[0]\n dt = T/N\n nyquist = 1/(2.0*dt)\n lowfreq = 1/T\n return (N, T, dt, nyquist, lowfreq)",
"def peak_time(self):\n return np.array([self.wftime[ch][self.waveform[ch].argmax()] for ch in range(self.nchannels)])",
"def getStopFrequency(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SENS:FREQ:STOP?\")\n ret = int(self.myFieldFox.read())\n else:\n ret = 1000000\n return ret",
"def max_time(self):\n return self.time[np.argmax(self.flux)]",
"def part_1(tape: Tape) -> int:\n\n result = max_thruster_signal(tape)\n\n print(f\"part 1: highest thruster signal is {result}\")\n return result",
"def fitness(self, analysis):\n signal = analysis.get_signal()\n frequency = signal.spike_frequency()\n return float((self.frequency - frequency) ** 2)",
"def find_max_interval_praat(sound, interval_list):\n\n max_intensity = None\n max_intensity_index = None\n\n max_length = None\n max_length_index = None\n\n # Finding interval with highest intensity and the longest interval.\n\n for index, (begin_sec, end_sec, _) in enumerate(interval_list):\n\n intensity = sound.get_interval_intensity(begin_sec, end_sec)\n length = end_sec - begin_sec\n\n if max_intensity == None or intensity > max_intensity:\n max_intensity = intensity\n max_intensity_index = index\n\n if max_length == None or length > max_length:\n max_length = length\n max_length_index = index\n\n return (max_intensity_index, max_intensity, max_length_index, max_length)"
]
| [
"0.686226",
"0.67198664",
"0.6456348",
"0.6237039",
"0.61072993",
"0.6094395",
"0.6089857",
"0.60836124",
"0.5901322",
"0.5895342",
"0.5891257",
"0.5876694",
"0.58622146",
"0.583807",
"0.5814099",
"0.57966477",
"0.5774891",
"0.5773817",
"0.577248",
"0.5768403",
"0.5766519",
"0.57629836",
"0.5745433",
"0.571993",
"0.57068956",
"0.5668198",
"0.5617967",
"0.56127036",
"0.5598967",
"0.5570393"
]
| 0.81546277 | 0 |
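
A minimal sketch of the reciprocal relationship exploited by max_spike_frequency above, using hypothetical interspike intervals in place of the output of interspike_intervals(t, V):

import numpy as np

# Hypothetical interspike intervals, in seconds.
intervals = np.array([0.25, 0.10, 0.40, 0.15])
max_frequency = 1.0 / np.amin(intervals)   # fastest pair of spikes -> 10.0 Hz
print(max_frequency)
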
Computes minimum interspike frequency (the reciprocal of the maximum interspike interval). | def min_spike_frequency_tV(t, V):
intervals = interspike_intervals(t, V)
raise_if_not_multiple_spikes(intervals)
max_int = np.amax(intervals)
return 1/max_int | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int",
"def calcLowFrequencyLimit(fls, noct, max_idx):\n # floats required due to integer division in Python 2.7\n f_lower = fls[0:max_idx] / (2.0 ** (1 / (2.0 * noct)))\n step_size = fls[1] - fls[0]\n approx_idx = f_lower / (1.0 * step_size)\n f_lower = np.round(approx_idx).astype(int)\n return f_lower",
"def get_spike_frequency_adaptation(t, V):\n # check that there are 2 spikes minimum\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n return intervals[-1]/intervals[0]",
"def FindFixedSineInputFreq(timevector,inputsignal,minfreq=0.05,maxfreq=100):\n f=makefreqvect(timevector)\n fminind=thresh_py(f,minfreq)\n fmaxind=thresh_py(f,maxfreq)\n N=len(timevector)\n inputfft=fft(inputsignal,None,0)*2/N\n inputmag=abs(inputfft)\n inputslice=inputmag[fminind:fmaxind]\n maxtemp=argmax(inputslice)\n maxind=fminind+maxtemp\n fatmax=f[maxind]\n return fatmax,maxind",
"def getLowerFrequencyBound(self) -> int:\n return self.lower_frequency_bound",
"def start_frequency_to_counter(start_frequency):\n assert(start_frequency >= 0 and start_frequency <= MAX_FREQ)\n counter = round(start_frequency/MAX_FREQ * 2**MAX_ZOOM * WF_BINS)\n ## actual start frequency\n print (start_frequency)\n start_frequency = counter * MAX_FREQ / WF_BINS / 2**MAX_ZOOM\n return counter,start_frequency",
"def get_tuning_freq(self, freq):\n if freq < self.freq_lo:\n return\n\n while freq < self.NOTES.min() or freq > self.NOTES.max():\n while freq > self.NOTES.max():\n self.NOTES *= 2\n while freq < self.NOTES.min():\n self.NOTES /= 2\n tuning_freq = min(self.NOTES, key=lambda x: abs(x-freq))\n return tuning_freq",
"def calcMinIDX(fls, minFreq):\n min_idx = np.argmax(fls >= minFreq)\n return min_idx",
"def get_scale_freq():\n return sf / 2 / (num_freq-1)",
"def getStartFrequency(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SENS:FREQ:STAR?\")\n ret = int(self.myFieldFox.read())\n else:\n ret = 0\n return ret",
"def minimum_sampling(self):\n return self.fwhm/2.",
"def freq_from_autocorr(x):\n corr = autocorr(x)\n\n\n # Find the first low point\n d = np.diff(corr)\n start = np.where(d>0)[0]\n\n if len(start)>0 :\n return np.argmax(corr[start[0]:]) + start[0]\n return 0\n\n # Find the next peak after the low point (other than 0 lag). This bit is\n # not reliable for long signals, due to the desired peak occurring between\n # samples, and other peaks appearing higher.\n # Should use a weighting function to de-emphasize the peaks at longer lags.\n # Also could zero-pad before doing circular autocorrelation.\n #peak = np.argmax(corr[start:])\n #px, py = parabolic(corr, peak)\n\n #print(\"minimum at {}, peak at {}\".format(start, peak + start))\n\n\n #return fs / px\n #return peak + start",
"def minpeaks(sig):\n diff_sig = np.diff(sig)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd]<0 and diff_sig[nd + 1]>0)])",
"def get_minimum_air_volume(v_vent: np.ndarray) -> float:\n\n return v_vent.sum()",
"def sampling_frequency(self) -> int:\n return int(1 / self.x_scale)",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def FirstRepeatingFrequency(self):\n prev_freqs = {0}\n freq = 0\n for offset in cycle(self.freq_changes):\n freq += offset\n if freq in prev_freqs:\n return freq\n else:\n prev_freqs.add(freq)",
"def freq():",
"def filter_min(counter: Counter, min_freq: int):\n return Counter({t: c for t, c in counter.items() if c >= min_freq})",
"def cpu_freq():\n curr, max_ = cext.cpu_freq()\n min_ = 0.0\n return [_common.scpufreq(float(curr), min_, float(max_))]",
"def compute_minimum_scale(self):\n dt = self.dt\n\n def func_to_solve(s):\n return self.wavelet.fourier_period(s) - 2 * dt\n\n return optimize.fsolve(func_to_solve, 1)[0]",
"def freq(self) -> int:",
"def freqvals(t):\n N = len(t)\n T = t[-1] - t[0]\n dt = T/N\n nyquist = 1/(2.0*dt)\n lowfreq = 1/T\n return (N, T, dt, nyquist, lowfreq)",
"def min_interpacket_interval(self):\n min_c2s = 0\n min_s2c = 0\n count_c2s = 0\n count_s2c = 0\n prev_c2s_idx = 0\n prev_s2c_idx = 0\n for idx, direction in enumerate(self.fcip_doc['packet_directions']):\n if direction == 'c2s':\n count_c2s += 1\n if count_c2s > 1:\n current_ts = self.fcip_doc['packet_timestamps'][idx]\n prev_ts = self.fcip_doc['packet_timestamps'][prev_c2s_idx]\n delta = current_ts - prev_ts\n if not min_c2s or delta < min_c2s:\n min_c2s = delta\n prev_c2s_idx = idx\n elif direction == 's2c':\n count_s2c += 1\n if count_s2c > 1:\n current_ts = self.fcip_doc['packet_timestamps'][idx]\n prev_ts = self.fcip_doc['packet_timestamps'][prev_s2c_idx]\n delta = current_ts - prev_ts\n if not min_s2c or delta < min_s2c:\n min_s2c = delta\n prev_s2c_idx = idx\n else:\n #*** Don't know direction so ignore:\n pass\n #*** Return the smallest interpacket delay overall, watch out for\n #*** where we didn't get a calculation (don't return 0 unless both 0):\n if not min_s2c:\n #*** min_s2c not set so return min_c2s as it might be:\n return min_c2s\n elif 0 < min_c2s < min_s2c:\n return min_c2s\n else:\n return min_s2c",
"def infer_interval(self) -> (pd.Timedelta, float):\n occurrences_dict = {}\n top_count = 0\n top_frequent_deltas = []\n\n dates = self.index\n\n if len(dates) <= 1:\n raise ValueError(\"Index is too short. It must contain at least 2 values for automatic frequency setting.\")\n\n time_deltas = [dates[i] - dates[i - 1] for i in range(1, len(dates))]\n\n for item in time_deltas:\n item_count = occurrences_dict.get(item, 0) + 1\n occurrences_dict[item] = item_count\n if item_count == top_count:\n top_frequent_deltas.append(item)\n elif item_count > top_count:\n top_frequent_deltas = [item]\n top_count = item_count\n\n relative_frequency = top_count / len(time_deltas)\n\n # if there is more than one delta of top frequency then combine them by calculating the mean\n # \"top frequency delta\" and assigning the combined_relative_frequency as the combined number of occurrences\n # of all \"top frequency deltas\".\n top_frequent_delta = pd.Series(data=top_frequent_deltas).mean()\n combined_relative_frequency = len(top_frequent_deltas) * relative_frequency\n\n return top_frequent_delta, combined_relative_frequency",
"def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency",
"def rough_frequency_samples(m1, m2, flow, fmax, df_min):\n kmin = int(flow / df_min)\n kmax = int(fmax / df_min)\n k = kmin\n ksamples = []\n while k < kmax:\n ksamples.append(k)\n k += int(1.0 / rough_time_estimate(m1, m2, k * df_min) / df_min)\n ksamples.append(kmax)\n return numpy.array(ksamples)",
"def getMinKey(self):\n\n if self.freq:\n min_freq = min(self.freq.keys())\n return list(self.freq[min_freq])[0]\n\n return ''",
"def rastrigin(x):\n x = np.copy(x)\n x -= 10.0\n if not np.isscalar(x[0]):\n N = len(x[0])\n min_num = np.array([10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x])\n return min_num * (-1)\n N = len(x)\n return -(10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x)))",
"def reduce_peaks(self,peaks,odf_min):\n if len(peaks)==0:\n return -1 \n if odf_min<self.iso_thr*peaks[0]:\n #remove small peaks\n ismallp=np.where(peaks<self.peak_thr*peaks[0])\n if len(ismallp[0])>0:\n l=ismallp[0][0]\n else:\n l=len(peaks)\n else:\n return -1\n return l"
]
| [
"0.7111554",
"0.6457247",
"0.6452509",
"0.63382065",
"0.61823815",
"0.606085",
"0.6013277",
"0.5989754",
"0.59551436",
"0.5943181",
"0.592826",
"0.5807336",
"0.5763848",
"0.5742698",
"0.57422686",
"0.5737989",
"0.5735367",
"0.57189804",
"0.57057506",
"0.569486",
"0.56937164",
"0.5670518",
"0.566079",
"0.5655692",
"0.56502205",
"0.5646061",
"0.55630195",
"0.55500686",
"0.5526501",
"0.55047584"
]
| 0.77755356 | 0 |
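
A minimal sketch, with hypothetical intervals, of how the minimum frequency above relates to the average and maximum frequencies from the earlier records; min <= mean <= max always holds because taking reciprocals reverses the ordering of the intervals:

import numpy as np

intervals = np.array([0.12, 0.20, 0.35, 0.50])   # hypothetical ISIs, in seconds
min_frequency = 1.0 / np.amax(intervals)         # slowest pair of spikes -> 2.0 Hz
avg_frequency = 1.0 / np.average(intervals)
max_frequency = 1.0 / np.amin(intervals)
assert min_frequency <= avg_frequency <= max_frequency
print(round(min_frequency, 2), round(avg_frequency, 2), round(max_frequency, 2))   # -> 2.0 3.42 8.33
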
Checks whether there are multiple spikes, otherwise raises an exception. | def raise_if_not_multiple_spikes(intervals):
if len(intervals) < 1:
raise NoMultipleSpikesException | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_spike(self):\n\n arr = [10, 12, 999.99, 13, 15, 40, 9, 9]\n\n # First and last elements should always be good data, unless someone\n # has set a threshold to zero.\n expected = [1, 4, 4, 4, 1, 3, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=self.suspect_threshold,\n fail_threshold=self.fail_threshold\n ),\n expected\n )",
"def check_spikes(data, chunk_size='2min',\n detrend=True,\n detrend_kw={'how':'linear'},\n visualize=False, vis_col=1, max_consec_spikes=3,\n cut_func = lambda x: (abs(x - x.mean()) > 5.*x.std()),\n replace_with='interpolation',\n max_percent=1.):\n import pandas as pd\n from . import algs\n from . import signal as pmdata\n\n #------------\n if replace_with=='trend':\n def replace_nans(dframe):\n trend = pmdata.trend(dframe, how='linear')\n return dframe.fillna(trend)\n elif replace_with=='interpolation':\n def replace_nans(dframe):\n return dframe.interpolate(method='index', limit_direction='both')\n #------------\n\n original = data.copy()\n\n #------------\n # If dentreded == True we save the trend for later and work with the detrended data\n if detrend:\n origtrend = pmdata.trend(data, **detrend_kw)\n detrended = original - origtrend\n dfs = algs.splitData(detrended, rule=chunk_size)\n else:\n dfs = algs.splitData(original, rule=chunk_size)\n #------------\n\n max_count = int(len(original)*max_percent/100.)\n fault_count = pd.Series(len(original), index=original.columns)\n\n for i in range(len(dfs)):\n chunk=dfs[i].copy()\n\n #-------------------------------\n # This substitutes the spikes to NaNs so it can be replaced later\n if len(chunk)>max_consec_spikes:\n chunk=algs.limitedSubs(chunk, max_interp=max_consec_spikes, func=cut_func)\n fault_count = fault_count - chunk.count()\n #-------------------------------\n\n #-------------------------------\n # Substitution of spikes happens here\n #trend = pmdata.trend(chunk, how='linear')\n #chunk = chunk.fillna(trend)\n chunk = replace_nans(chunk)\n #-------------------------------\n\n #-------------------------------\n # We change the chunk in the original list of dfs to concatenate later\n dfs[i]=chunk.copy()\n #-------------------------------\n\n #---------------------\n # Now we put the chunks back together and maybe correct the trend\n despiked = pd.concat(dfs)\n if detrend:\n fou = despiked + origtrend\n else:\n fou = despiked\n valid = fault_count < max_count\n #---------------------\n\n #---------------------\n # Visualize what you're doing to see if it's correct\n if visualize:\n import matplotlib.pyplot as plt\n print('Plotting de-spiking...')\n original[vis_col].plot(style='g-', label='original')\n fou[vis_col].plot(style='b-', label='final')\n plt.title('Column: {}'.format(vis_col))\n plt.legend()\n plt.show()\n plt.close()\n #---------------------\n\n return fou, valid, fault_count",
"def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')",
"def test_no_spike_after_table(self):\n n = 5\n dt = 1.0\n t_max = 2*dt\n # make sure we have spikes at the end\n table = np.ones((1, n))\n\n G = TableSpikers(n)\n G.spike_table = table\n\n sim = simulation.Simulation(G, dt=dt)\n sim.run(t_max)\n \n self.assertFalse(np.any(G.spike))",
"def test_spike_order(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertNotEqual(len(M.t), 0)\n self.assertTrue(all(M.t[i] <= M.t[i+1] for i in xrange(len(M.t) - 1)))",
"def check(self):\n exception = False\n for scalerThread in [self.preemptableScaler, self.scaler]:\n if scalerThread is not None:\n try:\n scalerThread.join(timeout=0)\n except Exception as e:\n logger.exception(e)\n exception = True\n if exception:\n raise RuntimeError('The cluster scaler has exited due to an exception')",
"def testTooManyPlaybacksRaisesAnException(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tpolicy.playback()\n\t\tself.failUnlessRaises(RecordedCallsWereNotReplayedCorrectly, policy.playback)",
"def test_check_price_exception() -> None:\n with raises(StopProcessing):\n check_price(min_price=4, data={'p': 2.0})",
"def self_check(self):\r\n \r\n try:\r\n #tries to get a value from each sensor\r\n \r\n sensor_1_value = self.sen.get_sensor_value()\r\n\r\n # checks if the value is a float else rase exception\r\n\r\n if type(sensor_1_value) != float:\r\n raise Exception()\r\n\r\n #if the sensors dont return a value or is in the wrong type\r\n #the code will fail before here and get caught by the catch.\r\n #otherwise its sets the pass or fail condition to true\r\n \r\n pass_or_fail = True\r\n \r\n\r\n except:\r\n #if the self check fails then it sets the pass or fail\r\n #condition to false\r\n \r\n pass_or_fail = False\r\n \r\n \r\n return(pass_or_fail)",
"def test_rate_reached_perf_issue(self):\n for i in range(0, 10):\n event = self.store_transaction(\n environment=None,\n project_id=self.project.id,\n user_id=str(i),\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n perf_group = event.groups[0]\n snooze = GroupSnooze.objects.create(group=perf_group, count=10, window=24 * 60)\n assert not snooze.is_valid(test_rates=True)",
"def _check_number_of_bells(self) -> bool:\n if self.row_generator.stage == 0:\n self.logger.debug(\"Place holder row generator. Wheatley will not ring!\")\n return False\n if self._tower.number_of_bells < self.row_generator.stage:\n self.logger.warning(f\"Row generation requires at least {self.row_generator.stage} bells, \"\n + f\"but the current tower has {self._tower.number_of_bells}. \"\n + \"Wheatley will not ring!\")\n return False\n if self._tower.number_of_bells > self.row_generator.stage + 1:\n if self.row_generator.stage % 2:\n expected = self.row_generator.stage + 1\n else:\n expected = self.row_generator.stage\n self.logger.info(f\"Current tower has more bells ({self._tower.number_of_bells}) than expected \"\n + f\"({expected}). Wheatley will add extra cover bells.\")\n return True",
"def checkSpikeBonding (self):\r\n stable = True # If any bonds break this will be set to false\r\n stabilityChecker = True # Checks the result of each function call, if set to false then stable will be set to false\r\n # Go through each atom\r\n for i in range(len(self.mol)):\r\n # Go through each spike\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == True:\r\n stabilityChecker = self.stabilitySpike(self.mol[i].spikeArray[j])\r\n if stabilityChecker == False:\r\n stable = False\r\n #print (stable)\r\n if stable == True:\r\n print(\"No Bonds have broken \\n\")\r\n else:\r\n print (\"Bonds have broken \\n\")\r\n return stable",
"def test_correct_spiking(self):\n n = 10\n t_max = 25.0\n dt = 0.2\n p = 0.05\n\n # some reproducible arbitrariness\n np.random.seed(622312)\n n_steps = int_r(t_max/dt)\n table = np.random.rand(n_steps, n) < p\n\n G = TableSpikers(n)\n G.spike_table = copy.copy(table)\n\n class SimpleMonitor(object):\n def __init__(self, target):\n self.target = target;\n self.results = []\n self.order = 1\n\n def evolve(self, t, dt):\n idxs = self.target.spike.nonzero()[0]\n self.results.extend([(int_r(t/dt), i) for i in idxs])\n\n M = SimpleMonitor(G)\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n expected = zip(*table.nonzero())\n\n self.assertSequenceEqual(expected, M.results)",
"def can_throw(self):\n if self.round_points == 0:\n return False\n return True",
"def test_rate_reached(self):\n for i in range(5):\n group = self.store_event(\n data={\n \"fingerprint\": [\"group1\"],\n \"timestamp\": iso_format(before_now(minutes=5 + i)),\n },\n project_id=self.project.id,\n ).group\n snooze = GroupSnooze.objects.create(group=group, count=5, window=24 * 60)\n assert not snooze.is_valid(test_rates=True)",
"def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.",
"def check_sporadic(self):\n par = self._get_parameters()\n if par is None:\n return\n if par in sporadic:\n raise InfeasibleError(refs=sporadic[par])",
"def test_increasing_trend_is_true_if_price_increase_for_3_updates(self):\n self.given_a_series_of_prices([8, 10, 12])\n self.assertTrue(self.goog.is_increasing_trend())",
"def count_single_spikes_and_bursts(info):\r\n\r\n singles = 0\r\n bursts = 0\r\n inBurst = False\r\n\r\n spikesperbursts = []\r\n spb = 2\r\n\r\n prev_start = -1000\r\n prev_end = -1000\r\n for spike in info:\r\n if spike[0] - prev_end > 80:\r\n #print('single:', spike[0])\r\n singles += 1\r\n if inBurst:\r\n spikesperbursts.append(spb)\r\n inBurst = False\r\n else:\r\n #print('burst:',inBurst, spike[0])\r\n if not inBurst:\r\n spb = 2\r\n singles -=1\r\n bursts += 1\r\n else:\r\n spb += 1\r\n inBurst = True\r\n prev_start = spike[0]\r\n prev_end = spike[1]\r\n return singles, bursts, spikesperbursts, sum(spikesperbursts)/len(spikesperbursts)",
"def test_too_many_requests(self):\n try:\n self._mock_time_series(error=fitbit_exceptions.HTTPTooManyRequests,\n error_attrs={'retry_after_secs': 35})\n except fitbit_exceptions.HTTPTooManyRequests:\n self.assertEqual(sys.exc_info()[1].retry_after_secs, 35)\n else:\n assert False, 'Should have thrown exception'",
"def checkTrailingStop(self):\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n for position in open_positions:\n\n last_price = position[\"Last_Price\"]\n\n high_price = position[\"High_Price\"]\n\n five_percent = round(high_price * 0.95, 2)\n\n if last_price > high_price:\n\n self.open_positions.update_one({\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": self.asset_type}, {\n \"$set\": {\"High_Price\": last_price}})\n\n # CHECK IF LAST PRICE < 5% OF HIGH PRICE\n elif last_price < five_percent and self.user[\"Accounts\"][self.account_id][\"Trailing_Stop_Active\"]:\n\n queued = self.queue.find_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n # IF TRUE AND NOT IN QUEUE, SELL OUT POSITION\n if not queued:\n\n trade_data = {\n \"Symbol\": position[\"Symbol\"],\n \"Aggregation\": position[\"Aggregation\"],\n \"Strategy\": position[\"Strategy\"],\n \"Asset_Type\": position[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n trade_data[\"Side\"] = \"SELL\"\n\n if self.asset_type == \"OPTION\":\n\n trade_data[\"Exp_Date\"] = position[\"Exp_Date\"]\n\n trade_data[\"Pre_Symbol\"] = position[\"Pre_Symbol\"]\n\n trade_data[\"Side\"] = \"SELL_TO_CLOSE\"\n\n self.placeOrder(trade_data, position)\n\n msg = f\"Symbol {position['Symbol']} is selling due to 5% drop of high price - TRADER: {self.user['Name']}\"\n\n self.logger.INFO(msg)",
"def stabilitySpike(self,spike):\r\n stable = True\r\n # print (\"The bonded RBN is: \" + str(spike.bondedRBN) + \"\\n\")\r\n bondedSpike = spike.bondedRBN.spikeArray[spike.bondedSpikeNum]\r\n # If type 3 spikes then sum of an intensity can be plus or minus two for bond to be stable\r\n if spike.type == 3 and bondedSpike.type == 3:\r\n if abs(spike.intensity + bondedSpike.intensity) >= 2:\r\n # If unstable break bonds and set stable to false\r\n bondedSpike.bondBreak()\r\n spike.bondBreak()\r\n print (\"Bond Broken in metaAtom: \" + str(self.atomNumber) + \"\\n\")\r\n print (\"The RBN number broken is: \" + str(spike.RBN.rbnNumber) + \"\\n\")\r\n \r\n stable = False\r\n # If one or boths spikes is type 2 and neither is type 1 then the sum of intensitys can be plus or minus one to be stable\r\n elif (spike.type == 2 and bondedSpike.type == 2) or (spike.type == 3 and bondedSpike.type == 2) or (spike.type == 2 and bondedSpike.type == 3):\r\n if abs(spike.intensity + bondedSpike.intensity) >= 1:\r\n # If unstable break bonds and set stable to false\r\n bondedSpike.bondBreak()\r\n spike.bondBreak()\r\n print (\"Bond Broken in metaAtom: \" + str(self.atomNumber) + \"\\n\")\r\n print (\"The RBN number broken is: \" + str(spike.RBN.rbnNumber) + \"\\n\")\r\n stable = False\r\n # If one of the spikes or both is a type 1 spike then the intesnity needs to sum to zero to be stable\r\n else:\r\n if spike.intensity + bondedSpike.intensity != 0:\r\n # If unstable break bonds and set stable to false\r\n bondedSpike.bondBreak()\r\n spike.bondBreak()\r\n print (\"Bond Broken in metaAtom: \" + str(self.atomNumber) + \"\\n\")\r\n print (\"The RBN number broken is: \" + str(spike.RBN.rbnNumber) + \"\\n\")\r\n stable = False\r\n \r\n# # If any bonds have broken False will be returned otherwise True will be returned\r\n# if stable == True:\r\n# print (\"Stability has not changed \\n\")\r\n# else:\r\n# print (\"Stability has changed \\n\")\r\n return stable",
"def check_done(self):\n if len(self._calls) != 0:\n raise MockException(\"Still expecting more function calls\")",
"def is_spare(self):\n if self.is_strike():\n return False\n\n return (self.first_ball + self.second_ball) == 10",
"def _checkRoundOver(self):\n\n # if we already ended it doesn't matter\n if self.hasEnded():\n return\n\n if not any(player.isAlive() for player in self.teams[0].players):\n # allow continuing after wave 1\n if self._wave > 1:\n self.continueOrEndGame()\n else:\n self.endGame()",
"def check_fires(self):\n for fire in self.pjs.fires:\n for block in fire.rects:\n if block.overlap(self.rects[0]):\n self.killer = fire\n return\n return",
"def check_powerups(self):\n for powerup in self.pjs.powerups:\n block = powerup.rects[0]\n if block.overlap(self.rects[0]):\n self.eat(powerup)",
"def is_exhausted(self):\n return random.random() < 0.5",
"def test_spike_negative_vals(self):\n thresholds = (25, 50)\n\n arr = [-10, -12, -999.99, -13, -15, -40, -9, -9]\n\n # First and last elements should always be good data, unless someone\n # has set a threshold to zero.\n expected = [1, 4, 4, 4, 1, 3, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=self.suspect_threshold,\n fail_threshold=self.fail_threshold\n ),\n expected\n )",
"def __check(self):\n if len(self._data)!=len(self._ptbins)+1: \n raise IndexError('Pt bins mismatch')\n for ptbin in self._data:\n if len(ptbin)!=len(self._etabins)+1:\n raise IndexError('Eta bins mismatch')"
]
| [
"0.5455404",
"0.5442264",
"0.54034597",
"0.53654855",
"0.53497535",
"0.53397554",
"0.53384614",
"0.5319763",
"0.52793396",
"0.5264266",
"0.523095",
"0.5229649",
"0.5143878",
"0.5126793",
"0.5108548",
"0.50885737",
"0.50762266",
"0.50755095",
"0.50739056",
"0.504875",
"0.50441664",
"0.5043433",
"0.5028964",
"0.5023726",
"0.5020382",
"0.5010529",
"0.500343",
"0.49969652",
"0.49866062",
"0.49427882"
]
| 0.78514844 | 0 |
load csv filter div and 0.95 r value and at least 2 clls | def load_and_filer(pwd,rval=0.95):
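    # Load the cell-tracking table and keep only cells that divide, are measured long enough and elongate exponentially (filters below)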
df = pd.read_csv(pwd)
df = rl.give_good_structure(df)
df = df.loc[(df['end_type']=='DIVISION')|(df['end_type']=='DIV')|(df['end_type']=='div')]
if 'length_box' in df.columns: #guillaume data
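        # Guillaume-style tables: 'frame' index converted to seconds (60*3 s, i.e. one frame per 3 min) and 'length_box' scaled by 0.065, presumably pixels to um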
df['time_sec'] = df['frame']*60*3
df['length_box_um'] = df['length_box']*0.065
else:
df['length_box_um'] = (df['vertical_bottom'] - df['vertical_top'])*0.065
    df = df.groupby('cell').filter(lambda x: len(x['length_box_um']) > 2)
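    # Keep only cells whose log(length) grows linearly in time (exponential elongation): linear-regression r-value above rval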
df =df.groupby('cell').filter(lambda x: linregress(x['time_sec'],np.log(x['length_box_um'])).rvalue>rval)
#df = rl.give_unique_dataset(df,6,18)
df =df[['length_box_um','time_sec','parent_id','id','gl','date','pos','cell','lane_ID','end_type']]
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_breuer_csv():\n import csv\n with open('Hill_Re_10595_Breuer.csv', 'r') as fh:\n reader = csv.reader(fh)\n reader.next() # Eat header\n raw = np.array([[float(i) for i in l[:-1]] for l in reader if len(l) > 0])\n return analyse_breuer(raw)",
"def load_data(int_to_ped,range_of_int):\n orig_dir = \"exp_result/original_data/\"\n file_orig = \"%s_%s_0.5.csv\" % (int_to_ped, range_of_int)\n file_path = orig_dir+file_orig\n df = pd.read_csv(file_path)\n return df",
"def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)",
"def load_filter():\n if not os.path.isfile(FILTER):\n print('no filter found, creating square grid')\n return []\n with open(FILTER, 'r') as ff:\n reader = csv.reader(ff)\n l = list(reader)\n ar = numpy.asarray(l)\n # ar = numpy.transpose(ar, (0, 1))\n # ar = numpy.flip(ar, 1)\n # ar = numpy.rot90(ar, k=3, axes=(0, 1))\n # ar = numpy.swapaxes(ar, 0, 1)\n f = list(map(list, ar))\n return f",
"def read_csv():",
"def read_data(path_to_file, survey):\n if survey == 'eco':\n columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', \n 'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s', \n 'fc', 'grpmb', 'grpms']\n\n # 13878 galaxies\n eco_buff = pd.read_csv(path_to_file,delimiter=\",\", header=0, \\\n usecols=columns)\n\n # 6456 galaxies \n catl = eco_buff.loc[(eco_buff.cz.values >= 3000) & \\\n (eco_buff.cz.values <= 7000) & (eco_buff.absrmag.values <= -17.33) &\\\n (eco_buff.logmstar.values >= 8.9)]\n\n volume = 151829.26 # Survey volume without buffer [Mpc/h]^3\n cvar = 0.125\n z_median = np.median(catl.grpcz.values) / (3 * 10**5)\n \n elif survey == 'resolvea' or survey == 'resolveb':\n columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', \n 'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh', \n 'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']\n # 2286 galaxies\n resolve_live18 = pd.read_csv(path_to_file, delimiter=\",\", header=0, \\\n usecols=columns)\n\n if survey == 'resolvea':\n catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) & \\\n (resolve_live18.grpcz.values > 4500) & \\\n (resolve_live18.grpcz.values < 7000) & \\\n (resolve_live18.absrmag.values < -17.33) & \\\n (resolve_live18.logmstar.values >= 8.9)]\n\n\n volume = 13172.384 # Survey volume without buffer [Mpc/h]^3\n cvar = 0.30\n z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)\n \n elif survey == 'resolveb':\n # 487 - cz, 369 - grpcz\n catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) & \\\n (resolve_live18.grpcz.values > 4500) & \\\n (resolve_live18.grpcz.values < 7000) & \\\n (resolve_live18.absrmag.values < -17) & \\\n (resolve_live18.logmstar.values >= 8.7)]\n\n volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3\n cvar = 0.58\n z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)\n\n return catl,volume,cvar,z_median",
"def test_load_csv_file():\n data = loader.load_csv_file(\"buildup/reference/comsol_solution/lofi/voltage.csv.bz2\")\n\n assert data.any()",
"def load_filter_file(self, file_path): \n self._pop_all_self()\n self.filter_list = []\n self.file_path = file_path \n \n with codecs.open(self.file_path, 'r', encoding='cp1252') as fid: \n for k, line in enumerate(fid):\n line = line.lstrip('\\n\\r ')\n if line.startswith('#'):\n continue \n split_line = [item.strip() for item in line.split('\\t')]\n if k==0:\n # Header\n header = split_line\n else:\n line_dict = dict(zip(header, split_line))\n self[line_dict['variable']] = SingleFilter(line_dict, self.parameter)\n\n # Save attributes\n for item in self.keys():\n setattr(self, item, self[item])\n \n self.header = sorted(header)\n \n if self.filter_type == 'data':\n self.year_list = [y for y in range(self['YEAR_INTERVAL'].value[0], \n self['YEAR_INTERVAL'].value[1]+1)]",
"def filter_data_on_complaince(folder_Path,complaince_rate):\n complaince_df=pd.read_csv(folder_Path+\"complaince.csv\")\n complaince_df['Percent'] = complaince_df['Percent'].apply(converters.ConvertPercent)\n complaince_df = complaince_df.loc[complaince_df.Percent >= complaince_rate]\n IDs= complaince_df.ID.unique()\n # print(IDs)\n df_apps=pd.read_csv(folder_Path+\"app_usage.csv\")\n df_apps = df_apps.loc[df_apps.user_id.isin(IDs)]\n df_apps = df_apps.reset_index(drop=True)\n df_apps.to_csv(folder_Path+\"Filtered/app_usage.csv\")\n\n\n df_battery= pd.read_csv(folder_Path+\"battery_events.csv\")\n df_battery= df_battery.loc[df_battery.user_id.isin(IDs)]\n df_battery = df_battery.reset_index(drop=True)\n df_battery.to_csv(folder_Path+\"Filtered/battery_events.csv\")\n\n\n df_bluetooth = pd.read_csv(folder_Path+\"bluetooth.csv\")\n df_bluetooth = df_bluetooth.loc[df_bluetooth.user_id.isin(IDs)]\n df_bluetooth = df_bluetooth.reset_index(drop=True)\n df_bluetooth.to_csv(folder_Path+\"Filtered/bluetooth.csv\")\n\n df_screen = pd.read_csv(folder_Path+\"screenstate.csv\")\n df_screen = df_screen.loc[df_screen.user_id.isin(IDs)]\n df_screen = df_screen.reset_index(drop=True)\n df_screen.to_csv(folder_Path+\"Filtered/screenstate.csv\")\n\n\n df_wifi = pd.read_csv(folder_Path+\"wifi.csv\")\n df_wifi = df_wifi.loc[df_wifi.user_id.isin(IDs)]\n df_wifi = df_wifi.reset_index(drop=True)\n df_wifi.to_csv(folder_Path+\"Filtered/wifi.csv\")",
"def ac_load_data(request,dyn_id):\n\n # Retrieve data from request\n options = request.POST.get('ac_option')\n protsel = request.POST.get('ac_protsel')\n analysis_type = request.POST.get('ac_analtype')\n nedges = request.POST.get('ac_nedges')\n\n # Open file with specified options\n infile = settings.MEDIA_ROOT + 'Precomputed/allosteric_com/dyn%s/%s_%s.csv'%(dyn_id,options,protsel)\n if os.path.exists(infile):\n df = pd.read_csv(infile)\n else: \n return HttpResponse(json.dumps({'filenotfound' : 1}), content_type='view/'+dyn_id)\n\n # Extract selected data\n myweihgt = analysis_type+'_weight'\n df_sel = df.filter(['resid1','resid2','resid1_gennum','resid2_gennum',myweihgt], axis=1)\n df_sel.rename(columns = {myweihgt : 'weight'}, inplace = True)\n\n # Take maximum and minimum values \n min_w = df_sel['weight'].min()\n max_w = df_sel['weight'].max()\n\n # Sort by (absolute) weight and select top ones\n df_sel['ac_abs_value'] = df_sel['weight'].apply(lambda x: abs(x))\n df_sorted = df_sel.sort_values(by=['ac_abs_value'], ascending=False)\n df_top = df_sorted.head(int(nedges))\n\n # Get a color for each weight value\n df_top['color'] = df_top['weight'].apply(\n lambda x: get_color(x, max_w, min_w)\n )\n\n # Get a color for each weight value\n print(min_w, max_w)\n df_top['cyldiam'] = df_top['weight'].apply(\n lambda x: get_cyldiam(x, max_w, min_w)\n )\n \n # Set format for Residue columns: we'll want to show both generic numbering and residue number\n df_top['Residue1'] = df_top.apply(lambda x: format_resname(x,'1'), axis=1)\n df_top['Residue2'] = df_top.apply(lambda x: format_resname(x,'2'), axis=1)\n\n # ROunded ac values for displayed table\n df_top['Edge weight'] = df_top['weight'].apply(lambda x: format_num(x))\n\n\n # For residue Selection\n df_top['resnum1'] = df_top['resid1'].apply(lambda x: x.split(':')[-1])\n df_top['resnum2'] = df_top['resid2'].apply(lambda x: x.split(':')[-1])\n\n # Convert resid1 and resid2 into NGL-selection-language strings\n df_top['resid1'] = df_top['resid1'].apply(lambda x: into_ngl(x))\n df_top['resid2'] = df_top['resid2'].apply(lambda x: into_ngl(x))\n\n # Create new column for the NGL selection line that we will use in the \"lupas\" thingy\n df_top['sels'] = df_top.apply(lambda x: lupa_column(x), axis=1)\n\n # Create new column for checkboxes of specific selection\n df_top['cbx'] = df_top.apply(lambda x: checkbox_column(x,'ac'), axis=1)\n\n # Return AC info\n ac_dict = dict()\n ac_dict['ac_data'] = df_top.to_dict(orient='records')\n ac_dict['min'] = format_num(min_w)\n #ac_dict['int'] = round(format((max_w-min_w)/2+min_w,4), '.3g')\n ac_dict['max'] = format_num(max_w)\n\n # Decide colorscales to use (wtbl if positive values avaliable, wtyl if negatives avaliable )\n colorscales = []\n if ac_dict['min'] < 0:\n colorscales.append('wtyl')\n \n if ac_dict['max'] > 0: \n colorscales.append('wtbl') \n ac_dict['colorscales'] = colorscales\n\n # Find name of the selected options\n options_name = []\n for option in options.split('_'):\n optname = ac_options_codes[option]\n options_name.append(optname) \n ac_dict['sel_opt'] = '-'.join(options_name)\n\n # Convert our df to a html table\n html_top = df_top.to_html(\n index=False,\n escape=False,\n columns = ['cbx','sels','Residue1', 'Residue2', 'Edge weight'],\n classes=\"dataframe ac_table display compact dataTable\",\n )\n\n # Remove unneeded column titles\n html_top = html_top.replace('>cbx<','><')\n html_top = html_top.replace('>sels<','><')\n ac_dict['table'] = html_top\n\n return 
HttpResponse(json.dumps(ac_dict), content_type='view/'+dyn_id)",
"def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)",
"def load_data(self, dropna=False):\r\n # Load data, delete Ml index, get number of channels, add\r\n df = pd.read_csv(self.file_path, header=None, index_col=0, dtype='float64')\r\n\r\n cols = df.shape[1]\r\n if cols < 2:\r\n raise ValueError(f'{self} wrong file type.')\r\n\r\n df.columns = ['t'] + [f\"c{i}\" for i in range(1, cols)]\r\n df.index = df.index.astype(int)\r\n df.index.name = 'r'\r\n\r\n if dropna:\r\n df.dropna(axis=1, how='all', inplace=True)\r\n\r\n self.set_data(df)",
"def load_and_prepare_cmd(filename,verbose=False): # (g, gr) = load_and_prepare_cmd('fieldA.csv')\n FIELD = pd.read_csv(\"fieldA.csv\")\n g = FIELD[\"g\"] # probs slower than inital idea\n gr = g - FIELD[\"r\"]\n mask = (g>14) & (g<24) & (gr>-0.5) & (gr<2.5)\n if verbose:\n print(\"Length of g and gr are {0:d} and {1:d} respectively\".format(len(g),len(gr)))\n return gr.where(mask), g.where(mask)",
"def read_csv(self, filepath, header=True):\n BaseSampler.read_csv(self, filepath, header)\n # convert the data to floats\n self.new_obs = []\n self.img_w, self.img_h = None, None\n for row in self.obs:\n if self.img_w is None:\n self.img_w = int(row[0])\n if self.img_w == 0 or (len(row)-1) % self.img_w != 0:\n raise Exception('The sampler does not understand the format of the data. Did you forget to specify image width in the data file?')\n self.new_obs.append([int(_) for _ in row])\n\n self.obs = np.array(self.new_obs)[:,1:]\n if self.cl_mode:\n self.d_obs = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=self.obs.astype(np.int32))\n\n self.d = self.obs.shape[1]\n self.img_h = int(self.d / self.img_w)\n self.alpha = float(self.N) * 5\n return",
"def loadCSV(input_file):",
"def read_prefiltered_data(self):\n logger.info(\"Reading prefiltered data from file: %s...\" % os.path.basename(self.filled_output_file))\n self.df = pd.read_csv(self.filled_output_file)",
"def __init__(self):\r\n self.filter_p_number = 3 # First one with enough data for statistics\r\n self.prfs_d = extract_settings_elvis()\r\n\r\n ccds = True\r\n filtered = False\r\n scamp = False\r\n\r\n input_df = read_csv('cats/cat_clean_ssos.csv', index_col=0)\r\n filt_cat = self.gets_filtered_catalog() # Gets data from filtered\r\n\r\n if ccds:\r\n cats_d = self.extract_cats()\r\n self.extract_stats_ccds(cats_d, input_df, filt_cat)\r\n elif filtered:\r\n self.extract_stats_filt(filt_cat, input_df)\r\n elif scamp:\r\n pass\r\n # self.extract_stats_scamp(input_df)\r\n else:\r\n pass",
"def load_interpolation_data():\n\n global AL_scan_precision\n\n data = np.genfromtxt(\"../data/theoretical_limit.csv\", delimiter=',')\n\n # Data is in mas, interpolation should be in asec\n AL_scan_precision = interp1d(data[:,0], data[:,1]/1.0e3)",
"def __init__(self, configfile, ikfile, filterfile):\n \n self.configfile = configfile\n self.filterfile = filterfile\n self.ikfile=ikfile\n self.ckfile=\"test.ck\"\n self.fkfile=\"test.fk\"\n self.sclkfile=\"fakesclk\"\n\n with open(configfile, newline='') as f:\n reader = csv.reader(f,delimiter=' ', skipinitialspace=True)\n type=next(reader)\n if (type[0].lower() == 'polygon'):\n self.x=[]\n self.y=[]\n self.z=[]\n n_lines = 0\n for row in reader:\n n_lines=n_lines+1\n if n_lines==1:\n if row[0]=='fill_factor':\n self.fillfactor = float(row[1])\n if n_lines>1:\n tmpx=np.tan(np.radians(float(row[0])))\n tmpy=np.tan(np.radians(float(row[1])))\n tmpz=1\n self.x.append(tmpx)\n self.y.append(tmpy)\n self.z.append(tmpz)\n self.save_poly(self.ikfile,[0,0,1],self.x,self.y,self.z)\n \n elif (type[0].lower() == 'circle'):\n self.x=float(next(reader)[0])\n self.save_circ(self.ikfile,[0,0,1],self.x)\n self.fillfactor = float(next(reader)[1])\n\n else:\n sys.exit(\"In file %s, instrument FOV is invalid. Only \\\"Circle\\\" or \\\"Polygon\\\" allowed.\" %(configfile))\n \n with open(filterfile, newline='') as f:\n transform = pd.read_csv(f, sep='\\s+', index_col='colors')\n self.transforms = transform",
"def process_results_file(f_path):\n results = pd.read_csv(f_path, sep='\\t', header=0)\n keep_cols = {'GS', 'SIZE', 'ES', 'NES', 'p-val'}\n results = results[:20].filter(keep_cols)\n return results",
"def phot_readData(input_file):\n original_path = os.getcwd()\n os.chdir(input_file['save_path'])\n print 'Reading '+input_file['exoplanet']+'*.csv files ....'\n files_csv = np.sort(glob.glob(input_file['save_path']+'/phot_results/'+input_file['exoplanet']+'*.csv'))\n scatter = np.zeros(len(files_csv))\n for i in range(len(files_csv)):\n phot_data = read_csv(files_csv[i])\n scatter[i] = np.std(phot_data.hoststar)\n use.update_progress((i+1.)/len(files_csv))\n hjd = read_csv(input_file['save_path']+'/results_iraf_calibrations.csv')\n airmass = hjd.Airmass\n hjd = hjd.HJD.values\n print '... done!'\n id_min = scatter.argmin() #index of the min scatter file\n id_max = scatter.argmax() #index for the maximum scatter file\n print 'The smallest scatter is: '+str(files_csv[id_min])\n print 'Which is file: '+files_csv[id_min]\n print('Working @'+files_csv[id_min]+' that is the min scatter')\n print('... Read '+files_csv[id_min]+' ...')\n data_min_scatter = read_csv(files_csv[id_min])\n print('... done.')\n rawflux = data_min_scatter.hoststar.values/data_min_scatter.refstar.values\n eflux = rawflux*np.sqrt((data_min_scatter.hoststar_err.values/data_min_scatter.hoststar.values)**2 + (data_min_scatter.refstar_err.values/data_min_scatter.refstar.values)**2)\n os.chdir(original_path)\n return rawflux,eflux,hjd,airmass",
"def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f = list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))",
"def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)",
"def __init__(self, csv_path, list_path):\n self.df = pd.read_csv(csv_path)\n self.df = self.df[['Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',\n 'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',\n 'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',\n 'Support Devices', 'No Finding']]\n self.df.fillna(0.0, inplace=True) #only have two labels, positive and negative\n self.encoded_imp = load_list(path=list_path)",
"def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results",
"def init(fileName):\r\n global grand_prod_cost, grand_album_sales\r\n infile = ''\r\n try:\r\n with open(fileName, mode='r') as infile:\r\n reader = csv.reader(infile)\r\n sniffer = csv.Sniffer()\r\n has_header = sniffer.has_header(infile.read(2048))\r\n infile.seek(0)\r\n if (has_header):\r\n next(reader) # move curser to next row so the header is not included\r\n initBands(reader)\r\n # Reset the curser to start based on presence of header\r\n if(has_header):\r\n infile.seek(0)\r\n # avoid header\r\n next(reader)\r\n else:\r\n infile.seek(0)\r\n splitByBand(reader)\r\n except Exception as e:\r\n print('Exception in init')\r\n raise e",
"def river_builder_harmonics(in_csv, index_field, units, field_names, r_2, n, methods):\n print('In the RB function')",
"def getCSVbbx(filepath, detail, folder, time):\n \n #format validation\n pd.read_csv(filepath)\n click.echo(\"csv\")\n CRSinfo = True\n listlat = [\"Koordinate_Hochwert\",\"lat\",\"Latitude\",\"latitude\"]\n listlon = [\"Koordinate_Rechtswert\",\"lon\",\"Longitude\",\"longitude\",\"lng\"]\n listCRS = [\"CRS\",\"crs\",\"Koordinatensystem\",\"EPSG\",\"Coordinate reference system\", \"coordinate system\"]\n listtime = [\"time\", \"timestamp\", \"date\", \"Time\", \"Jahr\", \"Datum\"]\n try:\n deli=';'\n df = pd.read_csv(filepath, delimiter=deli,engine='python')\n #tests if there is a column named Coordinatesystem or similar\n click.echo(\"hi\")\n #click.echo(df.columns.values)\n #click.echo(intersect(listCRS,df.columns.values))\n if not intersect(listCRS,df.columns.values):\n CRSinfo= False\n print(\"hu\")\n print(\"No fitting header for a reference system\")\n\n if not(((intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)))or (intersect(listtime, df.columns.values))):\n #output=\"No fitting header for latitudes or longitudes\"\n raise Exception('No fitting ')\n #print(output)\n #return output\n\n except Exception as exce:\n deli=','\n df = pd.read_csv(filepath, delimiter=deli,engine='python')\n #tests if there is a column named Coordinatesystem or similar\n click.echo(\"hi\")\n #click.echo(df.columns.values)\n #click.echo(intersect(listCRS,df.columns.values))\n if not intersect(listCRS,df.columns.values):\n CRSinfo= False\n \n print(\"No fitting header for a reference system2\")\n z=intersect(listtime, df.columns.values)\n print (z)\n t=intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)\n print (intersect(listlat,df.columns.values))\n print(\"_______________\")\n print(t)\n if not t:\n print(\"false\")\n\n if not(((intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)))or (intersect(listtime, df.columns.values))):\n #output=\"No fitting header for latitudes or longitudes\"\n #raise Exception('No fim')\n \n raise Exception(\"evtl kein csv oder ungueltiges Trennzeichen.\")\n #print(\"keine Koordinaten vorhanden\")\n #print(output)\n #return output\n print (exce)\n\n if detail =='bbox':\n click.echo(\"bbox\")\n # Using Pandas: http://pandas.pydata.org/pandas-docs/stable/io.html\n #if folder=='single':\n mylat=intersect(listlat,df.columns.values)\n mylon=intersect(listlon,df.columns.values)\n lats=df[mylat[0]]\n lons=df[mylon[0]]\n bbox=[min(lats),min(lons),max(lats),max(lons)]\n # CRS transformation if there is information about crs\n if(CRSinfo):\n mycrsID=intersect(listCRS,df.columns.values)\n myCRS=df[mycrsID[0]]\n lat1t,lng1t = extractTool.transformToWGS84(min(lats),min(lons), myCRS)\n lat2t,lng2t = extractTool.transformToWGS84(max(lats),max(lons), myCRS)\n bbox=[lat1t,lng1t,lat2t,lng2t]\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV object:\")\n click.echo(bbox)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append(bbox)\n if folder=='whole':\n extractTool.bboxArray.append(bbox)\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV:\")\n click.echo(bbox)\n print(\"----------------------------------------------------------------\")\n else:\n if folder=='single':\n 
print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV object:\")\n print(bbox)\n print(\"Missing CRS -----> Boundingbox will not be saved in zenodo.\")\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([None])\n if folder=='whole':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV file:\")\n click.echo(bbox)\n click.echo(\"because of a missing crs this CSV is not part of the folder calculation.\")\n print(\"----------------------------------------------------------------\")\n\n else:\n extractTool.ret_value.append([None])\n\n #returns the convex hull of the coordinates from the CSV object.\n if detail == 'convexHull':\n click.echo(\"convexHull\")\n mylat=intersect(listlat,df.columns.values)\n mylon=intersect(listlon,df.columns.values)\n lats=df[mylat[0]]\n lons=df[mylon[0]]\n coords=np.column_stack((lats, lons))\n #definition and calculation of the convex hull\n hull=ConvexHull(coords)\n hull_points=hull.vertices\n convHull=[]\n for z in hull_points:\n point=[coords[z][0], coords[z][1]]\n convHull.append(point)\n if(CRSinfo):\n mycrsID=intersect(listCRS,df.columns.values)\n myCRS=df[mycrsID[0]]\n inputProj='epsg:'\n inputProj+=str(myCRS[0])\n print(inputProj)\n inProj = Proj(init=inputProj)\n outProj = Proj(init='epsg:4326')\n for z in coords:\n z[0],z[1] = transform(inProj,outProj,z[0],z[1])\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"convex Hull of the csv file: \")\n click.echo(convHull)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append(convHull)\n if folder=='whole':\n extractTool.bboxArray=extractTool.bboxArray+convHull\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"convex hull of the CSV:\")\n click.echo(convHull)\n print(\"----------------------------------------------------------------\")\n #return convHull\n else:\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Convex hull of the CSV object:\")\n print(convHull)\n print(\"Missing CRS -----> Boundingbox will not be saved in zenodo.\")\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([None])\n if folder=='whole':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Convex hull of the CSV file:\")\n click.echo(convHull)\n click.echo(\"because of a missing crs this CSV is not part of the folder calculation.\")\n print(\"----------------------------------------------------------------\")\n\n\n else:\n extractTool.ret_value.append([None])\n\n\n\n \n if (time):\n click.echo(\"hallo\")\n # Using Pandas: http://pandas.pydata.org/pandas-docs/stable/io.html\n df = pd.read_csv(filepath, sep=';|,',engine='python')\n click.echo(listtime)\n click.echo(df.columns.values)\n intersection=intersect(listtime, df.columns.values)\n click.echo(intersection)\n if not intersection:\n print(\"No fitting header for time-values\")\n 
extractTool.ret_value.append([None])\n # TODO: fehlerbehandlung \n #try:\n #for t in listtime:\n #if(x not in df.columns.values):\n #click.echo(\"This file does not include time-values\")\n #else:\n #time=df[t]\n #timeextend =[min(time), max(time)]\n #click.echo(timeextend)\n #return timeextend\n #except Exception as e:\n #click.echo (\"There is no time-value or invalid file.\")\n #return None \n else:\n \n \n time=df[intersection[0]]\n print(min(time))\n print(max(time))\n timemin=str(min(time))\n timemax=str(max(time))\n timemax_formatted=dateparser.parse(timemax)\n timemin_formatted=dateparser.parse(timemin)\n timeextend=[timemin_formatted, timemax_formatted]\n print(timeextend)\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Timeextend of this CSV file:\")\n click.echo(timeextend)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([timeextend])\n #return timeextend\n if folder=='whole':\n extractTool.timeextendArray.append(timeextend)\n print(\"timeextendArray:\")\n print(extractTool.timeextendArray)\n\n else:\n extractTool.ret_value.append([None])\n if folder=='single':\n print(extractTool.ret_value)\n return extractTool.ret_value",
"def readCSV(filename):\r\n data = list( csv.reader(open('HW_08_DBScan_Data_NOISY_v300.csv','r'),delimiter=','))\r\n for dIdx in range(len(data)):\r\n data[dIdx] = [float(data[dIdx][0]),float(data[dIdx][1]),float(data[dIdx][2])]\r\n #print(data[0])\r\n return data",
"def read_csv_ur10(self, csv_file):\r\n df = pd.read_csv(csv_file, sep=';', decimal=',', header=0)\r\n return df"
]
| [
"0.59180343",
"0.59158766",
"0.57957816",
"0.5783254",
"0.5765451",
"0.5695217",
"0.5683417",
"0.56652474",
"0.5604578",
"0.56006265",
"0.559732",
"0.5570948",
"0.5557407",
"0.5556778",
"0.55453295",
"0.5445747",
"0.5437512",
"0.5381096",
"0.53779906",
"0.53775626",
"0.5370843",
"0.53656894",
"0.536316",
"0.53555286",
"0.5341907",
"0.5336898",
"0.5333998",
"0.5316496",
"0.53068614",
"0.5305497"
]
| 0.63760495 | 0 |
Variable at birth and elongation rate mean over npoint | def at_birth(df,variable,npoint):
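    # Mean of the first npoint time points of each cell; the predicted-length column is renamed to '<variable>_at_birth'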
return df.groupby('cell')[['{}'.format('{}'.format(variable)),'pred_growth_rate']].apply(lambda x: x.head(npoint).mean()).rename(columns={'pred_length_box_um':'{}_at_birth'.format(variable)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bl_mean(mu, std, alpha, n):\n var__alpha = (alpha - 1) / 2\n var__beta = var__alpha * std ** 2\n var = var__beta / (var__alpha - 1)\n return np.exp(mu + var / 2) * (alpha / n)",
"def mean(self):\n return math.exp(self.mu + (self.sigma ** 2) / 2)",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def mean(vals):",
"def mean(self):\n return self.mu",
"def mae(t, y):\n\treturn mean_absolute_error(t, y)",
"def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical mean.')\n return self.__total_rewards / self.__total_pulls",
"def _predictive_mean_analytical(self, mu, sigma):\r\n #FIXME: Not correct\r\n return mu",
"def mean(self):\n return self.k * self.theta",
"def mean(self):\n\t\treturn 0. #obtained by integrating 1.5x^3 from -1 to 1",
"def mean_height(data):",
"def mean_height(data):",
"def mean(self):\n return self.vmean",
"def get_population_mean(self):\n\t\treturn self.variables.get('population_mean')",
"def d_mean(x, y):\n return (x + y) / 2",
"def mean_average_position():\n pass",
"def mean_error(self) -> float:\n self.ME = sum(np.array(self.sim_data[\"Human (mean)\"]) - np.array(self.sim_data[\"assigned_sim\"])) / len(self.sim_data)\n return self.ME",
"def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu",
"def parade_mean(p):\n return [noneneg(mj.get('mean')) for mj in p['moments']]",
"def get_average_MAE(true_pred_df): \n age_group = true_pred_df.groupby('y_true')\n \n mae_average = []\n for age, age_data in age_group:\n mae_average.append(np.mean(age_data.mae))\n \n return mae_average",
"def _compute_instance_moments(x):\n return torch.mean(x, dim=(2, 3), keepdim=True), torch.var(x, dim=(2, 3), keepdim=True)",
"def average_age_nt(all_profile_nt: namedtuple) -> tuple:\n \"\"\"Param: all_profile_nt: Named tuple containing all profiles\"\"\"\n today = date.today()\n value = sum(map(lambda v: today.year - v[-1].year - ((today.month, today.day) < (\n v[-1].month, v[-1].day)), all_profile_nt))/len(all_profile_nt)\n return value",
"def estimate(particles, weights):\n\n pos = particles[:, 0:2]\n mean = np.average(pos, weights=weights, axis=0)\n var = np.average((pos - mean)**2, weights=weights, axis=0)\n return mean, var",
"def estimate(particles, weights):\n\n pos = particles[:, 0:2]\n mean = np.average(pos, weights=weights, axis=0)\n var = np.average((pos - mean)**2, weights=weights, axis=0)\n return mean, var",
"def getMeanE(self):\n\n\n\t\tEBefore, EAfter = self.getEnergyEvolution()\n\n\t\tmeanBefore = np.mean(EBefore[-self.__Nkicks//5:])\n\t\tmeanAfter = np.mean(EAfter[-self.__Nkicks//5:])\n\t\tmeanTot = (meanBefore+meanAfter)/2\n\n\t\treturn meanBefore, meanAfter, meanTot",
"def mean_STD(self,counter):\n \n \n pass",
"def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)",
"def yearly_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Yearly\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result"
]
| [
"0.62526536",
"0.61370325",
"0.6102281",
"0.60851276",
"0.6018355",
"0.601033",
"0.60047895",
"0.6003948",
"0.6003362",
"0.59845704",
"0.5964844",
"0.5962788",
"0.594292",
"0.594292",
"0.5938851",
"0.5929146",
"0.5919363",
"0.5890413",
"0.586686",
"0.5865125",
"0.5861947",
"0.5852307",
"0.58513397",
"0.5837102",
"0.58294684",
"0.58294684",
"0.5812727",
"0.58071417",
"0.5801461",
"0.579255"
]
| 0.6964157 | 0 |
Connect cells between genealogies and return dataframe with super_cell id and variable | def connect_cells(dfte,vari):
    # Create the lineage columns (parent, grandparent and great-grandparent cell) when they are missing
if 'g_parent_cell' not in dfte.columns:
dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy
if 'g_g_parent_cell' not in dfte.columns:
dfte = rl.genalogy(dfte,'g_parent_cell')
if 'g_g_g_parent_cell' not in dfte.columns:
dfte = rl.genalogy(dfte,'g_g_parent_cell')
    # Give every cell observation a unique index (uid = cell id + time point)
dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))
vac=[];sc=[];uid = []
    # Collect the variable of interest and a unique identifier for each cell, its mother, grandmother and great-grandmother
for c,idx in enumerate(dfte['cell'].unique()):
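        # The cell itself plus its ancestors up the lineage: mother, grandmother and great-grandmother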
dau = dfte.loc[dfte['cell']==idx]
pc = dau['parent_cell'].iloc[0]
mum = dfte.loc[dfte['cell']==pc]
gpc = dau['g_parent_cell'].iloc[0]
gmum = dfte.loc[dfte['cell']==gpc]
ggpc = dau['g_g_parent_cell'].iloc[0]
ggmum = dfte.loc[dfte['cell']==ggpc]
gggpc = dau['g_g_g_parent_cell'].iloc[0]
gggmum = dfte.loc[dfte['cell']==gggpc]
fte = lambda x: x[['{}'.format(vari),'uid']].values
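        # Stack [variable, uid] rows from the great-grandmother down to the cell itself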
tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])
vac.append(tmp[:,0])
uid.append(tmp[:,1])
sc.append(['super_cell_{}'.format(c)]*len(tmp))
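    # Flatten into one long dataframe: each observation labelled with the super-cell (lineage) it belongs to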
return pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def muther_dau_gdau(df,var_m,var_d,var_gd):\n if 'g_parent_cell' not in df.columns:\n df = rl.genalogy(df,'parent_cell') #Create genealogy\n tmp={'gr_mu_{}'.format(var_m):[],'mu_{}'.format(var_d):[],'daugther_{}'.format(var_gd):[]}\n for k in df.cell.unique():\n dau = df.loc[df['cell']==k]\n dau_var = dau['{}'.format(var_gd)].iloc[0]\n nid = dau.parent_cell.iloc[0]\n mu = df.loc[df['cell']==nid]\n try:#if mother exists\n mu_var = mu['{}'.format(var_d)].iloc[0]\n nid = mu.g_parent_cell.iloc[0]\n mu_var = mu['{}'.format(var_d)].iloc[0]\n gmu = df.loc[df['cell']==nid]\n except IndexError:\n continue\n try:# if grand mother exists\n tmp['gr_mu_{}'.format(var_m)].append(gmu['{}'.format(var_m)].iloc[0])\n tmp['mu_{}'.format(var_d)].append(mu_var)\n tmp['daugther_{}'.format(var_gd)].append(dau_var)\n except IndexError:\n tmp['gr_mu_{}'.format(var_m)].append(np.nan)\n tmp['mu_{}'.format(var_d)].append(mu_var)\n tmp['daugther_{}'.format(var_gd)].append(dau_var)\n return pd.DataFrame(tmp)",
"def initialize_all_df(itinerary_nodes):\n\n # Build nested bundles and assign IDs\n\n # (itinerary_ID, itinerary_node)\n itin_nodes = [(str(uuid.uuid4()), itin) for itin in itinerary_nodes]\n\n # (itinerary_ID, leg_ID, leg_node, numbering)\n leg_nodes = [(itin[0], str(uuid.uuid4()), leg, i)\n for itin in itin_nodes\n for i, leg in enumerate(xml_path.get_leg_nodes(itin[1]))]\n\n # (leg_ID, segment_ID, segment_node, numbering)\n seg_nodes = [(leg[1], str(uuid.uuid4()), seg, i * 2)\n for leg in leg_nodes\n for i, seg in enumerate(xml_path.get_segment_nodes(leg[2]))]\n\n # Turn the itineraries into a dataframe\n itin_nodes_df = pd.DataFrame(itin_nodes, columns=['itinerary_id', 'node'])\n itinerary_df = ids.init_itineraries_df({'itinerary_id': itin_nodes_df['itinerary_id'],\n 'node': itin_nodes_df['node']\n })\n\n # Turn the legs into a dataframe\n leg_nodes_df = pd.DataFrame(leg_nodes, columns=['itinerary_id', 'leg_id', 'node', 'leg_number'])\n legs_df = ids.init_legs_df({'leg_id': leg_nodes_df['leg_id'],\n 'node': leg_nodes_df['node'],\n 'leg_number': leg_nodes_df['leg_number']\n })\n\n # Turn the segments into a dataframe\n seg_nodes_df = pd.DataFrame(seg_nodes, columns=['leg_id', 'segment_id', 'node', 'segment_number'])\n segments_df = ids.init_segments_df({'segment_id': seg_nodes_df['segment_id'],\n 'node': seg_nodes_df['node'],\n 'segment_number': seg_nodes_df['segment_number']\n })\n\n # Build the link table dataframe\n merged_link = pd.merge(leg_nodes_df[['itinerary_id', 'leg_id']],\n seg_nodes_df[['leg_id', 'segment_id']],\n on='leg_id', how='outer')\n trip_link_df = ids.init_trip_link_df({'itinerary_id': merged_link['itinerary_id'],\n 'leg_id': merged_link['leg_id'],\n 'segment_id': merged_link['segment_id']\n })\n\n return trip_link_df, itinerary_df, legs_df, segments_df",
"def gen_CASTEP_supercell(CASTEP_cell,na,nb,nc):\n nruter=dict()\n nruter[\"na\"]=na\n nruter[\"nb\"]=nb\n nruter[\"nc\"]=nc\n nruter[\"lattvec\"]=np.array(CASTEP_cell[\"lattvec\"])\n nruter[\"lattvec\"][:,0]*=na\n nruter[\"lattvec\"][:,1]*=nb\n nruter[\"lattvec\"][:,2]*=nc\n nruter[\"elements\"]=copy.copy(CASTEP_cell[\"elements\"])\n nruter[\"numbers\"]=na*nb*nc*CASTEP_cell[\"numbers\"]\n nruter[\"positions\"]=np.empty((3,CASTEP_cell[\"positions\"].shape[1]*na*nb*nc))\n pos=0\n for pos,(k,j,i,iat) in enumerate(itertools.product(xrange(nc),\n xrange(nb),\n xrange(na),\n xrange(\n CASTEP_cell[\"positions\"].shape[1]))):\n nruter[\"positions\"][:,pos]=(CASTEP_cell[\"positions\"][:,iat]+[i,j,k])/[\n na,nb,nc]\n nruter[\"types\"]=[]\n for i in xrange(na*nb*nc):\n nruter[\"types\"].extend(CASTEP_cell[\"types\"])\n # print \"supercell\", nruter\n return nruter",
"def mutation(data: Data, working_dataframe: pd.DataFrame) -> pd.DataFrame:\n lat = LinkedAirrTable(working_dataframe, key_column=\"cellid\")\n lat_with_mutational_analysis = run_mutational_analysis(lat, scheme=\"kabat\")\n lat_with_mutational_analysis[[\"cellid\", \"mutations_heavy\", \"mutations_light\"]]\n working_dataframe = working_dataframe.merge(\n lat_with_mutational_analysis[[\"cellid\", \"mutations_heavy\", \"mutations_light\"]], on=\"cellid\", how=\"outer\"\n )\n return working_dataframe",
"def call_cells(df_reads):\n cols = [WELL, TILE, CELL]\n s = (df_reads\n .drop_duplicates([WELL, TILE, BLOB])\n .groupby(cols)[BARCODE]\n .value_counts()\n .rename('count')\n .sort_values(ascending=False)\n .reset_index()\n .groupby(cols)\n )\n\n return (df_reads\n .join(s.nth(0)[BARCODE].rename(BARCODE_0), on=cols)\n .join(s.nth(0)['count'].rename(BARCODE_COUNT_0).fillna(0), on=cols)\n .join(s.nth(1)[BARCODE].rename(BARCODE_1), on=cols)\n .join(s.nth(1)['count'].rename(BARCODE_COUNT_1).fillna(0), on=cols)\n .join(s['count'].sum() .rename(BARCODE_COUNT), on=cols)\n .drop_duplicates(cols)\n [[WELL, TILE, CELL, BARCODE_0, BARCODE_COUNT_0, BARCODE_1, BARCODE_COUNT_1]]\n )",
"def create_geneIDsDF():\n datas=data.plfam_to_matrix()\n datas.run()\n print('***Dataframe created***')",
"def extend_dataset(intial_df):\n all_data = []\n for i,row in intial_df.iterrows():\n all_data.extend(create_all_combination(row))\n\n extended_results = pd.DataFrame(all_data)\n return extended_results",
"def __init__(self, id, position, parent_a=None, parent_b=None):\n \"\"\" int: For uniquely identifying cells \"\"\"\n self._id = 0\n \"\"\" int: Possibly used for data or life-span related functions \"\"\"\n self._age = 0;\n \"\"\" int: To measure the success of a gene \"\"\"\n self._score = p.params['initial_score']\n \"\"\" Gene: The decision making entity of the cell.\"\"\"\n self._gene = None\n \"\"\" dict(Cell,Memory): To hold the memory of past interactions with other cells\"\"\"\n self._memory = {}\n \"\"\" Position: The location of the Cell within the toroidal world. \"\"\"\n self._position = None\n\n if parent_a is not None and parent_b is not None:\n self._gene = Gene.Gene(parent_a.get_gene(), parent_b.get_gene())\n else:\n self._gene = Gene.Gene()\n self._position = position\n self._id = id",
"def get_cell_agg_df(spark, date_):\n #closing_day_date = datetime.datetime.strptime(closing_day, \"%Y%m%d\")\n #starting_day_date = datetime.datetime.strptime(starting_day, \"%Y%m%d\")\n year_ = date_[0:4]\n month_ = str(int(date_[4:6]))\n day_ = str(int(date_[6:8]))\n\n from churn_nrt.src.data.customer_base import CustomerBase\n\n base_df = CustomerBase(spark) \\\n .get_module(date_, save=False, save_others=False, force_gen=True) \\\n .filter(col('rgu') == 'mobile') \\\n .select(\"msisdn\") \\\n .distinct() \\\n .repartition(400)\n\n data_netscout_ori = (spark\n .read\n .parquet(path_netscout + \"year=\" + year_ + \"/month=\" + month_ + \"/day=\" + day_)\n .where(col('application_name').isin(list_netscout_working_apps))\n .where(~col('subscriber_msisdn').isNull())\n .withColumn('msisdn', when(substring(col('subscriber_msisdn'), 1, 2) == '34', substring(col('subscriber_msisdn'), 3, 9)).otherwise(col('subscriber_msisdn')))\n .select('msisdn', 'cell_id')\n .join(base_df, ['msisdn'])\n .groupBy(['msisdn'])\n .agg(countDistinct(col('cell_id')).alias('num_cells'))\n .withColumn(\"year\", lit(year_))\n .withColumn(\"month\", lit(month_))\n .withColumn(\"day\", lit(day_))\n )\n\n return data_netscout_ori",
"def parallel_genepairs(df):\n pool = mp.Pool(processes=4)\n df['gene_pairs'] = pool.map(_if_gene, df['tokenized_abs'])\n pool.terminate()\n return df",
"def get_eaggr_cells(res, extent=None):\r\n dggs = Eaggr(Model.ISEA4T)\r\n cell_ids_0 = list(map(lambda x: '0' + x, list(map(str, [x for x in range(10)]))))\r\n cell_ids_0.extend(list(map(str, [x for x in range(10, 20, 1)])))\r\n gdf_level_0 = gpd.GeoDataFrame()\r\n gdf_level_0['cell'] = pd.Series(list(map(lambda x: DggsCell(x), cell_ids_0)))\r\n \r\n df = get_eaggr_indexes_at_level(gdf_level_0, res, dggs).copy()\r\n df['cell_id'] = df['cell'].apply(lambda x: x.get_cell_id())\r\n return df",
"def df_with_hexid_to_gdf(df, hexcolname='_id'):\n df_geometry=hexlist_to_geodataframe(df[hexcolname].to_list())\n #Creando el geodataframe\n gdf=gpd.GeoDataFrame(df, geometry=df_geometry['geometry'])\n gdf.crs = 'EPSG:4326'\n return gdf",
"def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join",
"def merge_cell_to_index(cell_roi_df, master_index_df, path, row, write=True):\n cols_to_add = cell_roi_df.columns\n\n for col in cols_to_add:\n master_index_df.loc[row, col] = cell_roi_df.loc[0, col]\n \n if write:\n master_index_df.to_csv(path, index=False)\n print(f\"Added cell info to master index and saved at\\n{path}\")\n else:\n return master_index_df",
"def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict",
"def _get_supercells(self, struct1, struct2, fu, s1_supercell):\n def av_lat(l1, l2):\n params = (np.array(l1.lengths_and_angles) +\n np.array(l2.lengths_and_angles)) / 2\n return Lattice.from_lengths_and_angles(*params)\n\n def sc_generator(s1, s2):\n s2_fc = np.array(s2.frac_coords)\n if fu == 1:\n cc = np.array(s1.cart_coords)\n for l, sc_m in self._get_lattices(s2.lattice, s1, fu):\n fc = l.get_fractional_coords(cc)\n fc -= np.floor(fc)\n yield fc, s2_fc, av_lat(l, s2.lattice), sc_m\n else:\n fc_init = np.array(s1.frac_coords)\n for l, sc_m in self._get_lattices(s2.lattice, s1, fu):\n fc = np.dot(fc_init, np.linalg.inv(sc_m))\n lp = lattice_points_in_supercell(sc_m)\n fc = (fc[:, None, :] + lp[None, :, :]).reshape((-1, 3))\n fc -= np.floor(fc)\n yield fc, s2_fc, av_lat(l, s2.lattice), sc_m\n if s1_supercell:\n for x in sc_generator(struct1, struct2):\n yield x\n else:\n for x in sc_generator(struct2, struct1):\n # reorder generator output so s1 is still first\n yield x[1], x[0], x[2], x[3]",
"def long_to_gctx(df):\n df = df[[\"rid\", \"cid\", \"value\"]].pivot(index=\"rid\", columns=\"cid\", values=\"value\")\n gct = GCToo(df)\n\n # Ensure index is string\n gct.row_metadata_df.index = gct.row_metadata_df.index.astype(\"str\")\n gct.data_df.index = gct.data_df.index.astype(\"str\")\n\n return gct",
"def get_mult_gene_RNA(ensemble, genes, grouping, max_points='10000'):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping:\n\t\treturn None\n\n\tgenes = [gene+\"%\" for gene in genes]\n\n\t# This query is just to fix gene id's missing the ensemble version number.\n\t# Necessary because the table name must match exactly with whats on the MySQL database.\n\t# Ex. ENSMUSG00000026787 is fixed to ENSMUSG00000026787.3\n\tfirst_query = \"SELECT gene_id FROM genes WHERE gene_id LIKE %s\" + \" OR gene_id LIKE %s\" * (len(genes)-1)\n\tresult = db.get_engine(current_app, 'methylation_data').execute(first_query, (genes,)).fetchall()\n\n\tgene_table_names = ['gene_' + gene_id[0].replace('.','_') for gene_id in result]\n\n\tdf_all = pd.DataFrame()\n\n\tfirst = True\n\tfor gene_table_name in gene_table_names:\n\t\tquery = \"SELECT cells.cell_id, cells.cell_name, cells.dataset, \\\n\t\t\t%(ensemble)s.annotation_RNA, %(ensemble)s.cluster_RNA, \\\n\t\t\t%(ensemble)s.tsne_x_RNA, %(ensemble)s.tsne_y_RNA, \\\n\t\t\t%(gene_table_name)s.normalized_counts, \\\n\t\t\tdatasets.target_region \\\n\t\t\tFROM cells \\\n\t\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id \\\n\t\t\tLEFT JOIN datasets ON cells.dataset = datasets.dataset\" % {'ensemble': ensemble,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'gene_table_name': gene_table_name}\n\t\tif max_points.isdigit():\n\t\t\tquery = query+\" ORDER BY RAND() LIMIT %(max_points)s()\" % {'max_points': max_points}\n\n\t\ttry:\n\t\t\tdf_all = df_all.append(pd.read_sql(query, db.get_engine(current_app, 'RNA_data')))\n\t\texcept exc.ProgrammingError as e:\n\t\t\tnow = datetime.datetime.now()\n\t\t\tprint(\"[{}] ERROR in app(get_mult_gene_RNA): {}\".format(str(now), e))\n\t\t\tsys.stdout.flush()\n\t\t\treturn None\n\n\t\tif first:\n\t\t\tdf_coords = df_all\n\t\tfirst = False\n\n\tif df_all.empty: # If no data in column, return None\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_gene_RNA): No RNA data for {}\".format(str(now), ensemble))\n\t\tsys.stdout.flush()\n\t\treturn None\n\n\tdf_all['normalized_counts'].fillna(0, inplace=True)\n\n\tdf_avg_methylation = df_all.groupby(by='cell_id', as_index=False)['normalized_counts'].mean()\n\tdf_coords.update(df_avg_methylation)\n\n\tif grouping == 'annotation':\n\t\tdf_coords.fillna({'annotation_RNA': 'None'}, inplace=True)\n\t\tdf_coords['annotation_cat'] = pd.Categorical(df_coords['annotation_RNA'], cluster_annotation_order)\n\t\tdf_coords.sort_values(by='annotation_cat', inplace=True)\n\t\tdf_coords.drop('annotation_cat', axis=1, inplace=True)\n\telif grouping == 'cluster':\n\t\tdf_coords.sort_values(by='cluster_RNA', inplace=True)\n\treturn df_coords",
"def _create_cells(self):\n\t\tcellId=0\n\t\t# Iterate over all dictionaries\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tcellNumber = cellInfo[2]\n\t\t\t\tif len(cellInfo)>=4: neuronParam = cellInfo[3]\n\t\t\t\telse: neuronParam = None\n\t\t\t\tcellId = self._create_cell_population(cellId,muscle,muscAfferentDelay,cellClass,cellName,cellNumber,neuronParam)\n\t\t# Add special cells\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tcellNumber = cellInfo[3]\n\t\t\tif len(cellInfo)>=5: neuronParam = cellInfo[4]\n\t\t\telse: neuronParam = None\n\t\t\tmuscAfferentDelay = None\n\t\t\tcellId = self._create_cell_population(cellId,groupOrMuscle,muscAfferentDelay,cellClass,cellName,cellNumber,neuronParam)\n\n\t\tself._motoneuronsNames = self._intMotoneuronsNames+self._realMotoneuronsNames\n\t\tself._afferentsNames = self._primaryAfferentsNames+self._secondaryAfferentsNames",
"def add_derived_GEOSChem_specs2df(df):\n # Add temperature in deg C\n df['T'] = df['GMAO_TEMP'].copy()\n df['T'] = df['GMAO_TEMP'].values - 273.15\n # Inc. V nd U with same variable names as GEOS-CF\n df['V'] = df['GMAO_VWND'].copy()\n df['U'] = df['GMAO_UWND'].copy()\n # Add NOx as combined NO and NO2\n df['NOx'] = df['NO'].values + df['NO2'].values\n # Add NOy as defined in GEOS-CF\n # NOy = no_no2_hno3_hno4_hono_2xn2o5_pan_organicnitrates_aerosolnitrates\n vars2use = AC.GC_var('NOy-all')\n df['NOy'] = df['N2O5'].copy() # 2 N2O5 in NOy, so 2x via template\n for var in vars2use:\n try:\n df.loc[:, 'NOy'] = df['NOy'].values + df[var].values\n except KeyError:\n pass\n # Add a variable for gas-phase NOy (by subtracting aerosol nitrate)\n vars2use = AC.GC_var('NOy-gas')\n df['NOy-gas'] = df['N2O5'].copy() # 2 N2O5 in NOy, so 2x via template\n for var in vars2use:\n try:\n df.loc[:, 'NOy-gas'] = df['NOy-gas'].values + df[var].values\n except KeyError:\n pass\n # Include a variable of NOy where HNO3 is removed\n # NOy = no_no2_hno3_hno4_hono_2xn2o5_pan_organicnitrates_aerosolnitrates\n df['NOy-HNO3'] = df['NOy'].values - df['HNO3'].values\n # Include a variable of NOy where HNO3 is removed\n df['NOy-HNO3-PAN'] = df['NOy'].values - \\\n df['HNO3'].values - df['PAN'].values\n # gas-phase (exc. PAN, HNO3, HNO4, Org-NIT, N2O5)\n df['NOy-Limited'] = df['NO'].values + df['NO2'].values + \\\n df['HNO2'].values + df['NIT'].values + df['NITs'].values\n # Add an all sulfate tracer\n NewVar = 'SO4-all'\n vars2use = AC.GC_var(NewVar)\n df[NewVar] = df['NIT'].values\n for var2use in vars2use:\n try:\n df[NewVar] = df[NewVar].values + df[var2use].values\n except KeyError:\n pass\n # And a all nitrate tracer\n NewVar = 'NIT-all'\n vars2use = AC.GC_var(NewVar)\n df[NewVar] = df['NIT'].values\n for var2use in vars2use:\n try:\n df[NewVar] = df[NewVar].values + df[var2use].values\n except KeyError:\n pass\n # Uset the P-I variable as a model level variable\n df['model-lev'] = df['P-I'].copy()\n return df",
"def add_entrez_ids(lookup: pd.DataFrame) -> pd.DataFrame:\n \n celeg_ens2entrez = get_species_ens_entrez_lookup(CELEGANS_DATASET_NAME)\n celeg_ens2entrez.columns = ['celeg_ensembl_id',\n 'celeg_entrez_id']\n \n droso_ens2entrez = get_species_ens_entrez_lookup(DROSO_DATASET_NAME)\n droso_ens2entrez.columns = ['dmelanogaster_ensembl_id',\n 'dmelanogaster_entrez_id']\n \n lookup_with_entrez = pd.merge(lookup, celeg_ens2entrez,\n left_on=\"Gene stable ID\",\n right_on=\"celeg_ensembl_id\",\n how=\"left\")\n \n lookup_with_entrez = pd.merge(lookup_with_entrez, droso_ens2entrez,\n left_on=\"Drosophila melanogaster gene stable ID\",\n right_on=\"dmelanogaster_ensembl_id\",\n how=\"left\")\n \n lookup_with_entrez.to_csv(LOOKUP_FILENAME, header=True, index=False)\n return lookup_with_entrez",
"def _df_to_cellchat_format(self, df_to_convert) -> dict:\n interactions_dataframes = {}\n\n for _, row in df_to_convert.iterrows():\n df = pd.DataFrame(index=self.cell_types, columns=self.cell_types)\n\n for cell_type_i in self.cell_types:\n for cell_type_j in self.cell_types:\n interaction_name = f\"{cell_type_i}|{cell_type_j}\"\n df.loc[cell_type_i, cell_type_j] = row[interaction_name]\n\n interactions_dataframes[row.interacting_pair] = df\n\n return interactions_dataframes",
"def cell_edges(self):",
"def get_reactome_hierarchy_df() -> pd.DataFrame:\n return pd.read_csv(REACTOME_HIERARCHICAL_MAPPINGS_PATH, sep='\\t')",
"def init_individual(index, columns, initializer=None):\n \n ind = pd.DataFrame(0,index=index, columns=columns)\n \n if initializer is not None:\n \n # sets up the DataFrame with the initializer data\n ind.loc[:, 2:] = initializer.loc[:,1:]\n ind.loc[:, 'in'] = initializer.loc[:, 'in']\n \n # sets the age\n for i in index:\n if ind.loc[i,'in'] != 0:\n ind.loc[i,'age'] = 1\n else:\n ind.loc[i,'age'] = 0\n \n # randomly flips a company in or out of the system\n if random.random() < 0.05:\n if ind.loc[i,'in'] == 0:\n ind.loc[i,'in'] = 1\n ind.loc[i,'age'] = 1\n for j in index:\n if i == j:\n ind.loc[i,j] = 0\n else:\n if random.random() < 0.2:\n ind.loc[i,j] = 1\n ind.loc[j,i] = 1\n else:\n ind.loc[i,:] = 0\n ind.loc[:,i] = 0\n \n # randomly flips correlations\n if ind.loc[i,'in'] == 1: \n for j in index:\n if random.random() < 0.05 and i != j:\n ind.loc[i,j] = abs(ind.loc[i,j] - 1)\n ind.loc[j,i] = ind.at[i,j]\n\n else:\n for i in index:\n # randomly places companies in or out of the network\n if random.random() < 0.2:\n ind.loc[i,'in'] = 1\n ind.loc[i,'age'] = 1\n \n # randomly assigns correlations for companies in the network\n if ind.loc[i,'in'] == 1:\n for j in index:\n if i == j:\n ind.loc[i,j] = 0\n else:\n if random.random() < 0.2\n ind.loc[i,j] = 1\n ind.loc[j,i] = ind.at[i,j]\n \n ind.fillna(0)\n\n return ind",
"def df_with_hexid_to_centroids_gdf(df, hexcolname='hexid'):\n seriesofcoordinates=df[hexcolname].apply(h3.h3_to_geo)\n geometria=seriesofcoordinates.apply(lambda row: Point(row[1],row[0])) ## Patty reversed indices\n gdf=gpd.GeoDataFrame(df, geometry=geometria)\n return gdf",
"def expand_data(new_species_xlsx, output_hdf5, species_df_key, rxn_df_key, elements_csv, bonds_csv, new_xlsx_path):\n\n # Reading xlsx files which contains newly fetched PubChem IDs into pandas df\n new_df_from_xlsx = pd.read_excel(new_species_xlsx, header=0)\n\n # Reading old Species dataframe to which new PubChem ids have to be transfered\n old_df_from_hdf = pd.read_hdf(output_hdf5, species_df_key)\n\n # Setting 'Species' name as index for efficiency\n old_df_from_hdf = old_df_from_hdf.reset_index()\n old_df_from_hdf = old_df_from_hdf.set_index(keys=\"Species\", verify_integrity=True)\n\n # Initializing FeatureConstructor\n my_constructor = ft.FeatureConstructor(elements_csv, bonds_csv)\n\n # Transfering CID, adding BondsInfo (stringified PubChem JSON), adding species feature vector\n new_species_count = 0\n for idx, row in new_df_from_xlsx.iterrows():\n if not math.isnan(row['CID']) and row['CID'] != \"\":\n if math.isnan(old_df_from_hdf.at[row['Species'], 'CID']) or old_df_from_hdf.at[row['Species'], 'CID'] == \"\":\n old_df_from_hdf.at[row['Species'], 'CID'] = row['CID']\n pubchem_str_json = my_constructor.get_full(row['CID'])\n print(\"--Data fetched for CID {}--\".format(int(row['CID'])))\n old_df_from_hdf.at[row['Species'], 'BondsInfo'] = pubchem_str_json\n old_df_from_hdf.at[row['Species'], 'FeatureVector'] = my_constructor.bonds_count_json(None, pubchem_str_json)\n new_species_count = new_species_count + 1\n\n print('--Status--')\n print('--{} New Species Added--'.format(new_species_count))\n\n if new_species_count == 0:\n\n print('No new changes were made as there were no new species to add.')\n return\n\n else:\n\n # Updating HDF with updated species df\n old_df_from_hdf = old_df_from_hdf.reset_index()\n old_df_from_hdf = old_df_from_hdf.set_index(keys=\"SID\", verify_integrity=True)\n old_df_from_hdf.to_hdf(output_hdf5, species_df_key)\n\n # Updating Reactions DF with new CID list\n rm.RecordMapper.map_rid_to_cid(output_hdf5, rxn_df_key, species_df_key)\n\n # Filetring out reactions whose feature vectors can be calculated\n reduced_rxn_df = Extender.get_rxn_subset(output_hdf5, rxn_df_key)\n\n # Creating feature vectors of the filtered out reactions\n reduced_rxn_df = my_constructor.bond_brk(output_hdf5, species_df_key, reduced_rxn_df)\n\n print('--Status--')\n print('--Reactions Feature Vectors Created--')\n\n # Creating the new reactions xlsx for ML Training\n reduced_rxn_df.to_excel(new_xlsx_path)\n\n print('--Status--')\n print('--Database Expansion Routine Complete--')",
"def create_cells(self):\n if self.do_run:\n \n self.del_cells()\n \n if self.id == 0: print \"creating cells\"\n \n for n in range(self.n_celltypes): \n self.cells.append([]) # create list in list \n \n #print self.cellimport[n]\n exec self.cellimport[n]\n \n #print self.gidlist\n for i in self.gidlist[n]:\n \n #if \"sigma\" not in self.cell_exe[n]:\n # exec self.cell_exe[n]\n # cell.gid = i # tell cell it's gid!\n # print i\n #else:\n \n if (self.celltype[n] == \"IfCell\") or (self.celltype[n] == \"Grc\"):\n \n # add gid to cell and execute!\n if self.cell_exe[n][-2] == \"(\":\n exec self.cell_exe[n][0:-1] + \"gid=\" + str(i) + \")\"\n else:\n exec self.cell_exe[n][0:-1] + \", gid=\" + str(i) + \")\"\n \n else:\n exec self.cell_exe[n] \n cell.gid = i\n \n self.cells[n].append(cell) # add to (local) list\n \n if self.use_mpi:\n #### Tell this host it has this gid\n #### gids can be any integer, they just need to be unique.\n #### In this simple case, we set the gid to i.\n self.pc.set_gid2node(i, int(self.id))\n self.pc.cell(i, cell.nc_spike) # Associate the cell with this host and gid\n \n ## NOT NECESSARY ANYMORE ##\n #### Means to tell the ParallelContext that this cell is a source.\n #nc = cell.connect_target(None)\n #self.ncs[n].append(nc) \n \n #### Record spikes of this cell\n self.pc.spike_record(i, self.t_vec[n], self.id_vec[n])\n \n #print n, self.cells[n][-1].nc_spike.thresh\n else:\n \n self.t_vec[n].append(h.Vector())\n cell.nc_spike.record(self.t_vec[n][-1])",
"def create_cells(self):\n raise NotImplementedError(\n \"create_cells function not reimplemented from base class\")",
"def get_superpixel_mapping(mapping):\n df = mapping[['superpixel', 'slot', 'asic', 'row', 'col', 'xpix', 'ypix']]\n f_rowcol = lambda v: v.values[0] // 2\n f = dict(slot='first', asic='first', row=f_rowcol, col=f_rowcol,\n xpix='mean', ypix='mean')\n df = df.groupby('superpixel').agg(f).reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', UserWarning)\n df.metadata = mapping.metadata\n df.metadata['n_rows'] = df['row'].max() + 1\n df.metadata['n_columns'] = df['col'].max() + 1\n df.metadata['size'] *= 2\n return df"
]
| [
"0.60499215",
"0.54830205",
"0.5432107",
"0.54215103",
"0.5346217",
"0.53322774",
"0.53123105",
"0.51663107",
"0.5132326",
"0.51209533",
"0.51172334",
"0.51165843",
"0.51119787",
"0.51019263",
"0.5093513",
"0.5086647",
"0.508317",
"0.5072695",
"0.50606495",
"0.50243706",
"0.5013042",
"0.5010529",
"0.49878755",
"0.49853125",
"0.49761993",
"0.49713945",
"0.49705672",
"0.4967144",
"0.49630174",
"0.49360764"
]
| 0.7782316 | 0 |
Find autocorrelation, even across the genealogy, from t=0 to t=maxt in steps of step | def autocorrelation(df,maxt,step,vari,acquisiton_time,division_time):
maxt = int(maxt/acquisiton_time)
step = int(step/acquisiton_time)
df = connect_cells(df,vari)
return np.vstack([correlation(df,Dt,vari) for Dt in\
np.arange(0,maxt,step)]),\
np.arange(0,maxt,step)*acquisiton_time/division_time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step_autocorrelation(self):\n\n max_hops = max([len(x) for x in self.steps])\n\n self.acf = np.zeros([len(self.steps), max_hops])\n\n keep = [] # list to hold indices of trajectories with a non-zero amount of hops\n for i in range(len(self.steps)):\n hops = self.steps[i]\n if len(hops) > 1:\n self.acf[i, :len(self.steps[i])] = timeseries.acf(self.steps[i])\n keep.append(i)\n\n self.acf = self.acf[keep, :]\n\n self.acf = np.array([self.acf[np.nonzero(self.acf[:, i]), i].mean() for i in range(max_hops)])\n\n #self.acf = timeseries.step_autocorrelation(self.z_interpolated.T[..., np.newaxis])",
"def EstimatedAutocorr(fw, data, pnum, trialnum, marker1, marker2): \n cycle_start = HeelStrike(fw, data, pnum, trialnum, marker1, marker2)\n x = cycle_start[2] \n time = cycle_start[1]\n drop_NA = np.vstack((x, time))\n #print drop_NA.shape, x.shape, y.shape\n drop_NA = drop_NA.T\n x = drop_NA[:,0]\n #x = x[~np.isnan(x).any()]\n \n #n = len(x)\n #var = np.var(x)\n tao = np.correlate(x, x, mode='full')\n # assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))\n #result = r/(var*(np.arange(n, 0, -1)))\n plt.figure(4)\n plt.plot(tao)\n return tao",
"def step_autocorrelation(trajectories, axis=0):\n\n try:\n if len(axis) == 1:\n axis = axis[0]\n except TypeError:\n pass\n\n ntraj = trajectories.shape[1] # number of particles with a trajectory\n\n # calculate acf of first trajectory in order to determine size of output array. timeseries.acf will truncate\n # the array slightly in order to make the FFT efficient\n ACF = acf(trajectories[1:, 0, axis] - trajectories[:-1, 0, axis])\n acfs = np.zeros([ntraj, ACF.size])\n acfs[0, :] = ACF\n\n keep = []\n for t in range(1, ntraj):\n steps = trajectories[1:, t, axis] - trajectories[:-1, t, axis]\n if not np.all(steps == 0):\n acfs[t, :] = acf(steps)\n keep.append(t)\n #acfs[t, :] = acf(trajectories[:ACF.size, t, axis])\n\n return acfs[keep, :]",
"def get_acf_tau(y, c=7.0):\n if np.nansum(y) == 0 or np.nanstd(y) < 1e-12:\n print(\"Autocorr time could not be computed. Check your input.\")\n return 0, np.zeros(len(y)), np.zeros(len(y))\n acf = y*0.\n for ii in range(y.shape[1]):\n acf[:,ii] = autocorr(y[:,ii] - np.nanmean(y[:,ii]))\n acf[:,ii] /= acf[0,ii] #np.nanmax(acf[ii,:])\n f = np.nansum(acf, axis=1) / y.shape[1]\n taus = 2.0 * np.cumsum(f) - 1.0\n window = auto_window(taus, c)\n return taus[window], f, acf",
"def acf(t, largest_prime=500):\n\n T = np.array(t)\n\n # Don't allow a prime factor larger than 'largest_prime'. Truncate data until that condition is met\n l = 2 * T.shape[0] - 1\n\n while largest_prime_factor(l) >= largest_prime or l % 2 == 0:\n l -= 1\n\n T = T[:(l + 1) // 2, ...] # '...' allows for no second dimension if only a single time series is analysed\n length = T.shape[0] * 2 - 1\n\n T -= np.mean(T, axis=0)\n\n fftx = np.fft.fft(T, n=length, axis=0)\n ret = np.fft.ifft(fftx * np.conjugate(fftx), axis=0)\n ret = np.fft.fftshift(ret, axes=(0,))\n\n autocorr_fxn = ret[length // 2:].real\n\n if len(autocorr_fxn.shape) > 1:\n autocorr_fxn /= np.arange(T.shape[0], 0, -1)[:, None]\n else:\n autocorr_fxn /= np.arange(T.shape[0], 0, -1)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n autocorr_fxn /= np.var(T, axis=0)\n\n return autocorr_fxn # normalized",
"def time_lag(data):\n normalize = [i for i in range(1,len(data[0])+1)]\n normalize += normalize[-2::-1]\n idx = len(data[0])//4\n def win(sig):\n res = fftconvolve(sig, np.conj(sig)[::-1])/normalize\n res[len(res)//2] = 0\n return res[idx:idx*7]\n autocorr = np.apply_along_axis(win, 1, data)\n return np.max(np.abs(autocorr), axis=1)",
"def autocorrelation(x):\n x = np.asarray(x)\n N = len(x)\n x = x-x.mean()\n s = fft.fft(x, N*2-1)\n result = np.real(fft.ifft(s * np.conjugate(s), N*2-1))\n result = result[:N]\n result /= result[0]\n return result",
"def autocorrelation(x):\n x = (x - np.mean(x)) / (np.std(x) * np.sqrt(len(x)))\n result = np.correlate(x, x, mode='full')\n return result[int(result.size / 2):]",
"def autocorrelation(x):\n x = (x - np.mean(x)) / (np.std(x) * np.sqrt(len(x)))\n result = np.correlate(x, x, mode='full')\n return result[int(result.size / 2):]",
"def auto_correlation_geyer(target_distribution, x0, xs, accepteds, max_lag=None):\n n = len(xs)\n if max_lag is None:\n max_lag = n - 1\n if max_lag < 1:\n raise ValueError(\"max_lag needs to be greater than 0.\")\n\n acorr = np.zeros(max_lag+1)\n pair_count = int(np.floor((max_lag + 1) / 2.0))\n pair_sums = np.zeros(pair_count)\n seq = xs - np.mean(xs)\n var = np.var(seq)\n\n if np.isclose(var, 0.0):\n return -2*n + 1.0\n\n for lag in range(0, max_lag+1):\n acorr[lag] = np.sum(seq[0:(n-lag)] * seq[lag:n]) / (n*var)\n if lag % 2 == 1:\n pair_sum = acorr[lag] + acorr[lag-1]\n # pair sums are always positive\n # if the sum of autocorrelations for adjacent lags is negative, we can stop\n if pair_sum < 0.0:\n break\n\n pair_ix = int(np.floor(lag / 2.0))\n # pair sums are decreasing\n # if the current pair sum is greater than the previous, set it to the previous.\n if pair_ix > 0 and pair_sum > pair_sums[pair_ix - 1]:\n pair_sum = pair_sums[pair_ix - 1]\n pair_sums[int(np.floor(lag / 2.0))] = pair_sum\n acorr_time = np.sum(2*pair_sums) - acorr[0]\n return -acorr_time",
"def decide_input(yt, max_lag_period, forecast_period): \n correlations = list()\n yt_mean = np.mean(yt)\n\n #correlations of time series yt at lag k (yt-1...yt-max_lag_period+1)\n for k in range(1, max_lag_period + 2):\n yt_k = shift(yt, k, cval=np.NaN)\n a = yt-yt_mean\n a = np.nan_to_num(a)\n b = yt_k-yt_mean\n b = np.nan_to_num(b)\n print(np.dot(a, b))\n r_k = np.sum(np.dot(a, b))\n correlations.append(r_k)\n \n print(\"CORRELATIONS\", correlations)\n \n s = list()\n muls = list()\n for i in range(0, len(correlations)-1):\n for j in range(0, len(correlations)-1):\n if(j != i):\n muls.append(abs(correlations[i-j]))\n \n s.append(abs(correlations[i + forecast_period])/abs(np.prod(muls)))\n \n print(s)\n lag_periods = np.argpartition(s, -max_lag_period)[-max_lag_period:] + 1\n \n print(\"lag_periods\", lag_periods)\n return lag_periods",
"def autocorr(sig):\n return float(np.correlate(sig, sig))",
"def autocorrelation(x, nlags = 0):\n return [x.corr(x.shift(lag)) for lag in range(nlags + 1)]",
"def autocorrelate_negative(autocorrelation):\n T = 1\n for a in autocorrelation:\n if a < 0:\n return T - 1\n T += 1\n return T",
"def autocorrelate_negative(autocorrelation):\n T = 1\n for a in autocorrelation:\n if a < 0:\n return T - 1\n T += 1\n return T",
"def _ac_fft3 (self,xp,max_lag):\n '''takes xp'''\n f = np.fft.fft(self.xp)\n p = np.array([np.real(v)**2+np.imag(v)**2 for v in f])\n pi = np.fft.ifft(p)\n corr = np.real(pi)[:self.n]/np.sum(self.xp**2)\n return corr[:max_lag]",
"def ft_autocorr_out_dist(\n cls,\n ts: np.ndarray,\n p: float = 0.8,\n max_nlags: t.Optional[int] = None,\n adjusted: bool = True,\n detrended_acfs: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n detrended_acfs = cls._calc_acf(\n ts=ts,\n nlags=max_nlags,\n adjusted=adjusted,\n detrended_acfs=detrended_acfs,\n )\n\n ts_abs = np.abs(ts)\n ts_inliners = ts[ts_abs <= np.quantile(ts_abs, p)]\n\n ts_inliners_acfs = cls._calc_acf(\n ts=ts_inliners, nlags=max_nlags, adjusted=adjusted\n )\n\n dist_acfs = np.abs(\n detrended_acfs[: ts_inliners_acfs.size] - ts_inliners_acfs\n )\n\n return dist_acfs",
"def freq_from_autocorr(x):\n corr = autocorr(x)\n\n\n # Find the first low point\n d = np.diff(corr)\n start = np.where(d>0)[0]\n\n if len(start)>0 :\n return np.argmax(corr[start[0]:]) + start[0]\n return 0\n\n # Find the next peak after the low point (other than 0 lag). This bit is\n # not reliable for long signals, due to the desired peak occurring between\n # samples, and other peaks appearing higher.\n # Should use a weighting function to de-emphasize the peaks at longer lags.\n # Also could zero-pad before doing circular autocorrelation.\n #peak = np.argmax(corr[start:])\n #px, py = parabolic(corr, peak)\n\n #print(\"minimum at {}, peak at {}\".format(start, peak + start))\n\n\n #return fs / px\n #return peak + start",
"def autocorrFFT(x):\n\n N = len(x)\n F = np.fft.fft(x, n=2*N) # 2*N because of zero-padding\n PSD = F * F.conjugate()\n res = np.fft.ifft(PSD)\n res = (res[:N]).real # now we have the autocorrelation in convention B\n n = N*np.ones(N) - np.arange(0, N) # divide res(m) by (N-m)\n\n return res / n # this is the autocorrelation in convention A",
"def autocorr(x):\n result = np.correlate(x, x, mode='full')/np.sum(x**2)\n return result[result.size//2:]",
"def autoc(array):\r\n return ifft2(np.square(np.abs(fft2(array))))",
"def source_adj_gsdf(gmdata_sim,gmdata_obs,IsolationFilter,num_pts,dt): \n t = np.arange(num_pts)*dt\n ts=np.flip(-t[1:], axis=0)\n lTime = np.concatenate((ts,t), axis=0)#Lag time \n \n #convolve the waveforms for the cross- and auto-correlagrams \n cross = np.correlate(IsolationFilter,gmdata_obs,'full')\n auto = np.correlate(IsolationFilter,gmdata_sim,'full') \n \n #GSDF Parameters \n w0=2*np.pi/(lTime[-1]) \n# wN=2*np.pi/(2*dt)\n# w(:,1)=-wN:w0:wN\n wf=w0*np.linspace(-int(num_pts/2),int(num_pts/2),num_pts) \n fi = [0.05, 0.075, 0.1]\n# fi = [0.02, 0.03, 0.04, 0.05]\n# fi = [0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2]\n Tw = 2/np.mean(fi) # Effective window\n# sw = 2*np.pi*0.72/Tw; # Sigma w ~ 0.2827433388230814\n sw=0.1 \n \n# #% A local maximum will be selected closest to 0-lag\n# I_O=np.argmax(cross)\n# I_S=np.argmax(auto) \n\n I_O, peaks_O = find_peaks(np.abs(hilbert(cross))/np.max(np.abs(hilbert(cross))), height=0.25)\n I_S, peaks_S = find_peaks(np.abs(hilbert(auto))/np.max(np.abs(hilbert(auto))), height=0.25)\n\n PkO = peaks_O.get(\"peak_heights\", \"\")\n PkS = peaks_S.get(\"peak_heights\", \"\")\n\n if (I_O==[] or I_S==[]):\n I_O=np.argmax(cross)\n I_S=np.argmax(auto)\n else:\n I_O_min = np.argmin(np.multiply((1+np.abs(lTime[I_O]))**2,np.abs(1-PkO)))\n I_O = I_O[I_O_min]\n\n I_S_min = np.argmin(np.multiply((1+np.abs(lTime[I_S]))**2,np.abs(1-PkS)))\n I_S = I_S[I_S_min]\n \n ##Windowing\n win1=np.exp(-(0.5*sw**2)*(lTime-lTime[I_O])**2)\n win2=np.exp(-(0.5*sw**2)*(lTime-lTime[I_S])**2) \n \n #\n WO = np.multiply(win1,cross)\n WS = np.multiply(win2,auto)\n WS = WS*np.max(WO)/np.max(WS) #Normalized window by amplitude\n #% Parameters for \"bootstraping\"\n InOR=np.argmax(WO)\n InSR=np.argmax(WS) \n \n #% Isolation filter FFT for perturbation kernel\n tff=np.conj(fftshift(fft(IsolationFilter)))*1/num_pts \n \n adj_sim_decompose = np.zeros((len(fi),num_pts))\n adj_sim_sum = np.zeros(num_pts)\n TauP_arr = np.zeros(len(fi)) \n \n ne = int(np.min([2/np.min(fi)/dt,num_pts/2])) #% Effective bandwidth for inversion\n \n for i in range(0,len(fi)): \n si = 0.1*fi[i]\n #Crosscorrelagram and Autocorrelagram filtering\n dO=computebandfftfilter_gauss(WO,dt,fi[i],si,lTime);\n dS=computebandfftfilter_gauss(WS,dt,fi[i],si,lTime); \n \n # % Check bootstraping\n InO=np.argmax(np.real(dO))\n InS=np.argmax(np.real(dS)) \n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InO=int(InO)\n if (lTime[InO] < lTime[InOR]+0.51/fi[i]) and (lTime[InO] >= lTime[InOR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InO] >= (lTime[InOR]+0.45/fi[i])):\n InO=InO-np.round(1/fi[i]/dt)\n elif (lTime[InO] < lTime[InOR]-0.45/fi[i]):\n InO=InO+np.round(1/fi[i]/dt)\n Cn = Cn+1\n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InS=int(InS) \n if (lTime[InS] < lTime[InSR]+0.51/fi[i]) and (lTime[InS] >= lTime[InSR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InS] >= (lTime[InSR]+0.45/fi[i])):\n InS=InS-np.round(1/fi[i]/dt)\n elif (lTime[InS] < lTime[InSR]-0.45/fi[i]):\n InS=InS+np.round(1/fi[i]/dt)\n Cn = Cn+1 \n\n # Five parameter Gaussian wavelet fitting \n Ao = np.max(envelope(np.real(dO))); Io = np.argmax(envelope(np.real(dO)));\n As = np.max(envelope(np.real(dS))); Is = np.argmax(envelope(np.real(dS))); \n ##Constrain the initial values \n # Parameters for curve_fit\n wi=2*np.pi*fi[i] \n \n try:\n GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]))\n GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne])) \n except:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], 
lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n\n# GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]),bounds=(0,[Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]))\n# GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne]),bounds=(0,[As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]])) \n \n# % Check fitting\n if ((GaO[0]/GaS[0]) > 10**5) or np.abs(GaO[4]-GaS[4]) > lTime[-1]/2:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n \n wP=((si**2)*wf+(sw**2)*wi)/(sw**2+si**2)\n wPP=((si**2)*wf-(sw**2)*wi)/(sw**2+si**2)\n siP=((si**2)*(sw**2)/(sw**2+si**2))**0.5 \n #Estimate waveform perturbation kernel (WPK)\n IW=(siP/(sw*GaS[0]))*np.multiply(np.exp(-0.5*(wf-2*np.pi*fi[i])**2/(sw**2+si**2)),np.divide(tff,wP))+\\\n (siP/(sw*GaS[0]))*np.exp(-0.5*(wf+2*np.pi*fi[i])**2/(sw**2+si**2))*tff/wPP\n \n IW[0:int(len(IW)/2)]=0*IW[0:int(len(IW)/2)]\n \n itff = ifft(fftshift(num_pts*IW)) \n \n #Save the GSDF measurements\n TauP_arr[i] = GaO[4]-GaS[4]; #% delta_P\n \n# Jp = np.real(itff)\n# Jp = np.imag(itff)\n Jp = -np.imag(itff) \n adj_sim_decompose[i,:] = np.flip(Jp,axis=0)*TauP_arr[i] \n \n #if i>0:\n adj_sim_sum = adj_sim_sum + adj_sim_decompose[i,:] \n \n return adj_sim_sum, TauP_arr",
"def _calculate_autocorrelations(self):\n\n self._autocorr_real_x = self.__calculate_autocorr(self._noise_field_real, self._n_x, self._n_y, 'x')\n self._autocorr_real_y = self.__calculate_autocorr(self._noise_field_real, self._n_y, self._n_x, 'y')\n self._autocorr_imag_x = self.__calculate_autocorr(self._noise_field_imag, self._n_x, self._n_y, 'x')\n self._autocorr_imag_y = self.__calculate_autocorr(self._noise_field_imag, self._n_y, self._n_x, 'y')",
"def _compute_acf(values_in_series):\n\n autocorrelation_by_lag = numpy.correlate(\n values_in_series, values_in_series, mode='same')\n\n # Remove negative lags.\n lag_0_index = numpy.argmax(autocorrelation_by_lag)\n autocorrelation_by_lag = autocorrelation_by_lag[lag_0_index:]\n lags = numpy.linspace(\n 0, len(autocorrelation_by_lag) - 1, num=len(autocorrelation_by_lag),\n dtype=int)\n\n # Divide by num points used to compute each autocorrelation.\n num_points_by_lag = len(values_in_series) - lags\n autocorrelation_by_lag = autocorrelation_by_lag / num_points_by_lag\n\n # Normalize so that lag-0 autocorrelation is 1 (true by definition).\n autocorrelation_by_lag = autocorrelation_by_lag / autocorrelation_by_lag[0]\n\n return autocorrelation_by_lag, lags",
"def auto_correlation_naive(target_distribution, x0, xs, accepteds, max_lag=None):\n n = len(xs)\n if max_lag is None:\n max_lag = n - 1\n if max_lag < 1:\n raise ValueError(\"max_lag needs to be greater than 0.\")\n\n ac = 0.0\n seq = xs - np.mean(xs)\n var = np.var(seq)\n\n if np.isclose(var, 0.0):\n return -float(max_lag)\n\n for lag in range(1, max_lag+1):\n ac += np.abs(np.sum(seq[0:(n-lag)] * seq[lag:n]) / (n*var))\n return -ac",
"def autocorrelation_1d(data):\n\n N = len(data)\n n_fft = select_power_of_two(N)\n\n # Pad the signal with zeros to avoid the periodic images.\n\n R_data = np.zeros(2*n_fft)\n R_data[:N] = data\n\n F_data = np.fft.fft(R_data)\n\n result = np.fft.ifft(F_data*F_data.conj())[:N].real/(N-np.arange(N))\n\n return result[:N]",
"def calculate_overf_correlation(amp, index, f0, dt, n_lags):\n \n # Cast inputs as floats as I do a bunch of division.\n dt = float(dt)\n f0 = float(f0)\n index = float(index)\n # Number of points used in calculation needs to be at least 10 times bigger\n # than final number of point returned. This is so we are not affected by\n # the periodicity of the correlation function.\n buff_factor = 64\n n = buff_factor * n_lags\n n_return = n_lags\n # Generate the power spectrum.\n # Need to add a low frequency cut off, since there is an IR divergence.\n # Choose to cut off at 1/2df (so we get a bit of slope mode).\n power = overf_power_spectrum(amp, index, f0, dt, n,\n cut_off=1./n_lags/dt/2.0)\n # FFT it to the correlation function.\n corr = fft.ifft(power)\n # Complex part should be zero.\n corr = corr.real\n # In previous versions of this function, we shifted the output function.\n # however this screws up positive definiteness of the correlation matrix\n # and is unnecessary if you have the IR cut off.\n #corr -= corr[2 * n_return]\n # Trim to return size.\n corr = corr[:n_return]\n # To normalize, need to multiply by twice the bandwidth.\n corr *= 1.0/dt\n return corr",
"def trc_fgen_prefb(self,trc,dt,nspad=200,hwin=150,vlen=51):\n output=np.zeros((len(trc),((11*(vlen))+1)))\n pad=np.random.rand(nspad)/100\n trc_norm=trc/np.amax(np.abs(trc))\n trc_norm_padded=np.hstack((pad,trc_norm))\n trc_entropy=self.entropy(trc_norm_padded,50)\n trc_fdm=self.fdm(trc_norm_padded,50,np.arange(1,4),15)\n trc_slta=trigger.classic_sta_lta(trc_norm_padded,2,100)\n trc_fq_win_sum=self.fq_win_sum(trc_norm_padded,hwin,dt)\n hwin2=50\n trc_kurtosis_skew=self.kurtosis_skewness(trc_norm_padded,hwin2)\n for i,j in enumerate(trc):\n ftrc=[]\n fb=i*dt\n ftrc=np.append(ftrc,trc_norm_padded[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(np.abs(trc_norm_padded)))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_entropy)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_entropy))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_fdm)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fdm))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_slta)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_fq_win_sum)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fq_win_sum))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[0])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[1])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,1)\n output[i,:]=ftrc\n return output",
"def plotting_autocorr(dataframe):\n plot_acf(dataframe['STU'].iloc[1:], lags=40)\n plt.show()",
"def autocorrelation(self):\n # For all features calculate kendall's tau with every other feature.\n df_bin = pd.read_csv(self.path_bin)\n features = sorted(list(df_bin.columns))\n df_correlation = pd.DataFrame({f: [np.nan] * len(features) for f in features}, index=features)\n for f1 in features:\n for f2 in features:\n x = list(df_bin[f1])\n y = list(df_bin[f2])\n corr, p = scipy.stats.kendalltau(x, y)\n df_correlation.loc[f1, f2] = \"{} (p={:.3f})\".format(corr, p)\n if f1 == f2:\n break\n df_correlation.to_csv(self.path_autocorrelation, index=True)"
]
| [
"0.6922259",
"0.6406261",
"0.6337326",
"0.62940246",
"0.6227375",
"0.6044275",
"0.6015199",
"0.5965191",
"0.5965191",
"0.5907191",
"0.5794681",
"0.5791768",
"0.57608676",
"0.56982356",
"0.56982356",
"0.5690548",
"0.5685578",
"0.564711",
"0.5607928",
"0.5591235",
"0.557764",
"0.55693233",
"0.55580723",
"0.55208826",
"0.5503176",
"0.5497603",
"0.54476184",
"0.5440064",
"0.54394066",
"0.5432295"
]
| 0.75102353 | 0 |
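The record above computes lineage autocorrelation through external `connect_cells` and `correlation` helpers that are not reproduced in the stored snippet. As a minimal, self-contained illustration of the underlying idea (correlation of a signal with itself at a fixed lag, with lags converted from time units to frames and then to division times), a numpy sketch could look roughly as follows; every name and value below is a placeholder and not the code used in the record.

import numpy as np

def autocorr_at_lag(x, lag):
    """Pearson correlation between x[t] and x[t + lag], with lag given in frames."""
    x = np.asarray(x, dtype=float)
    if lag == 0:
        return 1.0
    a = x[:-lag] - x[:-lag].mean()
    b = x[lag:] - x[lag:].mean()
    return float((a * b).sum() / np.sqrt((a ** 2).sum() * (b ** 2).sum()))

acquisition_time = 5.0    # minutes per frame (placeholder value)
division_time = 60.0      # minutes per division (placeholder value)
signal = np.sin(np.linspace(0, 20, 400)) + 0.1 * np.random.randn(400)

# Lags expressed in frames, then converted back to units of division time,
# mirroring the conversion done in the record above.
lags_frames = np.arange(0, int(200 / acquisition_time), int(10 / acquisition_time))
acf = np.array([autocorr_at_lag(signal, int(k)) for k in lags_frames])
lags_in_divisions = lags_frames * acquisition_time / division_time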
qq plot with normal dist | def qq_plot(obs,var,fname):
plt.figure()
z = (obs-np.mean(obs))/np.std(obs)
stats.probplot(z, dist="norm", plot=plt)
plt.plot(np.arange(-3,3),np.arange(-3,3))
plt.xlim([-3,3])
plt.ylim([-3,3])
plt.title("Normal Q-Q plot {} in {}".format(var,fname))
plt.savefig("qq_{}".format(var)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def qq_plot(data, name, distribution=\"norm\", ax_size=(7, 7)):\n\n common_set_up(ax_size)\n\n fig = plt.figure(figsize=ax_size)\n ax = fig.add_subplot(111) # Make one axes\n\n # Use scipy stats probplot and get out only values\n (x, y) = stats.probplot(data, dist=distribution, plot=None, fit=False)\n\n # Add a best fit line to the plot.\n #\n # Not using probplot version to be able to\n # customize the style of the line.\n slope, intercept, r, prob, sterrest = stats.linregress(x, y)\n ax.plot(\n x,\n (slope*x + intercept),\n '#9099A2', # Choose color for line\n linestyle='--', # Dashed line\n linewidth=1\n )\n\n ax.scatter(\n x,\n y,\n s=70, # Scale of points on scatter plot\n facecolors='none', # Transparent, no fill\n edgecolors='#192231', # Dark grey\n linewidths=1.4\n )\n\n title_color = '#192231' # Dark grey\n font_colour = '#9099A2' # Light grey\n\n ax.set_title(\n \"Q-Q plot of {0}\".format(name),\n fontsize=20,\n color=title_color\n )\n ax.set_ylabel(\n 'Quantiles of {0}'.format(name),\n color=font_colour\n )\n ax.set_xlabel(\n 'Quantiles of {0} dist.'.format(distribution),\n color=font_colour\n )\n\n sns.despine(ax=ax, offset=2, trim=True, left=True, bottom=True)",
"def QQplot(self,using,dx=0.0001,Nquants=101):\n pits = self.PIT(using=using,dx=dx)\n quants = np.linspace(0.,100.,Nquants)\n QTheory = quants/100.\n Qdata = np.percentile(pits,quants)\n plt.figure(figsize=(10,10))\n plt.plot(QTheory,Qdata,c='b',linestyle='-',linewidth=3,label='QQ')\n plt.plot([0,1],[0,1],color='k',linestyle='-',linewidth=2)\n plt.xlabel(\"Qtheory\",fontsize=18)\n plt.ylabel(\"Qdata\",fontsize=18)\n plt.legend()\n plt.savefig(\"QQplot.jpg\")\n return",
"def plot_qq(x, var_name, path='qq_plot.png'):\n # https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot\n # In Q-Q plot the axes are transformed in order to make a normal distribution appear in a straight line.\n # (a perfectly normal distribution would exactly follow a line with slope = 1 and intercept = 0).\n # The theoretical quantiles are placed along the x-axis. That is, the x-axis is not our data, it's simply\n # an expectation of where our data should have been if it were normal.\n # The actual data is plotted along the y-axis.\n fig, ax = plt.subplots()\n res = stats.probplot(x, dist='norm', fit=True, plot=plt)\n plt.title('Q-Q plot of ' + var_name)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(path, bbox_inches='tight')",
"def demo():\n n_dim = 3\n A = np.eye(n_dim)\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n true_dist = scipy.stats.chi2(df=n_dim)\n\n q = np.linspace(0, 10)\n\n plt.plot(\n q, true_dist.pdf(q), label='True', color='black')\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()",
"def plot_q_values(self):\n\n sim_freq = self.conf['Simulation']['params']['frequency']\n sim_wvlgth = 1e9*consts.c / sim_freq\n leg_str = ''\n for mat, matpath in self.conf['Materials'].items():\n n, k = get_nk(matpath, sim_freq)\n mat_wv = 1e-3*sim_wvlgth / n\n mat_q = 2*np.pi/mat_wv\n leg_str += '{}: {:.2f} [rads/$\\mu$m]\\n'.format(mat, mat_q)\n leg_str = leg_str[0:-1]\n for lname, l_obj in self.layers.items():\n qarr = self.data['{}_qvals'.format(lname)]\n max_pos_freq = np.amax(qarr.real)\n max_neg_freq = np.amin(qarr.real)\n min_pos_wv = 1e3*2*np.pi/max_pos_freq\n if max_neg_freq == 0:\n min_neg_wv = 0\n else:\n min_neg_wv = 1e3*2*np.pi/max_neg_freq\n plt.figure()\n inc_q = 2*np.pi/(1e-3*sim_wvlgth)\n title = 'Layer: {}, Incident q: {:.2f} [rads/$\\mu$m]'.format(lname, inc_q)\n # title += 'Min Positive $\\\\lambda$: {:.2f} nm, '\n # title += 'Min Negative $\\\\lambda$: {:.2f} nm'\n # title = title.format(lname, sim_wvlgth, min_pos_wv, min_neg_wv)\n # title = title.format(lname, sim_wvlgth)\n plt.title(title)\n # plt.scatter(1e3*2*np.pi/qarr.real, 1e4*qarr.imag/2*np.pi, c='b', s=.5,\n # marker='o', label=leg_str)\n plt.scatter(qarr.real, qarr.imag/(2*np.pi), c='b', s=.75,\n marker='o', label=leg_str)\n # pt = (qarr[0].real, qarr[0].imag)\n # theta = np.linspace(0, 1.48, 200)\n # plt.plot(pt[0]*np.cos(theta), pt[1]/np.cos(theta), 'r--')\n plt.legend(loc='best')\n # plt.annotate(leg_str, xy=(.95,.95), xycoords='axes fraction',\n # size=14, ha='right', va='top',\n # bbox=dict(boxstyle='round', fc='w'))\n plt.xlabel('Re(q) [radians/micron]')\n plt.ylabel('Im(q) [1/microns]')\n plot_path = os.path.join(self.dir, '{}_qvals.png'.format(lname))\n plt.grid(True)\n plt.savefig(plot_path)\n plt.close()",
"def normquantplot(dataVect, title=\"data\"):\n dataVect = np.sort(dataVect)\n mm = np.mean(dataVect)\n dev = np.std(dataVect)\n per = (np.arange(len(dataVect)) + 1) / len(dataVect)\n zvals = norm.ppf(per)\n fig = go.Figure()\n fig.add_scatter(x=dataVect, y=zvals, mode='markers', name=title)\n fig.add_scatter(x=[mm - 2 * dev, mm + 2 * dev], \n y=[-2, 2], mode='lines', line={'dash':'dash'},\n name=\"Approximate Normal line\")\n \n fig.update_layout({\"title\": f\"Normal Quantile plot of {title}\",\n \"xaxis_title\":title,\n \"yaxis_title\":\"Normal Score\"})\n \n st.plotly_chart(fig)\n \n return None",
"def p_qq(data,pv_index=0,alpha=0.05):\n ####if it's a pd.dataframe, rename to col header\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n if not (np.issubdtype(data['p_value'].dtypes, np.number)):\n raise TypeError(\"Please ensure you have specified the column index of numeric p-values.\")\n ###or make a vector a pd.dataframe\n else:\n data = pd.DataFrame({\"p_value\": data})\n \n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n raise ProbabilityError(\"One or more p-values is not between 0 and 1!\")\n \n m = len(data['p_value'])\n\n data['log_transf'] = -np.log10(data['p_value'])\n data = data.sort_values('p_value',ascending=True)\n data['rank'] = np.arange(1,len(data['p_value'])+1)\n data['log_exp'] = -np.log10(data['rank']/m)\n fig = plt.clf()\n plt.scatter(data['log_exp'],data['log_transf'],color='black')\n plt.plot(data['log_exp'],data['log_exp'])\n plt.title(\"QQ\")\n plt.xlabel(\"Expected -log10(p)\")\n plt.ylabel(\"Observed -log10(p)\")\n return fig",
"def qq_plot_var(data_a, data_b, name_a, name_b, ax_size=(7, 7), fit_zero=True):\n\n common_set_up(ax_size)\n\n fig = plt.figure(figsize=ax_size)\n ax = fig.add_subplot(111) # Make one plot within a figure\n\n # Manually calculate quantiles from 1 to 100.\n x = []\n for i in range(1, 100):\n v = np.percentile(data_a, i)\n x.append(v)\n y = []\n for i in range(1, 100):\n v = np.percentile(data_b, i)\n y.append(v)\n\n # Plot a base line of y = 1x + 0\n ax.plot(\n x,\n (1*x),\n '#9099A2', # Color of line, light grey\n linestyle='--', # Dashed line style\n linewidth=1\n )\n\n ax.scatter(\n x,\n y,\n s=40, # Scale of scatter point\n facecolors='none', # Transparent fill\n edgecolors='#192231', # Darky grey\n linewidths=0.5\n )\n\n # To be able to see the figure back to 0, 0\n if fit_zero:\n axes = ax.axes\n axes.set_xlim(0,)\n axes.set_ylim(0,)\n\n title_color = '#192231' # Dark grey\n font_colour = '#9099A2' # Light grey\n\n ax.set_title(\n \"Q-Q plot of {0} vs {1}\".format(name_a, name_b),\n fontsize=20,\n color=title_color\n )\n ax.set_ylabel(\n 'Quantiles of {0}'.format(name_b),\n color=font_colour\n )\n ax.set_xlabel(\n 'Quantiles of {0}'.format(name_a),\n color=font_colour\n )\n\n sns.despine(ax=ax, offset=2, trim=True, left=True, bottom=True)\n\n return ax",
"def normal_log_q(self,z): \n means, scale = self.get_means_and_scales()\n return ss.norm.logpdf(z,loc=means,scale=scale)",
"def demo_neg():\n n_dim = 3\n A = -1 * np.eye(n_dim)\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n true_dist = scipy.stats.chi2(df=n_dim)\n\n q = np.linspace(-10, 0)\n\n plt.plot(\n q, true_dist.pdf(-1 * q), label='True', color='black')\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()",
"def _onSquaredQ(self, event):\r\n self.graph.xaxis('\\\\rm{q}^2 ', 'A^{-2}')\r\n self.set_xscale('squared')\r\n\r\n self.graph.render(self)\r\n self.subplot.figure.canvas.draw_idle()",
"def qq_plots(results, num_dots, max_log_val, file_prefix, method_types=['kw', 'emma'], mapping_labels=None,\n\t \tphen_name=None, perm_pvalues=None, is_binary=False, **kwargs):\n\tlog_pdf_file = None\n\tlog_png_file = None\n\tpdf_file = None\n\tpng_file = None\n\tif file_prefix:\n\t\tlog_pdf_file = file_prefix + \"_qq_log.pdf\"\n\t\tlog_png_file = file_prefix + \"_qq_log.png\"\n\t\tpdf_file = file_prefix + \"_qq.pdf\"\n\t\tpng_file = file_prefix + \"_qq.png\"\n\tqq_plot(results, num_dots, method_types=method_types, mapping_labels=mapping_labels, phenName=phen_name,\n\t\tpdfFile=pdf_file, pngFile=png_file, perm_pvalues=perm_pvalues, isBinary=is_binary, kwargs=kwargs)\n\tlog_qq_plot(results, num_dots, max_log_val, method_types=method_types, mapping_labels=mapping_labels,\n\t\tphenName=phen_name, pdfFile=log_pdf_file, pngFile=log_png_file, perm_pvalues=perm_pvalues,\n\t\tisBinary=is_binary, kwargs=kwargs)",
"def normal_log_q_initial(self,z): \n means, scale = self.get_means_and_scales_from_q()\n return ss.norm.logpdf(z,loc=means,scale=scale)",
"def _onLinearQ(self, event):\r\n self.graph.xaxis('\\\\rm{q} ', 'A^{-1}')\r\n self.set_xscale('linear')\r\n self.graph.render(self)\r\n self.subplot.figure.canvas.draw_idle()",
"def plot_simple_qqplots(png_file_prefix, results, result_labels=None, line_colors=None,\n\t\t\tnum_dots=1000, title=None, max_neg_log_val=5):\n\tqs = []\n\tlog_qs = []\n\tfor res in results:\n\t\tpvals = res.snp_results['scores'][:]\n\t\tqs.append(get_quantiles(pvals, num_dots))\n\t\tlog_qs.append(get_log_quantiles(pvals, num_dots, max_neg_log_val))\n\tsimple_qqplot(qs, png_file_prefix + '_qq.png', quantile_labels=result_labels,\n\t\t\t\tline_colors=line_colors, num_dots=num_dots, title=title)\n\tsimple_log_qqplot(log_qs, png_file_prefix + '_log_qq.png', quantile_labels=result_labels,\n\t\t\t\tline_colors=line_colors, num_dots=num_dots, title=title, max_val=max_neg_log_val)",
"def normal_log_q(self,z):\n means, scale = self.get_means_and_scales()\n return (ss.norm.logpdf(z,loc=means,scale=scale)).sum(axis=1)",
"def qscatter(xlist,ylist):\n dislin.qplsca(xlist,ylist,len(xlist))",
"def normal_log_q_initial(self,z):\n means, scale = self.get_means_and_scales_from_q()\n return (ss.norm.logpdf(z,loc=means,scale=scale)).sum(axis=1)",
"def qt(q, df=1, loc=0, scale=1, ncp=None, lowertail=True, log=False):\n # ==========================================================================\n if log:\n raise NotImplementedError(\"Log option is not implemented yet.\")\n elif lowertail:\n return t.ppf(q=q, df=df, loc=loc, scale=scale)\n else:\n return t.isf(q=q, df=df, loc=loc, scale=scale)",
"def log_qq_plot(results, numDots, maxVal, method_types=['kw', 'emma'], mapping_labels=None, phenName=None, pdfFile=None,\n\t pngFile=None, perm_pvalues=None, **kwargs):\n\tdef _getExpectedLogQuantiles_():\n\t\tquantiles = []\n\t\tfor i in range(1, numDots + 1):\n\t\t\tquantiles.append((float(i) / (numDots + 2.0)) * maxVal)\n\t\treturn quantiles\n\n\tif not mapping_labels:\n\t\tmapping_labels = method_types\n\tplt.figure(figsize=(5, 4))\n\tplt.axes([0.15, 0.14, 0.82, 0.79])\n\tmaxVal = min(math.log10(len(results[mapping_labels[0]].snp_results['scores'])), maxVal)\n\tminVal = (1.0 / numDots) * maxVal\n\tvalRange = maxVal - minVal\n\tplt.plot([minVal, maxVal], [minVal, maxVal], \"k\", label=\"Expected\")\n\tmaxObsVals = []\n\tareas = []\n\tds = []\n\tslopes = []\n\tfor method_type, label in zip(method_types, mapping_labels):\n\t\tresult = results[label]\n\t\tif perm_pvalues and method_type in ['kw', 'ft']:\n\t\t\texp_maxVal = _getLogQuantilesMaxVal_(perm_pvalues[:], maxVal)\n\t\t\texpQuantiles = _getLogQuantiles_(perm_pvalues[:], numDots, exp_maxVal)\n\t\t\tks_res = calc_ks_stats(result.snp_results['scores'], perm_pvalues)\n\t\t\tquantiles = _getLogQuantiles_(result.snp_results['scores'][:], numDots, exp_maxVal)\n\t\t\tslope = _estLogSlope_(result.snp_results['scores'][:], perm_pvalues)\n\t\telse:\n\t\t\tquantiles = _getLogQuantiles_(result.snp_results['scores'][:], numDots, maxVal)\n\t\t\texpQuantiles = _getExpectedLogQuantiles_()\n\t\t\tks_res = calc_ks_stats(result.snp_results['scores'])\n\t\t\tslope = _estLogSlope_(result.snp_results['scores'][:])\n\n\t\tarea = _estAreaBetweenCurves_(quantiles, expQuantiles)\n\t\tareas.append(area)\n\t\tslopes.append(slope)\n\t\tds.append(ks_res[\"D\"])\n\t\t#plt.plot(expQuantiles, quantiles, label = label+\", A=\"+str(round(area,2))+\", D=\"+str(round(ks_res[\"D\"],3))+\", S=\"+str(round(slope,3)))\n\t\tplt.plot(expQuantiles, quantiles, label=label + \", D=\" + str(round(ks_res[\"D\"], 3)) + \", S=\" + str(round(slope, 3)))\n\t\tmaxObsVals.append(max(quantiles))\n\n\tmaxObsVal = max(maxObsVals)\n\tobsValRange = maxObsVal - minVal\n\tplt.axis([minVal - 0.025 * valRange, maxVal + 0.025 * valRange, minVal - 0.025 * obsValRange, maxObsVal + 0.025 * obsValRange])\n\tplt.ylabel(\"Observed $-log_{10}(p$-value$)$\")\n\tplt.xlabel(\"Expected $-log_{10}(p$-value$)$\")\n\tif phenName:\n\t\tplt.title(phenName)\n\tfontProp = matplotlib.font_manager.FontProperties(size=8)\n\tplt.legend(loc=2, numpoints=4, handlelen=0.05, markerscale=1, prop=fontProp, pad=0.018)\n\tif pdfFile:\n\t\tplt.savefig(pdfFile, format=\"pdf\")\n\tif pngFile:\n\t\tplt.savefig(pngFile, format=\"png\", dpi=300)\n\telif not pdfFile:\n\t\tplt.show()\n\tplt.clf()\n\treturn (ds, areas, slopes)",
"def visualize_q(self, grid_size, show_max_qs=False):\n q_values = None\n if show_max_qs:\n # Maximum over actions\n q_values = self.q.max(axis=1)\n else:\n # Mean over actions\n q_values = self.q.mean(axis=1)\n # Reshape to match with the grid we have\n q_values = q_values.reshape(grid_size)\n pyplot.imshow(q_values)\n pyplot.show()",
"def create_normal_logq(self,z):\n means, scale = self.get_means_and_scales()\n return ss.norm.logpdf(z,loc=means,scale=scale).sum()",
"def plot_q_values(self):\n if 'numbasis' not in self.grouped_against:\n raise ValueError(\"\"\"Simulations must be grouped against number of\n basis terms to plot q values on the same axis\"\"\")\n\n layers = self.sims[0].layers\n base = self.sims[0].conf['General']['results_dir']\n freq = self.sims[0].conf[('Simulation', 'params', 'frequency')]\n wvlgth = 1e9*consts.c/freq\n for lname, l_obj in layers.items():\n plt.figure()\n title = 'Layer: {}, Freq = {:.3E}, Wavelength = {:.2f} nm'\n plt.title(title.format(lname, freq, wvlgth))\n for sim in self.sims:\n qarr = sim.data['{}_qvals'.format(lname)]\n label = 'Numbasis: {}'\n label = label.format(sim.conf[('Simulation','params','numbasis')])\n plt.scatter(qarr.real, qarr.imag/(2*np.pi), s=.75, label=label)\n # pt = (qarr[0].real, qarr[0].imag)\n # theta = np.linspace(0, 1.48, 200)\n # plt.plot(pt[0]*np.cos(theta), pt[1]/np.cos(theta), 'r--')\n # plt.annotate(leg_str, xy=(.95,.95), xycoords='axes fraction',\n # size=14, ha='right', va='top',\n # bbox=dict(boxstyle='round', fc='w'))\n plt.xlabel('Re(q) [radians/micron]')\n plt.ylabel('Im(q) [1/microns]')\n plot_path = os.path.join(base, '{}_qvals.png'.format(lname))\n plt.grid(True)\n plt.legend(loc='best')\n plt.savefig(plot_path)\n plt.close()",
"def show(self, q , x_axis = 0 , y_axis = 1 ):\n \n system.ContinuousDynamicSystem.show( self.plant , q , \n x_axis = 0 , y_axis = 1 )",
"def _q_z(self):\n D = self.latt_par['D'].value\n lambda_r = self.latt_par['lambda_r'].value\n gamma = self.latt_par['gamma'].value\n return 2*np.pi*(self.h/D - self.k/lambda_r/np.tan(gamma))",
"def qq_plot(results, numQuantiles, method_types=[\"kw\", \"emma\"], mapping_labels=None, phenName=None, pdfFile=None, pngFile=None,\n\t perm_pvalues=None, **kwargs):\n\n\tif not mapping_labels:\n\t\tmapping_labels = method_types\n\n\tplt.figure(figsize=(5, 4))\n\t#plt.figure(figsize=(10,8))\n\t#plt.figure(figsize=(4,3.5))\n\tplt.axes([0.15, 0.14, 0.82, 0.79])\n\tplt.plot([0, 1], [0, 1], \"k\", label=\"Expected\")\n\tareas = []\n\tmedians = []\n\tfor method_type, label in zip(method_types, mapping_labels):\n\t\tresult = results[label]\n\t\tnewScores = result.snp_results['scores'][:]\n\t\tquantiles = _getQuantiles_(newScores, numQuantiles)\n\t\tif perm_pvalues and method_type in ['kw', 'ft']:\n\t\t\tprint \"Getting exp. quantiles for permuted p-values\"\n\t\t\texpQuantiles = _getQuantiles_(perm_pvalues, numQuantiles)\n\t\t\tq_i = numQuantiles / 2\n\t\t\tif numQuantiles % 2 == 0: #even\n\t\t\t\texp_median = (expQuantiles[q_i - 1] + expQuantiles[q_i]) / 2.0\n\t\t\telse: #odd\n\t\t\t\texp_median = expQuantiles[q_i]\n\n\t\telse:\n\t\t\texp_median = 0.5\n\t\t\texpQuantiles = _getExpectedPvalueQuantiles_(numQuantiles)\n\t\tarea = _estAreaBetweenCurves_(quantiles, expQuantiles)\n\t\tmedian = calc_median(newScores, exp_median)\n\t\tplt.plot(expQuantiles, quantiles, label=label + \", A=\" + str(round(area, 3)) + \\\n\t\t\t\", M=\" + str(round(median, 3)))\n\t\tareas.append(area)\n\t\tmedians.append(median)\n\n\tif phenName:\n\t\tplt.title(phenName)\n\tfontProp = matplotlib.font_manager.FontProperties(size=8)\n\tplt.legend(loc=2, numpoints=4, handlelen=0.05, markerscale=1, prop=fontProp, pad=0.018)\n\tplt.axis([-0.01, 1.01, -0.01, 1.01])\n\tplt.xlabel(\"Expected $p$-value\")\n\tplt.ylabel(\"Observed $p$-value\")\n\tif pdfFile:\n\t\tplt.savefig(pdfFile, format=\"pdf\")\n\tif pngFile:\n\t\tplt.savefig(pngFile, format=\"png\", dpi=300)\n\telif not pdfFile:\n\t\tplt.show()\n\tplt.clf()\n\treturn (areas, medians)",
"def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r",
"def plot_1d(q, n=300):\n theta = np.linspace(THETA_LOW, THETA_HIGH, n)\n Zq = kernel_1d(q=q, theta=theta)\n Zq *= abs(sin(theta))\n pylab.semilogy(degrees(theta), np.fmax(Zq, 1.e-6), label=\"Q=%g\"%q)\n pylab.title(\"%s I(q, theta) sin(theta)\" % (KERNEL.__doc__,))\n pylab.xlabel(\"theta (degrees)\")\n pylab.ylabel(\"Iq 1/cm\")",
"def init_plot_q(nb_q):\n app_q = pg.mkQApp(\"q\")\n layout = pg.LayoutWidget()\n layout.resize(900, 800)\n layout.move(500, 0)\n label = QtGui.QLabel()\n box = []\n remote = []\n rplt = []\n row_count = 0\n col_count = 0\n # col_span = 4 if nb_q > 8 else 8\n for q in range(nb_q):\n remote.append(rgv.RemoteGraphicsView())\n remote[q].pg.setConfigOptions(antialias=True)\n app_q.aboutToQuit.connect(remote[q].close)\n box.append(QtGui.QCheckBox(f\"angle_{q}\"))\n box[q].setChecked(True)\n layout.addWidget(box[q], row=0, col=q)\n layout.addWidget(remote[q], row=row_count + 1, col=col_count)\n rplt.append(remote[q].pg.PlotItem())\n rplt[q]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n remote[q].setCentralItem(rplt[q])\n layout.addWidget(label)\n layout.show()\n if col_count < 4:\n col_count += 1\n else:\n col_count = 0\n row_count += 1\n\n return rplt, layout, app_q, box",
"def norm_q(exp_obj, q=0.75):\n df = exp_obj.counts_df.copy()\n # remove zeros\n nz = df.where(df > 0)\n nz = nz.dropna(how=\"all\").fillna(0)\n sf_q = _sf_q(nz, q)\n # apply scaling\n df = df.div(sf_q, axis=1)\n return df"
]
| [
"0.661851",
"0.6325651",
"0.62123716",
"0.5937972",
"0.5926158",
"0.58665866",
"0.5817014",
"0.5704175",
"0.5699012",
"0.5634741",
"0.5628107",
"0.559978",
"0.559678",
"0.5528379",
"0.55140585",
"0.5493067",
"0.5485866",
"0.5461674",
"0.5413696",
"0.5400506",
"0.53609097",
"0.53071046",
"0.52898604",
"0.5280203",
"0.52790636",
"0.5273273",
"0.52713645",
"0.5252812",
"0.52450204",
"0.5227085"
]
| 0.71324325 | 0 |
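The qq_plot record above relies on module-level plt, np, and stats imports that sit outside the stored snippet. A self-contained variant of the same probplot-based approach, using explicit imports and an Axes object instead of the pyplot state machine, might look like the sketch below; the observations and output file name are placeholders.

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

obs = 1.0 + 2.0 * np.random.randn(500)          # placeholder observations
z = (obs - np.mean(obs)) / np.std(obs)          # standardize before plotting

fig, ax = plt.subplots()
stats.probplot(z, dist="norm", plot=ax)         # sample vs. theoretical quantiles
ax.plot([-3, 3], [-3, 3], lw=1)                 # 45-degree reference line
ax.set_xlim(-3, 3)
ax.set_ylim(-3, 3)
ax.set_title("Normal Q-Q plot (illustrative)")
fig.savefig("qq_example.png")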
Instantiate a `Surrogate` from a preinstantiated Botorch `Model`. | def from_botorch(
cls,
model: Model,
mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,
) -> Surrogate:
surrogate = cls(botorch_model_class=model.__class__, mll_class=mll_class)
surrogate._model = model
# Temporarily disallowing `update` for surrogates instantiated from
# pre-made BoTorch `Model` instances to avoid reconstructing models
# that were likely pre-constructed for a reason (e.g. if this setup
        # doesn't fully allow to construct them).
surrogate._constructed_manually = True
return surrogate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def model_to_instance(model):\n pass",
"def __init__(self, model: object):\n self.model = model",
"def __init__(self, model):\n self._model = model",
"def _instantiate(decoder, model=None, dataset=None):\n if decoder is None: return U.Identity()\n\n if isinstance(decoder, str): name = decoder\n else: name = decoder['_name_']\n\n # Extract arguments from attribute names\n dataset_args = utils.config.extract_attrs_from_obj(dataset, *dataset_attrs.get(name, []))\n model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))\n # Instantiate decoder\n obj = utils.instantiate(registry, decoder, *model_args, *dataset_args)\n return obj",
"def __init__(self, model_type=DEFAULT_MODEL_TYPE):\n assert (model_type == 'SVR'), \"Model '{}' is not supported. \" \\\n \"We support only SVR for now.\".format(model_type)\n self._model_type = model_type\n self._model_params = BTCForecast.DEFAULT_SVR_MODEL_PARAMS\n\n # set up SVR pipeline\n self._scaler = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True)\n self._model = SVR(kernel=self._model_params['kernel'],\n epsilon=self._model_params['epsilon'],\n C=self._model_params['c'],\n gamma=self._model_params['gamma'])\n self._pipeline = make_pipeline(self._scaler, self._model)\n self.has_learned = False",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: str, **kwargs):\n super().__init__(model=model)",
"def __init__(self, model):\n\t\tself.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def _construct_model(\n self,\n dataset: SupervisedDataset,\n **kwargs: Any,\n ) -> None:\n if self.botorch_model_class is None:\n raise ValueError(\n \"botorch_model_class must be set to construct single model Surrogate.\"\n )\n botorch_model_class = self.botorch_model_class\n\n input_constructor_kwargs = {**self.model_options, **(kwargs or {})}\n botorch_model_class_args = inspect.getfullargspec(botorch_model_class).args\n\n # Temporary workaround to allow models to consume data from\n # `FixedNoiseDataset`s even if they don't accept variance observations\n if \"train_Yvar\" not in botorch_model_class_args and isinstance(\n dataset, FixedNoiseDataset\n ):\n warnings.warn(\n f\"Provided model class {botorch_model_class} does not accept \"\n \"`train_Yvar` argument, but received `FixedNoiseDataset`. Ignoring \"\n \"variance observations and converting to `SupervisedDataset`.\",\n AxWarning,\n )\n dataset = SupervisedDataset(X=dataset.X(), Y=dataset.Y())\n\n self._training_data = [dataset]\n\n formatted_model_inputs = botorch_model_class.construct_inputs(\n training_data=dataset, **input_constructor_kwargs\n )\n self._set_formatted_inputs(\n formatted_model_inputs=formatted_model_inputs,\n inputs=[\n [\n \"covar_module\",\n self.covar_module_class,\n self.covar_module_options,\n None,\n ],\n [\"likelihood\", self.likelihood_class, self.likelihood_options, None],\n [\"outcome_transform\", None, None, self.outcome_transform],\n [\"input_transform\", None, None, self.input_transform],\n ],\n dataset=dataset,\n botorch_model_class_args=botorch_model_class_args,\n robust_digest=kwargs.get(\"robust_digest\", None),\n )\n # pyre-ignore [45]\n self._model = botorch_model_class(**formatted_model_inputs)",
"def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model",
"def initialize_model(self):\n model = self.model_class()\n return model",
"def __init__(self, model: Model1D):\n self._model = model",
"def instance_for_model(self, model: AbstractPriorModel):\n try:\n if self.is_path_kwargs:\n return model.instance_from_path_arguments(self.kwargs)\n else:\n return model.instance_from_prior_name_arguments(self.kwargs)\n\n except KeyError:\n # TODO: Does this get used? If so, why?\n return model.instance_from_vector(self.parameter_lists_for_model(model))",
"def instance_to_model(self):\n pass",
"def _load_from(cls, model_state: dict) -> AbstractModel:\n return cls(model=model_state.get('model'), **model_state.get('kwargs'))",
"def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)",
"def _create_model(self, key):\n pass",
"def __init__(self, model: str, **kwargs):\n\n super().__init__(model=model, **kwargs)\n logger.info('load model done')",
"def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def model() -> Model:\n return Model()"
]
| [
"0.6612203",
"0.58194447",
"0.58095664",
"0.57330984",
"0.5716413",
"0.563533",
"0.563533",
"0.5630868",
"0.56230235",
"0.5622394",
"0.5622394",
"0.5622394",
"0.5622394",
"0.55855143",
"0.5579342",
"0.5510232",
"0.54965955",
"0.54339975",
"0.54204804",
"0.53734046",
"0.5353403",
"0.5348271",
"0.5347983",
"0.5325686",
"0.52720356",
"0.52720356",
"0.52720356",
"0.52720356",
"0.52720356",
"0.52380866"
]
| 0.65808463 | 1 |
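The record above wraps an already-built BoTorch model in a Surrogate via the from_botorch classmethod. A hedged usage sketch is given below; the Surrogate import path is an assumption that may differ between Ax versions, and the training data is synthetic.

import torch
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood
from ax.models.torch.botorch_modular.surrogate import Surrogate  # assumed import path

train_X = torch.rand(20, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)

gp = SingleTaskGP(train_X, train_Y)               # pre-constructed BoTorch Model
surrogate = Surrogate.from_botorch(gp, mll_class=ExactMarginalLogLikelihood)
# As noted in the next record, calling `update` on such a manually constructed
# surrogate raises NotImplementedError; rebuild it via `from_botorch` instead.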
Updates the surrogate model with new data. In the base ``Surrogate``, just calls ``fit`` after checking that this surrogate was not created via ``Surrogate.from_botorch`` (in which case the ``Model`` comes premade, constructed manually and then supplied to ``Surrogate``). | def update(
self,
datasets: List[SupervisedDataset],
metric_names: List[str],
search_space_digest: SearchSpaceDigest,
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
state_dict: Optional[Dict[str, Tensor]] = None,
refit: bool = True,
**kwargs: Any,
) -> None:
# NOTE: In the future, could have `incremental` kwarg, in which case
# `training_data` could contain just the new data.
if self._constructed_manually:
raise NotImplementedError(
"`update` not yet implemented for models that are "
"constructed manually, but it is possible to create a new "
"surrogate in the same way as the current manually constructed one, "
"via `Surrogate.from_botorch`."
)
self.fit(
datasets=datasets,
metric_names=metric_names,
search_space_digest=search_space_digest,
candidate_metadata=candidate_metadata,
state_dict=state_dict,
refit=refit,
**kwargs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_model(model, data, encoders, epochs, batch_size):\n model.fit(\n dict(data.drop(\"total_return\", axis=1)),\n data.total_return,\n epochs=epochs,\n batch_size=batch_size,\n verbose=False,\n )\n\n factors = pd.DataFrame(model.get_layer(\"date_embedding\").get_weights()[0]).reindex(\n data.date.unique()\n )\n\n loadings = pd.DataFrame(\n model.get_layer(\"ticker_embedding\").get_weights()[0]\n ).reindex(data.ticker.unique())\n\n loadings.index = encoders[\"ticker\"].inverse_transform(loadings.index)\n\n # rotating loadings so the factors are uncorrelated and unit variance\n rotated_loadings = loadings.dot(np.linalg.cholesky(factors.cov()))\n rotated_loadings.index.name = \"ticker\"\n rotated_loadings.columns.name = \"factor\"\n return rotated_loadings",
"def update(self):\n if not self.metamodel.surrogate.is_built():\n # Do not adjust until we have a surrogate\n return\n\n surr_rate = 1 - self.metamodel.history.get_model_usage_rate()\n surr_rate_err = abs(self.desired_rate - surr_rate)\n\n if surr_rate_err <= self.acceptable_offset:\n # Usage rate is acceptable.\n return\n\n T = self.value\n edge_adjustment = 1 - ((2*T - 1) ** self.alpha)\n err_adjustment = min(self.beta, 1 / ((1 - surr_rate_err) ** self.beta))\n step_size = self.step * edge_adjustment * err_adjustment\n # Adjust\n if surr_rate > self.desired_rate:\n self.value = max(T/self.beta, T - step_size)\n elif surr_rate < self.desired_rate:\n self.value = min(1 - ((1-T)/self.beta), T + step_size)\n\n return",
"def update(self):\n if not self.metamodel.surrogate.is_built():\n # Do not adjust until we have a surrogate\n return\n\n surr_rate = 1 - self.metamodel.history.get_model_usage_rate()\n up_bound = self.desired_rate + self.acceptable_offset\n low_bound = self.desired_rate + self.acceptable_offset\n\n if low_bound <= surr_rate <= up_bound:\n # Usage rate is acceptable.\n return\n\n T = self.value\n # Adjust step size if close to border of [0, 1]\n step_size = min(self.step, T/2, (1 - T)/2)\n\n # Check if critical (Needs adjustement fast)\n # !!! This is all very hacky and needs to be improved !!!\n if surr_rate > 1 - (1 - up_bound)/2 or surr_rate < low_bound/2:\n step_size = min(self.step * self.big_step_mult, T/1.5, (1 - T)/1.5)\n\n # Adjust\n if surr_rate > up_bound:\n self.value = max(0, min(1, self.value - step_size))\n elif surr_rate < low_bound:\n self.value = max(0, min(1, self.value + step_size))\n\n return",
"def updateModel(self):\n pass",
"def _update(self, y, X=None, update_params=True):\n # Need to construct a completely new y out of ol self._y and y and then\n # fit_treansform the transformer and re-fit the forecaster.\n _y = update_data(self._y, y)\n\n self.bootstrap_transformer_.fit(X=_y)\n y_bootstraps = self.bootstrap_transformer_.transform(X=_y)\n self.forecaster_.fit(y=y_bootstraps, fh=self.fh, X=None)\n\n return self",
"def update(self, x_train_single, updated_h):\n x_row = x_train_single.toarray()\n for i in range(self.num_models):\n self.models[i].partial_fit(x_row, [updated_h[i]])",
"def update_model(self):\n pass",
"def update_model(self):\n pass # TODO: Implement this.",
"def update_data(self):\n self._model.update()\n self.__refresh()",
"def _construct_model(\n self,\n dataset: SupervisedDataset,\n **kwargs: Any,\n ) -> None:\n if self.botorch_model_class is None:\n raise ValueError(\n \"botorch_model_class must be set to construct single model Surrogate.\"\n )\n botorch_model_class = self.botorch_model_class\n\n input_constructor_kwargs = {**self.model_options, **(kwargs or {})}\n botorch_model_class_args = inspect.getfullargspec(botorch_model_class).args\n\n # Temporary workaround to allow models to consume data from\n # `FixedNoiseDataset`s even if they don't accept variance observations\n if \"train_Yvar\" not in botorch_model_class_args and isinstance(\n dataset, FixedNoiseDataset\n ):\n warnings.warn(\n f\"Provided model class {botorch_model_class} does not accept \"\n \"`train_Yvar` argument, but received `FixedNoiseDataset`. Ignoring \"\n \"variance observations and converting to `SupervisedDataset`.\",\n AxWarning,\n )\n dataset = SupervisedDataset(X=dataset.X(), Y=dataset.Y())\n\n self._training_data = [dataset]\n\n formatted_model_inputs = botorch_model_class.construct_inputs(\n training_data=dataset, **input_constructor_kwargs\n )\n self._set_formatted_inputs(\n formatted_model_inputs=formatted_model_inputs,\n inputs=[\n [\n \"covar_module\",\n self.covar_module_class,\n self.covar_module_options,\n None,\n ],\n [\"likelihood\", self.likelihood_class, self.likelihood_options, None],\n [\"outcome_transform\", None, None, self.outcome_transform],\n [\"input_transform\", None, None, self.input_transform],\n ],\n dataset=dataset,\n botorch_model_class_args=botorch_model_class_args,\n robust_digest=kwargs.get(\"robust_digest\", None),\n )\n # pyre-ignore [45]\n self._model = botorch_model_class(**formatted_model_inputs)",
"def update_fit(self, X, y):\n pass",
"def update(self, s, a, y):\r\n features = self.featurize_state(s)\r\n self.models[a].partial_fit([features], [y])",
"def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)",
"def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, 
scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to 
the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), 
np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def fit_training_data(self):\n self.model.fit(self.X_train)",
"def _update_model(self, normalization_type='stats'):\n if self.num_acquisitions % self.model_update_interval == 0:\n\n # input that goes into the model (is unziped in case there are categorical variables)\n X_inmodel = self.space.unzip_inputs(self.X)\n\n # Y_inmodel is the output that goes into the model\n if self.normalize_Y:\n Y_inmodel = normalize(self.Y, normalization_type)\n else:\n Y_inmodel = self.Y\n\n self.model.updateModel(X_inmodel, Y_inmodel, None, None)",
"def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")",
"def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")",
"def update_model(self, masked_data, masked_binary_data, can_query):\n # Update the model after new data has been revealed, if required.\n self.model.update_model(masked_binary_data, self.previous_selections, self.question_encodings)",
"def _update_model(self, X_all, Y_all):\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.set_XY(X_all, Y_all)\n\n # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...\n if self.max_iters > 0:\n # --- update the model maximizing the marginal likelihood.\n if self.optimize_restarts==1:\n self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)\n else:\n self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)",
"def update_model(self, boards, outcomes, verbose=0):\n\t\t# Compile the model\n\t\tself.model.compile(\n\t\t\toptimizer='adam',\n\t\t\tloss='mse'\n\t\t)\n\n\t\t# Train the model\n\t\tmf = self.model.fit(boards, outcomes, verbose=verbose)\n\n\t\t# Crush the graph, maybe that is what is messing with RAM?\n\t\ttf.keras.backend.clear_session()",
"def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())",
"def fit(self, X, y, fit_intercept=False, normalize=False):\n # Store the unique class labels, we need them to predict the exact\n # class labels as were given.\n if isinstance(self.model, ClassifierMixin):\n self.classes_ = np.unique(y)\n\n # Remove the offset from X and y to compute the covariance later.\n # Also normalize X if the base model did so.\n self.fit_intercept = getattr(self.model, 'fit_intercept', fit_intercept)\n self.normalize = getattr(self.model, 'normalize', normalize)\n\n X, y_, X_offset, y_offset, X_scale = LinearModel._preprocess_data(\n X=X, y=y, fit_intercept=self.fit_intercept,\n normalize=self.normalize, copy=True,\n )\n if isinstance(self.model, RegressorMixin):\n y = y_\n else:\n y_offset = 0.\n\n # Fit the base model\n self.model.fit(X, y)\n\n if not hasattr(self.model, 'coef_'):\n raise RuntimeError(\n 'Model does not have a `coef_` attribute after fitting. '\n 'This does not seem to be a linear model following the '\n 'Scikit-Learn API.'\n )\n\n # Get the weight matrix\n W = self.model.coef_\n\n # For the next computations, ensure that y is a 2D array:\n # n_samples x n_targets\n flat_y = y.ndim == 1\n if flat_y:\n y = np.atleast_2d(y).T\n\n # Modify the original linear model and obtain a new one\n coef, pattern, normalizer = disassemble_modify_reassemble(\n W, X, y, self.cov, self.pattern_modifier, self.normalizer_modifier\n )\n\n # Store the decomposed model as attributes, so the user may inspect it\n if flat_y:\n self.coef_ = coef.ravel()\n else:\n self.coef_ = coef\n\n self.pattern_normalized_ = pattern\n self.pattern_ = pattern * X_scale[:, np.newaxis]\n\n self.normalizer_ = normalizer\n\n # Set intercept and undo normalization\n self._set_intercept(X_offset, y_offset, X_scale)\n self.inverse_intercept_ = X_offset - np.dot(y_offset, self.pattern_.T)\n\n return self",
"def update_model(path_to_data, path_to_model):\r\n # Open the annotation files.\r\n with open(path_to_data + r'\\coco_annotation.json') as f:\r\n coco_d = json.load(f)\r\n # Get the categories.\r\n categories = []\r\n for cat in coco_d['categories']:\r\n categories.append(cat['name'])\r\n\r\n # Register the new data.\r\n register_coco_instances(\"coco_update\", {}, path_to_data + r\"\\coco_annotation.json\", path_to_data)\r\n MetadataCatalog.get(\"meta_update\").set(thing_classes=categories)\r\n # MetadataCatalog.get(\"meta_update\").set(thing_classes=[\"Bad\", \"Good\"], thing_colors=[(172, 0, 0), (229, 0, 0)])\r\n\r\n # Set the parameters.\r\n cfg = get_cfg()\r\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\r\n cfg.DATASETS.TRAIN = (\"coco_update\",)\r\n cfg.OUTPUT_DIR = path_to_model\r\n cfg.DATASETS.TEST = ()\r\n cfg.DATALOADER.NUM_WORKERS = 2\r\n cfg.MODEL.DEVICE = 'cpu'\r\n cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\r\n cfg.SOLVER.IMS_PER_BATCH = 1\r\n cfg.SOLVER.BASE_LR = 0.00025\r\n cfg.SOLVER.MAX_ITER = 400\r\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 10\r\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(categories)\r\n\r\n # Update the model.\r\n trainer = DefaultTrainer(cfg)\r\n trainer.resume_or_load(resume=False)\r\n trainer.train()",
"def updateModel(self):\n model = self.model()\n if model is None:\n return\n \n model.duration = self.duration_base.value()\n model.duration_deviation = self.duration_deviation.value()\n model.feedback_source = self.feedback_source.text()\n model.feedback_type = self.feedback_type.currentText()\n model.random_bound = self.random_bound.currentText()\n model.video_path = self.video_path.text()\n model.message = self.message.text()\n model.voiceover = self.voiceover.isChecked()\n model.mock_signal_path = self.mock_signal_path.text()\n model.mock_signal_dataset = self.mock_signal_dataset.text()\n model.mock_previous = self.mock_previous.value()\n model.mock_previous_reverse = self.mock_previous_reverse.isChecked()\n model.mock_previous_random = self.mock_previous_random.isChecked()\n model.start_data_driven_filter_designer = self.start_data_driven_filter_designer.isChecked()\n model.pause = self.pause.isChecked()\n model.beep = self.beep.isChecked()\n model.update_statistics = self.update_statistics.isChecked()\n model.statistics_type = self.statistics_name_to_type[self.statistics_type.currentText()]",
"def update(self, instance, validated_data):\n pass",
"def fit(self, X, y):\n self.model_x = X\n self.model_y = y",
"def partial_fit(self, X, y=None):\n # update model on a minibatch\n self.logger.info(self.__name__ +\n ' is updated on dataset with {:d} samples and {:d} features.'.\n format(X.shape[0], X.shape[1]))\n pass",
"def update(self, data):\n # get a dictionary of traits based on the class definition\n traits = self.traits()\n\n # process update data based on model traits\n for (trait_name, trait) in iteritems(traits):\n # skip traits that aren't going to be updated\n if trait_name not in data.keys():\n continue\n\n # based on input to this function\n given_value = data[trait_name]\n\n # based on the existing value on the model\n current_value = getattr(self, trait_name)\n\n # Use this to defer assignment on to self. By default, assume it\n # should be the new value.\n deferred_value = given_value\n\n # The only special cases worth handling are field.Instance,\n # field.List and field.Dict, the other cases can probably be\n # handled by just directly setting the value.\n if isinstance(trait, field.Instance):\n # Use recursion to call the current model instance or value's\n # update method (Model.update) with this dictionary data.\n if isinstance(given_value, dict):\n current_value.update(given_value)\n deferred_value = current_value\n else:\n if isinstance(trait, field.List):\n # make a new list\n deferred_value = []\n\n # Grab the inner trait type for this list. Could be None.\n # This is the specific type that should be used for each of\n # the elements of the list. In the case of None, it may be\n # any value, although model instances can't be\n # reconstructed in that scenario.\n inner_trait_type = trait._trait\n\n # Determine if inner_trait_type is a field.Instance, and\n # then whether or not it is a Model.\n is_model = False\n if isinstance(inner_trait_type, TraitType) and hasattr(inner_trait_type, \"klass\"):\n if issubclass(inner_trait_type.klass, Model):\n is_model = True\n\n # Append each value to the new list. Create new model\n # instances if necessary.\n for some_given_value in given_value:\n # handle model instances that need to be created\n if is_model:\n # Can't use Model.update here because the list is\n # probably unordered, and updating the wrong\n # elements is very wrong. Another implementation\n # could be created for ordered collections, though.\n some_given_value = inner_trait_type.klass.create(**some_given_value)\n deferred_value.append(some_given_value)\n elif isinstance(trait, field.Dict):\n # TODO\n pass\n\n # Set the deferred_value on to the current model at the appropriate\n # trait key (trait_name).\n setattr(self, trait_name, deferred_value)",
"def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True"
]
| [
"0.61704075",
"0.60705763",
"0.60033774",
"0.5971347",
"0.5954025",
"0.5933478",
"0.59277827",
"0.5849612",
"0.583745",
"0.57466644",
"0.5739969",
"0.571057",
"0.56760395",
"0.5675398",
"0.56300014",
"0.55561376",
"0.55544794",
"0.5538727",
"0.55382186",
"0.55081874",
"0.5489829",
"0.5476584",
"0.5475252",
"0.5469701",
"0.54502237",
"0.5448435",
"0.5443599",
"0.5442111",
"0.5440373",
"0.5420176"
]
| 0.6504125 | 0 |
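A minimal sketch of the guard-and-delegate pattern in the record above (an ``update`` that refuses to run on manually constructed models and otherwise just re-``fit``s). The class and attribute names below are invented for the example and are not the Ax/BoTorch API.

```python
from typing import Any, List, Optional


class MiniSurrogate:
    """Toy stand-in for a surrogate wrapper; not the real Ax `Surrogate` class."""

    def __init__(self) -> None:
        self._constructed_manually = False  # a from_botorch-style path would set this True
        self._fitted_datasets: Optional[List[Any]] = None

    def fit(self, datasets: List[Any], refit: bool = True, **kwargs: Any) -> None:
        # A real surrogate would construct and train the underlying model here.
        self._fitted_datasets = list(datasets)

    def update(self, datasets: List[Any], **kwargs: Any) -> None:
        # Same shape as the record above: guard first, then delegate to `fit` on the full data.
        if self._constructed_manually:
            raise NotImplementedError(
                "`update` is not supported for manually constructed models."
            )
        self.fit(datasets=datasets, **kwargs)


surrogate = MiniSurrogate()
surrogate.fit(datasets=[("X", "Y")])
surrogate.update(datasets=[("X", "Y"), ("X_new", "Y_new")])  # simply refits on everything
```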
For multiobjective optimization, retrieve the Pareto frontier instead of the best point. | def pareto_frontier(self) -> Tuple[Tensor, Tensor]:
raise NotImplementedError("Pareto frontier not yet implemented.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pareto_front_cut(self):\n return self.NDA([kernel.objective_values for kernel in self.kernels \\\n if kernel.objective_values is not None],\n self.reference_point)",
"def _calc_pareto_front(self, *args, **kwargs):\n fname = f\"{self.fct.name}_PF.dat\"\n F = load_pareto_front_from_file(os.path.join(\"modact\", fname))\n if F is not None:\n return F*self.weights*-1",
"def _closest_front_opponent(self, raw_obs, o, target):\n delta = target - o\n min_d = None\n closest = None\n for p in raw_obs['right_team']:\n delta_opp = p - o\n if np.dot(delta, delta_opp) <= 0:\n continue\n d = self._object_distance(o, p)\n if min_d is None or d < min_d:\n min_d = d\n closest = p\n\n # May return None!\n return closest",
"def pareto_front_uncut(self):\n f_pairs = [k.objective_values for k in self\n if k.objective_values is not None]\n # TODO: this should preferably all be done in self.NDA\n def reference(f_pairs):\n reference = []\n for i in [0, 1]: # make a reference that is dominated by all f_pairs\n reference[i] = max(f[i] for f in f_pairs)\n reference[i] = 1.1**np.sign(reference[i]) * reference[i] + 1\n return reference\n if not f_pairs:\n if self.reference_point in (None, (), [], {}): # should never happen\n warnings.warn(\"Sofomore.pareto_front_uncut: self.reference_point = %s\"\n \" (#kernels=%d) which should never happen\"\n % (str(self.reference_point), len(self)))\n return []\n return self.NDA(f_pairs, self.reference_point)\n return self.NDA(f_pairs, reference(f_pairs))",
"def get_approx_preRes(self, connectivity_threshold, index = 0):\n if self.flip_the_script:\n index = np.random.randint(len(self.book_indices))\n #print(\"index\", index, \"book indices\", self.book_indices, \"self.library\", self.library)\n book = self.library[self.book_indices[index]]\n if index != 0:\n printc(\"retrieving book from library\" + str(self.book_indices[index]), 'green')\n return book.get_approx_preRes(connectivity_threshold)",
"def pareto_frontier(cmrf,featlist) :\n\tQ = []\n\ttaboodict = {}\n\tnStates = len(featlist)\n\tfeat1,feat2 = featlist\n\tEaxa,Xa = cmrf.decode(feat1)\n\tEbxb,Xb = cmrf.decode(feat2)\n\tif Xa == Xb : \n\t\treturn [Xa],[(Eaxa,Ebxb)]\n\tEaxb = cmrf.score(Xb,feat1)\n\tEbxa = cmrf.score(Xa,feat2)\n\tQ.append((Xa,Xb))\n\tfrontier,frontier_energy = [],[]\n\tfrontier.extend([Xa,Xb])\n\tfrontier_energy.extend([(Eaxa,Ebxa),(Eaxb,Ebxb)])\n\ttaboodict[(Eaxa,Ebxa)] = 1;\n\ttaboodict[(Eaxb,Ebxb)] = 1;\n\twhile len(Q) > 0 :\n\t\t### Optimize \n\t\tXa,Xb = Q[0]\n\t\tQ = Q[1:] # Dequeue\n\t\tEaxb = cmrf.score(Xb,feat1)\n\t\tEbxa = cmrf.score(Xa,feat2)\t\n\t\tEaxa = cmrf.score(Xa,feat1)\n\t\tEbxb = cmrf.score(Xb,feat2)\t\n\t\tm = (Ebxa - Ebxb)/(Eaxa-Eaxb)\n\t\tif m > 0 : \n\t\t\t#stop()\n\t\t\tsys.stderr.write(\"### WARNING : Slope > 0. Cvxhull failed\")\n\t\t\treturn frontier,frontier_energy\n\t\tthetaa = -m/(1-m)\n\t\tthetab = 1/(1-m)\n\t\ttmrf = TMRF(cmrf,[thetaa,thetab],[feat1,feat2])\n\t\tXab = tmrf.decode()[1]\n\t\tEaxab = cmrf.score(Xab,feat1)\n\t\tEbxab = cmrf.score(Xab,feat2)\n\t\tif Xab != Xa and Xab != Xb and \\\n\t\t\tnot taboodict.has_key((Eaxab,Ebxab)) :\n\t\t\t# Check almost equal condition\n\t\t\tif any(map(lambda(x):almost_eq(Eaxab,x[0] or \\\n\t\t\t\talmost_eq(Ebxab,x[1])),taboodict.keys())) : \n\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tfrontier.append(Xab)\n\t\t\tfrontier_energy.append((Eaxab,Ebxab))\n\t\t\ttaboodict[(Eaxab,Ebxab)]=1\n\t\t\tQ.extend([(Xa,Xab),(Xab,Xb)])\n\t# Calculate energy of frontier elements\t\n\treturn frontier,frontier_energy",
"def get_best_particle(self):\n index = self.weights.argmax()\n return self.particles[index, :]",
"def _get_lip_best(self) -> float:\n pass",
"def find_nearest_frontier(self,frontier):\n #calculate manhatan distance \n dist = abs(self.rob_pos[0]-frontier[0]) + abs(self.rob_pos[1]-frontier[1])\n if dist < self.min_dist and 15 < self.min_dist:\n self.min_dist = dist\n self.nearest = frontier",
"def pareto_set_cut(self):\n return [kernel.incumbent for kernel in self.kernels if \\\n kernel.objective_values in self.pareto_front_cut]",
"def FindClosestPoint(self, ):\n ...",
"def GetPts(self):\n return self.best",
"def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]",
"def findBestChokepoint(self, iPlayer=-1, bCheckForVisibility=false):\r\n\t\tpBestPlot = -1\r\n\t\tiBestValue = -1\r\n\t\tpPlayer = -1\r\n\t\tiTeam = -1\r\n\t\tif (iPlayer >= 0):\r\n\t\t\tpPlayer = gc.getPlayer(iPlayer)\r\n\t\t\tiTeam = pPlayer.getTeam()\r\n\r\n\t\tiFeatNebula = gc.getInfoTypeForString('FEATURE_ICE')\r\n\t\tiFeatAsteroid = gc.getInfoTypeForString('FEATURE_FOREST')\r\n\t\t\t\r\n\t\tiMaxRange = max(CyMap().getGridWidth() / 2, 60)\r\n\t\tprintd(\"findBestChokepoint iMaxRange = %d\" % (iMaxRange))\t\t\r\n\t\tfor iPlotLoop in range(CyMap().numPlots()):\r\n\t\t\tpLoopPlot = CyMap().plotByIndex(iPlotLoop)\r\n\t\t\t\r\n\t\t\t# If we're supposed to be checking for a player's visibility then only check this plot if it's revealed\r\n\t\t\tif (bCheckForVisibility):\r\n\t\t\t\tif (not pLoopPlot.isRevealed(iTeam, false)):\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t# CP - Check the plot being rated to see if it already belongs to someone else.\r\n\t\t\tiPlotOwner = pLoopPlot.getOwner()\r\n\t\t\tif ((iPlotOwner != -1) and (iPlotOwner != iPlayer)):\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# Don't build anywhere except in empty space & asteroids\r\n\t\t\tif (pLoopPlot.getFeatureType() != -1 and pLoopPlot.getFeatureType() != iFeatAsteroid):\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tiDistanceFromCapital = CyMap().getGridWidth()\r\n\t\t\t\r\n\t\t\tif (pPlayer.getCapitalCity()):\r\n\t\t\t\tiDistanceFromCapital = CyMap().calculatePathDistance(pPlayer.getCapitalCity().plot(), pLoopPlot)\r\n\t\t\t\r\n\t\t\t# Don't look too far away (performance, more than anything)\r\n\t\t\tif (iDistanceFromCapital > 0 and iDistanceFromCapital < iMaxRange):\r\n\t\t\t\t\r\n\t\t\t\tif iDistanceFromCapital < 4 : # Discourage it from building sensor stations right next to the capital\r\n\t\t\t\t\tiDistanceValueMod = -9 # it will also get a penalty down below for being close to a star system if it is within 2\r\n\t\t\t\telse : # Highest distance scores in the zone from 1/6 iMaxRange to 2/3 iMaxRange, in this zone iDistanceValueMod will be iMaxRange/6\r\n\t\t\t\t\t# modified for post 1.72: adjust the highest scoring zone to only extend out to 1/2 iMaxRange; BTW the above should have read \"iMaxRange/12\" due to the \"/ 2\" at the end of the next line\r\n\t\t\t\t\tiDistanceValueMod = ((2 * min( iDistanceFromCapital, iMaxRange/6)) - max( iDistanceFromCapital - (iMaxRange / 2), 0)) / 2\r\n\t\t\t\t\r\n\t\t\t\tiPlotValue = 0\r\n\t\t\t\tiNumNebula = 0\r\n\t\t\t\tiNumAdjacentNebula = 0\r\n\t\t\t\tiNumAsteroid = 0\r\n\t\t\t\tiNumDamaging = 0\r\n\t\t\t\tiNumOurs = 0 # post 1.72\r\n\t\t\t\tiNumTheirs = 0 # post 1.72\r\n\t\t\t\tfor iXSearchLoop in range(pLoopPlot.getX()-2, pLoopPlot.getX()+3):\r\n\t\t\t\t\tfor iYSearchLoop in range(pLoopPlot.getY()-2, pLoopPlot.getY()+3):\r\n\t\t\t\t\t\t# If the map does not wrap and the plot is not on the map give a small penalty and skip to the next.\r\n\t\t\t\t\t\t# Note that if the plot is off the map then all plots in that row and/or column are off too\r\n\t\t\t\t\t\t# so it will actually be at least 5 plots that give this penalty.\r\n\t\t\t\t\t\tif not CyMap().isPlot(iXSearchLoop, iYSearchLoop):\r\n\t\t\t\t\t\t\tiPlotValue -= 3\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tpSearchPlot = CyMap().plot(iXSearchLoop, iYSearchLoop)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Don't search unseen plots in range of the one we're looking at either\r\n\t\t\t\t\t\tif (bCheckForVisibility):\r\n\t\t\t\t\t\t\tif (not pSearchPlot.isRevealed(iTeam, false)):\r\n\t\t\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\t\t#Build sensor stations 
near chokepoints -- TC01\r\n\t\t\t\t\t\tiFeature = pSearchPlot.getFeatureType()\r\n\t\t\t\t\t\tif iFeature == iFeatNebula:\r\n\t\t\t\t\t\t\tiNumNebula += 1\r\n\t\t\t\t\t\t\tif (abs(iXSearchLoop - pLoopPlot.getX()) <= 1) and (abs(iYSearchLoop - pLoopPlot.getY()) <=1):\r\n\t\t\t\t\t\t\t\tiNumAdjacentNebula += 1\r\n\t\t\t\t\t\telif iFeature == iFeatAsteroid:\r\n\t\t\t\t\t\t\tiNumAsteroid +=1\r\n\t\t\t\t\t\telif (iFeature != -1) and (gc.getFeatureInfo(iFeature).getTurnDamage() > 0): # bug fix - make sure there is a feature before trying to get the info for it, taking advantage of the short-circuit conditional evaluation\r\n\t\t\t\t\t\t\tiNumDamaging += 1\r\n\t\t\t\t\t\telif iFeature == gc.getInfoTypeForString('FEATURE_SOLAR_SYSTEM'):\r\n\t\t\t\t\t\t\tiPlotValue -= 22 # reduce value a lot if near a star system\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t#If other stations are present, no build -- TC01\r\n\t\t\t\t\t\tfor iUnit in range(pSearchPlot.getNumUnits()):\r\n\t\t\t\t\t\t\tpOtherStarbase = pSearchPlot.getUnit(iUnit)\r\n\t\t\t\t\t\t\tif pOtherStarbase.isStarbase():\r\n\t\t\t\t\t\t\t\t# iPlotValue = 0\r\n\t\t\t\t\t\t\t\tiPlotValue -= 99\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t# post 1.72 AI update: count the number of plots that are ours, and the number that are someone else's\r\n\t\t\t\t\t\tiOwner = pSearchPlot.getOwner()\r\n\t\t\t\t\t\tif iOwner != -1 :\r\n\t\t\t\t\t\t\tif iOwner == iPlayer :\r\n\t\t\t\t\t\t\t\tiNumOurs += 1\r\n\t\t\t\t\t\t\telse :\r\n\t\t\t\t\t\t\t\tiNumTheirs += 1\r\n\r\n\t\t\t\t# Some nebula is a good indication of a choke point.\r\n\t\t\t\t# Too much is an indication that we are in a box canyon.\r\n\t\t\t\t# If there are 7 or more adjacent nebula plots, then this is a bad location. Otherwise:\r\n\t\t\t\t# As a guess, make it increase the value for more up to a max value at 13, then decrease fairly rapidly.\r\n\t\t\t\t# Give a score of 0 for 0, increaseing by 3 per nebula up to a score of 39 at 13 through 15,\r\n\t\t\t\t# then decreasing by 5 per nebula over 15.\r\n\t\t\t\t# This is -1 at 23, -6 at 24 and -11 at 25 (which is not a valid location anyway; neither is one\r\n\t\t\t\t# with 23 or 24 since it is unreachable from the capital so the iDistanceFromCapital condition\r\n\t\t\t\t# rules it out before we get here).\r\n\t\t\t\t# Additionally, if there are more than 4 (i.e. 
5 or 6) immediately adjacent nebula plots, give a\r\n\t\t\t\t# small penalty of -2.\r\n\t\t\t\t# Tweak for post 1.72: change the \"sweet spot\" from 13-15 to 12-14,\r\n\t\t\t\t#\tthis would give a value of +36 in this range instead of the +39 that it got in the old range\r\n\t\t\t\t#\tso just add 3 as well, giving 3 at 0 up to 39 at 12-14, 34@15, 29@16, 24@17, 19@18, 14@19...\r\n\t\t\t\t#\tbut make the adjacent nebula thing give -5 instead of -2\r\n\t\t\t\tif iNumAdjacentNebula > 6 :\r\n\t\t\t\t\tiPlotValue -= 99\r\n\t\t\t\telse:\r\n\t\t\t\t\tiPlotValue += ( 3 * min( iNumNebula, 12)) - ( 5 * max( iNumNebula - 14, 0)) + 3\r\n\t\t\t\t\tif iNumAdjacentNebula > 4 :\r\n\t\t\t\t\t\tiPlotValue -= 5 \r\n\r\n\t\t\t\t# A few asteroids are OK, but they block the visibility (and visibility is the whole point of a sensor station)\r\n\t\t\t\t# With 0 no change, then +5 for 1-3 (which is the max bonus), then -1 for each over 3.\r\n\t\t\t\t# Note that there is still a bonus for being on top of asteroids given later.\r\n\t\t\t\tiPlotValue += ( 5 * min( iNumAsteroid, 1)) - max( iNumAsteroid - 3, 0)\r\n\t\t\t\t\r\n\t\t\t\t# Damaging features are good, but too many is not as good since the area will tend to be avoided and\r\n\t\t\t\t# it is probably between two black holes/supernovas (which is a good chokepoint, but bad for the visibility\r\n\t\t\t\t# aspect since looking at a lot of such plots is rather pointless).\r\n\t\t\t\t# Give +2 per, up to a max of +30 at 15, then -1 per damaging feature over 15\r\n\t\t\t\t# Tweak for post 1.72: change reduction for being over 15 from -1 to -1.5\r\n\t\t\t\tiPlotValue += ( 2 * min( iNumDamaging, 15)) - (3 * max( iNumDamaging - 15, 0) / 2)\r\n\t\t\t\t\r\n\t\t\t\tiPlotValue += iDistanceValueMod\r\n\r\n\t\t\t\t# Little extra bonus for being in Asteroids (defense)\r\n\t\t\t\tif (pLoopPlot.getFeatureType() == iFeatAsteroid):\r\n\t\t\t\t\tiPlotValue += 4\t\t#How small should it be?\r\n\t\t\t\t\t\r\n\t\t\t\t# post 1.72 AI update: a few plots of ours in the area increase the value slightly and\r\n\t\t\t\t# more than a few reduce the bonus, and even more give a penalty. Plots that belong to\r\n\t\t\t\t# someone else each give a -1.\r\n\t\t\t\t# Don't want to shift the score much so currently using +3 per ours for the first 3\r\n\t\t\t\t# then -2 per ours over 5 (so the max of +9 for 3-5), and -1 per other's\r\n\t\t\t\tiPlotValue += ( 3 * min( iNumOurs, 3)) - ( 2 * max( iNumOurs - 5, 0))\r\n\t\t\t\tiPlotValue -= iNumTheirs\r\n\r\n\t\t\t\t# If this plot has the most resources in range from what we've found\r\n\t\t\t\tif (iPlotValue > iBestValue):\r\n\t\t\t\t\tiBestValue = iPlotValue\r\n\t\t\t\t\tpBestPlot = pLoopPlot\r\n\t\t\t\t\r\n\t\t\t\tprintd(\"plot %d (%d,%d) value = %d (distance=%d (for %d), NumNebula=%d (adjacent=%d), NumAsteroid=%d, NumDamaging=%d)\" % \r\n\t\t\t\t\t\t(CyMap().plotNum(pLoopPlot.getX(), pLoopPlot.getY()), pLoopPlot.getX(), pLoopPlot.getY(), \r\n\t\t\t\t\t\tiPlotValue, iDistanceFromCapital, iDistanceValueMod, iNumNebula, iNumAdjacentNebula, iNumAsteroid, iNumDamaging))\r\n\t\t\t\t\t\r\n\t\tprintd(\"* best plot = %d (%d,%d), value = %d\" % (CyMap().plotNum(pBestPlot.getX(), pBestPlot.getY()), pBestPlot.getX(), pBestPlot.getY(), iBestValue))\r\n\t\t\t\r\n\t\treturn [pBestPlot, iBestValue]",
"def _min_max(study):\n\n # Iterate pareto-front trials storing mean correlation and std dev\n df = []\n for trial in study.best_trials:\n df.append([trial.number, np.mean(trial.values), np.std(trial.values)])\n\n # Sort dataframe ascending by mean correlation\n df = pd.DataFrame(df).sort_values(by=2, ascending=True)\n\n # Sort df with best trial in first row\n if len(df) > 1 and len(df.iloc[:, 1:3].drop_duplicates()) > 1:\n\n # Create second pareto to maximize correlation and minimize stddev\n # Epsilons define precision, ie dominance over other candidates\n # Dominance is defined as x percent of stddev of stddev\n try:\n nd = pareto.eps_sort([list(df.itertuples(False))], objectives=[1, 2],\n epsilons=[1e-09, np.std(df[1])*.5], maximize=[1])\n except:\n # Something went wrong, return df\n nd = df\n\n # Sort remaining candidates\n nd = pd.DataFrame(nd).sort_values(by=2, ascending=True)\n\n # Only 1st trial so return it\n else:\n nd = df\n\n # Return \"best\" trial index\n return nd.iloc[0, 0]",
"def isPareto(leaf): \n # determine the deminsion of point's objective\n dim = len(leaf.problem.objectives) \n # recorganize all the visited points together into one sorted array\n _visitedPoints = utils.dictToSortedNumpyArray(leaf.visitedPoints(),dim) \n # check whether the leaf contains the Pareto solution\n flag = 0\n pool = leaf.pool\n for key in pool:\n _p = np.array([pool[key].mean])\n dominantionCount = _cutils.calDominationCount(_p, _visitedPoints, len(_p))[1][0] \n if dominantionCount == 0: \n flag = 1\n break \n return flag",
"def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, ml_screen_ref)",
"def get_starting_point(self, Otrain, Ftrain, y):\n return self.get_curve_fmin(Otrain, Ftrain, [y])\n # xx = np.linspace(np.min(Otrain), np.max(Otrain), 50)\n # scores, xx = self.compute_scores(Otrain, Ftrain, y, xx)\n # bestScore = np.max(scores)\n # Ibest = np.where(scores == bestScore)[0]\n # x = xx[Ibest[0]]\n return x",
"def _pareto_front(teams_population, novelty):\r\n for team in teams_population:\r\n team.dom_by_ = 0\r\n team.dom_of_ = 0\r\n\r\n front = []\r\n dominateds = []\r\n for teamA in teams_population:\r\n for teamB in teams_population:\r\n # check if there are teams that have a better or equal [fitness, novelty] and that are better in at least \r\n # one of the dimensions. If yes, then teamA is dominated by these teams.\r\n if ParetoDominanceForTeams._is_dominated(teamA, teamB, novelty):\r\n teamA.dom_by_ += 1\r\n teamB.dom_of_ += 1\r\n if teamA not in dominateds:\r\n dominateds.append(teamA)\r\n if teamA.dom_by_ == 0:\r\n front.append(teamA)\r\n\r\n # use this score to balance the teams between remove and keep\r\n for team in teams_population:\r\n team.submission_score_ = team.dom_by_/float(len(teams_population)) # use it to add teams to the front (the lower, the better)\r\n team.dominance_score_ = team.dom_of_/float(len(teams_population)) # use it to remove teams from the front (the higher, the better)\r\n return front, dominateds",
"def find_pareto(X, y):\n y_copy = np.copy(y)\n pareto_front = np.zeros((0,y.shape[1]))\n pareto_set = np.zeros((0,X.shape[1]))\n i = 0\n j = 0\n while i < y_copy.shape[0]:\n y_outi = np.delete(y_copy, i, axis =0)\n #paretoだったら全部false\n flag = np.all(y_outi <= y_copy[i,:],axis = 1)\n if not np.any(flag):\n pareto_front = np.append(pareto_front, [y_copy[i,:]],axis = 0)\n pareto_set = np.append(pareto_set, [X[j,:]],axis = 0)\n i += 1\n else :\n y_copy = np.delete(y_copy, i, axis= 0)\n j += 1\n return pareto_front, pareto_set",
"def best_first_graph_search_show_frontier(problem, f,showFrontier = True):\n f = memoize(f, 'f')\n node = Node(problem.initial)\n frontier = PriorityQueue('min', f)\n frontier.append(node)\n explored = set()\n while frontier:\n\n print(\"Explored ==>\",explored) \n print(\"Frontier ==> \",frontier.heap)\n print()\n node = frontier.pop()\n print(\"Current ==> \",node.state)\n print(\"Eval Function ==> \",f(node))\n \n \n if problem.goal_test(node.state):\n return node\n\n explored.add(node.state)\n for child in node.expand(problem):\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n elif child in frontier:\n if f(child) < frontier[child]:\n del frontier[child]\n frontier.append(child)\n \n return None",
"def fit_to_pareto(self, x, y, mX = 1, mY = 1, chart = True, order = 3, doublefront = None):\n x = self.xy[x]\n y = self.xy[y]\n # Find pareto front:\n if doublefront == 'x':\n xp1, yp1 = self.pareto_frontier(x,y, mX, mY)\n xp2, yp2 = self.pareto_frontier(x,y, not mX, mY)\n xp = xp1 + xp2\n yp = yp1 + yp2\n else:\n xp, yp = self.pareto_frontier(x,y, mX, mY)\n\n # Conversion to float is required for fitting\n try:\n xp = [float(i) for i in xp]\n yp = [float(i) for i in yp]\n except:\n # Launch date needs additional handling\n xp = [epoch_from_string(i.strftime(\"%Y-%m-%d %H-%M-%S.%f\")).mjd2000 for i in xp]\n yp = [float(i) for i in yp]\n\n # Sort, in case of broken order in doublefront\n myList = sorted([[xp[i], yp[i]] for i in range(len(xp))], reverse=0)\n xp = [pair[0] for pair in myList]\n yp = [pair[1] for pair in myList]\n\n x_to_fit = []\n y_to_fit = []\n # select fitting peroid\n for i in range(len(xp)):\n if self.dateend >= xp[i] >= self.datestart:\n x_to_fit.append(xp[i])\n y_to_fit.append(yp[i])\n\n z = np.polyfit(x_to_fit, y_to_fit, order) #fitnij polynomial\n f = np.poly1d(z) #zbuduj polynimoala\n values = f(x_to_fit)\n\n if order == 3:\n # https://stackoverflow.com/questions/24065904/numpy-calculate-polynom-efficiently\n fast_f = lambda x: z[3] + x*(z[2] + x*(z[1] + x*z[0]))\n fast_fitted = [fast_f(i) for i in x_to_fit]\n #print x_to_fit[0]\n else:\n fast_f = False\n fast_fitted = values\n\n if chart:\n plt.plot(x_to_fit, y_to_fit, 'o')\n plt.plot(x_to_fit, values, '--')\n plt.plot(x_to_fit, fast_fitted, '-')\n plt.tight_layout()\n plt.show()\n\n return f, fast_f # returnij polynomiala",
"def getNearestPreference(self, myABR):\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1 and (enemyShip.myShipHull.abr in globals.targetPreference[myABR]):\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n return closestShip",
"def find_top_low_pts3(self, polydata_endo_epi):\n\n # 1. sort the points of the cut slice in order to easily find top points\n sorted_pts_endo_epi = self.sort_cutpoly_points(polydata_endo_epi)\n\n # 2. top points are easily found as ..\n top_point_1 = sorted_pts_endo_epi[0][0]\n top_point_2 = sorted_pts_endo_epi[0][-1]\n top_points = [top_point_1, top_point_2]\n\n # 3. lowest point is found by finding furthest distant point from the middle point\n middle_point = (top_point_1 + top_point_2)/2.0\n dists = scipy.spatial.distance.cdist(sorted_pts_endo_epi[0],middle_point.reshape((1,3)))\n lowest_pt_idx = np.argmax(dists)\n lowest_pt = sorted_pts_endo_epi[0][lowest_pt_idx]\n\n # make function to display top points and lowest point\n display_special_pts = 0\n if display_special_pts:\n sepi = include_points(list(sorted_pts_endo_epi[1]),\n len(list(sorted_pts_endo_epi[1])), 7, (0,1,0))\n sendo = include_points(list(sorted_pts_endo_epi[0]),\n len(list(sorted_pts_endo_epi[0])), 7, (0,1,0))\n a1 = include_points(list(lowest_pt), 1, 15, (1,0,0))\n a2 = include_points(list(top_point_1), 1, 15, (1,0,0))\n a3 = include_points(list(top_point_2), 1, 15, (1,0,0))\n ren = vtk.vtkRenderer()\n ren.AddActor(sendo)\n ren.AddActor(a1)\n ren.AddActor(sepi)\n ren.AddActor(a2)\n ren.AddActor(a3)\n vtk_show(ren)\n\n return lowest_pt, lowest_pt_idx, top_points, sorted_pts_endo_epi",
"def closest_object(geometries, point): \n min_dist, min_index = min((point.distance(geom), k) \n for (k, geom) in enumerate(geometries))\n \n return geometries[min_index], min_dist, min_index",
"def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]",
"def normalize_pobj(pobj, best_pobj=None, normalize_method='best'):\n if normalize_method == 'best':\n assert best_pobj is not None\n pobj = (pobj - best_pobj) / best_pobj\n elif normalize_method == 'last':\n pobj = [(p - p.min()) / p.min() for p in pobj]\n pobj = [p / p[0] if p[0] != 0 else p for p in pobj]\n elif normalize_method == 'diff':\n pobj = [-np.diff(p) for p in pobj]\n elif normalize_method in [None, 'short']:\n pass\n else:\n raise ValueError('unknown normalize_method: %s' % normalize_method)\n\n return pobj",
"def getHighestRank_Toilet(self):\n\n # filter out low confidences\n #maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n #p = [p for p in self.Predictors if p.confidence == maxConfidence]\n \n \n p = self.Predictors\n \n if len(p) == 1:\n # only one predictor has high confidence\n chosenPredictor = p[0]\n elif len(p) > 1:\n random.shuffle(p, random = rps.randomRange)\n \n # drop the first 37% and grab the best \n drop = round(len(p) * 0.37) - 1\n initial = p[:drop]\n maxConfidence = max(initial, key=operator.attrgetter('confidence'))\n maxConfidence = maxConfidence.confidence\n \n toCheck = p[drop:]\n for p in toCheck:\n if p.confidence >= maxConfidence:\n chosenPredictor = p\n break\n else:\n chosenPredictor = toCheck[-1]\n \n rankConfidence = chosenPredictor.confidence\n return chosenPredictor, rankConfidence",
"def get_best_anticipation(self, perception: Perception) -> Perception:\n return self.effect.get_best_anticipation(perception)",
"def get_best(algo):\n _, best_tuple = algo.get_best_tours(algo.tours)\n best = algo.tours[best_tuple[0]]\n cities = []\n for i in best.cities:\n cities.append(i)\n return cities, best.get_cost()"
]
| [
"0.6535724",
"0.6393786",
"0.5591595",
"0.5555255",
"0.54125065",
"0.5407732",
"0.53989637",
"0.5383117",
"0.53506523",
"0.534688",
"0.5342991",
"0.5335419",
"0.5257751",
"0.5256599",
"0.5251918",
"0.5244487",
"0.5146469",
"0.5136885",
"0.5126721",
"0.51253355",
"0.5110239",
"0.50968796",
"0.50807655",
"0.50553966",
"0.502497",
"0.5023409",
"0.502054",
"0.5019668",
"0.5005423",
"0.50010866"
]
| 0.7016293 | 0 |
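The positive document in the record above only raises ``NotImplementedError``, while the negatives all revolve around actually computing a Pareto front. For reference, a minimal non-dominated filter for objectives that are all minimized could look like the sketch below; the function name and the minimization convention are assumptions made for the example, not part of the dataset.

```python
import numpy as np


def pareto_front(points: np.ndarray) -> np.ndarray:
    """Return the rows of `points` that no other row dominates (all objectives minimized)."""
    points = np.asarray(points, dtype=float)
    keep = np.ones(len(points), dtype=bool)
    for i, p in enumerate(points):
        # q dominates p if q <= p in every objective and q < p in at least one.
        dominators = np.all(points <= p, axis=1) & np.any(points < p, axis=1)
        keep[i] = not dominators.any()
    return points[keep]


objectives = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 1.0], [2.5, 2.5]])
print(pareto_front(objectives))  # [2.5, 2.5] is dominated by [2.0, 2.0] and drops out
```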
Serialize attributes of this surrogate, to be passed back to it as kwargs on reinstantiation. | def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:
if self._constructed_manually:
raise UnsupportedError(
"Surrogates constructed manually (ie Surrogate.from_botorch) may not "
"be serialized. If serialization is necessary please initialize from "
"the constructor."
)
return {
"botorch_model_class": self.botorch_model_class,
"model_options": self.model_options,
"mll_class": self.mll_class,
"mll_options": self.mll_options,
"outcome_transform": self.outcome_transform,
"input_transform": self.input_transform,
"covar_module_class": self.covar_module_class,
"covar_module_options": self.covar_module_options,
"likelihood_class": self.likelihood_class,
"likelihood_options": self.likelihood_options,
"allow_batched_models": self.allow_batched_models,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize(self):\n pass",
"def serialize(self):\n raise NotImplementedError(\"Abstract class, implemented in sub class\")",
"def _serialise(self):\n # TODO (M Foley)\n pass",
"def serialize(self, *args, **kwargs): # pylint: disable = unused-argument\n attribs = {}\n depth = kwargs.get('depth', float('inf'))\n calls = kwargs.get('calls', 0)\n ignore_protected = kwargs.get('ignore_protected', False)\n for attr in dir(self):\n if self.class_attr(attr, ignore_protected):\n attribs[attr] = self.unpack(getattr(self, attr), depth=depth,\n calls=calls, ignore_protected=ignore_protected)\n return {type(self).__name__: attribs}",
"def data(self):\n retval = copy.deepcopy(self.__dict__)\n retval[\"_Serializable_classname\"] = type(self).__name__\n retval[\"_Serializable_version\"] = \"1.0\"\n return retval",
"def serialize(self, obj):\n pass",
"def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'description' : self.description,\n 'is_private' : self.is_private,\n }",
"def serialize(self):",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }",
"def serialize(self):\n\t\treturn {\n\t\t\t'name' : self.name,\n\t\t\t'id' : self.id,\n\t\t\t'description' : self.description,\n\t\t\t'kind_of_thing' : self.kind_of_thing,\n\t\t}",
"def serialize(self):\n\t\treturn {\n\t\t\t'name' : self.name,\n\t\t\t'id' : self.id,\n\t\t}",
"def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }",
"def __serialize__(self):\n return {\"_custom_type\" : self.__class__.__name__,\n \"name\" : self.name,\n \"src\" : self.src,\n \"exec_loc\" : self.exec_loc,\n \"precompiled\" : self.precompiled}",
"def serialize(self):\n data = {}\n\n for k, v in self.__dict__.items():\n if not k.startswith('__'):\n data[k] = v\n\n return data",
"def serialize(self):\n\t\treturn {\n\t\t\t\"id\": self.id,\n\t\t\t\"name\": self.name\n\t\t}",
"def serialize(self):\n\t\treturn {\n\t\t\t\"id\": self.id,\n\t\t\t\"name\": self.name\n\t\t}",
"def serialize(self) -> str:\n return json.dumps(self.__dict__)",
"def serialize(self):\n raise NotImplemented()",
"def serialize(self):\n raise NotImplementedError(\n \"Subclasses of Serializable must implement serialize\"\n )",
"def serialize(self):\n cls = self.__class__\n return {\n \"spawn_prob\": self.spawn_prob,\n \"agent_locs\": self.agent_locs.copy(),\n \"agent_names\": self.agent_names.copy(),\n \"board\": self.board.copy(),\n \"class\": \"%s.%s\" % (cls.__module__, cls.__name__),\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'url': self.url,\n 'created': self.created,\n 'update': self.update,\n 'active': self.active,\n }",
"def serialise(self) -> SerialisedState:\n raise NotImplementedError",
"def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n }",
"def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n }",
"def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'description': self.description,\n 'longitude' : self.longitude,\n 'latitude' : self.latitude,\n 'created_on' : self.created_on,\n 'created_by' : self.created_by,\n 'likes' : self.likes\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n }"
]
| [
"0.6771217",
"0.67654073",
"0.6750785",
"0.66644406",
"0.66298956",
"0.66251737",
"0.6616901",
"0.6616476",
"0.65615296",
"0.65493906",
"0.65493906",
"0.653947",
"0.6524214",
"0.65125376",
"0.6478986",
"0.64641863",
"0.6462501",
"0.6462501",
"0.6429069",
"0.6424507",
"0.64142805",
"0.63946927",
"0.6390409",
"0.6386256",
"0.6382212",
"0.638001",
"0.63622165",
"0.6359459",
"0.635932",
"0.635932"
]
| 0.7048768 | 0 |
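The record above pairs a "serialize attributes as kwargs" docstring with a method that simply returns a dict of constructor arguments. A hypothetical round trip of that idea (class and field names invented for illustration) is sketched below.

```python
import json
from typing import Any, Dict


class ToySurrogateSpec:
    """Illustrative only: holds options and can be rebuilt from its own kwargs."""

    def __init__(self, model_class: str, model_options: Dict[str, Any], refit: bool = True) -> None:
        self.model_class = model_class
        self.model_options = model_options
        self.refit = refit

    def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:
        # Everything returned here must be accepted by __init__ on re-instantiation.
        return {
            "model_class": self.model_class,
            "model_options": self.model_options,
            "refit": self.refit,
        }


original = ToySurrogateSpec("SomeGPClass", {"use_ard": True})
kwargs = original._serialize_attributes_as_kwargs()
restored = ToySurrogateSpec(**json.loads(json.dumps(kwargs)))  # survives a JSON round trip
assert restored.model_options == original.model_options
```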
Calculates the maximum time, across all velocity components, required to reach the goal. By default, it only synchronizes linear velocities. If angular_synchronization is True, it also synchronizes angular velocities. | def calculate_max_time(
if angular_synchronization:
assert len(error) == len(velocity) == 6
else:
assert len(error) == len(velocity) == 3
# calculate_duration = lambda distance, speed: abs(float(distance) / speed)
def calculate_duration(distance, speed):
return abs(float(distance) / speed)
durations = [
calculate_duration(ee, vv) if (abs(vv) >= zero) else 0.0
for ee, vv in zip(error, velocity)
]
return max(durations) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_sync_velocity(error, velocity, max_time, angular_synchronization=False):\n if angular_synchronization:\n assert len(error) == len(velocity) == 6\n else:\n assert len(error) == len(velocity) == 3\n\n # A velocity is computed to cover a distance (dist) in a given time (max_time),\n # where max_time is the same for all distances.\n # synchronize_velocity = lambda dist, vel: abs(float(dist) / max_time) * cmp(vel, 0)\n def synchronize_velocity(dist, vel):\n return abs(float(dist) / max_time) * cmp(vel, 0)\n\n return [\n synchronize_velocity(ee, vv) if (max_time and vv) else 0.0\n for ee, vv in zip(error, velocity)\n ]",
"def calculate_velocity(self, globalBest):\r\n self.sync = False\r\n \r\n for index in range(self.size):\r\n # The formula is composed of 3 terms\r\n term1 = INERTIA_WEIGHT * self.velocities[index]\r\n \r\n term2 = (\r\n INDIVIDUAL_CONSTANT * random.uniform(0, 1) *\r\n (self.personal_best[index] - self.values[index])\r\n )\r\n \r\n term3 = (\r\n GLOBAL_CONSTANT * random.uniform(0, 1) *\r\n (globalBest[index] - self.values[index])\r\n )\r\n \r\n newVelocity = term1 + term2 + term3\r\n \r\n # Adjust the velocity so it doesn't exceed the maximum allowed \r\n if newVelocity < 0:\r\n self.velocities[index] = -(newVelocity % VELOCITY_MAX)\r\n else:\r\n self.velocities[index] = newVelocity % VELOCITY_MAX",
"def _compute_solar_torque(self):\n pass",
"def _compute_gravity_torque(self):\n pass",
"def calc_time_from_waypoints(self, initial_wp, final_wp, max_speed):\n joint_dist_to_cover = np.absolute(\n np.asarray(final_wp) - np.asarray(initial_wp))\n max_joint_dist_to_cover = np.max(joint_dist_to_cover)\n T = max_joint_dist_to_cover / max_speed\n return T\n # pass",
"def _set_angular_velocity(self):\n nb_angular_velocities = 0\n sum_angular_velocities = 0\n for sl_id in range(self.nb_sl):\n w_list = self.sl_list[sl_id].angular_velocities\n nb_angular_velocities += len(w_list)\n sum_angular_velocities += np.sum(w_list)\n self.angular_velocity = sum_angular_velocities / nb_angular_velocities",
"def best_speed(self, time, angle):\n for velocity in range(self.max_speed, 0, -1): # velocity in km/h\n delta_energy = time * (self.recharge_rate - self.motor_power(velocity, angle)) #kWh\n\n if (self.current_capacity + delta_energy) >= self.end_capacity:\n return velocity",
"def _compute_aero_torque(self):\n pass",
"def angular_velocity(self):\n trig = gyro_trigger_mode.GET_ANGULAR_VELOCITY_TRIGGER_READ\n if self.__trigger == trig:\n self.read_and_update_angle()\n\n adjusted = (self.__angle - self.__offset)\n if self.__factorSet:\n return adjusted / self.__degPerSecondFactor\n return adjusted",
"def get_max_velocity(self):\n return self._max_velocity",
"def max_angular_acceleration():",
"def _test_max_simulation_step(self):\n previous_step = self.program.steps[0]\n previous_pb_frame = self.program.steps[0].playback_frames[0]\n for step in self.program.steps:\n for index, pb_frame in enumerate(step.playback_frames):\n if self.program.simulation_type == InstructionListJointsFlags.TimeBased:\n msg = f\"Step {step.name} playback frame {index}, time_step {pb_frame.time_step} not in 'max_time_step' bounds\"\n self.assertLessEqual(pb_frame.time_step, self.program.max_time_step, msg)\n else:\n move_type = step.move_type if index != 0 else previous_step.move_type\n if move_type == MoveType.Joint:\n msg_deg = f\"Step {step.name} (Joint) playback frame {index}, deg_step {pb_frame.deg_step} not in 'max_deg_step' bounds\"\n\n # Check if value given in list result is smaller than max for simulation\n self.assertLessEqual(pb_frame.deg_step, self.program.max_deg_step, msg_deg)\n\n # Check if actual step is smaller than max for simulation\n actual_deg_step = max([abs(j_a[0] - j_b[0]) for j_a, j_b\n in zip(pb_frame.joints.rows, previous_pb_frame.joints.rows)])\n self.assertLessEqual(actual_deg_step, self.program.max_deg_step, msg_deg)\n else:\n msg_mm = f\"Step {step.name} (Frame )playback frame {index}, mm_step {pb_frame.mm_step} not in 'max_mm_step' bounds\"\n\n # Check if value given in list result is smaller than max for simulation\n self.assertLessEqual(pb_frame.mm_step, self.program.max_mm_step, msg_mm)\n\n # Check if actual step is smaller than max for simulation\n actual_mm_step = sqrt(sum([(c_a[0] - c_b[0]) * (c_a[0] - c_b[0]) for c_a, c_b\n in zip(pb_frame.coords.rows, previous_pb_frame.coords.rows)]))\n self.assertLessEqual(actual_mm_step, self.program.max_mm_step, msg_mm)\n\n previous_pb_frame = pb_frame\n previous_step = step",
"def compute_velocity(self, my_agent, nearest_agents, avoids):\n force = Vector2()\n self.velocity = get_agent_velocity(my_agent)\n\n # Compute all the components\n alignment = self.compute_alignment(nearest_agents)\n cohesion = self.compute_cohesion(nearest_agents)\n separation = self.compute_separation(nearest_agents)\n avoid = self.compute_avoids(avoids)\n\n if DEBUG:\n print(\"alignment: \", alignment)\n print(\"cohesion: \", cohesion)\n print(\"separation: \", separation)\n print(\"avoid: \", avoid)\n\n # Add components together and limit the output\n force += alignment * self.alignment_factor\n force += cohesion * self.cohesion_factor\n force += separation * self.separation_factor\n force += avoid * self.avoid_factor\n force.limit(self.max_force)\n\n # If agent is moving, apply constant friction force\n if self.velocity.norm() > 0:\n force += self.friction * -1 * self.velocity.normalize(ret=True)\n\n acceleration = force / self.mass\n\n # Calculate total velocity (delta_velocity = acceleration * delta_time)\n self.velocity += acceleration / 10\n self.velocity.limit(self.max_speed)\n\n if DEBUG:\n print(\"force: \", force)\n print(\"acceleration: \", acceleration)\n print(\"velocity: \", self.velocity)\n print()\n\n # Return the the velocity as Twist message\n vel = Twist()\n vel.linear.x = self.velocity.x\n vel.linear.y = self.velocity.y\n return vel",
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = self.state_observer.spacecraftState.getMass()\n mCub = self.inCub['mass_frac'] * satM\n\n self._gTorque = Vector3D.ZERO\n\n for CoM in self.inCub['CoM']:\n\n S_dmPos = self.satPos_s.add(CoM)\n\n r2 = S_dmPos.getNormSq()\n gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)\n\n B_dmPos = sat2body.applyTo(S_dmPos)\n\n gDist = Vector3D(self.GravityModel.gradient(curr_date,\n B_dmPos,\n self.muGM))\n\n g_Dist_s = body2sat.applyTo(gDist)\n\n dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))\n self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))\n\n else:\n self._gTorque = Vector3D.ZERO",
"def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO",
"def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO",
"def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = np.asarray(self.meshDA['Area'])\n satVel = np.array([satVel.x, satVel.y, satVel.z])\n vAtm = np.array([vAtm.x, vAtm.y, vAtm.z])\n\n relativeVelocity = vAtm - (satVel + (np.cross(omega, CoM)))\n vNorm = np.linalg.norm(relativeVelocity, axis=1)\n vDir = np.reciprocal(vNorm[:, None]) * relativeVelocity\n\n dot = np.einsum('ij,ij->i', normal, vDir)\n\n dotCondition = dot < 0\n dot = dot[dotCondition]\n if dot.size > 0:\n vDir = vDir[dotCondition]\n vNorm = vNorm[dotCondition]\n normal = normal[dotCondition]\n area = area[dotCondition]\n CoM = CoM[dotCondition]\n\n coeff = 0.5 * rho * dragCoeff * (vNorm**2)\n oMr = 1.0 - liftRatio\n f = (coeff * area * dot)[:, None]\n\n aT = np.sum(np.cross(CoM, oMr * np.absolute(f) * vDir + 2 * liftRatio * f * normal), axis=0)\n\n self._aTorque = Vector3D(float(aT[0]), float(aT[1]), float(aT[2]))\n\n else:\n self._aTorque = Vector3D.ZERO",
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO",
"def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")",
"def goal_velocity(self):\n return self._read(MX_GOAL_VELOCITY)",
"def get_intermediate_velocities(\n current_local_max_dict, previous_local_max_dict,\n e_folding_radius_metres=DEFAULT_VELOCITY_EFOLD_RADIUS_METRES):\n\n error_checking.assert_is_greater(e_folding_radius_metres, 0.)\n\n num_current_maxima = len(current_local_max_dict[X_COORDS_KEY])\n if previous_local_max_dict is None:\n num_previous_maxima = 0\n else:\n num_previous_maxima = len(previous_local_max_dict[X_COORDS_KEY])\n\n if num_current_maxima == 0 or num_previous_maxima == 0:\n x_velocities_m_s01 = numpy.full(num_current_maxima, numpy.nan)\n y_velocities_m_s01 = numpy.full(num_current_maxima, numpy.nan)\n\n current_local_max_dict.update({\n X_VELOCITIES_KEY: x_velocities_m_s01,\n Y_VELOCITIES_KEY: y_velocities_m_s01\n })\n\n return current_local_max_dict\n\n first_previous_indices = numpy.full(num_current_maxima, -1, dtype=int)\n second_previous_indices = numpy.full(num_current_maxima, -1, dtype=int)\n\n for i in range(num_current_maxima):\n these_previous_indices = numpy.where(\n current_local_max_dict[CURRENT_TO_PREV_MATRIX_KEY][i, ...]\n )[0]\n\n if len(these_previous_indices) > 0:\n first_previous_indices[i] = these_previous_indices[0]\n if len(these_previous_indices) > 1:\n second_previous_indices[i] = these_previous_indices[1]\n\n time_diff_seconds = (\n current_local_max_dict[VALID_TIME_KEY] -\n previous_local_max_dict[VALID_TIME_KEY]\n )\n\n first_prev_x_coords_metres = numpy.array([\n numpy.nan if k == -1 else previous_local_max_dict[X_COORDS_KEY][k]\n for k in first_previous_indices\n ])\n\n first_prev_y_coords_metres = numpy.array([\n numpy.nan if k == -1 else previous_local_max_dict[Y_COORDS_KEY][k]\n for k in first_previous_indices\n ])\n\n first_x_velocities_m_s01 = (\n current_local_max_dict[X_COORDS_KEY] - first_prev_x_coords_metres\n ) / time_diff_seconds\n\n first_y_velocities_m_s01 = (\n current_local_max_dict[Y_COORDS_KEY] - first_prev_y_coords_metres\n ) / time_diff_seconds\n\n second_prev_x_coords_metres = numpy.array([\n numpy.nan if k == -1 else previous_local_max_dict[X_COORDS_KEY][k]\n for k in second_previous_indices\n ])\n\n second_prev_y_coords_metres = numpy.array([\n numpy.nan if k == -1 else previous_local_max_dict[Y_COORDS_KEY][k]\n for k in second_previous_indices\n ])\n\n second_x_velocities_m_s01 = (\n current_local_max_dict[X_COORDS_KEY] - second_prev_x_coords_metres\n ) / time_diff_seconds\n\n second_y_velocities_m_s01 = (\n current_local_max_dict[Y_COORDS_KEY] - second_prev_y_coords_metres\n ) / time_diff_seconds\n\n x_velocities_m_s01 = numpy.nanmean(\n numpy.array([first_x_velocities_m_s01, second_x_velocities_m_s01]),\n axis=0\n )\n\n y_velocities_m_s01 = numpy.nanmean(\n numpy.array([first_y_velocities_m_s01, second_y_velocities_m_s01]),\n axis=0\n )\n\n x_velocities_m_s01, y_velocities_m_s01 = _estimate_velocity_by_neigh(\n x_coords_metres=current_local_max_dict[X_COORDS_KEY],\n y_coords_metres=current_local_max_dict[Y_COORDS_KEY],\n x_velocities_m_s01=x_velocities_m_s01,\n y_velocities_m_s01=y_velocities_m_s01,\n e_folding_radius_metres=e_folding_radius_metres)\n\n current_local_max_dict.update({\n X_VELOCITIES_KEY: x_velocities_m_s01,\n Y_VELOCITIES_KEY: y_velocities_m_s01\n })\n\n return current_local_max_dict",
"def solve( # pylint: disable=too-complex\n self,\n delta_t: float,\n total_time: float,\n max_iterations: Optional[int] = None,\n verbosity: int = 1\n ) -> None:\n # Validate parameters\n if not isinstance(delta_t, float) or delta_t < 0.0:\n raise TypeError('Wrong type for parameter delta_t ({} != {})'.format(type(delta_t), float))\n if not isinstance(total_time, float) or total_time < 0.0:\n raise TypeError('Wrong type for parameter total_time ({} != {})'.format(type(total_time), float))\n if max_iterations is not None and not isinstance(max_iterations, int):\n raise TypeError('Wrong type for parameter max_iterations ({} != {})'.format(type(max_iterations), int))\n if not isinstance(verbosity, int) or verbosity < 0 or verbosity > 2:\n raise TypeError('Wrong type for parameter verbosity ({} != {})'.format(type(verbosity), int))\n # Perform the solving\n delta_t = self._get_delta_t(delta_t)\n self._discretize(delta_t)\n current_time: float = 0.0\n progress = ProgressBar()\n while current_time < total_time:\n current_time += delta_t\n for component in self.components:\n component.prepare_next_timestep(delta_t, current_time)\n for component in self.components:\n component.exchange_last_boundaries()\n self._solve_inner_loop(max_iterations)\n for component in self.components:\n component.finalize_current_timestep()\n if self._callback is not None:\n self._callback()\n if verbosity > 0:\n progress.update(\n current_time / total_time,\n 'Currently at time {:7.3f} of {:7.3f}'.format(current_time, total_time)\n )",
"def max_speed(self) -> float:\n return 2",
"def findMaximumDeviationLoop(junctions, wires, resistances, voltages, currents):\n raise NotImplementedError",
"def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))",
"def maximum_communication_delay(self):\n\n segment_pairs = ((src, dst) for src in self.sim.segments for dst in\n self.sim.segments if src != dst)\n\n delays = []\n for src, dst in segment_pairs:\n delay = self.communication_delay(src, dst)\n delays.append(delay)\n\n delays = np.array(delays)\n max_delay = np.max(delays)\n # max_delay *= pq.second\n\n return max_delay",
"def _set_winding_angle_and_angular_velocity(self, delta_time, cut_lines):\n\n coord = self.coord_list[:, 0] + 1j * self.coord_list[:, 1]\n coord += coord.real * np.cos(self.mean_pos[1] * np.pi / 180)\n vectors = coord[1:] - coord[:-1]\n\n # Remove the null vectors\n not_null_vect = vectors != 0\n vectors = vectors[not_null_vect]\n norms = abs(vectors)\n angles = np.angle(vectors[1:] / vectors[:-1])\n\n if type(delta_time * 1.0) != float:\n delta_time = delta_time[not_null_vect]\n\n self.coord_list = self.coord_list[np.append(not_null_vect, True), :]\n self.nb_points = len(self.coord_list)\n\n if self.nb_points <= 2:\n self.winding_angle = 0\n self.angular_velocities = 0\n else:\n if cut_lines:\n # Find the shortest sub streamline with the smallest ratio\n # dist(start,end)/sub_line_len. This ratio will be close to 1 for\n # straight lines and close to 0 for closed loop.\n cumsum_norms = np.concatenate((np.array([0]),np.cumsum(norms)))\n\n min_ratio = 1\n\n min_end_id = self.nb_points - 1\n min_start_id = 0\n\n for start_id in range(self.nb_points):\n start_norm = cumsum_norms[start_id]\n start_pts = coord[start_id]\n for end_id in range(start_id+1, self.nb_points):\n curr_len = cumsum_norms[end_id] - start_norm\n start_end_dist = abs(coord[end_id] - start_pts)\n ratio = start_end_dist / curr_len\n if ratio <= min_ratio:\n min_ratio = ratio\n min_end_id = end_id\n min_start_id = start_id\n\n # Recompute the first attributs after taking the sub streamline\n self.coord_list = np.array(\n self.coord_list[min_start_id : min_end_id + 1], dtype=float\n )\n self.nb_points = (min_end_id - min_start_id) + 1\n self.length = cumsum_norms[end_id] - cumsum_norms[start_id]\n self.mean_pos = np.mean(self.coord_list, axis=0)\n\n # Recompute the sub angle list and delta_time\n angles = np.array(angles[min_start_id : min_end_id - 1])\n if type(delta_time * 1.0) != float:\n delta_time = np.array(delta_time[min_start_id:min_end_id])\n\n # Set the winding angle and the angular velocity\n self.winding_angle = np.sum(angles)\n\n if type(delta_time * 1.0) == float:\n self.angular_velocities = angles / delta_time\n else:\n self.angular_velocities = (\n 2 * angles / (delta_time[1:] + delta_time[:-1])\n )",
"def get_max_speed(self):\n if self.mot_type == 'ims':\n return self.get_par(\"max_speed\")\n elif self.mot_type == 'xps8p':\n return self.get_par(\"max_speed_xps\")\n else:\n return self.get_par(\"max_speed\")",
"def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n self._aTorque = Vector3D.ZERO\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'])\n\n for CoM, Normal, Area in iterator:\n CoMVelocity = satVel.add(self.V3_cross(omega, CoM))\n relativeVelocity = vAtm.subtract(CoMVelocity)\n\n vNorm2 = relativeVelocity.getNormSq()\n vNorm = sqrt(vNorm2)\n vDir = relativeVelocity.scalarMultiply(1.0 / vNorm)\n\n dot = self.V3_dot(Normal, vDir)\n if (dot < 0):\n coeff = 0.5 * rho * dragCoeff * vNorm2\n oMr = 1.0 - liftRatio\n # dA intercepts the incoming flux\n f = coeff * Area * dot\n force = Vector3D(float(oMr * abs(f)), vDir,\n float(liftRatio * f * 2), Normal)\n self._aTorque = self._aTorque.add(self.V3_cross(CoM, force))\n\n else:\n self._aTorque = Vector3D.ZERO",
"def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60"
]
| [
"0.68456197",
"0.57857203",
"0.57491744",
"0.5585136",
"0.5567209",
"0.5532728",
"0.55307406",
"0.5515998",
"0.5502145",
"0.53898937",
"0.5357226",
"0.53221965",
"0.5281155",
"0.527711",
"0.52734107",
"0.52734107",
"0.52235395",
"0.5221728",
"0.5191337",
"0.5123462",
"0.5105481",
"0.5093182",
"0.5092932",
"0.5091347",
"0.5079873",
"0.5069659",
"0.5057028",
"0.50361985",
"0.50032765",
"0.49902672"
]
| 0.67981666 | 1 |
Calculates the synchronized velocity for all velocities to reach their goal at the same time. By default, it only synchronizes linear velocities. If angular_synchronization is True, then it also synchronizes for angular velocities. | def calculate_sync_velocity(error, velocity, max_time, angular_synchronization=False):
if angular_synchronization:
assert len(error) == len(velocity) == 6
else:
assert len(error) == len(velocity) == 3
# A velocity is computed to cover a distance (dist) in a given time (max_time),
# where max_time is the same for all distances.
# synchronize_velocity = lambda dist, vel: abs(float(dist) / max_time) * cmp(vel, 0)
def synchronize_velocity(dist, vel):
return abs(float(dist) / max_time) * cmp(vel, 0)
return [
synchronize_velocity(ee, vv) if (max_time and vv) else 0.0
for ee, vv in zip(error, velocity)
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calcVelocity(self, iteration):\n self.setIteration(iteration)\n if self.parallelization_mode == \"serial\":\n # calculate and set coordinates\n for this_bin in iteration:\n for this_segment in this_bin:\n self.calcSegmentVelocities(this_segment)\n \n else:\n sys.stderr.write(\"ERROR: Velocity calculation is only available in serial mode\")\n sys.exit(-1)",
"def _set_angular_velocity(self):\n nb_angular_velocities = 0\n sum_angular_velocities = 0\n for sl_id in range(self.nb_sl):\n w_list = self.sl_list[sl_id].angular_velocities\n nb_angular_velocities += len(w_list)\n sum_angular_velocities += np.sum(w_list)\n self.angular_velocity = sum_angular_velocities / nb_angular_velocities",
"def calculate_velocity(self, globalBest):\r\n self.sync = False\r\n \r\n for index in range(self.size):\r\n # The formula is composed of 3 terms\r\n term1 = INERTIA_WEIGHT * self.velocities[index]\r\n \r\n term2 = (\r\n INDIVIDUAL_CONSTANT * random.uniform(0, 1) *\r\n (self.personal_best[index] - self.values[index])\r\n )\r\n \r\n term3 = (\r\n GLOBAL_CONSTANT * random.uniform(0, 1) *\r\n (globalBest[index] - self.values[index])\r\n )\r\n \r\n newVelocity = term1 + term2 + term3\r\n \r\n # Adjust the velocity so it doesn't exceed the maximum allowed \r\n if newVelocity < 0:\r\n self.velocities[index] = -(newVelocity % VELOCITY_MAX)\r\n else:\r\n self.velocities[index] = newVelocity % VELOCITY_MAX",
"def compute_velocity(self, my_agent, nearest_agents, avoids):\n force = Vector2()\n self.velocity = get_agent_velocity(my_agent)\n\n # Compute all the components\n alignment = self.compute_alignment(nearest_agents)\n cohesion = self.compute_cohesion(nearest_agents)\n separation = self.compute_separation(nearest_agents)\n avoid = self.compute_avoids(avoids)\n\n if DEBUG:\n print(\"alignment: \", alignment)\n print(\"cohesion: \", cohesion)\n print(\"separation: \", separation)\n print(\"avoid: \", avoid)\n\n # Add components together and limit the output\n force += alignment * self.alignment_factor\n force += cohesion * self.cohesion_factor\n force += separation * self.separation_factor\n force += avoid * self.avoid_factor\n force.limit(self.max_force)\n\n # If agent is moving, apply constant friction force\n if self.velocity.norm() > 0:\n force += self.friction * -1 * self.velocity.normalize(ret=True)\n\n acceleration = force / self.mass\n\n # Calculate total velocity (delta_velocity = acceleration * delta_time)\n self.velocity += acceleration / 10\n self.velocity.limit(self.max_speed)\n\n if DEBUG:\n print(\"force: \", force)\n print(\"acceleration: \", acceleration)\n print(\"velocity: \", self.velocity)\n print()\n\n # Return the the velocity as Twist message\n vel = Twist()\n vel.linear.x = self.velocity.x\n vel.linear.y = self.velocity.y\n return vel",
"def calculate(x, y, z, vx, vy, vz, dt, m, g, B2, S0, omega):\n t = 0.0\n # Establish lists with initial position and velocity components and time.\n x_list = [x]\n y_list = [y]\n z_list = [z]\n vx_list = [vx]\n vy_list = [vy]\n vz_list = [vz]\n t_list = [t]\n\n # Set up visual elements.\n mound = visual.box(pos=(0,0,0), length=0.1, width=0.5, height=0.03, color=visual.color.white)\n plate = visual.box(pos=(18,0,0), length=0.5, width=0.5, height=0.03, color=visual.color.white)\n ball = visual.sphere(pos=(x,y,z), radius=0.05, color=visual.color.white)\n ball.trail = visual.curve(color=ball.color)\n\n while y >= 0.0:\n visual.rate(100) # Limit to no more than 100 iterations per second.\n t, x, y, z, vx, vy, vz = do_time_step(t, dt, x, y, z, vx, vy, vz, m, B2, g, S0, omega)\n x_list.append(x)\n y_list.append(y)\n z_list.append(z)\n vx_list.append(vx)\n vy_list.append(vy)\n vz_list.append(vz)\n t_list.append(t)\n ball.pos = (x,y,z)\n ball.trail.append(pos=ball.pos)\n\n return t_list, x_list, y_list, z_list, vx_list, vy_list, vz_list",
"def set_base_angular_velocites(self, velocity: List[float]):\n # self._reset_wheel()\n fBVel = velocity[0]\n lRVel = velocity[1]\n rVel = velocity[2]\n self.set_joint_target_velocities(\n [-fBVel - lRVel - rVel, -fBVel + lRVel - rVel,\n -fBVel - lRVel + rVel, -fBVel + lRVel + rVel])",
"def _compute_solar_torque(self):\n pass",
"def _update_velocities(self, agent1, agent2, **kwargs):\n # calculate vector between centers\n rel_position = [\n agent2.position - agent1.position,\n agent1.position - agent2.position\n ]\n # Calculate relative velocities\n rel_velocities = [\n agent1.velocity - agent2.velocity,\n agent2.velocity - agent1.velocity\n ]\n # Calculate mass factor\n mass_factor = [\n 2 * agent2.mass / (agent2.mass + agent1.mass),\n 2 * agent1.mass / (agent2.mass + agent1.mass)\n ]\n # norm\n norm = [\n np.square(np.linalg.norm(rel_position[0])),\n np.square(np.linalg.norm(rel_position[1]))\n ]\n # Dot product of relative velocity and relative distcance\n dot = [\n np.dot(rel_velocities[0], rel_position[0]),\n np.dot(rel_velocities[1], rel_position[1])\n ]\n # bringing it all together\n vel_new = [\n agent1.velocity - (mass_factor[0] * (dot[0]/norm[0]) * rel_position[0]),\n agent2.velocity - (mass_factor[1] * (dot[1]/norm[1]) * rel_position[1])\n ]\n # Only update the velocity if not stationary\n self.velocity_state.set_velocity(agent1, vel_new[0])\n self.velocity_state.set_velocity(agent2, vel_new[1])",
"def set_velocities(self, linear, angular):\n msg = Twist()\n msg.linear.x = min(linear, 0.5)\n msg.angular.z = min(angular, 0.5)\n self.velocity_pub.publish(msg)",
"def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = np.asarray(self.meshDA['Area'])\n satVel = np.array([satVel.x, satVel.y, satVel.z])\n vAtm = np.array([vAtm.x, vAtm.y, vAtm.z])\n\n relativeVelocity = vAtm - (satVel + (np.cross(omega, CoM)))\n vNorm = np.linalg.norm(relativeVelocity, axis=1)\n vDir = np.reciprocal(vNorm[:, None]) * relativeVelocity\n\n dot = np.einsum('ij,ij->i', normal, vDir)\n\n dotCondition = dot < 0\n dot = dot[dotCondition]\n if dot.size > 0:\n vDir = vDir[dotCondition]\n vNorm = vNorm[dotCondition]\n normal = normal[dotCondition]\n area = area[dotCondition]\n CoM = CoM[dotCondition]\n\n coeff = 0.5 * rho * dragCoeff * (vNorm**2)\n oMr = 1.0 - liftRatio\n f = (coeff * area * dot)[:, None]\n\n aT = np.sum(np.cross(CoM, oMr * np.absolute(f) * vDir + 2 * liftRatio * f * normal), axis=0)\n\n self._aTorque = Vector3D(float(aT[0]), float(aT[1]), float(aT[2]))\n\n else:\n self._aTorque = Vector3D.ZERO",
"def angular_velocity(self):\n trig = gyro_trigger_mode.GET_ANGULAR_VELOCITY_TRIGGER_READ\n if self.__trigger == trig:\n self.read_and_update_angle()\n\n adjusted = (self.__angle - self.__offset)\n if self.__factorSet:\n return adjusted / self.__degPerSecondFactor\n return adjusted",
"def vc_set_joint_velocity(self, speeds, is_radian=None, is_sync=True, duration=-1, **kwargs):\r\n return self._arm.vc_set_joint_velocity(speeds, is_radian=is_radian, is_sync=is_sync, duration=duration, **kwargs)",
"def updateVelocity(self, glob, latency):\n # Velocity equation parameters that control efficacy and behavior of the PSO, selected arbitrarily by us \n omega= .40 # coefficient for influence of currect velocity\n psi_loc= 0.30 # coefficient for influence of local best\n psi_glob= 0.30 # coefficient for influence of global best \n #calculates random weights between 0 and 1 (non-inclusive) for influence of individual (local) or social (global) best\n randLocalWeight= .01*random.randrange(-100,100,1)\n randGlobalWeight= .01*random.randrange(-100,100,1)\n \n #multiplies weights with best vectors (glob is the Swarm object) and current velocity to get new velocity\n #function below comes from wikipedia.org/wiki/Particle_swarm_optimization\n self.velocity= (omega*self.velocity + psi_loc*(self.bestXYZ[0:2] - self.position)*randLocalWeight + psi_glob*(np.array(glob.overBestPos)-self.position)*randGlobalWeight)*latency\n # latency multiplies velocity by #time-steps until update",
"def apply_velocity(self, angles, velocity, phase, x):\r\n \r\n # VX\r\n v=velocity[0]*self.parameters[\"vx_amplitude\"]\r\n d=(x*2-1)*v\r\n if phase:\r\n angles[\"l_thigh_joint\"]+=d\r\n angles[\"l_ankle_joint\"]+=d\r\n angles[\"r_thigh_joint\"]+=d\r\n angles[\"r_ankle_joint\"]+=d\r\n else:\r\n angles[\"l_thigh_joint\"]-=d\r\n angles[\"l_ankle_joint\"]-=d\r\n angles[\"r_thigh_joint\"]-=d\r\n angles[\"r_ankle_joint\"]-=d\r\n\r\n # VY\r\n v=velocity[1]*self.parameters[\"vy_amplitude\"]\r\n d=(x)*v\r\n d2=(1-x)*v\r\n if v>=0:\r\n if phase:\r\n angles[\"l_hip_joint\"]-=d\r\n angles[\"l_foot_joint\"]-=d\r\n angles[\"r_hip_joint\"]+=d\r\n angles[\"r_foot_joint\"]+=d\r\n else:\r\n angles[\"l_hip_joint\"]-=d2\r\n angles[\"l_foot_joint\"]-=d2\r\n angles[\"r_hip_joint\"]+=d2\r\n angles[\"r_foot_joint\"]+=d2\r\n else:\r\n if phase:\r\n angles[\"l_hip_joint\"]+=d2\r\n angles[\"l_foot_joint\"]+=d2\r\n angles[\"r_hip_joint\"]-=d2\r\n angles[\"r_foot_joint\"]-=d2\r\n else:\r\n angles[\"l_hip_joint\"]+=d\r\n angles[\"l_foot_joint\"]+=d\r\n angles[\"r_hip_joint\"]-=d\r\n angles[\"r_foot_joint\"]-=d\r\n \r\n ## VT\r\n #v=velocity[2]*self.parameters[\"vt_amplitude\"]\r\n #d=(x)*v\r\n #d2=(1-x)*v\r\n #if v>=0:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=-d\r\n #angles[\"j_pelvis_r\"]=d\r\n #else:\r\n #angles[\"j_pelvis_l\"]=-d2\r\n #angles[\"j_pelvis_r\"]=d2\r\n #else:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=d2\r\n #angles[\"j_pelvis_r\"]=-d2\r\n #else:\r\n #angles[\"j_pelvis_l\"]=d\r\n #angles[\"j_pelvis_r\"]=-d\r",
"def verletIntegration(self):\n for atom in range(0, self.numAtoms):\n \n # Update velocities\n self.atoms[atom].vx += (self.atoms[atom].fx/self.m)*self.dt\n self.atoms[atom].vy += (self.atoms[atom].fy/self.m)*self.dt\n self.atoms[atom].vz += (self.atoms[atom].fz/self.m)*self.dt\n \n \n # Update positions\n newX = self.atoms[atom].x + self.atoms[atom].vx*self.dt\n newY = self.atoms[atom].y + self.atoms[atom].vy*self.dt\n newZ = self.atoms[atom].z + self.atoms[atom].vz*self.dt\n\n # Update current positions (applying PBC)\n if newX < 0:\n self.atoms[atom].x = newX + self.lbox\n elif newX > self.lbox:\n self.atoms[atom].x = newX - self.lbox\n else:\n self.atoms[atom].x = newX\n \n if newY < 0:\n self.atoms[atom].y = newY + self.lbox\n elif newY > self.lbox:\n self.atoms[atom].y = newY - self.lbox\n else:\n self.atoms[atom].y = newY\n \n if newZ < 0:\n self.atoms[atom].z = newZ + self.lbox\n elif newZ > self.lbox:\n self.atoms[atom].z = newZ - self.lbox\n else:\n self.atoms[atom].z = newZ",
"def angular_velocities(self, dt: float) -> np.ndarray:\n if not isinstance(dt, float):\n raise TypeError(f\"dt must be a float. Got {type(dt)}.\")\n if dt <= 0:\n raise ValueError(f\"dt must be greater than zero. Got {dt}.\")\n w = np.c_[\n self.w[:-1]*self.x[1:] - self.x[:-1]*self.w[1:] - self.y[:-1]*self.z[1:] + self.z[:-1]*self.y[1:],\n self.w[:-1]*self.y[1:] + self.x[:-1]*self.z[1:] - self.y[:-1]*self.w[1:] - self.z[:-1]*self.x[1:],\n self.w[:-1]*self.z[1:] - self.x[:-1]*self.y[1:] + self.y[:-1]*self.x[1:] - self.z[:-1]*self.w[1:]]\n return 2.0 * w / dt",
"def update_vel(self, forces, dt):\n\n for particle, force in zip(self.particles, forces):\n particle.leap_velocity(dt, force)\n return None",
"def calculate_angular_velocity(headings, fs):\r\n heading_vectors = np.array([np.cos(headings), np.sin(headings)]).T\r\n sin_angular_change = np.cross(heading_vectors[:-1], heading_vectors[1:])\r\n angular_velocity = np.arcsin(sin_angular_change) * float(fs)\r\n return angular_velocity",
"def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO",
"def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO",
"def compute_speeds(self):\n # compute and return the speed as the norm of all velocity vectors\n return norm(self.velocities, axis=1)",
"def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n self._aTorque = Vector3D.ZERO\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'])\n\n for CoM, Normal, Area in iterator:\n CoMVelocity = satVel.add(self.V3_cross(omega, CoM))\n relativeVelocity = vAtm.subtract(CoMVelocity)\n\n vNorm2 = relativeVelocity.getNormSq()\n vNorm = sqrt(vNorm2)\n vDir = relativeVelocity.scalarMultiply(1.0 / vNorm)\n\n dot = self.V3_dot(Normal, vDir)\n if (dot < 0):\n coeff = 0.5 * rho * dragCoeff * vNorm2\n oMr = 1.0 - liftRatio\n # dA intercepts the incoming flux\n f = coeff * Area * dot\n force = Vector3D(float(oMr * abs(f)), vDir,\n float(liftRatio * f * 2), Normal)\n self._aTorque = self._aTorque.add(self.V3_cross(CoM, force))\n\n else:\n self._aTorque = Vector3D.ZERO",
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = self.state_observer.spacecraftState.getMass()\n mCub = self.inCub['mass_frac'] * satM\n\n self._gTorque = Vector3D.ZERO\n\n for CoM in self.inCub['CoM']:\n\n S_dmPos = self.satPos_s.add(CoM)\n\n r2 = S_dmPos.getNormSq()\n gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)\n\n B_dmPos = sat2body.applyTo(S_dmPos)\n\n gDist = Vector3D(self.GravityModel.gradient(curr_date,\n B_dmPos,\n self.muGM))\n\n g_Dist_s = body2sat.applyTo(gDist)\n\n dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))\n self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))\n\n else:\n self._gTorque = Vector3D.ZERO",
"def compute_coll(self, particle, step):\r\n m1, m2 = self.mass, particle.mass\r\n r1, r2 = self.radius, particle.radius\r\n v1, v2 = self.velocity, particle.velocity\r\n x1, x2 = self.position, particle.position\r\n di = x2-x1\r\n norm = np.linalg.norm(di)\r\n if norm-(r1+r2)*1.1 < step*abs(np.dot(v1-v2, di))/norm:\r\n self.velocity = v1 - 2. * m2/(m1+m2) * np.dot(v1-v2, di) / (np.linalg.norm(di)**2.) * di\r\n particle.velocity = v2 - 2. * m1/(m2+m1) * np.dot(v2-v1, (-di)) / (np.linalg.norm(di)**2.) * (-di)",
"def _set_winding_angle_and_angular_velocity(self, delta_time, cut_lines):\n\n coord = self.coord_list[:, 0] + 1j * self.coord_list[:, 1]\n coord += coord.real * np.cos(self.mean_pos[1] * np.pi / 180)\n vectors = coord[1:] - coord[:-1]\n\n # Remove the null vectors\n not_null_vect = vectors != 0\n vectors = vectors[not_null_vect]\n norms = abs(vectors)\n angles = np.angle(vectors[1:] / vectors[:-1])\n\n if type(delta_time * 1.0) != float:\n delta_time = delta_time[not_null_vect]\n\n self.coord_list = self.coord_list[np.append(not_null_vect, True), :]\n self.nb_points = len(self.coord_list)\n\n if self.nb_points <= 2:\n self.winding_angle = 0\n self.angular_velocities = 0\n else:\n if cut_lines:\n # Find the shortest sub streamline with the smallest ratio\n # dist(start,end)/sub_line_len. This ratio will be close to 1 for\n # straight lines and close to 0 for closed loop.\n cumsum_norms = np.concatenate((np.array([0]),np.cumsum(norms)))\n\n min_ratio = 1\n\n min_end_id = self.nb_points - 1\n min_start_id = 0\n\n for start_id in range(self.nb_points):\n start_norm = cumsum_norms[start_id]\n start_pts = coord[start_id]\n for end_id in range(start_id+1, self.nb_points):\n curr_len = cumsum_norms[end_id] - start_norm\n start_end_dist = abs(coord[end_id] - start_pts)\n ratio = start_end_dist / curr_len\n if ratio <= min_ratio:\n min_ratio = ratio\n min_end_id = end_id\n min_start_id = start_id\n\n # Recompute the first attributs after taking the sub streamline\n self.coord_list = np.array(\n self.coord_list[min_start_id : min_end_id + 1], dtype=float\n )\n self.nb_points = (min_end_id - min_start_id) + 1\n self.length = cumsum_norms[end_id] - cumsum_norms[start_id]\n self.mean_pos = np.mean(self.coord_list, axis=0)\n\n # Recompute the sub angle list and delta_time\n angles = np.array(angles[min_start_id : min_end_id - 1])\n if type(delta_time * 1.0) != float:\n delta_time = np.array(delta_time[min_start_id:min_end_id])\n\n # Set the winding angle and the angular velocity\n self.winding_angle = np.sum(angles)\n\n if type(delta_time * 1.0) == float:\n self.angular_velocities = angles / delta_time\n else:\n self.angular_velocities = (\n 2 * angles / (delta_time[1:] + delta_time[:-1])\n )",
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO",
"def update_velocity(self):\n # Set thruster (up/down) movement\n if self.thrusters:\n self.velocity_y -= self.gravity\n else:\n self.velocity_y += self.velocity_slowing\n\n # Set left movement\n if self.moving_left:\n self.velocity_x -= self.gravity\n else:\n if self.velocity_x < 0:\n self.velocity_x += self.velocity_slowing\n \n # Set right movement\n if self.moving_right:\n self.velocity_x += self.gravity\n else:\n if self.velocity_x > 0:\n self.velocity_x -= self.velocity_slowing",
"def momentum_updates(self, params, grads, learning_rate=1e-3, m=0.9,\n nesterov=True):\n updates = []\n lr = learning_rate * (1 - m)\n for param, grad in zip(params, grads):\n size = param.eval().shape\n # momentum velocity\n v = theano.shared(\n value=np.zeros(size, DTYPE_FLOATX),\n name='v_0'\n )\n # velocity update\n inc = m * v - lr * grad\n updates.append((v, inc))\n\n # Nesterov accelerated momentum\n if nesterov:\n inc = m * inc - lr * grad\n updates.append((param, param + inc))\n\n return updates",
"def get_angular_velocity(r, T):\n # http://www.hep.fsu.edu/~berg/teach/phy2048/0918.pdf\n # velocity = 2(pi)r/T\n return (2*math.pi*r)/T",
"def _compute_gravity_torque(self):\n pass"
]
| [
"0.6337773",
"0.6259177",
"0.608339",
"0.58207667",
"0.56556183",
"0.5638544",
"0.55727834",
"0.5561666",
"0.5555737",
"0.55239564",
"0.54603755",
"0.54364896",
"0.542645",
"0.5399831",
"0.53976446",
"0.5384",
"0.5349686",
"0.5341977",
"0.5341679",
"0.53016967",
"0.52791053",
"0.52714944",
"0.5268817",
"0.52611375",
"0.52524805",
"0.5249641",
"0.5232296",
"0.52308744",
"0.52168715",
"0.5215904"
]
| 0.7185989 | 0 |
Color edge based on the mean of its node colors | def get_edge_color( row ):
rgb = 0.5 * (
node_color_dict[ row[ 'source' ] ] + \
node_color_dict[ row[ 'target' ] ] )
return rgb2hex( rgb ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_mean_color(graph, src ,dst):\n\n graph.nodes[dst]['total color'] += graph.nodes[src]['total color']\n graph.nodes[dst]['pixel count'] += graph.nodes[src]['pixel count']\n graph.nodes[dst]['mean color'] = (graph.nodes[dst]['total color'] / graph.nodes[dst]['pixel count'])",
"def node_colors(self, nodes):\n zmin, zmax = nodes[:, 2].min(), nodes[:, 2].max()\n start_color = np.array(self.background) + 5\n end_color = np.array(self.nodeColor)\n z = (nodes[:, 2] - zmin) / (zmax - zmin)\n # indexing [:, None] is used to explicitly state second axis\n c = (1 - z)[:, None] @ start_color[:, None].T + z[:, None] @ end_color[:, None].T\n self.wireframe_col = c\n # return c",
"def _weight_mean_color(graph, src, dst, n):\n diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']\n diff = np.linalg.norm(diff)\n return {'weight': diff}",
"def edge_colors(\n et: pd.DataFrame,\n nt: pd.DataFrame,\n color_by: Hashable,\n node_color_by: Hashable,\n):\n if color_by in (\"source_node_color\", \"target_node_color\"):\n edge_select_by = color_by.split(\"_\")[0]\n return encodings.data_color(\n et[edge_select_by].apply(nt[node_color_by].get),\n nt[node_color_by],\n )\n elif color_by:\n return encodings.data_color(et[color_by], et[color_by])\n return pd.Series([\"black\"] * len(et), name=\"color_by\")",
"def meanColor(self):\n return self.image[self.x, self.y]",
"def _on_edge_color_change(self, event=None):\n with self.layer.events.edge_color.blocker():\n index = self.edgeComboBox.findText(\n self.layer.edge_color, Qt.MatchFixedString\n )\n self.edgeComboBox.setCurrentIndex(index)\n color = Color(self.layer.edge_color).hex\n self.edgeColorSwatch.setStyleSheet(\"background-color: \" + color)",
"def meanColor(self):\n return np.array([f.meanColor() for f in self])",
"def set_colors(graph):\n colors = []\n for n in graph.nodes():\n node = graph.node[n]\n if node['adopter'] == 1:\n colors.append('b')\n else:\n colors.append('r')\n \n return colors",
"def maximal_color(graph, node):\n return max(get_node_colors(graph, node))",
"def mouseClicked(self,mouseEvent):\n\t\tself.canvas.nodeColorize(self)",
"def greedy_coloring(*args):\r\n # get arguments\r\n G = args[0]\r\n n = G.nodes()\r\n m = G.arcs()\r\n \r\n # check if it a valid Graph\r\n if not G.is_correct_type('u'):\r\n print \"ERROR: the graph is not in one of the valid formats for greedy_coloring()\"\r\n return [], []\r\n \r\n # calculate degrees of each node (set as rows per node)\r\n a_nodes = zeros((n,n), int)\r\n for arc in range(m):\r\n i = G.A[arc,0] # tail of the arc\r\n j = G.A[arc,1] # head of the arc\r\n a_nodes[i-1,j-1] = 1\r\n a_nodes[j-1,i-1] = 1\r\n # get degree and add the node number\r\n degree = sum(a_nodes,0)\r\n degree = vstack((degree, array(range(n), int) + 1))\r\n \r\n # initialize coloring vector\r\n coloring = zeros(n, int)\r\n color_step = 1\r\n \r\n # if there are any nodes of degree 0 color them first\r\n while min(degree[0,:]) == 0:\r\n n_i = argmin(degree[0,:]) # get node with zero\r\n i = degree[1,n_i]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_i], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_i], axis=1)\r\n # color it\r\n coloring[i-1] = color_step\r\n \r\n # iterate till all nodes have a color\r\n while size(degree) > 0:\r\n n_i = argmax(degree[0,:]) # get node with largest degree\r\n i = degree[1,n_i]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_i], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_i], axis=1)\r\n \r\n # color it\r\n coloring[i-1] = color_step\r\n \r\n # color the rest of the possible nodes\r\n possible = 1 - array(a_nodes[i-1,:]) # transforms 0 in 1, and 1 in 0\r\n # iterate while there are possible nodes available\r\n while sum(possible) > 0:\r\n # get the node with largest degree among possible ones\r\n n_j = argmax(degree[0,:] * possible)\r\n j = degree[1,n_j]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_j], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_j], axis=1)\r\n possible = delete(possible, n_j)\r\n \r\n # color it\r\n coloring[j-1] = color_step\r\n # eliminate adjacent nodes of j from possible nodes\r\n possible = possible * (1 - a_nodes[j-1,:])\r\n \r\n # update color\r\n color_step += 1\r\n \r\n col_number = max(coloring) # approx chromatic number\r\n \r\n return coloring, col_number",
"def get_node_color(self, origin_node_id):\n origin_node_id %= 11\n if origin_node_id == 9:\n return 0.753, 0.753, 0.753, 1.\n if origin_node_id == 8:\n return 0.824, 0.412, 0.118, 1.\n if origin_node_id == 7:\n return 1.000, 0.000, 1.000, 1.\n if origin_node_id == 6:\n return 1.000, 1.000, 0.000, 1.\n if origin_node_id == 5:\n return 1.000, 0.627, 0.478, 1.\n if origin_node_id == 4:\n return 0.498, 1.000, 0.000, 1.\n if origin_node_id == 3:\n return 0.000, 1.000, 1.000, 1.\n if origin_node_id == 2:\n return 1.000, 0.922, 0.804, 1.\n if origin_node_id == 1:\n return 0.871, 0.722, 0.529, 1.\n if origin_node_id == 0:\n return 0.000, 0.749, 1.000, 1.\n if origin_node_id == 0:\n return 0.500, 0.549, 1.000, 1.\n\n return 0.8, 0.8, 0.8, 1.0",
"def get_graph_color ( self, object ):\n return self.graph_color_",
"def change_edge_color(self, text):\n self.layer.edge_color = text",
"def calc_coloring(graph_rdd):\n graph_rdd = graph_rdd \\\n .map(swap) \\\n .union(graph_rdd)\n nodes = graph_rdd.keys().distinct()\n\n color = nodes.map(lambda x: (x, 1))\n color_num = 1\n\n while True:\n graph_join_color = graph_rdd.join(color)\n neighbour = graph_join_color \\\n .map(lambda (x, (a, bx)): (a, (x, bx))) \\\n .groupByKey() \\\n .map(lambda (x, y): (x, [n[1] for n in y]))\n color = neighbour.map(lambda (x, y): (x, hash(str(sorted(y)))))\n color_new = color \\\n .map(swap) \\\n .reduceByKey(add, numPartitions=40) \\\n .map(lambda x: 1) \\\n .reduce(add)\n if color_num != color_new:\n break\n color_num = color_new\n return color",
"def color(self, verify=False, sort=\"id\"):\n\t\t\n\t\tcolored = 0\n\t\tround = 0\n\t\t\n\t\t# Sort\n\t\tif sort == \"id\":\n\t\t\tsorted_vertices = sorted(self.vertices, key = lambda x : x)\n\t\telif sort == \"ascending\":\n\t\t\tsorted_vertices = sorted(self.vertices, key = lambda x : len(self.vertices[x].adjacent_to))\n\t\telif sort == \"descending\":\n\t\t\tsorted_vertices = sorted(self.vertices, key = lambda x : -len(self.vertices[x].adjacent_to))\n\t\t\n\t\t# Assign numbers\n\t\tfor i in range(0, len(sorted_vertices)):\n\t\t\tself.graph.set_vertex_value(sorted_vertices[i], {'number': i})\n\t\t\n\t\t# Color all vertices\n\t\twhile colored < len(self.graph.vertices):\n\t\t\t\n\t\t\t# Remove colored vertices\n\t\t\tsorted_vertices = [x for x in sorted_vertices if not 'color' in self.graph.get_vertex_value(x)]\n\t\t\t\n\t\t\t# Test if vertex needs to be colored this round\n\t\t\tfor vertex in sorted_vertices:\n\t\t\t\tif 'color' in self.graph.get_vertex_value(vertex):\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tvertex_number = self.graph.get_vertex_value(vertex)['number']\n\t\t\t\tneighbors = self.graph.neighbors(vertex)\n\t\t\t\tlocal_max = True\n\t\t\t\t\n\t\t\t\t# Check if local max\n\t\t\t\tfor neighbor in neighbors:\n\t\t\t\t\tdata_neighbor = self.graph.get_vertex_value(neighbor)\n\t\t\t\t\tif 'color' in data_neighbor and data_neighbor['color'] != round:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif data_neighbor['number'] > vertex_number:\n\t\t\t\t\t\tlocal_max = False\n\t\t\t\t\n\t\t\t\t# Color if local max\n\t\t\t\tif local_max:\n\t\t\t\t\tself.graph.set_vertex_value(vertex, {u'color': round})\n\t\t\t\t\tcolored += 1\n\t\t\t\t\t\t\t\t\n\t\t\t# Increment round\n\t\t\tround += 1\n\t\t\n\t\t# Verify\n\t\tcorrect = True\n\t\tif verify:\n\t\t\tfor vertex in self.graph.vertices:\n\t\t\t\tneighbors = self.graph.neighbors(vertex)\n\t\t\t\tfor neighbor in neighbors:\n\t\t\t\t\tif self.graph.get_vertex_value(vertex)['color'] == self.graph.get_vertex_value(neighbor)['color']:\n\t\t\t\t\t\treturn -1\n\t\t\n\t\treturn round",
"def meanrgb(color1,color2):\r\n if check_colormath:\r\n srgb1 = sRGBColor(color1[0],color1[1],color1[2])\r\n srgb2 = sRGBColor(color2[0],color2[1],color2[2])\r\n\r\n lab1 = convert_color (srgb1,LabColor)\r\n lab2 = convert_color (srgb2,LabColor)\r\n lab1tuple = SpectralColor.get_value_tuple(lab1)\r\n lab2tuple = SpectralColor.get_value_tuple(lab2)\r\n labAtuple = ( (lab1tuple[0] + lab2tuple[0])/2.0 , (lab1tuple[1] + lab2tuple[1])/2.0,\r\n (lab1tuple[2] + lab2tuple[2])/2.0 )\r\n labA = LabColor(labAtuple[0],labAtuple[1],labAtuple[2])\r\n rgbA = convert_color(labA,sRGBColor)\r\n rgbAtuple = SpectralColor.get_value_tuple(rgbA)\r\n return list(rgbAtuple)\r\n else:\r\n acolor = [0,0,0]\r\n for j in range(3):\r\n ## this seems to give a useful average color\r\n meancolor = (color1[j] + color2[j])/2.0\r\n # now lighten it a bit\r\n acolor[j] = (1.0 - (0.8 * (1.0 -meancolor )))\r\n return acolor",
"def color_chosen_nodes(network, chosen_node, color):\n\n # Color the node selected randomly by RWR\n network.nodes[chosen_node]['color'] = color\n # Create a list with color for each node\n color_nodes = [network.nodes[node]['color'] for node in network.nodes]\n return color_nodes",
"def draw_edges(self):\n pass",
"def set_edge_colors(self, edge_colors):\n\n self.edge_colors = edge_colors",
"def vertex_coloring(self, display = False):\r\n stack = self.SL_algorithm()\r\n color_of_vertex = self.greedily_coloring(stack)\r\n if(display):\r\n self.display_graph(color_of_vertex)\r\n return color_of_vertex\r\n else: \r\n return color_of_vertex",
"def _greedy_color(self, source):\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = True\n for c in xrange(self.graph.v()): # check colors\n if not self._color_list[c]:\n self.color[source] = c\n break\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = False\n return c",
"def draw(self, model):\n graph = model.graph\n ants = model.ants\n sugar = model.sugar\n nest = model.nest\n\n colors = {node: \"y\" for node in graph.nodes}\n colors[nest] = \"b\"\n colors[sugar] = \"r\"\n for ant in ants:\n colors[ant.position] = \"k\"\n\n weights = [graph[u][v][\"weight\"] / 5 for u, v in graph.edges()]\n super().draw(graph, node_color=colors.values(), width=weights)#, arrows=True)",
"def add_ancestor_edges(graph, node, color):\n out_neighbor, = graph.get_deductive_out_neighbors(node)\n for in_neighbor in graph.get_deductive_in_neighbors(node):\n graph.add_ancestor_edge(out_neighbor, in_neighbor, path=[0, color])\n graph.set_node_attribute(in_neighbor, graph.ANCESTOR_TARGET, True)",
"def get_color(edge, nR):\n R_color, E_color = 'C0', 'C1'\n edge = sorted(edge)\n if edge[0] < nR:\n if edge[1] > nR:\n comp_color = 'gray'\n zorder = 10\n else:\n comp_color = R_color\n zorder = 5\n else:\n comp_color = E_color\n zorder = 5\n return comp_color, zorder",
"def get_color(self, node: Node) -> str:\n\n idx = hash(node.get_kind_name()) % len(self.colors_)\n return self.colors_[idx]",
"def randcolor(self, left_edge, right_edge):\n color_pixel = (\n random.randint(\n left_edge, right_edge), random.randint(\n left_edge, right_edge), random.randint(\n left_edge, right_edge))\n return color_pixel",
"def highlight_cycles(self):\n for cycle in nx.simple_cycles(self.network):\n i = 0\n while i < len(cycle) - 1:\n self.network[cycle[i]][cycle[i + 1]][\"color\"] = self.cycles_color\n i += 1\n self.network[cycle[-1]][cycle[0]][\"color\"] = self.cycles_color",
"def make_edgecolor(ax, color=None):\n for i,artist in enumerate(ax.artists):\n # Set the linecolor on the artist to the facecolor, and set the facecolor to None\n col = artist.get_facecolor()\n if color is not None:\n col = color\n artist.set_edgecolor(col)\n artist.set_facecolor('None')\n\n # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)\n # Loop over them here, and use the same colour as above\n for j in range(i*6,i*6+6):\n line = ax.lines[j]\n line.set_color(col)\n line.set_mfc(col)\n line.set_mec(col)",
"def node_set_colors(nodes, spanset, gapset, preset, postset):\n\n node_colors = []\n for n in nodes:\n if n in preset:\n node_colors.append(nx_helpers.rgb_to_hex((255, 0, 0)))\n elif n in postset:\n node_colors.append(nx_helpers.rgb_to_hex((255, 255, 0)))\n ## reads now may be missing the last set of numbers. Account for this in the node naming.\n elif n in gapset or any([g for g in gapset if n in g]):\n node_colors.append(nx_helpers.rgb_to_hex((0, 10, 250)))\n elif n in spanset or any([s for s in spanset if n in s]):\n node_colors.append(nx_helpers.rgb_to_hex((0, 250, 10)))\n else:\n # uncategorized\n node_colors.append(nx_helpers.rgb_to_hex((0, 0, 0)))\n return node_colors"
]
| [
"0.6794543",
"0.65701836",
"0.63036895",
"0.60805434",
"0.5911971",
"0.5882105",
"0.5866474",
"0.58314013",
"0.58042705",
"0.5791548",
"0.57542545",
"0.5686546",
"0.5630701",
"0.56300443",
"0.5613021",
"0.55528307",
"0.55281764",
"0.55185825",
"0.55115545",
"0.54595757",
"0.54349095",
"0.54219985",
"0.5415037",
"0.5388365",
"0.53855",
"0.53838557",
"0.53081316",
"0.53031373",
"0.5295411",
"0.5288515"
]
| 0.6954697 | 0 |
Import functions from module ``mmgroup.mm_order``. We import these functions from module ``mmgroup.mm_order`` on demand. This avoids an infinite recursion of imports. | def import_mm_order_functions():
global check_mm_order, check_mm_equal
global check_mm_half_order, check_mm_in_g_x0
from mmgroup.mm_order import check_mm_order as f
check_mm_order = f
from mmgroup.mm_order import check_mm_equal as f
check_mm_equal = f
from mmgroup.mm_order import check_mm_half_order as f
check_mm_half_order = f
from mmgroup.mm_order import check_mm_in_g_x0 as f
check_mm_in_g_x0 = f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _import_custom(self, custom_modules):\n for filter_module in custom_modules:\n info('Loading {}'.format(filter_module))\n funs = module_utils.get_all_functions(filter_module)\n for fun_name, fun in funs.items():\n if fun_name.startswith('function'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding function {}'.format(import_name))\n self._functions[import_name] = fun\n elif fun_name.startswith('filter'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding filter {}'.format(import_name))\n self._filters[import_name] = fun",
"def test_ufedmm_imported():\n assert \"ufedmm\" in sys.modules",
"def load_functions(self, module_name, path=None):\n# try:\n if True:\n if not path:\n path = os.getcwd()\n if not isinstance(path,list):\n path = [path]\n file,filename,desc = imp.find_module(module_name,path)\n funcs = imp.load_module(module_name, file, filename, desc)\n if hasattr(funcs,'_init'):\n getattr(funcs,'_init')(self)\n attrs = [attr for attr in funcs.__dict__ \n if not attr.startswith('__')\n and attr is not '_init'\n and not hasattr(getattr(funcs,attr),'__base__')]\n for attr in attrs:\n try:\n print 'Adding', attr, 'to', self._name\n self.add_function(getattr(funcs,attr))\n except:\n print 'Error adding', attr, 'to', self._name",
"def test_rlmm_imported():\n assert \"rlmm\" in sys.modules",
"def _import(self, module_name):\n # load keywords\n kw = __import__('keywords')\n # set real rpc proxy\n kw.var_cache['proxy'] = device_proxy\n kw.var_cache['reflection'] = reflection_proxy\n kw.var_cache['local'] = local_proxy\n # load script\n __import__(module_name)\n # register all kw func from keywords.kw_func\n self.kw_func.update(kw.kw_func)",
"def import_all_model_modules():\r\n import brokerage.model\r\n # ensure that these imports don't get auto-deleted! they have side effects.\r\n brokerage.model",
"def xocImport(self, name, *args, **kwargs):\n trace(\"Import invoked:\", name, kwargs.keys())\n if name in sys.builtin_module_names:\n trace(\"Loading builtin module\", name)\n return self.load_module(name)\n else:\n return self.oldImport(name, *args, **kwargs)",
"def import_module(self, location, name):",
"def supports_ordinary_make_module_imports(self):\n return True",
"def restoreImport():\n # Restore __import__ with thre orignial import mechanism\n __builtins__[\"__import__\"] = BUILTIN_IMPORT",
"def load_modules_manually():\n #cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\n cmd_folder = '../myutils/'\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n #print sys.path",
"def test_mmelemental_imported():\n import sys\n\n assert \"mmelemental\" in sys.modules",
"def import_all():\n import theory",
"def _isolateImports(mf, f, *a, **kw):\n\n\n oldMetaPath = sys.meta_path\n oldPathHooks = sys.path_hooks\n _PEP302Mapper._oldSysModules = sys.modules.copy()\n oldImport = __builtin__.__import__\n #where is your god now?\n sys.path_hooks = []\n sys.modules.clear()\n sys.meta_path = [mf]\n __builtins__['__import__'] = mf.xocImport\n\n\n\n #stupid special case for the stdlib\n if mf.mapper.contains('warnings'):\n sys.modules['warnings'] = mf.mapper.lookup('warnings')\n\n try:\n return f(*a, **kw)\n finally:\n sys.meta_path = oldMetaPath\n sys.path_hooks = oldPathHooks\n sys.modules.clear()\n sys.modules.update(_PEP302Mapper._oldSysModules)\n __builtins__['__import__'] = oldImport",
"def _imported_functions(self):\n\n i = 0\n while 1:\n thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.OriginalFirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n\n # We've reached the end when the element is zero \n if thunk == None or thunk.AddressOfData == 0:\n break\n\n o = obj.NoneObject(\"Ordinal not accessible?\")\n n = obj.NoneObject(\"Imported by ordinal?\")\n f = obj.NoneObject(\"FirstThunk not accessible\")\n\n # If the highest bit (32 for x86 and 64 for x64) is set, the function is \n # imported by ordinal and the lowest 16-bits contain the ordinal value. \n # Otherwise, the lowest bits (0-31 for x86 and 0-63 for x64) contain an \n # RVA to an _IMAGE_IMPORT_BY_NAME struct. \n if thunk.OrdinalBit == 1:\n o = thunk.Ordinal & 0xFFFF\n else:\n iibn = obj.Object(\"_IMAGE_IMPORT_BY_NAME\",\n offset = self.obj_parent.DllBase +\n thunk.AddressOfData,\n vm = self.obj_native_vm)\n o = iibn.Hint\n n = iibn.Name\n\n # See if the import is bound (i.e. resolved)\n first_thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.FirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n if first_thunk:\n f = first_thunk.Function.v()\n\n yield o, f, str(n or '')\n i += 1",
"def AddImport(imports, mojom_imports, module, element):\n if not isinstance(element, mojom.Kind):\n return\n\n if mojom.IsArrayKind(element) or mojom.IsInterfaceRequestKind(element):\n AddImport(imports, mojom_imports, module, element.kind)\n return\n if mojom.IsMapKind(element):\n AddImport(imports, mojom_imports, module, element.key_kind)\n AddImport(imports, mojom_imports, module, element.value_kind)\n return\n if mojom.IsAnyHandleKind(element):\n imports['mojo/public/go/system'] = 'system'\n return\n\n if not hasattr(element, 'imported_from') or not element.imported_from:\n return\n imported = element.imported_from\n if GetPackagePath(imported['module']) == GetPackagePath(module):\n return\n path = GetPackagePath(imported['module'])\n if path in imports:\n return\n name = GetPackageName(imported['module'])\n while name in imports.values(): # This avoids repeated names.\n name += '_'\n imported['go_name'] = name\n imports[path] = name\n mojom_imports[path] = name",
"def import_function(name: str):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)",
"def add_module_import(self, module):\n self._main_model.add_module_import(module)",
"def import_module(self, module): # pylint: disable=R0201\r\n if isinstance(module, list):\r\n all_modules = module\r\n else:\r\n all_modules = [module]\r\n for mod in all_modules:\r\n globals()[mod] = __import__(mod.strip())",
"def overrideImport():\n # Replace __import__ with importOverride\n __builtins__[\"__import__\"] = importOverride",
"def late_import():\n global NumberField_quadratic\n global NumberFieldElement_quadratic\n global AlgebraicNumber_base\n global AlgebraicNumber\n global AlgebraicReal\n global AA, QQbar, SR\n global CLF, RLF, CDF\n if NumberFieldElement_quadratic is None:\n import sage.rings.number_field.number_field\n import sage.rings.number_field.number_field_element_quadratic as nfeq\n NumberField_quadratic = sage.rings.number_field.number_field.NumberField_quadratic\n NumberFieldElement_quadratic = nfeq.NumberFieldElement_quadratic\n import sage.rings.qqbar\n AlgebraicNumber_base = sage.rings.qqbar.AlgebraicNumber_base\n AlgebraicNumber = sage.rings.qqbar.AlgebraicNumber\n AlgebraicReal = sage.rings.qqbar.AlgebraicReal\n AA = sage.rings.qqbar.AA\n QQbar = sage.rings.qqbar.QQbar\n import sage.symbolic.ring\n SR = sage.symbolic.ring.SR\n from .real_lazy import CLF, RLF\n from .complex_double import CDF",
"def load_code(mfile, fname):\n mname = mfile.split('.py')[0].replace('/', '.')\n try:\n mod = __import__(mname, fromlist=['model'])\n func = getattr(mod, fname)\n print(\"load {} {} {}\".format(mfile, func, func.__doc__))\n return func\n except ImportError:\n traceback.print_exc()\n msg = \"Please provide file name with 'def %s' implementation\" % fname\n msg += \"\\nThe file should be available in PYTHONPATH\"\n print(msg)\n raise",
"def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module",
"def setUp(self):\n\n def import_hook(name, *args, **kwargs):\n if name == 'actstream':\n raise ImportError('test case module import failure')\n else:\n return self.original_imports(name, *args, **kwargs)\n\n self.original_imports = builtins.__import__\n builtins.__import__ = import_hook",
"def auto_import_commands():\n import re,os\n import topo\n import __main__\n\n # CEBALERT: this kind of thing (topo.__file__) won't work with\n # py2exe and similar tools\n topo_path = os.path.join(os.path.split(topo.__file__)[0],\"command\")\n for f in os.listdir(topo_path):\n if re.match('^[^_.].*\\.py$',f):\n modulename = re.sub('\\.py$','',f)\n exec \"from topo.command.\"+modulename+\" import *\" in __main__.__dict__\n exec \"from topo.command import *\" in __main__.__dict__",
"def _setup_modules(self):\r\n module_registry = AppModule.module_registry()\r\n for bundle in topological_sort(AppModule.module_dependencies()):\r\n for module_label in bundle:\r\n assert module_label in module_registry\r\n module = module_registry[module_label]\r\n self._debug_log('Initializing: %s (%s)' % (module.label(), module.description()))\r\n try:\r\n module.setup_function()\r\n except AppModule.Unimplemented:\r\n pass\r\n self._init_modules.append(module.label())",
"def _import_package_files():\n\n\timport os\n\texports = []\n\tglobals_, locals_ = globals(), locals()\n\tpackage_path = os.path.dirname(__file__)\n\tpackage_name = os.path.basename(package_path)\n\n\tfor filename in os.listdir(package_path):\n\t\tmodulename, ext = os.path.splitext(filename)\n\t\tif modulename[0] != '_' and ext in ('.py', '.pyw'):\n\t\t\tsubpackage = '%s.%s' % (package_name, modulename) # package relative\n\t\t\tmodule = __import__(subpackage, globals_, locals_, [modulename])\n\t\t\tmodict = module.__dict__\n\t\t\tnames = (modict['__all__'] if '__all__' in modict else\n\t\t\t\t[name for name in modict if name[0] != '_']) # all public\n\t\t\texports.extend(names)\n\t\t\tglobals_.update((name, modict[name]) for name in names)\n\n\treturn exports",
"def retry_import(mf: ModuleGraph, m: Node) -> typing.Optional[Node]:\n if \".\" in m.identifier:\n pname, partname = m.identifier.rsplit(\".\", 1)\n parent = mf.findNode(pname)\n else:\n parent = None\n partname = m.identifier\n\n # This is basically mf.find_module inlined and with a\n # check disabled.\n\n def fmod(\n name: str,\n path: typing.Optional[typing.List[str]],\n parent: typing.Optional[Node],\n ) -> typing.Tuple[\n typing.Optional[typing.IO], typing.Optional[str], typing.Tuple[str, str, int]\n ]:\n if path is None:\n if name in sys.builtin_module_names:\n return (None, None, (\"\", \"\", imp.C_BUILTIN))\n\n path = mf.path\n\n fp, buf, stuff = find_module(name, path)\n if buf:\n buf = os.path.realpath(buf)\n return (fp, buf, stuff)\n\n try:\n fp, pathname, stuff = fmod(\n partname, parent.packagepath if parent is not None else None, parent\n )\n except ImportError:\n return None\n\n if stuff[-1] == imp.PKG_DIRECTORY:\n m.__class__ = Package\n elif stuff[-1] == imp.PY_SOURCE:\n m.__class__ = SourceModule\n else:\n m.__class__ = CompiledModule\n\n m = mf._load_module(m.identifier, fp, pathname, stuff)\n\n if parent:\n mf.createReference(m, parent)\n parent[partname] = m\n return m",
"def test_imports():\n from tg_utils import admin\n from tg_utils import checks\n from tg_utils import compressor_filters\n from tg_utils import email\n from tg_utils import files\n from tg_utils import hashmodels\n from tg_utils import lock\n from tg_utils import managers\n from tg_utils import mixins\n from tg_utils import models\n from tg_utils import profiling\n from tg_utils import signals\n from tg_utils import uuid\n from tg_utils import decorators",
"def _import_image_module(mn):\n global _image_modules\n\n result = None\n\n # Don't re-import existing cached modules\n if mn in _image_modules and _image_modules[mn] is not None:\n result = _image_modules[mn]\n else:\n # Import the 'Image' module from PIL\n if mn == \"pil\":\n try:\n result = importlib.import_module(\"PIL.Image\")\n except (ImportError, RuntimeError):\n pass\n\n # Import wxPython\n elif mn == \"wx\":\n try:\n result = importlib.import_module(\"wx\")\n except (ImportError, RuntimeError):\n pass\n\n # Import the 'QImage' module from PyQt5, PyQt4 or PySide\n elif mn == \"qt\":\n try:\n result = importlib.import_module(\"PyQt5.QtGui\").QImage\n except (ImportError, RuntimeError):\n try:\n result = importlib.import_module(\"PyQt4.QtGui\").QImage\n except (ImportError, RuntimeError):\n try:\n result = importlib.import_module(\"PySide.QtGui\").QImage\n except ImportError:\n pass\n\n # Import OpenCV\n elif mn == \"cv\":\n try:\n result = importlib.import_module(\"cv2\")\n except (ImportError, RuntimeError):\n pass\n\n if result:\n _image_modules[mn] = result\n\n return result"
]
| [
"0.56968665",
"0.5643161",
"0.5435371",
"0.53745395",
"0.53692764",
"0.53691804",
"0.53291184",
"0.5281062",
"0.5275298",
"0.5262633",
"0.52055323",
"0.51773846",
"0.51679754",
"0.5095697",
"0.5073008",
"0.50676996",
"0.50347024",
"0.50226986",
"0.5018887",
"0.5016364",
"0.5015765",
"0.5006036",
"0.49910232",
"0.49886143",
"0.49676767",
"0.49559212",
"0.49550524",
"0.4945943",
"0.49298817",
"0.4916955"
]
| 0.82972014 | 0 |
String representation of the Completeness object. When 'print' is used on the Completeness object, this method returns the values contained in the object. | def __str__(self):
for att in self.__dict__.keys():
print '%s: %r' % (att, getattr(self, att))
return 'Completeness class object attributes' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n struct_repr = \", \".join([\n \"was_available_once: \" + str(self.was_available_once),\n \"is_available: \" + str(self.is_available),\n \"signal_strength_percent: \" + str(self.signal_strength_percent)\n ])\n\n return f\"RcStatus: [{struct_repr}]\"",
"def __str__(self):\n result = \"Problem size: \"\n result += str(len(self._list)) + \"\\n\"\n result += \"Elapsed time: \"\n result += str(self._elapsed_time) + \"\\n\"\n if self._comp:\n result += \"Comparisons: \"\n result += str(self._comp_count) + \"\\n\"\n if self._swap:\n result += \"Exchanges: \"\n result += str(self._swap_count) + \"\\n\"\n return result",
"def __str__(self):\n struct_repr = \", \".join([\n \"is_gyrometer_calibration_ok: \" + str(self.is_gyrometer_calibration_ok),\n \"is_accelerometer_calibration_ok: \" + str(self.is_accelerometer_calibration_ok),\n \"is_magnetometer_calibration_ok: \" + str(self.is_magnetometer_calibration_ok),\n \"is_level_calibration_ok: \" + str(self.is_level_calibration_ok),\n \"is_local_position_ok: \" + str(self.is_local_position_ok),\n \"is_global_position_ok: \" + str(self.is_global_position_ok),\n \"is_home_position_ok: \" + str(self.is_home_position_ok)\n ])\n\n return f\"Health: [{struct_repr}]\"",
"def _to_string(self):\n self.results.print_results()\n self.results.print_comparison()",
"def __str__(self):\n struct_repr = \", \".join([\n \"voltage_v: \" + str(self.voltage_v),\n \"remaining_percent: \" + str(self.remaining_percent)\n ])\n\n return f\"Battery: [{struct_repr}]\"",
"def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string",
"def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"classes = {}\\n\".format(self.classes)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status",
"def __str__(self):\n struct_repr = \", \".join([\n \"active: \" + str(self.active),\n \"actuator: \" + str(self.actuator)\n ])\n\n return f\"ActuatorOutputStatus: [{struct_repr}]\"",
"def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"conv_activations = {}\\n\".format(self.conv_activations)\n status += \"conv_architecture = {}\\n\".format(self.conv_architecture)\n status += \"kernel_sizes = {}\\n\".format(self.kernel_sizes)\n status += \"pool_kernel = {}\\n\".format(self.pool_kernel)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status",
"def __str__(self):\n s = '--------- Descriptor ---------\\n'\n s += 'serial no.: %s\\n' % self.jobSerialNumber\n s += 'name: %s\\n' % self.name\n s += 'identifiedName: %s\\n' % self.identifiedName\n s += 'log name: %s\\n' % self.log\n s += 'job group: %s\\n' % self.jobGroup\n s += 'runPath: %s\\n' % self.runPath\n s += 'resPath: %s\\n' % self.resPath\n s += 'suggested batch queue: %s\\n' % self.suggestedQueue\n s += 'confederation: %s\\n' % self.confederation\n s += self.paths.__str__()\n \n return s",
"def __str__(self):\n return stringify(\n Inspect(\n self,\n help=True,\n methods=True,\n private=True,\n dunder=False,\n sort=True,\n all=False,\n ),\n maxlen=-1,\n )",
"def __str__(self):\n if self.combinedReplicates:\n printout = \"\\nCOMBINED MS REPLICATES WITH n = \" + str(self.n_cutoff) + \" and std dev = \" + str(self.std_cutoff) + \"\\nCell Lines: \" + str(self.cellLines).strip(\"[]\") + \"\\nSize: \" + str([self.combinedReplicates[i].shape[0] for i in range(len(self.combinedReplicates))]).strip(\"[]\") + \"\\nIntersection Size: \" + str(self.experimentFullIntersection.shape[0]) + \"\\n\"\n printout += \"\\n\"\n else:\n printout = \"\"\n printout += \"\\n\".join([str(each) for each in self.experimentalReplicates]).strip(\"[]\")\n if self.phenotypicMeasurements:\n printout += \"\\n\"\n printout += \"\".join([str(each) for each in self.phenotypicMeasurements.values()]).strip(\"[]\")\n\n return printout",
"def __str__(self):\n return self.components + \", \" + self.circuit_name",
"def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ '\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'",
"def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'",
"def printClassifier(self):\n classifier_info = \"\"\n for att in range(cons.env.format_data.numb_attributes):\n att_info = cons.env.format_data.attribute_info[att]\n if att in self.specified_attributes: #If the attribute was specified in the rule\n i = self.specified_attributes.index(att)\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n classifier_info += str(self.condition[i][0])+';'+str(self.condition[i][1]) + \"\\t\"\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n classifier_info += str(self.condition[i]) + \"\\t\"\n else: # Attribute is wild.\n classifier_info += '#' + \"\\t\"\n #-------------------------------------------------------------------------------\n specificity = len(self.condition) / float(cons.env.format_data.numb_attributes)\n\n if cons.env.format_data.discrete_action:\n classifier_info += str(self.action)+\"\\t\"\n else:\n classifier_info += str(self.action[0])+';'+str(self.action[1])+\"\\t\"\n #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n classifier_info += '{:.1f}'.format(self.prediction)+\"\\t\"+'{:.2f}'.format(self.error)+\"\\t\"+'{:.2f}'.format(self.fitness)+\"\\t\"+str(self.numerosity)+\"\\t\"+str(self.ga_count)+\"\\t\"\n classifier_info += '{:.1f}'.format(self.mean_actionset_sz)+\"\\t\\t\"+str(self.ga_timestamp)+\"\\t\\t\"+str(self.init_timestamp)+\"\\t\\t\"+'{:.2f}'.format(specificity)+\"\\t\\t\"\n classifier_info += '{:.1f}'.format(self.delete_vote)+\"\\t\\t\"+str(self.action_cnt)+\"\\n\"\n\n #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n return classifier_info",
"def __repr__(self):\n options_str = \", \".join(\n [\n f\"validate={self._validate}\",\n f\"outcome={self._outcome}\",\n f\"alpha_prior={self._alpha_prior}\",\n ]\n )\n return f\"{self.__class__.__name__}({options_str})\"",
"def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)",
"def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"input_dim = {}\\n\".format(self.input_dim)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status",
"def get_human_readable(self):\n\n def yesno(key):\n if getattr(self, key) and getattr(self, key) > 0:\n return \"Y\"\n else:\n return \"N\"\n\n keys = (\n \"pvs1\",\n \"ps1\",\n \"ps2\",\n \"ps3\",\n \"ps4\",\n \"pm1\",\n \"pm2\",\n \"pm3\",\n \"pm4\",\n \"pm5\",\n \"pm6\",\n \"pp1\",\n \"pp2\",\n \"pp3\",\n \"pp4\",\n \"pp5\",\n \"ba1\",\n \"bs1\",\n \"bs2\",\n \"bs3\",\n \"bs4\",\n \"bp1\",\n \"bp2\",\n \"bp3\",\n \"bp4\",\n \"bp5\",\n \"bp6\",\n \"bp7\",\n )\n result = \", \".join([\"%s: %s\" % (key.upper(), yesno(key)) for key in keys])\n result += \", ACMG classification: %s\" % self.class_auto\n if self.class_override:\n result += \", ACMG class. override: %s\" % self.class_override\n return result",
"def __str__(self):\n\n # Print the class and address.\n msg = \"{0} at {1}\\n\".format(str(self.__class__), str(hex(id(self))))\n\n # Print some other basic information.\n msg = \"{0} line name: ({1})\\n\".format(msg, self.name)\n msg = \"{0} ping_time: ({1})\\n\".format(\n msg,\n self.ping_time.shape[0])\n msg = \"{0} data: ({1})\\n\".format(\n msg,\n self.data.shape[0])\n msg = \"{0} start time: {1}\\n\".format(msg,\n self.ping_time[0])\n msg = \"{0} end time: {1}\\n\" .format(msg,\n self.ping_time[-1])\n\n return msg",
"def toString():",
"def asString(self):\n\n res = []\n for v in list(self.vars.values()):\n res.append(v.asString())\n res.append('')\n for e in list(self.enums.values()):\n res.append(e.asString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.defAsString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.dataAsString())\n\n return '\\n'.join(res)",
"def printObj(self):\n return 'patient_id:{}, medication:{}, frequency:{}, start_dt:{},'\n 'end_dt:{}, noti_type:{}'.format(\n self.patients.data,\n self.medication.data,\n self.frequency.data,\n self.start_dt,\n self.end_dt.data,\n self.noti_type.data)",
"def __repr__(self):\n return ''.join(f'\\ncompany: {self.company_name}\\nsize: {self.company_size}\\ncompany_founded: '\n f'{self.company_founded}\\ncompany_industry: {self.company_industry}\\ncompany_sector: '\n f'{self.company_sector}\\ncompany_type: {self.company_type}\\ncompany_rating: '\n f'{self.company_rating}\\ncompany_competitors: {self.company_competitors}\\ncompany_revenue: '\n f'{self.company_revenue}\\ncompany_headquarters: {self.company_headquarters}')",
"def __str__(self):\n self.semaphore_lock.acquire()\n string_representation = \"\"\n for key, value in self.__dict.items():\n string_representation += \"{}: {}, \".format(key, value)\n string_representation = \"{\" + string_representation[:-1] + \"}\"\n self.semaphore_lock.release()\n\n return string_representation",
"def __str__(self):\n if __debug__:\n description = ('CM' in debug.active)\n else:\n description = False\n return self.asstring(short=False, header=True, summary=True,\n description=description)",
"def _to_str(self):\n\t\tprint(\"predictors: {}, types: {} \\n method: {}, preprocessing: {}\\\n\t\t\t \\n partition_rate: {}, metric: {}, file name: {}\".format(\n\t\t\t self.predictors, self.predictors_types, self.method_name,\n\t\t\t self.preprocessing_methods, self.data_split, self.metric,\n\t\t\t self.plotting_file_name))",
"def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'",
"def __str__(self) -> str:\n return \"scapacity of hash: {}, current size of hash: {}\".format(\n self.capacity, self.length\n )"
]
| [
"0.66518784",
"0.6558187",
"0.65491027",
"0.64940053",
"0.6372875",
"0.6264341",
"0.62038535",
"0.6191972",
"0.61830616",
"0.61642146",
"0.6149433",
"0.6116986",
"0.61088806",
"0.6106351",
"0.6097083",
"0.6079512",
"0.6078032",
"0.6071107",
"0.60571027",
"0.6052877",
"0.6051746",
"0.60499555",
"0.60386795",
"0.6025195",
"0.6023468",
"0.60188824",
"0.60097075",
"0.6009308",
"0.59954494",
"0.5989702"
]
| 0.75313747 | 0 |
Generates completeness values for target stars. This method is called from the TargetList __init__ method. | def target_completeness(self, TL):
comp0 = np.array([0.2]*TL.nStars)
return comp0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_appropriate_target(self):\n pass",
"def calculate_appropriate_target(self):\n pass",
"def get_target_per_score(self):\n pass",
"def narration_target(self):",
"def get_learning_completion(self):\n return min(1.0, self.step / Parameters.FINAL_EXPLORATION)",
"def __init__(self, label, select_high_quality):\n self.label = label\n self.resource = max(0.1, np.random.random_sample()) # let's assume everyone has at least a tenth of the max resource\n\n # put x one in each team\n if select_high_quality and np.random.uniform(0,1) <= PERCENT_INITIAL_HIGH_QUALITY:\n self.state = np.random.uniform(HIGH_QUALITY_THRESHOLD, 1)\n else:\n self.state = np.random.uniform(0, AVERAGE_DOCUMENTATION_FEQUENCY)\n self.tasks_completed = 0",
"def _finalize_targets(target_values, binarize_target, num_classes):\n\n target_values[target_values == target_val_utils.DEAD_STORM_INTEGER] = 0\n\n if binarize_target:\n target_values = (target_values == num_classes - 1).astype(int)\n num_classes_to_predict = 2\n else:\n num_classes_to_predict = num_classes + 0\n\n if num_classes_to_predict == 2:\n print('Fraction of {0:d} examples in positive class: {1:.3f}'.format(\n len(target_values), numpy.mean(target_values)\n ))\n return target_values\n\n target_matrix = keras.utils.to_categorical(\n target_values, num_classes_to_predict)\n\n class_fractions = numpy.mean(target_matrix, axis=0)\n print('Fraction of {0:d} examples in each class: {1:s}\\n'.format(\n len(target_values), str(class_fractions)\n ))\n\n return target_matrix",
"def completeness_update(self, sInd, TL, obsbegin, obsend, nexttime):\r\n # prototype returns the \"virgin\" completeness value\r\n return TL.comp0",
"def __init__(self) :\n self.probabilities_ = None",
"def __init__(self) :\n self.probabilities_ = None",
"def __init__(self) :\n self.probabilities_ = None",
"def init_complete(cls) -> object:\n return cls.from_matrix(tool.test_complete)",
"def buildings(self):\n\t\treturn None if self.is_input else self.throughput / self.recipe.throughput",
"def __init__(self):\n self.gradual_items = list()\n self.support = 0\n \"\"\":type support: float\"\"\"",
"def comply(self, counts):\n pass",
"def test_target_greater_than_alp(self):\n alp = list(range(5))\n targets = generate_targets(alp, 10)\n self.assertEqual(len(targets), 10)\n\n counts = Counter(targets)\n\n for item in alp:\n self.assertEqual(counts[item], 2)",
"def set_potential_target(self):\r\n \r\n import copy\r\n import numpy as np\r\n \r\n # Get the hydraulic conductivity\r\n for e in self.model.elementlist:\r\n if isinstance(e, ElementMoebiusBase) or isinstance(e, ElementUniformBase):\r\n temp_k = e.k\r\n \r\n for e in self.model.elementlist:\r\n if isinstance(e, ElementInhomogeneity):\r\n if e.are_points_inside_polygon(self.zc):\r\n temp_k = e.k\r\n \r\n # Create a list of hydraulic potential targets\r\n self.strength = copy.copy(self.head_change)\r\n if self.model.aquifer_type == 'confined':\r\n # Strack 1989, Eq. 8.6\r\n self.strength = temp_k*self.model.H*self.strength - \\\r\n 0.5*temp_k*self.model.H**2\r\n elif self.model.aquifer_type == 'unconfined':\r\n # Strack 1989, Eq. 8.7\r\n self.strength = 0.5*temp_k*self.strength**2\r\n elif self.model.aquifer_type == 'convertible':\r\n # Find out which points are confined and which are unconfined\r\n index_conf = np.where(self.strength >= self.model.H)[0]\r\n index_unconf = np.where(self.strength < self.model.H)[0]\r\n # Account for the confined points\r\n # confined: Strack 1989, Eq. 8.6\r\n self.strength[index_conf] = \\\r\n temp_k[index_conf]*self.model.H*self.strength[index_conf] - \\\r\n 0.5*temp_k[index_conf]*self.model.H**2\r\n # unconfined: Strack 1989, Eq. 8.7\r\n self.strength[index_unconf] = \\\r\n 0.5*temp_k[index_unconf]*self.strength[index_unconf]**2",
"def completeness_value(self, selection_band='I2_MAG_APER4'):\n\n # Load in the completeness simulation data from the file\n if isinstance(self._completeness_results, list):\n json_dicts = []\n for comp_results in self._completeness_results:\n with open(comp_results, 'r') as f:\n json_dicts.append(json.load(f))\n completeness_dict = dict(ChainMap(*json_dicts))\n else:\n with open(self._completeness_results, 'r') as f:\n completeness_dict = json.load(f)\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Array element names\n se_catalog = cluster_info['catalog']\n\n # Select the correct entry in the dictionary corresponding to our cluster.\n completeness_data = completeness_dict[cluster_id]\n\n # Also grab the magnitude bins used to create the completeness data (removing the last entry so we can\n # broadcast our arrays correctly)\n mag_bins = completeness_dict['magnitude_bins'][:-1]\n\n # Interpolate the completeness data into a functional form using linear interpolation\n completeness_funct = interp1d(mag_bins, completeness_data, kind='linear')\n\n # For the objects' magnitude specified by `selection_band` query the completeness function to find the\n # completeness value.\n completeness_values = completeness_funct(se_catalog[selection_band])\n\n # The completeness correction values are defined as 1/[completeness value]\n completeness_corrections = 1 / completeness_values\n\n # Add the completeness values and corrections to the SExtractor catalog.\n se_catalog['COMPLETENESS_VALUE'] = completeness_values\n se_catalog['COMPLETENESS_CORRECTION'] = completeness_corrections\n\n cluster_info['catalog'] = se_catalog",
"def get_expected_cost(self):",
"def target_multi_objective1(\n config: Configuration,\n seed: int,\n # instance: str,\n # budget: float,\n) -> list[float]:\n return [seed, seed]",
"def __init__(self, gyp_target, gn_target=None):\n if gn_target is None:\n gn_target = gyp_target\n self._gyp_target = gyp_target\n self._gn_target = gn_target\n\n self._skipped = []\n\n self._total_diffs = 0\n\n self._missing_gyp_flags = {}\n self._missing_gn_flags = {}\n\n self._missing_gyp_files = {}\n self._missing_gn_files = {}\n\n self._CompareFiles()",
"def pick(self, target: int) -> int:\n\t\tans = None\n cnt = 0\n for i, x in enumerate(self.nums): \n if x == target: \n cnt += 1\n if randint(1, cnt) == cnt: ans = i # prob 1/cnt\n return ans",
"def get_learn_after_each_trial(self):\r\n return 0",
"def coverage(self):\r\n return 0, 1",
"def complete(self, return_fraction=False):\n bools = map(lambda output: output.exists(), self.get_outputs(flatten=True))\n frac = 1.0*sum(bools)/len(bools)\n if return_fraction:\n return frac\n else:\n return frac >= self.min_completion_fraction",
"def get_learn_after_each_decision(self):\r\n return 0",
"def objective(self):\n pass",
"def test_target_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.target == atom.target",
"def __init__(self):\n self.set_recharge(random.randint(1000, 2000) / 10000)\n operator_count = random.randint(1, 3)\n self.operators = [Solder() for _ in range(0, operator_count)]\n list_operators = [i.get_health for i in self.operators]\n self.set_health(sum(list_operators) / len(list_operators))",
"def _build_binary_target(self):\n self.raw_data['binary_target'] = 0\n self.raw_data.loc[self.raw_data.no_occupants > 0, 'binary_target'] = 1"
]
| [
"0.52117205",
"0.52117205",
"0.5203223",
"0.5138068",
"0.5120635",
"0.50905424",
"0.5024778",
"0.49907243",
"0.4958207",
"0.4958207",
"0.4958207",
"0.4953239",
"0.49531022",
"0.49428225",
"0.49236003",
"0.48905414",
"0.48772275",
"0.4871452",
"0.48638627",
"0.48611942",
"0.48600638",
"0.48573476",
"0.48512486",
"0.48463",
"0.48080668",
"0.48037946",
"0.47680655",
"0.47662276",
"0.47590744",
"0.47501844"
]
| 0.7235767 | 0 |
Updates completeness value for stars previously observed | def completeness_update(self, sInd, TL, obsbegin, obsend, nexttime):
# prototype returns the "virgin" completeness value
return TL.comp0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def target_completeness(self, TL):\r\n \r\n comp0 = np.array([0.2]*TL.nStars)\r\n \r\n return comp0",
"def update_percent(self):",
"def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete",
"def update_score():\n pass",
"def _update_stars(self):\n self._check_galaxy_edges()\n self.stars.update()\n if pygame.sprite.spritecollideany(self.ship, self.stars):\n self._ship_hit()\n self._check_stars_bottom()",
"def prob_update(self):\n pass",
"def completeness_value(self, selection_band='I2_MAG_APER4'):\n\n # Load in the completeness simulation data from the file\n if isinstance(self._completeness_results, list):\n json_dicts = []\n for comp_results in self._completeness_results:\n with open(comp_results, 'r') as f:\n json_dicts.append(json.load(f))\n completeness_dict = dict(ChainMap(*json_dicts))\n else:\n with open(self._completeness_results, 'r') as f:\n completeness_dict = json.load(f)\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Array element names\n se_catalog = cluster_info['catalog']\n\n # Select the correct entry in the dictionary corresponding to our cluster.\n completeness_data = completeness_dict[cluster_id]\n\n # Also grab the magnitude bins used to create the completeness data (removing the last entry so we can\n # broadcast our arrays correctly)\n mag_bins = completeness_dict['magnitude_bins'][:-1]\n\n # Interpolate the completeness data into a functional form using linear interpolation\n completeness_funct = interp1d(mag_bins, completeness_data, kind='linear')\n\n # For the objects' magnitude specified by `selection_band` query the completeness function to find the\n # completeness value.\n completeness_values = completeness_funct(se_catalog[selection_band])\n\n # The completeness correction values are defined as 1/[completeness value]\n completeness_corrections = 1 / completeness_values\n\n # Add the completeness values and corrections to the SExtractor catalog.\n se_catalog['COMPLETENESS_VALUE'] = completeness_values\n se_catalog['COMPLETENESS_CORRECTION'] = completeness_corrections\n\n cluster_info['catalog'] = se_catalog",
"def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)",
"def _update(self):\n self.cv.update()",
"def assign_percent_real(session, percent_real_update, new_rate, current_rate):\n session.run(percent_real_update, feed_dict={new_rate: current_rate})",
"def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)",
"def update_comment_usefulness(self):\n self.cursor.execute(\"\"\"UPDATE comment SET veryUseful=0, useful=0, useless=0, avg_usefulness=NULL\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM rates\"\"\")\n for rating in self.cursor.fetchall():\n self.update_comment_score(rating[0], rating[1], rating[2])",
"def complete(self, return_fraction=False):\n bools = map(lambda output: output.get_status() == Constants.DONE, self.get_outputs())\n if len(bools) == 0: frac = 0.\n else: frac = 1.0*sum(bools)/len(bools)\n if return_fraction:\n return frac\n else:\n return frac >= self.min_completion_fraction",
"def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)",
"def complete(self, return_fraction=False):\n bools = map(lambda output: output.exists(), self.get_outputs(flatten=True))\n frac = 1.0*sum(bools)/len(bools)\n if return_fraction:\n return frac\n else:\n return frac >= self.min_completion_fraction",
"def percentage_complete(self) -> float:\n return self.__percentage_complete",
"def get_completeness(star, period, rp, e, with_geom=True, thresh=None):\n aor = get_a(period, star.mass) / star.radius\n pdet = get_pdet(star, aor, period, rp, e, thresh=thresh)\n pwin = get_pwin(star, period)\n if not with_geom:\n return pdet * pwin\n pgeom = get_pgeom(aor, e)\n return pdet * pwin * pgeom",
"def force_update(self):\n self.update(self.poll())",
"def update_entropy(self, save=True):\n\n #min_consensus = self.mturk_assignment.hit.hit_type \\\n #.experiment_settings.min_output_consensus\n min_consensus = 3\n\n # update substance label and entropy\n self.substance = None\n substances = self.substances.filter(invalid=False) \\\n .values_list('substance_id', flat=True)\n if substances:\n self.substance_entropy = compute_entropy(substances)\n hist = Counter(substances).most_common(2)\n substance_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.substance_id = substance_id\n self.quality_method = 'M'\n\n # update name label and entropy\n self.name = None\n names = self.names.filter(invalid=False) \\\n .values_list('name_id', flat=True)\n if names.exists():\n self.name_entropy = compute_entropy(names)\n hist = Counter(names).most_common(2)\n name_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.name_id = name_id\n self.quality_method = 'M'\n\n # update rectified normal\n self.rectified_normal = None\n if self.planar:\n for n in self.rectified_normals.all():\n if n.better_than(self.rectified_normal):\n self.rectified_normal = n\n if self.rectified_normal and not self.rectified_normal.correct:\n self.rectified_normal = None\n\n # update bsdf\n self.bsdf_wd = None\n for b in self.bsdfs_wd.all():\n if b.gloss_correct and b.color_correct and b.better_than(self.bsdf_wd):\n self.bsdf_wd = b\n\n if save:\n self.save()",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value",
"def update_porosity(self):\n\n if self.geom_complete:\n self.graph.graph['porosity'] = (\n self.pores_volume + self.throats_volume) / np.prod(self.graph.graph['extent'])",
"def _update_(self):\n self._update_distance_()\n self._check_literature_name_()",
"def update(self):\n if not self.metamodel.surrogate.is_built():\n # Do not adjust until we have a surrogate\n return\n\n surr_rate = 1 - self.metamodel.history.get_model_usage_rate()\n surr_rate_err = abs(self.desired_rate - surr_rate)\n\n if surr_rate_err <= self.acceptable_offset:\n # Usage rate is acceptable.\n return\n\n T = self.value\n edge_adjustment = 1 - ((2*T - 1) ** self.alpha)\n err_adjustment = min(self.beta, 1 / ((1 - surr_rate_err) ** self.beta))\n step_size = self.step * edge_adjustment * err_adjustment\n # Adjust\n if surr_rate > self.desired_rate:\n self.value = max(T/self.beta, T - step_size)\n elif surr_rate < self.desired_rate:\n self.value = min(1 - ((1-T)/self.beta), T + step_size)\n\n return",
"def _update_reward_values(self):\n # First update the reward value\n if self.true_reward is not None:\n past_X, past_Y = self.get_past_data()\n self.curr_reward = self.true_reward(past_X, past_Y)\n else:\n self.curr_reward = np.nan\n # Update the best reward\n if self.curr_reward > self.curr_best_reward:\n self.curr_best_reward = self.curr_reward",
"def set_complete(self):\n self._current = self._max",
"def update(self, observations: Observations, action: CARLAAction,\n reward: float, new_observations: Observations, *args: Any,\n **kwargs: Any) -> None:\n if new_observations[\"collision\"] > 0:\n self.value += 1",
"def biomass_remaining(self, remaining):\n sql = \"\"\"UPDATE barcodes.sample\n SET biomass_remaining = %s\n WHERE sample_id = %s\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [remaining, self.id])"
]
| [
"0.6213586",
"0.5982645",
"0.5977477",
"0.57943684",
"0.56373745",
"0.562053",
"0.5567826",
"0.555107",
"0.5533249",
"0.5398687",
"0.53789085",
"0.5299783",
"0.52602965",
"0.52126044",
"0.5192764",
"0.5178828",
"0.516683",
"0.51598394",
"0.5144335",
"0.5123487",
"0.5123487",
"0.5123487",
"0.5123487",
"0.5091917",
"0.50890446",
"0.5061755",
"0.5051792",
"0.5034435",
"0.50188994",
"0.50103843"
]
| 0.66835356 | 0 |
Set up a default Dogstatsd instance and mock the proc filesystem. | def setUp(self):
#
self.statsd = DogStatsd(telemetry_min_flush_interval=0)
self.statsd.socket = FakeSocket()
self.statsd._reset_telemetry()
# Mock the proc filesystem
route_data = load_fixtures('route')
self._procfs_mock = patch('datadog.util.compat.builtins.open', mock_open())
self._procfs_mock.start().return_value.readlines.return_value = route_data.split("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_statsd():\n statsd.init_statsd({\n 'STATSD_HOST': config.secrets.server('statsd.host'),\n 'STATSD_PORT': config.secrets.server('statsd.port'),\n 'STATSD_BUCKET_PREFIX': 'linkr',\n })",
"async def test_statsd_setup_defaults(hass: HomeAssistant) -> None:\n config = {\"statsd\": {\"host\": \"host\"}}\n\n config[\"statsd\"][statsd.CONF_PORT] = statsd.DEFAULT_PORT\n config[\"statsd\"][statsd.CONF_PREFIX] = statsd.DEFAULT_PREFIX\n\n with patch(\"statsd.StatsClient\") as mock_init:\n assert await async_setup_component(hass, statsd.DOMAIN, config)\n\n assert mock_init.call_count == 1\n assert mock_init.call_args == mock.call(host=\"host\", port=8125, prefix=\"hass\")\n hass.states.async_set(\"domain.test\", \"on\")\n await hass.async_block_till_done()\n assert len(mock_init.mock_calls) == 3",
"def test_initialization(self):\n options = {\n 'statsd_host': \"myhost\",\n 'statsd_port': 1234\n }\n\n # Default values\n self.assertEqual(statsd.host, \"localhost\")\n self.assertEqual(statsd.port, 8125)\n\n # After initialization\n initialize(**options)\n self.assertEqual(statsd.host, \"myhost\")\n self.assertEqual(statsd.port, 1234)\n\n # Add namespace\n options['statsd_namespace'] = \"mynamespace\"\n initialize(**options)\n self.assertEqual(statsd.host, \"myhost\")\n self.assertEqual(statsd.port, 1234)\n self.assertEqual(statsd.namespace, \"mynamespace\")\n\n # Set `statsd` host to the system's default route\n initialize(statsd_use_default_route=True, **options)\n self.assertEqual(statsd.host, \"172.17.0.1\")\n self.assertEqual(statsd.port, 1234)\n\n # Add UNIX socket\n options['statsd_socket_path'] = '/var/run/dogstatsd.sock'\n initialize(**options)\n self.assertEqual(statsd.socket_path, options['statsd_socket_path'])\n self.assertIsNone(statsd.host)\n self.assertIsNone(statsd.port)",
"def test_dogstatsd_initialization_with_env_vars(self):\n # Setup\n with preserve_environment_variable('DD_AGENT_HOST'):\n os.environ['DD_AGENT_HOST'] = 'myenvvarhost'\n with preserve_environment_variable('DD_DOGSTATSD_PORT'):\n os.environ['DD_DOGSTATSD_PORT'] = '4321'\n dogstatsd = DogStatsd()\n\n # Assert\n self.assertEqual(dogstatsd.host, \"myenvvarhost\")\n self.assertEqual(dogstatsd.port, 4321)",
"async def test_statsd_setup_full(hass: HomeAssistant) -> None:\n config = {\"statsd\": {\"host\": \"host\", \"port\": 123, \"rate\": 1, \"prefix\": \"foo\"}}\n with patch(\"statsd.StatsClient\") as mock_init:\n assert await async_setup_component(hass, statsd.DOMAIN, config)\n\n assert mock_init.call_count == 1\n assert mock_init.call_args == mock.call(host=\"host\", port=123, prefix=\"foo\")\n\n hass.states.async_set(\"domain.test\", \"on\")\n await hass.async_block_till_done()\n assert len(mock_init.mock_calls) == 3",
"def service():\n service = Mock()\n service.log_dir = ''\n service.persistent_root = ''\n service.context.globals = {\"cluster_size\": 1}\n service.log_config_file = ''\n\n return service",
"def setUp(self):\n\n self.logger_stats = DataScreen()",
"def run():\r\n\r\n # By default use the statsd agent\r\n options = {'statsd': True}\r\n\r\n if hasattr(settings, 'DATADOG'):\r\n options.update(settings.DATADOG)\r\n\r\n # Not all arguments are documented.\r\n # Look at the source code for details.\r\n dog_stats_api.start(**options)\r\n\r\n dog_http_api.api_key = options.get('api_key')",
"def setUp(self):\n fixtures_dir = os.path.abspath(os.path.join(\n os.path.dirname(__file__), 'fixtures'))\n\n config = get_collector_config('NagiosPerfdataCollector', {\n 'perfdata_dir': fixtures_dir\n })\n\n self.collector = NagiosPerfdataCollector(config, None)\n self.fixtures = os.listdir(fixtures_dir)",
"def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.compute_sentry = self.d.sentry['nova-compute'][0]\n self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]\n self.neutron_api_sentry = self.d.sentry['neutron-api'][0]\n self.n_ovs_sentry = self.d.sentry['neutron-openvswitch'][0]\n\n # pidof is failing to find neutron-server on stein\n # use pgrep instead.\n if self._get_openstack_release() >= self.bionic_stein:\n self.pgrep_full = True\n else:\n self.pgrep_full = False",
"def setUp(self):\n # create temporary directory\n if not usedir:\n self.test_dir = tempfile.mkdtemp()\n os.chdir(self.test_dir)\n else:\n os.chdir(usedir) \n\n super(SimpleTest, self).setUp()\n\n import SFramework\n self.manager = SFramework.TSStatisticsManager()\n self.manager.getWorkspaces().addObject(self.makeWS())",
"def setUp(self):\n super(TestSheepdogStore, self).setUp()\n\n def _fake_execute(*cmd, **kwargs):\n pass\n\n self.config(default_store='sheepdog',\n group='glance_store')\n\n execute = mock.patch.object(processutils, 'execute').start()\n execute.side_effect = _fake_execute\n self.addCleanup(execute.stop)\n self.store = sheepdog.Store(self.conf)\n self.store.configure()\n self.store_specs = {'image': '6bd59e6e-c410-11e5-ab67-0a73f1fda51b',\n 'addr': '127.0.0.1',\n 'port': 7000}",
"def setUp(self):\n # Initialize DDoS object\n self.ddos_obj = DDoS()\n\n # Mock parsed log file data\n # Case 1: No DoS\n self.data_1 = {\n \"1.0.0.0\":{\n \"count\": 1,\n \"get\": [\"random_get\"] * 1,\n \"unique_get\": [\"random_get\"] * 1,\n \"ua\": [\"360port_scan\"] * 1,\n \"ep_time\": [1560524200] * 1,\n \"status_code\": [404] * 1\n }\n }\n\n # Case 2: DoS attack\n self.data_2 = {\n \"1.1.1.1\":{\n \"count\": 10000,\n \"get\": [\"random_get\"] * 10000,\n \"unique_get\": [\"random_get\"] * 10000,\n \"ua\": [\"360port_scan\"] * 10000,\n \"ep_time\": [1560524200] * 10000,\n \"status_code\": [404] * 10000\n }\n }",
"def setUpClass(cls):\n cls.testDir = tempfile.mkdtemp()\n cls.readonlyDir = tempfile.mkdtemp()\n cls.testfile = os.path.join(cls.testDir, 'desispec_test_io.fits')\n cls.testyfile = os.path.join(cls.testDir, 'desispec_test_io.yaml')\n cls.testlog = os.path.join(cls.testDir, 'desispec_test_io.log')\n # cls.testbrfile appears to be unused by this class.\n cls.testbrfile = os.path.join(cls.testDir, 'desispec_test_io-br.fits')\n cls.origEnv = {'SPECPROD': None,\n \"DESI_ROOT\": None,\n \"DESI_ROOT_READONLY\": None,\n \"DESI_SPECTRO_DATA\": None,\n \"DESI_SPECTRO_REDUX\": None,\n \"DESI_SPECTRO_CALIB\": None,\n }\n cls.testEnv = {'SPECPROD':'dailytest',\n \"DESI_ROOT\": cls.testDir,\n \"DESI_ROOT_READONLY\": cls.readonlyDir,\n \"DESI_SPECTRO_DATA\": os.path.join(cls.testDir, 'spectro', 'data'),\n \"DESI_SPECTRO_REDUX\": os.path.join(cls.testDir, 'spectro', 'redux'),\n \"DESI_SPECTRO_CALIB\": os.path.join(cls.testDir, 'spectro', 'calib'),\n }\n cls.datadir = cls.testEnv['DESI_SPECTRO_DATA']\n cls.reduxdir = os.path.join(cls.testEnv['DESI_SPECTRO_REDUX'],\n cls.testEnv['SPECPROD'])\n for e in cls.origEnv:\n if e in os.environ:\n cls.origEnv[e] = os.environ[e]\n os.environ[e] = cls.testEnv[e]",
"def __init__(self, *args, **kwargs):\n super(MdtestBase, self).__init__(*args, **kwargs)\n self.mdtest_cmd = None\n self.processes = None\n self.hostfile_clients_slots = None",
"def setUp(self):\n self.addCleanup(self._clean_up)\n\n # The daemon should already be running, if not lets starts it and wait\n # a bit\n if process_exists('stratisd') is None:\n exec_command([\"systemctl\", \"start\", \"stratisd\"])\n time.sleep(20)\n\n StratisCli.destroy_all()\n self.assertEqual(0, len(StratisCli.pool_list()))",
"def plugin_initialize():\n global _PROC_PID_STAT\n collectd.info('Initializing collectd-mlab plugin.')\n _PROC_PID_STAT = '/proc/%s/stat' % os.getpid()",
"def setUp(self):\n self.app = Flask(__name__)\n self.gh = mock.MagicMock()\n self.db = mock.MagicMock()\n self.sc = mock.MagicMock()\n self.testcommand = TeamCommand(self.db, self.gh, self.sc)\n self.help_text = self.testcommand.help\n self.maxDiff = None",
"def setUp(self, mock_ghn, mock_grnam, mock_pwnam):\n super(HGSTTestCase, self).setUp()\n self.mock_object(processutils, 'execute', self._fake_execute)\n self._fail_vgc_cluster = False\n self._fail_ip = False\n self._fail_network_list = False\n self._fail_domain_list = False\n self._empty_domain_list = False\n self._fail_host_storage = False\n self._fail_space_list = False\n self._fail_space_delete = False\n self._fail_set_apphosts = False\n self._fail_extend = False\n self._request_cancel = False\n self._return_blocked = 0\n self.configuration = mock.Mock(spec=conf.Configuration)\n self.configuration.safe_get = self._fake_safe_get\n self._reset_configuration()\n self.driver = HGSTDriver(configuration=self.configuration,\n execute=self._fake_execute)",
"def setUp(self):\n self.setUpPyfakefs()\n self.fake_os = fake_filesystem.FakeOsModule(self.fs)\n\n populate_fakefs(self)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n super(TestCaseWithFakeAccessor, self).setUp()\n self.accessor = bg_memory.build()\n self.accessor.connect()\n self.addCleanup(self.accessor.shutdown)\n self.metadata_cache = bg_metadata_cache.DiskCache(\n self.accessor, self.tempdir)\n self.metadata_cache.open()\n self.addCleanup(self.metadata_cache.close)",
"def setup_class(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"agent_1\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)",
"def setupClass(cls):\n cls._tmp_dir = tempfile.mkdtemp()\n cls.test_filepath = os.path.join( cls._tmp_dir, \"test_data.h5\" )\n cls._generate_testdata_h5(cls.test_filepath)\n cls.server_proc, cls.shutdown_event = cls._start_mockserver( cls.test_filepath, same_process=True )\n cls.client_connection = httplib.HTTPConnection( \"localhost:8000\" )",
"def __init__(self, collectd):\n self.collectd = collectd\n self.conf = self.default_config()\n self.types = {}\n\n collectd.info('Initialized MetricsConfig with default config %s' % self.conf)",
"def setUp(self):\n self.cmd = BenchResultsProcesser()\n self.exp_keys = ['bench_data', 'time_fig', 'time_str',\n 'mem_fig', 'mem_str']\n # Get the tests folder\n tests_dir = os.path.dirname(os.path.abspath(__file__))\n # Path to the test timing folder\n self.timing_dir = os.path.join(tests_dir, '../support_files/timing')",
"def setUp(self):\n # obtain separate logs\n self.update_log_file_names()\n # Start the servers and agents\n super(MdtestBase, self).setUp()\n\n # Get the parameters for Mdtest\n self.mdtest_cmd = MdtestCommand()\n self.mdtest_cmd.get_params(self)\n self.processes = self.params.get(\"np\", '/run/mdtest/client_processes/*')\n self.manager = self.params.get(\"manager\", '/run/mdtest/*', \"MPICH\")",
"def setUp(self):\n # This Queue and Pipe let heartbeat send and receive messages to the\n # fake child client processes, and have those messages processed here.\n self.queue = Queue()\n self.consumer_master, self.consumer_slave = multiprocessing.Pipe()\n self.monitor_master, self.monitor_slave = multiprocessing.Pipe()\n\n # Only one consumer\n self.consumer = ProcessData(process=self.MockProcess(id=0,\n name='TestConsumer'),\n pipe=self.consumer_slave)\n\n self.monitor = ProcessData(process=self.MockProcess(id=0,\n name='TestMonitor'),\n pipe=self.monitor_slave)\n\n # Messages that StorageHeartbeat puts on the socket for the server\n # Are quickly decoded again and put on this queue for verification\n self.socket_queue = Queue()\n\n # We use MockSocket to impersonate a real socket\n self.socket = self.MockSocket(self.socket_queue, self)\n\n self.dut = StorageHeartbeat(consumers=[self.consumer],\n monitor=self.monitor,\n report_in=self.queue,\n runtime=10,\n poll_period=5,\n client_socket=self.socket)"
]
| [
"0.67476094",
"0.66412675",
"0.64277816",
"0.6259234",
"0.60098875",
"0.5872404",
"0.58321947",
"0.58246964",
"0.5780947",
"0.5772299",
"0.5660361",
"0.5632279",
"0.55069",
"0.5496597",
"0.54851276",
"0.54772216",
"0.5475186",
"0.5469049",
"0.5455055",
"0.5450422",
"0.5429659",
"0.5429659",
"0.5429659",
"0.5423881",
"0.5377495",
"0.5376391",
"0.5350008",
"0.53461933",
"0.5341051",
"0.5333668"
]
| 0.75835073 | 0 |
Unmock the proc filesystem. | def tearDown(self):
self._procfs_mock.stop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def teardown_function(function):\n bundle.local.reset_mock()\n bundle.cd.reset_mock()",
"def tearDown(self):\n self.popen_patcher.stop()",
"def tearDown(self):\n self.popen_patcher.stop()",
"def test_unload(install_mockery, mock_fetch, mock_archive, mock_packages, working_env):\n install(\"mpileaks\")\n mpileaks_spec = spack.spec.Spec(\"mpileaks\").concretized()\n\n # Set so unload has something to do\n os.environ[\"FOOBAR\"] = \"mpileaks\"\n os.environ[uenv.spack_loaded_hashes_var] = \"%s:%s\" % (mpileaks_spec.dag_hash(), \"garbage\")\n\n sh_out = unload(\"--sh\", \"mpileaks\")\n csh_out = unload(\"--csh\", \"mpileaks\")\n\n assert \"unset FOOBAR\" in sh_out\n assert \"unsetenv FOOBAR\" in csh_out\n\n assert \"export %s=garbage\" % uenv.spack_loaded_hashes_var in sh_out\n assert \"setenv %s garbage\" % uenv.spack_loaded_hashes_var in csh_out",
"def teardown(self):\n self.file_comm.remove_file()\n super(TestCisAsciiFileOutput, self).teardown()",
"def tearDown(self):\n if not self.io_validation_complete:\n g.log.info(\"Wait for IO to complete as IO validation did not \"\n \"succeed in test method\")\n ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)\n if not ret:\n raise ExecutionError(\"IO failed on some of the clients\")\n g.log.info(\"IO is successful on all mounts\")\n\n # Cleanup and umount volume\n g.log.info(\"Starting to Unmount Volume and Cleanup Volume\")\n ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)\n if not ret:\n raise ExecutionError(\"Failed to umount the vol & cleanup Volume\")\n g.log.info(\"Successful in umounting the volume and Cleanup\")\n\n # Calling GlusterBaseClass teardown\n GlusterBaseClass.tearDown.im_func(self)",
"def test_unload_fails_no_shell(\n install_mockery, mock_fetch, mock_archive, mock_packages, working_env\n):\n install(\"mpileaks\")\n mpileaks_spec = spack.spec.Spec(\"mpileaks\").concretized()\n os.environ[uenv.spack_loaded_hashes_var] = mpileaks_spec.dag_hash()\n\n out = unload(\"mpileaks\", fail_on_error=False)\n assert \"To set up shell support\" in out",
"def tearDown(self):\n self.popen_patcher.stop()\n self.env_patcher.stop()",
"def tearDown(self):\n\n # stopping the volume\n g.log.info(\"Starting to Unmount Volume and Cleanup Volume\")\n ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)\n if not ret:\n raise ExecutionError(\"Failed to Unmount Volume and Cleanup Volume\")\n g.log.info(\"Successful in Unmount Volume and Cleanup Volume\")\n\n # Calling GlusterBaseClass tearDown\n GlusterBaseClass.tearDown.im_func(self)",
"def test_cleanup_filesystem(self, delete_mock, network_delete_mock, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar', 'filesystem':\n {'source_path': '/bar',\n 'target_path': '/baz'}})\n resources._domain = mock.Mock()\n resources._network = mock.Mock()\n resources._hypervisor = mock.Mock()\n resources.cleanup()\n delete_mock.assert_called_with(resources.domain, mock.ANY, '/bar/foo')",
"def teardown(self):\n storage.close()",
"def teardown(self):\n storage.close()",
"def teardown(self):\n storage.close()",
"def teardown(self):\n storage.close()",
"def teardown(self):\n self.logger.info('Tearing down file server vm')\n self.local_env.execute('uninstall', task_retries=40,\n task_retry_interval=30)",
"def teardown(self):\n super(TestCisObjInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def test_04_remove_file(self, mock_uid, mock_isfile,\n mock_islink, mock_exists, mock_remove,\n mock_msg):\n mock_uid.return_value = os.getuid()\n mock_isfile.return_value = True\n # file does not exist (regression of #50)\n mock_exists = False\n futil = udocker.FileUtil(\"/tmp/filename4.txt\")\n status = futil.remove()\n self.assertTrue(status)\n # under /\n mock_exists = True\n futil = udocker.FileUtil(\"/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertFalse(status)\n # wrong uid\n mock_uid.return_value = os.getuid() + 1\n futil = udocker.FileUtil(\"/tmp/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertFalse(status)\n # under /tmp\n mock_uid.return_value = os.getuid()\n futil = udocker.FileUtil(\"/tmp/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertTrue(status)\n # under user home\n futil = udocker.FileUtil(\"/home/user/.udocker/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n futil.safe_prefixes.append(futil.topdir)\n status = futil.remove()\n self.assertTrue(status)\n # outside of scope 1\n futil = udocker.FileUtil(\"/etc/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n futil.safe_prefixes = []\n status = futil.remove()\n self.assertFalse(status)",
"def teardown_module(module):\n import os\n import time\n\n # temp file will be removed soon\n time.sleep(1.0)\n\n for p in [data_pk, data_pk_gz, data_js, data_js_gz]:\n try:\n os.remove(p)\n except:\n pass",
"def test_mountfile(self):\n quote = b\"\"\"If you wish to make an apple pie from scratch, you must first invent the universe.\"\"\"\n mem_fs = MemoryFS()\n mem_fs.makedir('foo')\n mem_fs.setcontents('foo/bar.txt', quote)\n foo_dir = mem_fs.opendir('foo')\n\n mount_fs = MountFS()\n mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)\n\n self.assertTrue(mount_fs.isdir('/'))\n self.assertTrue(mount_fs.isdir('./'))\n self.assertTrue(mount_fs.isdir(''))\n\n # Check we can see the mounted file in the dir list\n self.assertEqual(mount_fs.listdir(), [\"bar.txt\"])\n self.assertTrue(not mount_fs.exists('nobodyhere.txt'))\n self.assertTrue(mount_fs.exists('bar.txt'))\n self.assertTrue(mount_fs.isfile('bar.txt'))\n self.assertTrue(not mount_fs.isdir('bar.txt'))\n\n # Check open and getinfo callables\n self.assertEqual(mount_fs.getcontents('bar.txt'), quote)\n self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))\n\n # Check changes are written back\n mem_fs.setcontents('foo/bar.txt', 'baz')\n self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')\n self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))\n\n # Check changes are written to the original fs\n self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')\n self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))\n\n # Check unmount\n self.assertTrue(mount_fs.unmount(\"bar.txt\"))\n self.assertEqual(mount_fs.listdir(), [])\n self.assertTrue(not mount_fs.exists('bar.txt'))\n\n # Check unount a second time is a null op, and returns False\n self.assertFalse(mount_fs.unmount(\"bar.txt\"))",
"def teardown(self):\n super(TestCisPlyInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def tearDown(self):\n\n self.rawInputStub.destroy()",
"def teardown(self):\n super(TestCisPickleInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def tearDown(self):\n del self.u\n MockConfigRegistry._EXTRA = {}\n MockUCSHttpServer.mock_reset()\n MockPopen.mock_reset()",
"def teardown(self):\n super(TestCisPandasInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def test_cleanup_on_failure_when_preparing_file(self, mocker):\n remove_spy = mocker.spy(os, 'remove')\n self._retryable.side_effect = requests.HTTPError('Fail')\n\n payload = dict(id=\"B\", data={\"some\": \"data\"}, ai_service='A')\n headers = {'x-rh-identity': 'ABC'}\n self.client.post(self.url, json=payload, headers=headers)\n\n remove_spy.assert_called_once()",
"def teardown(self):\n super(TestCisObjOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def teardown(self):\n super(TestCisPickleOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)",
"def unmount(self, pathname):\n \n # Make sure we don't try to mount something twice.\n if not pathname in self.mounts:\n raise ProcessorError(\"%s is already mounted\" % pathname)\n \n # Call hdiutil.\n try:\n p = subprocess.Popen((\"/usr/bin/hdiutil\",\n \"detach\",\n self.mounts[pathname]),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (out, err) = p.communicate()\n except OSError as e:\n raise ProcessorError(\"ditto execution failed with error code %d: %s\" % (\n e.errno, e.strerror))\n if p.returncode != 0:\n raise ProcessorError(\"unmounting %s failed: %s\" % (pathname, err))\n \n # Delete mount from mount list.\n del self.mounts[pathname]",
"def test_09_cleanup(self, mock_remove, mock_config):\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n udocker.FileUtil.tmptrash = {'file1.txt': None, 'file2.txt': None}\n udocker.FileUtil(\"\").cleanup()\n self.assertEqual(mock_remove.call_count, 2)",
"def teardown():\n\n self.zorp_mock.stop()"
]
| [
"0.6361763",
"0.62163806",
"0.62163806",
"0.62079",
"0.6134043",
"0.60920656",
"0.604096",
"0.60291505",
"0.6024147",
"0.5966361",
"0.5828402",
"0.5828402",
"0.5828402",
"0.5828402",
"0.5827227",
"0.5768053",
"0.576695",
"0.57601285",
"0.57424736",
"0.57312685",
"0.5698628",
"0.56899565",
"0.5686666",
"0.5686219",
"0.5642999",
"0.56330025",
"0.5619945",
"0.5609412",
"0.56052506",
"0.55985874"
]
| 0.71018255 | 0 |
Sends a chain of metrics and then asserts that they arrive in the right order and with the expected telemetry values. | def send_and_assert(
self,
dogstatsd,
expected_metrics,
last_telemetry_size=0,
buffered=False,
):
expected_messages = []
for metric_type, metric_name, metric_value in expected_metrics:
# Construct the expected message data
metric_type_id = TestDogStatsd.METRIC_TYPE_MAP[metric_type]['id']
expected_messages.append(
"{}:{}|{}\n".format(metric_name, metric_value, metric_type_id)
)
# Send the value
getattr(dogstatsd, metric_type)(metric_name, metric_value)
# Sanity check
if buffered:
# Ensure that packets didn't arrive immediately if we are expecting
# buffering behavior
self.assertIsNone(dogstatsd.socket.recv(2, no_wait=True))
metrics = 1
if buffered:
metrics = len(expected_messages)
if buffered:
expected_messages = [ ''.join(expected_messages) ]
for message in expected_messages:
packets_sent = 1
            # For all non-initial packets, our current telemetry stats will
# contain the metadata for the last telemetry packet as well.
if last_telemetry_size > 0:
packets_sent += 1
expected_metrics=telemetry_metrics(
metrics=metrics,
packets_sent=packets_sent,
bytes_sent=len(message) + last_telemetry_size
)
self.assert_equal_telemetry(
message,
dogstatsd.socket.recv(2, no_wait=not buffered, reset_wait=True),
telemetry=expected_metrics,
)
last_telemetry_size = len(expected_metrics)
return last_telemetry_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_wait_for_dispatched_metrics(self):\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertEqual(self.successResultOf(d), [])\n\n self._add_to_dispatched_metrics(worker_helper.broker, MetricMessage())\n msg = MetricMessage()\n msg.append('fake metric 1')\n msg.append('fake metric 2')\n self._add_to_dispatched_metrics(worker_helper.broker, msg)\n worker_helper.kick_delivery()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertNoResult(d)\n yield worker_helper.broker.wait_delivery()\n self.assertEqual(\n self.successResultOf(d), [[], ['fake metric 1', 'fake metric 2']])",
"def test_get_dispatched_metrics(self):\n worker_helper = WorkerHelper()\n dispatched = worker_helper.get_dispatched_metrics()\n self.assertEqual(dispatched, [])\n\n msg = MetricMessage()\n self._add_to_dispatched_metrics(worker_helper.broker, msg)\n dispatched = worker_helper.get_dispatched_metrics()\n self.assertEqual(dispatched, [[]])\n\n msg = MetricMessage()\n msg.append('fake metric 1')\n msg.append('fake metric 2')\n self._add_to_dispatched_metrics(worker_helper.broker, msg)\n dispatched = worker_helper.get_dispatched_metrics()\n self.assertEqual(dispatched, [[], ['fake metric 1', 'fake metric 2']])",
"def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)",
"def test_metric_tracker_and_collection_multioutput(input_to_tracker, assert_type):\n tracker = MetricTracker(input_to_tracker)\n for _ in range(5):\n tracker.increment()\n for _ in range(5):\n preds, target = torch.randn(100, 2), torch.randn(100, 2)\n tracker.update(preds, target)\n all_res = tracker.compute_all()\n assert isinstance(all_res, assert_type)\n best_metric, which_epoch = tracker.best_metric(return_step=True)\n if isinstance(best_metric, dict):\n for v in best_metric.values():\n assert v is None\n for v in which_epoch.values():\n assert v is None\n else:\n assert best_metric is None\n assert which_epoch is None",
"def test_metrics(self):\n # Check the route\n self.check_metrics(self.test_metrics_submission_id, False, \"award\")\n self.check_metrics(self.test_metrics_submission_id, True, \"award_financial\")\n self.check_metrics(self.test_metrics_submission_id, True, \"appropriations\")",
"def testSuccess(self):\n seq_num = 9\n request = struct.pack(HEADER_FMT, REQUEST_TYPE, seq_num)\n reply = self.sendAndReceive(request)\n reply_type, replied_seq_num = struct.unpack(HEADER_FMT,\n reply[0:HEADER_SIZE])\n self.assertEqual(REPLY_TYPE, reply_type)\n self.assertEqual(seq_num, replied_seq_num)\n metrics = json.loads(reply[HEADER_SIZE:])\n self.assertEqual([], metrics['Components'])",
"async def test_all_transactions(self):\n response = await self.collect(get_request_text=self.GATLING_LOG)\n self.assert_measurement(response, value=\"2\")",
"def check_delivered_messages(results):\n assert results[\"metrics\"][\"Delivered messages\"] == 20",
"def test_success_metrics(self):\n @self.graph.route(self.ns.collection_path, Operation.Search, self.ns)\n def foo():\n return \"\"\n\n response = self.client.get(\"api/v1/foo\")\n assert_that(response.status_code, is_(equal_to(200)))\n\n self.graph.metrics.histogram.assert_called_with(\n \"route\",\n ANY,\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n ],\n )\n self.graph.metrics.increment.assert_called_with(\n \"route.call.count\",\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n \"classifier:2xx\",\n ],\n )",
"async def test_all_transactions(self):\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"2\", entities=self.expected_entities)",
"def test_traffic_analysis_success(\n mock_client, monkeypatch, traffic_analysis_success\n):\n args = {\n \"port\": 8443,\n \"protocol\": \"tcp\",\n \"policy_decisions\": \"potentially_blocked\",\n \"start_time\": \"2022-07-17T12:58:33.528Z\",\n \"end_time\": \"2022-07-18T12:58:33.529Z\",\n }\n\n monkeypatch.setattr(\n illumio.pce.PolicyComputeEngine,\n \"get_traffic_flows_async\",\n lambda *a, **k: [\n TrafficFlow.from_json(flow) for flow in traffic_analysis_success\n ],\n )\n\n resp = traffic_analysis_command(mock_client, args)\n\n assert resp.raw_response == traffic_analysis_success",
"def test_combine_peer_stats(self):\n tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, cbsa=request.GET.get('metro'))\n metro = Geo.objects.get(geo_type=Geo.METRO_TYPE, geoid=request.GET.get('metro'))\n lender = Institution.objects.get(institution_id=request.GET.get('lender'))\n peers = lender.get_peer_list(metro, None, None)\n peer_data_collector = []\n for peer in peers:\n peer_request = HttpRequest()\n peer_request.GET['lender'] = peer.institution.institution_id\n peer_request.GET['metro']= metro.geoid\n peer_lar_data = loan_originations_as_json(peer_request)\n peer_data_collector.append(assemble_stats(peer_lar_data, tracts))\n peer_stats = combine_peer_stats(peer_data_collector)\n self.assertEqual(peer_stats['hma_pct'], 0.0)\n self.assertEqual(peer_stats['lma_pct'], 1.0)\n self.assertEqual(peer_stats['mma_pct'], 0.0)\n self.assertEqual(peer_stats['lma'], 7)\n self.assertEqual(peer_stats['mma'], 0)\n self.assertEqual(peer_stats['hma'], 0)\n self.assertEqual(peer_stats['lar_total'], 7)",
"def _send_metrics(output):\n\n keys = [encode_string(key) for key in output.keys()]\n values = [float(output[key]['value']) for key in output.keys()]\n higher_is_better = [int(output[key]['higher_is_better']) for key in output.keys()] # send the boolean as int\n\n _send(len(keys),0) \n _send(keys)\n _send(values)\n _send(higher_is_better)",
"def test_feed_values_collected(self):\n reference_temperature = \"T\"\n value_temperature = 123\n reference_location = \"LOC\"\n value_location1 = \"45.45,19.19\"\n value_location2 = \"42.42,18.18\"\n reference_information = \"I\"\n value_information = \"Hello!\"\n\n timestamp_first = round(time.time()) * 1000\n timestamp_second = timestamp_first + 123456789\n\n expected_topic = self.factory.common_topic + WAPMF.FEED_VALUES\n expected_payload = json.dumps(\n [\n {\n \"timestamp\": timestamp_first,\n f\"{reference_temperature}\": value_temperature,\n f\"{reference_location}\": value_location1,\n },\n {\n \"timestamp\": timestamp_second,\n f\"{reference_location}\": value_location2,\n f\"{reference_information}\": value_information,\n },\n ]\n )\n expected_message = Message(expected_topic, expected_payload)\n\n serialized_message = self.factory.make_from_feed_values_collected(\n {\n timestamp_first: {\n reference_temperature: value_temperature,\n reference_location: value_location1,\n },\n timestamp_second: {\n reference_location: value_location2,\n reference_information: value_information,\n },\n }\n )\n\n self.assertEqual(expected_message, serialized_message)",
"def test_process_signals(self, mock_discover):\n self.mock_insight.insight_params = {'pi': 3.14}\n mock_discover.return_value = [self.mock_insight]\n discover_event = Event()\n blk = EventWeMoDiscovery(discover_event)\n self.configure_block(blk, {'enrich': {'exclude_existing': False}})\n blk.start()\n self.assertTrue(discover_event.wait(1))\n self.assertEqual(mock_discover.call_count, 1)\n blk.process_signals([Signal({'foo': 'bar'}), Signal({'foo': 'baz'})])\n self.assertEqual(self.mock_insight.update_insight_params.call_count, 2)\n self.assert_num_signals_notified(2)\n self.assertDictEqual(\n self.last_notified[DEFAULT_TERMINAL][0].to_dict(),\n {'pi': 3.14, 'foo': 'bar'})\n self.assertDictEqual(\n self.last_notified[DEFAULT_TERMINAL][1].to_dict(),\n {'pi': 3.14, 'foo': 'baz'})\n blk.stop()",
"async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )",
"def test_clear_dispatched_metrics(self):\n worker_helper = WorkerHelper()\n self._add_to_dispatched_metrics(worker_helper.broker, MetricMessage())\n self.assertNotEqual(\n worker_helper.broker.dispatched['vumi.metrics'], {})\n worker_helper.clear_dispatched_metrics()\n self.assertEqual(\n worker_helper.broker.dispatched['vumi.metrics'], {})",
"def test_queue_attn_xfer(self):\n events = self.run_and_get_events('fixtures/queue/queue_attn_xfer.json')\n\n expected_events = self.events_from_tuples((\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '+31150010004',\n 'targets': [CallerId(code=150010002, number='+31150010004', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '+31150010004',\n 'callee': CallerId(code=150010002, number='+31150010004', is_public=True),\n }),\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n 'to_number': '203',\n 'targets': [CallerId(code=150010003, number='203', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n 'to_number': '203',\n 'callee': CallerId(code=150010003, number='203', is_public=True),\n }),\n ('on_warm_transfer', {\n 'new_id': 'e83df36bebbe-1507037917.120',\n 'merged_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'callee': CallerId(code=150010003, number='203', is_public=True),\n 'redirector': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n }),\n ('on_hangup', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '203',\n 'reason': 'completed',\n }),\n ))\n\n self.assertEqual(expected_events, events)",
"def test_get_measurement_history(self):\n device = DeviceFactory(node=Node.objects.first(), external_id='123', type__code=SecureDeviceType.SRT321,\n device_param__type__code=SecureDeviceParameterType.MEASURED_TEMPERATURE)\n d_id_1 = device.external_id\n\n now_loc = datetime.datetime.now(bst)\n ts_loc = now_loc - datetime.timedelta(seconds=30)\n ts_str = ts_loc.strftime('%Y-%m-%dT%H:%M:%S')\n\n data = self.create_secure_server_push_data(d_id_1, ts_str)\n\n SecureClient.process_push_data(data)\n time.sleep(.5)\n\n # get newer timestamp\n ts_str = now_loc.strftime('%Y-%m-%dT%H:%M:%S')\n data = self.create_secure_server_push_data(d_id_1, ts_str, value=\"23.5\")\n\n SecureClient.process_push_data(data)\n\n token = Token.objects.get(user__username=email)\n device_param = device.parameters.first()\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n url = reverse('api:device_measurements', kwargs={'device_parameter_id': device_param.id})\n\n time.sleep(.5)\n\n response = client.get(url, format='json')\n\n self.assertTrue(response.status_code == 200)\n self.assertTrue(len(response.data) >= 2)",
"def test_producer_send_messages_keyed(self):\n first_part = 43\n second_part = 56\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part, 102]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"35\"\n key2 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, key=key2, msgs=msgs2)\n # Check the expected request was sent\n msgSet1 = create_message_set(make_send_requests(msgs1, key=key1), producer.codec)\n msgSet2 = create_message_set(make_send_requests(msgs2, key=key2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n producer.stop()",
"def test_tracker(base_metric, metric_input, maximize):\n tracker = MetricTracker(base_metric, maximize=maximize)\n for i in range(5):\n tracker.increment()\n # check both update and forward works\n for _ in range(5):\n tracker.update(*metric_input)\n for _ in range(5):\n tracker(*metric_input)\n\n # Make sure we have computed something\n val = tracker.compute()\n if isinstance(val, dict):\n for v in val.values():\n assert v != 0.0\n else:\n assert val != 0.0\n assert tracker.n_steps == i + 1\n\n # Assert that compute all returns all values\n assert tracker.n_steps == 5\n all_computed_val = tracker.compute_all()\n if isinstance(all_computed_val, dict):\n for v in all_computed_val.values():\n assert v.numel() == 5\n else:\n assert all_computed_val.numel() == 5\n\n # Assert that best_metric returns both index and value\n val, idx = tracker.best_metric(return_step=True)\n if isinstance(val, dict):\n for v, i in zip(val.values(), idx.values()):\n assert v != 0.0\n assert i in list(range(5))\n else:\n assert val != 0.0\n assert idx in list(range(5))\n\n val2 = tracker.best_metric(return_step=False)\n assert val == val2",
"def test_producer_send_messages_batched_partial_success(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n topic2 = \"tpsmbps_two\"\n client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}\n client.metadata_error_for_topic.return_value = False\n\n init_resp = [\n ProduceResponse(self.topic, 0, 0, 10),\n ProduceResponse(self.topic, 1, 6, 20),\n ProduceResponse(topic2, 5, 0, 30),\n ]\n next_resp = [\n ProduceResponse(self.topic, 2, 0, 10),\n ProduceResponse(self.topic, 1, 0, 20),\n ProduceResponse(topic2, 4, 0, 30),\n ]\n failed_payloads = [\n (ProduceRequest(self.topic, ANY, ANY), NotLeaderForPartitionError()),\n (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),\n ]\n\n client.send_produce_request.side_effect = [\n fail(Failure(FailedPayloadsError(init_resp, failed_payloads))),\n succeed(next_resp),\n ]\n\n msgs = self.msgs(range(10))\n results = []\n\n producer = Producer(client, batch_send=True, batch_every_t=0)\n # Send 5 total requests: 4 here, one after we make sure we didn't\n # send early\n results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))\n results.append(producer.send_messages(topic2, msgs=msgs[3:5]))\n results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))\n results.append(producer.send_messages(topic2, msgs=msgs[8:9]))\n # No call yet, not enough messages\n self.assertFalse(client.send_produce_request.called)\n # Enough messages to start the request\n client.reset_topic_metadata.reset_mock()\n results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))\n # Before the retry, there should be some results\n self.assertEqual(init_resp[0], self.successResultOf(results[0]))\n self.assertEqual(init_resp[2], self.successResultOf(results[3]))\n # And the errors should have forced a metadata reset on one of the topics.\n client.reset_topic_metadata.assert_called_with(self.topic)\n # Advance the clock to trigger retries.\n clock.advance(producer._retry_interval)\n # Check the otehr results came in\n self.assertEqual(next_resp[0], self.successResultOf(results[4]))\n self.assertEqual(next_resp[1], self.successResultOf(results[2]))\n self.assertEqual(next_resp[2], self.successResultOf(results[1]))\n\n producer.stop()",
"def test_DPMeasurements_notify_listeners(self, receiver):\n\n m = DPMeasurements(device_parameter=type('test_device_param', (object,),\n {'id': 1, 'type': MagicMock(), 'device': MagicMock()})())\n\n m.add(time=timezone.now(), value=EmptyDBTestCase.v)\n time.sleep(2.5)\n m.add(time=timezone.now(), value=EmptyDBTestCase.v + 1)\n self.assertEqual(receiver.call_count, 1)\n self.assertEqual(receiver.call_args[1]['exchange'], settings.IODICUS_MESSAGING_EXCHANGE_NAME)",
"def base_test(self, metric, expected):\n mq_out = metric.compute(self.pl_logits, self.pl_targets)\n\n # Send to execution\n with tf.Session() as sess:\n\n # Init all variables\n sess.run(tf.global_variables_initializer())\n\n # Get and log results\n result = sess.run([mq_out],\n feed_dict={\n self.pl_logits: self.logits,\n self.pl_targets: self.targets\n })\n self.compare(result[0], expected)",
"def test_push_statistics(self):\n from supvisors.statistics import StatisticsInstance\n instance = StatisticsInstance(12, 2)\n # push first set of measures\n stats1 = (8.5, [(25, 400), (25, 125), (15, 150), (40, 400), (20, 200)],\n 76.1, {'eth0': (1024, 2000), 'lo': (500, 500)}, {'myself': (118612, (0.15, 1.85))})\n instance.push_statistics(stats1)\n # check evolution of instance\n self.assertEqual(0, instance.counter)\n self.assertEqual(5, len(instance.cpu))\n for cpu in instance.cpu:\n self.assertFalse(cpu)\n self.assertFalse(instance.mem)\n self.assertItemsEqual(['eth0', 'lo'], instance.io.keys())\n for recv, sent in instance.io.values():\n self.assertIs(list, type(recv))\n self.assertFalse(recv)\n self.assertIs(list, type(sent))\n self.assertFalse(sent)\n self.assertEqual([('myself', 118612)], instance.proc.keys())\n for cpu_list, mem_list in instance.proc.values():\n self.assertIs(list, type(cpu_list))\n self.assertFalse(cpu_list)\n self.assertIs(list, type(mem_list))\n self.assertFalse(mem_list)\n self.assertIs(stats1, instance.ref_stats)\n # push second set of measures\n stats2 = (18.52, [(30, 600), (40, 150), (30, 200), (41, 550), (20, 300)],\n 76.2, {'eth0': (1250, 2200), 'lo': (620, 620)}, {'myself': (118612, (0.16, 1.84))})\n instance.push_statistics(stats2)\n # counter is based a theoretical period of 5 seconds\n # this update is not taken into account\n # check evolution of instance\n self.assertEqual(1, instance.counter)\n self.assertEqual(5, len(instance.cpu))\n for cpu in instance.cpu:\n self.assertFalse(cpu)\n self.assertFalse(instance.mem)\n self.assertItemsEqual(['eth0', 'lo'], instance.io.keys())\n for recv, sent in instance.io.values():\n self.assertIs(list, type(recv))\n self.assertFalse(recv)\n self.assertIs(list, type(sent))\n self.assertFalse(sent)\n self.assertEqual([('myself', 118612)], instance.proc.keys())\n for cpu_list, mem_list in instance.proc.values():\n self.assertIs(list, type(cpu_list))\n self.assertFalse(cpu_list)\n self.assertIs(list, type(mem_list))\n self.assertFalse(mem_list)\n self.assertIs(stats1, instance.ref_stats)\n # push third set of measures\n stats3 = (28.5, [(45, 700), (50, 225), (40, 250), (42, 598), (20, 400)],\n 76.1, {'eth0': (2048, 2512), 'lo': (756, 756)}, {'myself': (118612, (1.75, 1.9))})\n instance.push_statistics(stats3)\n # this update is taken into account\n # check evolution of instance\n self.assertEqual(2, instance.counter)\n self.assertListEqual([[6.25], [20.0], [20.0], [1.0], [0.0]], instance.cpu)\n self.assertListEqual([76.1], instance.mem)\n self.assertDictEqual({'eth0': ([0.4], [0.2]), 'lo': ([0.1], [0.1])}, instance.io)\n self.assertEqual({('myself', 118612): ([0.5], [1.9])}, instance.proc)\n self.assertIs(stats3, instance.ref_stats)\n # push fourth set of measures (reuse stats2)\n instance.push_statistics(stats2)\n # again,this update is not taken into account\n self.assertEqual(3, instance.counter)\n self.assertIs(stats3, instance.ref_stats)\n # push fifth set of measures\n stats5 = (38.5, [(80, 985), (89, 386), (48, 292), (42, 635), (32, 468)],\n 75.9, {'eth0': (3072, 2768), 'lo': (1780, 1780)}, {'myself': (118612, (11.75, 1.87))})\n instance.push_statistics(stats5)\n # this update is taken into account\n # check evolution of instance\n self.assertEqual(4, instance.counter)\n self.assertListEqual([[6.25, 10.9375], [20.0, 19.5], [20.0, 16.0], [1.0, 0.0], [0.0, 15.0]], instance.cpu)\n self.assertListEqual([76.1, 75.9], instance.mem)\n self.assertDictEqual({'eth0': ([0.4, 0.8], [0.2, 0.2]), 'lo': ([0.1, 0.8], [0.1, 
0.8])}, instance.io)\n self.assertEqual({('myself', 118612): ([0.5, 3.125], [1.9, 1.87])}, instance.proc)\n self.assertIs(stats5, instance.ref_stats)\n # push sixth set of measures (reuse stats2)\n instance.push_statistics(stats2)\n # this update is not taken into account\n # check evolution of instance\n self.assertEqual(5, instance.counter)\n self.assertIs(stats5, instance.ref_stats)\n # push seventh set of measures\n stats7 = (48.5, [(84, 1061), (92, 413), (48, 480), (45, 832), (40, 1100)],\n 74.7, {'eth0': (3584, 3792), 'lo': (1812, 1812)}, {'myself': (118612, (40.75, 2.34))})\n instance.push_statistics(stats7)\n # this update is taken into account\n # check evolution of instance. max depth is reached so lists roll\n self.assertEqual(6, instance.counter)\n self.assertListEqual([[ 10.9375, 5.0], [19.5, 10.0], [16.0, 0.0], [0.0, 1.5], [15.0, 1.25]], instance.cpu)\n self.assertListEqual([75.9, 74.7], instance.mem)\n self.assertDictEqual({'eth0': ([0.8, 0.4], [0.2, 0.8]), 'lo': ([0.8, 0.025], [0.8, 0.025])}, instance.io)\n self.assertEqual({('myself', 118612): ([3.125, 36.25], [1.87, 2.34])}, instance.proc)\n self.assertIs(stats7, instance.ref_stats)",
"def test_successful_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='dummy')\n call_command('send_queued_sms', log_level=0)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='dummy')\n call_command('send_queued_sms', log_level=1)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='dummy')\n call_command('send_queued_sms', log_level=2)\n self.assertEqual(sms.logs.count(), 1)",
"def test_wait_for_dispatched_outbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_outbound(1, 'fooconn')\n self.assertNoResult(d)\n msg = msg_helper.make_outbound('message')\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.outbound', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])",
"def test_telemetry(self):\n tel = {}\n tel[\"voltage\"] = 23.0\n tel[\"amperage\"] = 23.0\n tel[\"ambient_temp\"] = 34.2\n tel[\"ambient_humidity\"] = 34.2\n tel[\"flux_capacitance\"] = 1.0\n tel[\"time_stamp\"] = str(datetime.utcnow())\n tel_obj = Telemetry.from_dict(tel)\n assert(tel_obj.voltage == 23.0)",
"def test_get(self, app, data_queues, metricsmock, logs):\n res = self._call(app, ip=self.test_ip, method=\"get\", status=200)\n self.check_response(data_queues, res, \"ok\")\n self.check_queue(data_queues, 0)\n\n metricsmock.assert_incr_once(\n \"request\", tags=[self.metric_path, \"method:get\", \"status:200\"]\n )\n metricsmock.assert_timing_once(\n \"request.timing\", tags=[self.metric_path, \"method:get\"]\n )\n\n log = logs.only_entry\n expected_entry = {\n # accuracy is low for region API fixture, and medium for geolocate\n # see bound_model_accuracy and related tests for direct calculation\n \"accuracy\": logs.only_entry[\"accuracy\"],\n \"accuracy_min\": \"low\",\n \"api_key\": \"test\",\n \"api_path\": self.metric_path.split(\":\")[1],\n \"api_type\": self.metric_type,\n \"blue\": 0,\n \"blue_valid\": 0,\n \"cell\": 0,\n \"cell_valid\": 0,\n \"duration_s\": log[\"duration_s\"],\n \"event\": f\"GET {self.url} - 200\",\n \"fallback_allowed\": False,\n \"has_geoip\": True,\n \"has_ip\": True,\n \"http_method\": \"GET\",\n \"http_path\": self.url,\n \"http_status\": 200,\n \"log_level\": \"info\",\n \"region\": \"GB\",\n \"result_status\": \"hit\",\n \"source_geoip_accuracy\": log[\"accuracy\"],\n \"source_geoip_accuracy_min\": \"low\",\n \"source_geoip_status\": \"hit\",\n \"wifi\": 0,\n \"wifi_valid\": 0,\n }\n if self.metric_type == \"locate\":\n expected_entry[\"api_key_count\"] = 1\n expected_entry[\"api_response_sig\"] = log[\"api_response_sig\"]\n assert log == expected_entry",
"def test_metrics(self):\n self.assertIsInstance(self.analytics.suites[testReportSuite].metrics, omniture.utils.AddressableList)"
]
| [
"0.6648457",
"0.61141425",
"0.5876275",
"0.5731564",
"0.56937885",
"0.5580018",
"0.55792314",
"0.54794395",
"0.53811246",
"0.5342774",
"0.53372544",
"0.53371376",
"0.5333182",
"0.53151363",
"0.5304059",
"0.527914",
"0.5275696",
"0.52615434",
"0.524505",
"0.5242069",
"0.5214685",
"0.52004325",
"0.51836294",
"0.5181684",
"0.51683587",
"0.5165429",
"0.51366913",
"0.51272553",
"0.51197064",
"0.51041144"
]
| 0.7067573 | 0 |
`initialize` overrides `statsd` default instance attributes. | def test_initialization(self):
options = {
'statsd_host': "myhost",
'statsd_port': 1234
}
# Default values
self.assertEqual(statsd.host, "localhost")
self.assertEqual(statsd.port, 8125)
# After initialization
initialize(**options)
self.assertEqual(statsd.host, "myhost")
self.assertEqual(statsd.port, 1234)
# Add namespace
options['statsd_namespace'] = "mynamespace"
initialize(**options)
self.assertEqual(statsd.host, "myhost")
self.assertEqual(statsd.port, 1234)
self.assertEqual(statsd.namespace, "mynamespace")
# Set `statsd` host to the system's default route
initialize(statsd_use_default_route=True, **options)
self.assertEqual(statsd.host, "172.17.0.1")
self.assertEqual(statsd.port, 1234)
# Add UNIX socket
options['statsd_socket_path'] = '/var/run/dogstatsd.sock'
initialize(**options)
self.assertEqual(statsd.socket_path, options['statsd_socket_path'])
self.assertIsNone(statsd.host)
self.assertIsNone(statsd.port) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_statsd():\n statsd.init_statsd({\n 'STATSD_HOST': config.secrets.server('statsd.host'),\n 'STATSD_PORT': config.secrets.server('statsd.port'),\n 'STATSD_BUCKET_PREFIX': 'linkr',\n })",
"def __init__(self, collectd):\n self.collectd = collectd\n self.conf = self.default_config()\n self.types = {}\n\n collectd.info('Initialized MetricsConfig with default config %s' % self.conf)",
"def __init__(self, stats):\n self._meta = stats['meta'].item()\n self._stats = stats['data'].item()\n self._moving_average_cache = {}",
"def __init__(self, config, stats):\n self.config = config\n self.stats = stats\n self.on_map = False",
"def __init__(self):\n self.stats = {}\n self.stats['hits'] = 0\n self.stats['operations'] = {}\n self.stats['operations']['GetCapabilities'] = {}\n self.stats['operations']['GetCapabilities']['hits'] = 0\n self.stats['operations']['POST'] = {}\n self.stats['operations']['POST']['hits'] = 0",
"def __init__(self):\n self.stats = None\n self.ticker = None",
"def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])",
"def __init__(self, settings):\n self._settings = settings\n self._stats = None",
"def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()",
"def __init__(\r\n self,\r\n attributes: Optional[Dict[str, Any]] = None,\r\n modifiers: Optional[List[Modifier]] = None,\r\n default: Optional[int] = 0,\r\n ):\r\n # Initializes level in case the call stat does not\r\n self.level = 0\r\n\r\n for attr in Stats.BASE_ATTR:\r\n setattr(self, attr, default)\r\n\r\n # Adds the individual stat\r\n if attributes:\r\n for key, value in attributes.items():\r\n setattr(self, key, value)\r\n\r\n # Modifiers for changing stats\r\n self.mods = {}\r\n if modifiers:\r\n for mod in modifiers:\r\n if self._validate_mod(mod):\r\n self.mods[mod.name] = mod.get_mods()",
"def __init__(self):\n super().__init__()\n self.metric = 'AVGDIST'",
"def __init__(self, name):\n self._name = name # name of this statistics\n self._n = 0 # number of data points\n self._mean = 0 # sample mean\n self._stDev = 0 # sample standard deviation\n self._max = -sys.float_info.max # maximum\n self._min = sys.float_info.max # minimum",
"def test_stats_class_init_empty(self):\n self.assertIsInstance(self.stats, cardutils.Stats)",
"def __init__(self):\n self._profiling_mode = False\n self._total_time_ms = 0.0\n self._traced_records = []\n self._statistical_results = {}",
"def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)",
"def initialize(self):\n super(Stats, self).initialize()\n if not hasattr(self.application, 'rabbitmq'):\n setattr(self.application, 'rabbitmq', dict())\n if not hasattr(self.application, 'host'):\n setattr(self.application, 'host',\n socket.gethostname())",
"def stats(self, stats):\n self._stats = stats",
"def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None",
"def __init__(self, *args, **opts):\n \n madloop_statistics = {\n 'unknown_stability' : 0,\n 'stable_points' : 0,\n 'unstable_points' : 0,\n 'exceptional_points' : 0,\n 'DP_usage' : 0,\n 'QP_usage' : 0,\n 'DP_init_usage' : 0,\n 'QP_init_usage' : 0,\n 'CutTools_DP_usage' : 0,\n 'CutTools_QP_usage' : 0, \n 'PJFry_usage' : 0,\n 'Golem_usage' : 0,\n 'IREGI_usage' : 0,\n 'Samurai_usage' : 0,\n 'Ninja_usage' : 0,\n 'Ninja_QP_usage' : 0,\n 'COLLIER_usage' : 0,\n 'max_precision' : 1.0e99,\n 'min_precision' : 0.0,\n 'averaged_timing' : 0.0,\n 'n_madloop_calls' : 0,\n 'cumulative_timing' : 0.0,\n 'skipped_subchannel' : 0 # number of times that a computation have been \n # discarded due to abnormal weight.\n }\n \n for key, value in madloop_statistics.items():\n self[key] = value\n\n super(dict,self).__init__(*args, **opts)",
"def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:SOS'\n self.stats['operations']['GetObservation'] = {}\n self.stats['operations']['GetObservation']['hits'] = 0\n self.stats['operations']['GetObservation']['resource'] = {}\n self.stats['operations']['GetObservation']['resource']['param'] = 'observedproperty'\n self.stats['operations']['GetObservation']['resource']['list'] = {}\n self.stats['operations']['DescribeSensor'] = {}\n self.stats['operations']['DescribeSensor']['hits'] = 0",
"def init(self, *args, **kwds):\n pass",
"def __init__(self):\n super().__init__()\n self.metric = 'RNDIND'",
"def __init__(self):\n self.drilling = False # Hack for now...\n self._me = 'fuzzstats'\n self._s = \"Not running\"",
"def initialise_sampler(self):\n raise NotImplementedError",
"def stats(self, stats):\n\n self._stats = stats",
"def test_stats_class_init_statsPresentInd(self):\n self.assertEqual(self.stats.statsPresent, False)",
"def statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))",
"def test_stats_class_initialisation(self):\n self.assertIsInstance(self.stats,cardutils.Stats)",
"def test_stats_class_init_statsPresentInd(self):\n self.assertEqual(self.stats.statsPresent, True)",
"def __init__(self, stats_file):\n stats = dict()\n self._stats = dict()\n\n for line in stats_file:\n stat = next((regex.match(line).groupdict()\n for regex in FUZZER_STATS_RES if regex.match(line)),\n dict())\n stats.update(stat)\n\n if not stats:\n raise Exception('Empty fuzzer_stats file `%s`' % stats_file.name)\n\n # Automatically create class attributes based on the fuzzer_stats fields\n for k, v in stats.items():\n if k == 'command_line':\n afl_opts = None\n target_args = None\n getopt_error = None\n\n for afl_getopt in AFL_GETOPTS:\n try:\n afl_opts, target_args = getopt(v.split(), afl_getopt)\n break\n except GetoptError as e:\n getopt_error = e\n\n if not afl_opts or not target_args:\n raise getopt_error\n\n setattr(self, 'afl_cmdline', afl_opts)\n setattr(self, 'target_cmdline', target_args)\n else:\n # If convertable to a number, treat as a number\n try:\n v = float(v)\n except ValueError:\n pass\n\n setattr(self, k, v)\n self._stats[k] = v"
]
| [
"0.71561664",
"0.65992165",
"0.6561374",
"0.6316971",
"0.6264479",
"0.6241959",
"0.61702603",
"0.6155935",
"0.6036247",
"0.6032295",
"0.6023695",
"0.601545",
"0.60045856",
"0.59905905",
"0.5960218",
"0.5936547",
"0.5932973",
"0.5904823",
"0.58644193",
"0.5854766",
"0.5841938",
"0.5810587",
"0.5806126",
"0.58019316",
"0.58017004",
"0.5790686",
"0.5786455",
"0.5783988",
"0.5779792",
"0.57700694"
]
| 0.68837506 | 1 |
Dogstatsd can retrieve its config from env vars when not provided in constructor. | def test_dogstatsd_initialization_with_env_vars(self):
# Setup
with preserve_environment_variable('DD_AGENT_HOST'):
os.environ['DD_AGENT_HOST'] = 'myenvvarhost'
with preserve_environment_variable('DD_DOGSTATSD_PORT'):
os.environ['DD_DOGSTATSD_PORT'] = '4321'
dogstatsd = DogStatsd()
# Assert
self.assertEqual(dogstatsd.host, "myenvvarhost")
self.assertEqual(dogstatsd.port, 4321) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n\n self.config = load_config()\n self.set_env_var()",
"def __init__(self, environment=None):\n if environment is None:\n environment = os.environ.get(\"SENTERA_ENV\") or \"prod\"\n environment = environment.lower()\n self.environment = environment\n\n if self.environment == \"prod\":\n self.config = {\n \"sentera_api_url\": \"https://api.sentera.com\",\n \"weather_api_url\": \"https://weather.sentera.com\",\n }\n else:\n self.config = {\n \"sentera_api_url\": f\"https://api{self.environment}.sentera.com\",\n \"weather_api_url\": f\"https://weather{self.environment}.sentera.com\",\n }\n\n if ENV_SENTERA_API_URL in os.environ:\n self.config[\"sentera_api_url\"] = os.environ.get(ENV_SENTERA_API_URL)\n\n if ENV_WEATHER_API_URL in os.environ:\n self.config[\"weather_api_url\"] = os.environ.get(ENV_WEATHER_API_URL)",
"def from_env():\n app_env = os.getenv(\"APP_ENV\", \"TEST\")\n return ConfigurationFactory.get_config(app_env)",
"def __init__(self, env=None, config_src=None):\n if config_src is None:\n self.config_file = DEF_CONFIG_SRC\n else:\n self.config_file = config_src\n\n with open(self.config_file, 'r') as json_config:\n self.config = json.load(json_config)\n # get list of top-level environments from config;\n # if env is not one of them, or is None,\n # default to the \"DEFAULT\" environment.\n environments = [e for e in self.config]\n if env is None or env not in environments:\n self.env = 'DEFAULT'\n else:\n self.env = env\n\n # set up logging\n log_file = self.config[self.env]['LOG_FILE']\n log_level = self.config[self.env]['LOG_LEVEL']\n logging.basicConfig(filename=log_file,\n format='%(asctime)s - %(levelname)s - %(message)s',\n level=log_level)\n logging.info(f'Config Mgr: Config set up; env: {self.env}')\n logging.debug(f'CWD: {os.getcwd()}')\n logging.debug(f'search path: {sys.path}')",
"def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]",
"def __init__(self, name, config, handlers):\r\n # Get Class Collector config\r\n try:\r\n class_config = config['collectors']['CMDCollector']\r\n except KeyError:\r\n class_config = None\r\n super(CMDCollector, self).__init__(name, config, handlers)\r\n if class_config:\r\n self.config.merge(class_config)\r\n\r\n # vars = self.config['env_vars']\r\n # if not isinstance(vars, list):\r\n # vars = vars.split()\r\n # for var in vars:\r\n # key, param = var.split(':')\r\n # os.putenv(key, self.config[param])\r",
"def __init__(self, config):\n super().__init__(config)\n self.collector_host = config.get(\"collector_host\")\n self.schedds = config.get(\"schedds\", [None])\n self.condor_config = config.get(\"condor_config\")\n self.constraint = config.get(\"constraint\", True)\n self.classad_attrs = config.get(\"classad_attrs\")\n self.correction_map = config.get(\"correction_map\")",
"def fromenv(cls) -> 'Config':\n files = Config.find_config_files()\n if not files:\n log.info(\n \"Could not find default config: `~/.wpwatcher/wpwatcher.conf`, `~/wpwatcher.conf` or `./wpwatcher.conf`\"\n )\n return cls.default()\n else:\n return cls.fromfiles(files)",
"def _get_config(self, kwargs):\n return Config(config_file=kwargs.pop('config_file', None),\n env=kwargs.pop('env', None), overrides=kwargs)",
"def get_config():\n\n return {\n 'ADMIN_USERNAME': env.get('ECSTEST_ADMIN_USERNAME', 'username'),\n 'ADMIN_PASSWORD': env.get('ECSTEST_ADMIN_PASSWORD', 'password'),\n 'TOKEN': env.get('ECSTEST_TOKEN', None),\n 'CONTROL_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_ENDPOINT', 'https://127.0.0.1:4443'\n ),\n 'TOKEN_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_TOKEN_ENDPOINT', 'https://127.0.0.1:4443/login'\n ),\n 'ALT_CONTROL_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_ENDPOINT',\n env.get('ECSTEST_CONTROL_ENDPOINT',\n 'https://127.0.0.1:4443')),\n 'ALT_TOKEN_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_TOKEN_ENDPOINT',\n env.get('ECSTEST_CONTROL_TOKEN_ENDPOINT',\n 'https://127.0.0.1:4443/login'),\n ),\n 'VERIFY_SSL': _env_to_bool('ECSTEST_VERIFY_SSL', 0),\n 'REQUEST_TIMEOUT': float(env.get('ECSTEST_REQUEST_TIMEOUT', 15.0)),\n 'TOKEN_FILENAME': env.get(\n 'ECSTEST_TOKEN_FILENAME', '/tmp/ecstest.token'\n ),\n 'CACHE_TOKEN': _env_to_bool('ECSTEST_CACHE_TOKEN', 1),\n 'AUTH_TOKEN_MIN_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MIN_LENGTH', 1),\n 'AUTH_TOKEN_MAX_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MAX_LENGTH', 512),\n 'NAMESPACE': env.get('ECSTEST_NAMESPACE', 'namespace1'),\n 'MAX_LOGIN_TIME': env.get('ECSTEST_MAX_LOGIN_TIME', 3),\n 'ACCESS_SSL': _env_to_bool('ECSTEST_ACCESS_SSL', 0),\n 'ACCESS_SERVER': env.get('ECSTEST_ACCESS_SERVER', 'localhost'),\n 'ALT_ACCESS_SERVER': env.get(\n 'ECSTEST_ALT_ACCESS_SERVER',\n env.get('ECSTEST_ACCESS_SERVER', 'localhost')\n ),\n 'ACCESS_PORT': int(env.get('ECSTEST_ACCESS_PORT', 3128)),\n 'ACCESS_KEY': env.get('ECSTEST_ACCESS_KEY', 'mykey'),\n 'ACCESS_SECRET': env.get('ECSTEST_ACCESS_SECRET', 'mysecret'),\n 'ALT_ACCESS_KEY': env.get(\n 'ECSTEST_ALT_ACCESS_KEY',\n env.get('ECSTEST_ACCESS_KEY', 'mykey')\n ),\n 'ALT_ACCESS_SECRET': env.get(\n 'ECSTEST_ALT_ACCESS_SECRET',\n env.get('ECSTEST_ACCESS_SECRET', 'mysecret')\n ),\n 'VERBOSE_OUTPUT': _env_to_bool('ECSTEST_VERBOSE_OUTPUT', 0),\n 'TEST_TARGET': env.get('ECSTEST_TEST_TARGET', constants.TARGET_AWSS3),\n 'TEST_TYPE': env.get(\n 'ECSTEST_TEST_TYPE', constants.TYPE_COMPATIBILITY\n ),\n 'DNS_BUCKET_NAMING_CONVENTION': _env_to_bool(\n 'ECSTEST_DNS_BUCKET_NAMING_CONVENTION', 0\n ),\n 'NODES_PER_SITE': int(env.get('ECSTEST_NODES_PER_SITE', 1)),\n 'RUN_DISABLED': _env_to_bool('ECSTEST_RUN_DISABLED'),\n 'REUSE_BUCKET_NAME': env.get('ECSTEST_REUSE_BUCKET_NAME'),\n }",
"def __init__(self, config):\n err_msg = \".puppeteer.yml must have a list of environments. Please see setup details at {0}.\".format(\n PROJECT_URL)\n try:\n self.envs = config.get('environments')\n if self.envs is None or not isinstance(self.envs, list):\n raise ControlRepoError(err_msg)\n except (TypeError, AttributeError):\n raise ControlRepoError(err_msg)\n\n self.inventory_file = config.get('inventory_file', 'inventory.ini')\n self.repo_file = REPO_FILE\n self.env_dir = 'environments'\n self.group_dir = 'group_vars'\n self.host_dir = 'host_vars'\n self.roles_dir = 'roles'",
"def _get_crowd_config():\n config = getattr(settings, 'CROWD', None)\n if not config:\n raise UserWarning('CROWD configuration is not in your settings.py')\n return config",
"def __init__(self):\n # Select all the environment variables starting with 'ASH_CFG_' and strip\n # off the leading ASH_CFG_ portion to use as the name of the variable.\n self.variables = dict(\n [(x[8:], y) for x, y in os.environ.items() if x.startswith('ASH_CFG_')]\n )",
"def test_dogstatsd_initialization_with_dd_env_service_version(self):\n cases = [\n # Test various permutations of setting DD_* env vars, as well as other global tag configuration.\n # An empty string signifies that the env var either isn't set or that it is explicitly set to empty string.\n ('', '', '', '', [], []),\n ('prod', '', '', '', [], ['env:prod']),\n ('prod', 'dog', '', '', [], ['env:prod', 'service:dog']),\n ('prod', 'dog', 'abc123', '', [], ['env:prod', 'service:dog', 'version:abc123']),\n ('prod', 'dog', 'abc123', 'env:prod,type:app', [], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', 'env:prod2,type:app', [], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', '', ['env:prod', 'type:app'], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', '', ['env:prod2', 'type:app'], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', 'env:prod3,custom_tag:cat', ['env:prod2', 'type:app'], ['custom_tag:cat', 'env:prod', 'env:prod2', 'env:prod3', 'service:dog', 'type:app', 'version:abc123']),\n ]\n for case in cases:\n dd_env, dd_service, dd_version, datadog_tags, constant_tags, global_tags = case\n with EnvVars(\n env_vars={\n 'DATADOG_TAGS': datadog_tags,\n 'DD_ENV': dd_env,\n 'DD_SERVICE': dd_service,\n 'DD_VERSION': dd_version,\n }\n ):\n dogstatsd = DogStatsd(constant_tags=constant_tags, telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n\n # Guarantee consistent ordering, regardless of insertion order.\n dogstatsd.constant_tags.sort()\n self.assertEqual(global_tags, dogstatsd.constant_tags)\n\n # Make call with no tags passed; only the globally configured tags will be used.\n global_tags_str = ','.join([t for t in global_tags])\n dogstatsd.gauge('gt', 123.4)\n dogstatsd.flush()\n\n # Protect against the no tags case.\n metric = 'gt:123.4|g|#{}\\n'.format(global_tags_str) if global_tags_str else 'gt:123.4|g\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(\n telemetry_metrics(\n tags=global_tags_str,\n bytes_sent=len(metric)\n ),\n dogstatsd.socket.recv(),\n )\n dogstatsd._reset_telemetry()\n\n # Make another call with local tags passed.\n passed_tags = ['env:prod', 'version:def456', 'custom_tag:toad']\n all_tags_str = ','.join([t for t in passed_tags + global_tags])\n dogstatsd.gauge('gt', 123.4, tags=passed_tags)\n dogstatsd.flush()\n\n metric = 'gt:123.4|g|#{}\\n'.format(all_tags_str)\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(\n telemetry_metrics(\n tags=global_tags_str,\n bytes_sent=len(metric),\n ),\n dogstatsd.socket.recv(),\n )",
"def get_config():\n return _config",
"def __init__(self):\n self._api_key = os.environ.get('IDCF_DNS_API_KEY')\n self._secret_key = os.environ.get('IDCF_DNS_SECRET_KEY')",
"def __init__(self):\r\n self.dev_conf_dir = self._get_dev_conf_dir()",
"def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)",
"def __init__(self, config: Dict[str, Any]) -> None:\n self.config = config",
"def config(env=DEFAULT_ENV, default=None, engine=None, conn_max_age=0, ssl_require=False):\n\n config = {}\n\n s = os.environ.get(env, default)\n\n if s:\n config = parse(s, engine, conn_max_age, ssl_require)\n\n return config",
"def get_environment_configuration():\n\n try:\n time_limit = int(os.getenv('AUTOBOT_POST_TIMELIMIT'))\n except TypeError:\n time_limit = None\n\n # if we're using Redis Labs\n redis_cloud_url = os.getenv('REDISCLOUD_URL')\n\n if redis_cloud_url:\n url = urlparse.urlparse(redis_cloud_url)\n redis_host = url.hostname\n redis_port = url.port\n redis_password = url.password\n else:\n redis_host = os.getenv('AUTOBOT_REDIS_URL')\n redis_port = os.getenv('AUTOBOT_REDIS_PORT')\n redis_password = None\n\n override = {\n REDDIT_USERNAME: os.getenv('AUTOBOT_REDDIT_USERNAME'),\n REDDIT_PASSWORD: os.getenv('AUTOBOT_REDDIT_PASSWORD'),\n SUBREDDIT: os.getenv('AUTOBOT_SUBREDDIT'),\n CLIENT_ID: os.getenv('AUTOBOT_CLIENT_ID'),\n CLIENT_SECRET: os.getenv('AUTOBOT_CLIENT_SECRET'),\n POST_TIMELIMIT: time_limit,\n REDIS_BACKEND: os.getenv('AUTOBOT_REDIS_BACKEND'),\n REDIS_URL: redis_host,\n REDIS_PORT: redis_port,\n REDIS_PASSWORD: redis_password,\n ROLLBAR_ACCESS_TOKEN: os.getenv('ROLLBAR_ACCESS_TOKEN'),\n ROLLBAR_ENVIRONMENT: os.getenv('ROLLBAR_ENVIRONMENT')\n }\n\n # remove all the 'None' valued things\n return {k: v for k, v in override.items() if v is not None}",
"def __init__(self, config):\n try:\n config['volume_id']\n config['access_key']\n config['secret_access_key']\n config['region']\n except KeyError, e:\n logging.error(repr(e))\n raise ImproperlyConfigured()\n\n if not config.has_key('keep'):\n config['keep'] = 5\n\n self.config = config",
"def config():\n return _config",
"def __init__(self, config):\n\n self.root = config.root\n self.pidfile = config.pidfile\n self.log_conf = config.logging",
"def __init__(self, collectd):\n self.collectd = collectd\n self.conf = self.default_config()\n self.types = {}\n\n collectd.info('Initialized MetricsConfig with default config %s' % self.conf)",
"def __init__(self):\n load_dotenv('.env')\n self.NEWS_API_KEY = os.getenv('NEWS_API_KEY')",
"def __init__(self,\n env=None,\n git_command=DEFAULT_GIT,\n local_config_enabled=True):\n super().__init__(env=env, git_command=git_command)\n self.__local_config_enabled = local_config_enabled",
"def __init__(self):\n '''Lets find ot the system we run on'''\n self.syst = platform.system()\n '''And where we are'''\n self.module_abs_path = os.path.abspath(os.path.dirname(__file__))\n if self.syst == 'Windows':\n self.sonata_suite_config_json = self.vm_logsrv_cnf_location = os.path.join(self.module_abs_path,\n \"..\\\\configs_sonata\\sonata_conf.json\")\n elif self.syst == 'Linux':\n self.sonata_suite_config_json = self.vm_logsrv_cnf_location = os.path.join(self.module_abs_path,\n \"../configs_sonata/sonata_conf.json\")\n '''get some tools ready'''\n self.__utils__=var_utils.Varutils()\n '''MAP OF CONFIG PARAMS FROM JSON'''\n self.sonata_suite_config = self.__utils__.read_json_to_map(data_location=self.sonata_suite_config_json)",
"def setup_config():\n if CONFIG.get(\"environment\", \"server\") == 'production':\n return 'config.ProductionConfig'\n else:\n return 'config.TestingConfig'",
"def _get_config(self):\n return self.__config"
]
| [
"0.67549545",
"0.6440547",
"0.6411156",
"0.6389691",
"0.6364424",
"0.63438725",
"0.6324953",
"0.6221165",
"0.62137526",
"0.6192844",
"0.61294645",
"0.61156017",
"0.60837567",
"0.6071664",
"0.60495734",
"0.6048155",
"0.60300165",
"0.6009422",
"0.60019284",
"0.59974647",
"0.59810466",
"0.5975687",
"0.5955512",
"0.5923189",
"0.59191877",
"0.5899465",
"0.58925915",
"0.5888633",
"0.5875118",
"0.58648473"
]
| 0.70487374 | 0 |
Dogstatsd host can be dynamically set to the default route. | def test_default_route(self):
self.assertEqual(
DogStatsd(use_default_route=True).host,
"172.17.0.1"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getHost():",
"def getHost():",
"def get_host(name):\n raise NotImplementedError('derived class should overload me')",
"def set_target(self, host, port):\r\n pass",
"def serve(ctx, host, port):\n pass",
"def test_default_host_http_required(self):\n client = self.base_scenario(\n frang_config=\"\", requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"]\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def init_host(self, host):\n LOG.debug(\"init_host\")",
"def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e",
"def __init__(self, *args, **kwargs):\n # Enrich the base name given in the form to add prefixes or randomness\n kwargs['dns_name'] = names.create_host_name(kwargs['base_name'])\n if ('setup_script' not in kwargs and 'git_repo' in kwargs and\n 'port' in kwargs):\n kwargs['setup_script'] = self.default_setup_script(**kwargs)\n super(Host, self).__init__(*args, **kwargs)",
"def usage(self, host):",
"def get_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass",
"def __get_host(self) -> str:\n\t\treturn os.getenv('FLASK_DRIVER_HOST', '0.0.0.0')",
"def set_service_host(self, host):\n self._api_host = f\"https://{host}\"",
"def register_router(self, hostname, expire=-1):",
"def get_host(self):\r\n return self.host",
"def __init__(self, host):\n self.host = host",
"def __init__(self, host):\n self.host = host",
"def test_get_host(self):\n pass",
"def __init__(self, hosts, tmp):\n super().__init__(\"/run/dfuse/*\", \"dfuse\")\n\n # set params\n self.hosts = hosts\n self.tmp = tmp\n self.running_hosts = NodeSet()",
"def fusion_api_get_hypervisor_host(self, uri=None, param='', api=None, headers=None): # pylint: disable=unused-argument\n return self.hypervisor_host.get(uri, api, headers, param='')",
"def horizonhost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['horizon']\n env.exists = exists",
"def handle_host(self, host):\n LOG.info('FakeHandler: handle host %s' % host)",
"def set_host(host_index):\n env.hosts = [public_dns_names[int(host_index)]]\n env.password = [public_pwds[int(host_index)]]",
"def get_default_config(self):\n config = super(BindCollector, self).get_default_config()\n config.update({\n 'host': 'localhost',\n 'port': 8080,\n 'path': 'bind',\n # Available stats:\n # - resolver (Per-view resolver and cache statistics)\n # - server (Incoming requests and their answers)\n # - zonemgmt (Requests/responses related to zone management)\n # - sockets (Socket statistics)\n # - memory (Global memory usage)\n 'publish': [\n 'resolver',\n 'server',\n 'zonemgmt',\n 'sockets',\n 'memory',\n ],\n # By default we don't publish these special views\n 'publish_view_bind': False,\n 'publish_view_meta': False,\n })\n return config",
"def setHostRoute( self, ip, intf ):\n return self.cmd( 'route add -host ' + ip + ' dev ' + intf )",
"def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]",
"def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]",
"def set_host(self, host: str) -> None:\n _LOGGER.debug(\"Setting host to %s\", host)\n host_url = urlparse(host)\n self.scheme = host_url.scheme or \"http\"\n self.host = host_url.netloc or host_url.path\n self.base_url = f\"{self.scheme}://{self.host}\"\n self.api_url = f\"{self.base_url}/apps/api/{self.app_id}\"",
"def scribe_host():\r\n if LogOptions._SCRIBE_HOST is None:\r\n LogOptions._SCRIBE_HOST = app.get_options().twitter_common_log_scribe_host\r\n return LogOptions._SCRIBE_HOST",
"def get_default_config(self):\n config = super(EndecaDgraphCollector, self).get_default_config()\n config.update({\n 'path': 'endeca.dgraph',\n 'host': 'localhost',\n 'port': 8080,\n 'timeout': 1,\n })\n return config"
]
| [
"0.6033232",
"0.6033232",
"0.5783881",
"0.5775147",
"0.5751545",
"0.56110024",
"0.5583052",
"0.55784684",
"0.5525812",
"0.549326",
"0.54884046",
"0.54740816",
"0.54694706",
"0.5451875",
"0.5401176",
"0.53989035",
"0.53989035",
"0.5395777",
"0.5393919",
"0.5384688",
"0.5379834",
"0.53783405",
"0.53687036",
"0.5354895",
"0.5345455",
"0.5343131",
"0.5343131",
"0.533639",
"0.53321517",
"0.52881944"
]
| 0.75208133 | 0 |
Timed value is reported in ms when statsd.use_ms is True. | def test_timed_in_ms(self):
# Arm statsd to use_ms
self.statsd.use_ms = True
# Sample a function run time
@self.statsd.timed('timed.test')
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
func(1, 2, kwarg2=3)
# Assess the packet
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(500, float(value), 100)
# Repeat, force timer value in seconds
@self.statsd.timed('timed.test', use_ms=False)
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
func(1, 2, kwarg2=3)
self.statsd.flush()
packet = self.recv()
name_value, type_ = packet.rstrip('\n').split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(0.5, float(value), 0.1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ms(self):\n\t\treturn self._ms",
"def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()",
"def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0",
"def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)",
"def get_time_ms():\n return int(round(time.time() * 1000))",
"def unit_ms(self):\n return (self.time_base / 1000.0) / 60.0",
"def time_ms():\n return int(1000 * time.time())",
"def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def getDurationMs(self):\n return self.durationMs",
"def get_time(self):\n return self.get_timed() / 10.0",
"def microsecond(self):\n return self._microsecond",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def millis(self):\n return self._micros // 1000",
"def unit_of_measurement(self) -> str:\n return MS",
"def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)",
"def millis() -> int:",
"def t(self):\n return self._data_writer.get_current_run_time_ms()",
"def timeTime(self):\n return self._micros / 1000000.0",
"def _msd(self):\n return math.floor(math.log10(abs(self._val)))",
"def millis():\n return int(round(time() * 1000))",
"def _get_milleseconds(self):\n return int(round(time.time() * 1000))",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n\n return float(time.perf_counter()*1000)",
"def timing(self, stat, time, sample_rate=1):\n stats = {stat: \"%f|ms\" % time}\n self.send(stats, sample_rate)"
]
| [
"0.6975095",
"0.6810435",
"0.6657259",
"0.6554632",
"0.65140027",
"0.64969325",
"0.6489244",
"0.6371504",
"0.6335592",
"0.63139933",
"0.62697685",
"0.62574714",
"0.6204073",
"0.61836475",
"0.61701643",
"0.6164281",
"0.6155498",
"0.60401815",
"0.6027048",
"0.6026042",
"0.60232174",
"0.6008441",
"0.5959251",
"0.5959251",
"0.5959251",
"0.5959251",
"0.5959251",
"0.5959251",
"0.5949537",
"0.59421545"
]
| 0.74131775 | 0 |
Exception bubbles out of the `timed` context manager. | def test_timed_context_exception(self):
class ContextException(Exception):
pass
def func(self):
with self.statsd.timed('timed_context.test.exception'):
time.sleep(0.5)
raise ContextException()
# Ensure the exception was raised.
with pytest.raises(ContextException):
func(self)
# Ensure the timing was recorded.
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed_context.test.exception', name)
self.assert_almost_equal(0.5, float(value), 0.1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_timer_context_exceptions():\n sc = _client()\n\n with assert_raises(socket.timeout):\n with sc.timer('foo'):\n raise socket.timeout()\n\n _timer_check(sc, 1, 'foo', 'ms')",
"def test_timeoutRaises(self):\n\n @self.eventloop.wait_for(timeout=0.5)\n def times_out():\n return Deferred().addErrback(lambda f: f.trap(CancelledError))\n\n start = time.time()\n self.assertRaises(TimeoutError, times_out)\n self.assertTrue(abs(time.time() - start - 0.5) < 0.1)",
"def raise_timeout(self, *args, **kwargs):\n\n self.log.error(\"Task timeout encountered.\")\n raise TimeoutError",
"def _timeout(signum, frame):\n # Raise TimeoutException with system default timeout message\n raise TimeoutException()",
"def test_timeout_elapsed_exception(self):\n deadline = Deadline(-MS)\n with self.assertRaises(TimeoutError):\n deadline.timeout()",
"def assert_timeout(self) -> None:\n if self._cancelled:\n raise asyncio.TimeoutError from None",
"def pytest_exception_interact(node):\n hooks = node.config.pluginmanager.hook\n hooks.pytest_timeout_cancel_timer(item=node)",
"def test_timed_context_no_metric_exception(self):\n\n def func(self):\n with self.statsd.timed():\n time.sleep(0.5)\n\n # Ensure the exception was raised.\n with pytest.raises(TypeError):\n func(self)\n\n # Ensure the timing was recorded.\n packet = self.statsd.socket.recv()\n self.assertIsNone(packet)",
"def _handle_timeout(self, frame=None, **_):\n\n raise TimeOut.TimeOutError(self, frame)",
"def timeout(self, time, job_id, reraise=False):\n\n signal.signal(signal.SIGALRM, self.raise_timeout)\n signal.alarm(time)\n try:\n yield\n except TimeoutError:\n self.log.warning(\n \"Timeout encountered after %s seconds running %s.\",\n time,\n job_id,\n )\n if reraise:\n raise TimeoutError\n finally:\n signal.signal(signal.SIGALRM, signal.SIG_IGN)",
"def test_timeoutCancels(self):\n result = Deferred()\n error = []\n result.addErrback(error.append)\n\n @self.eventloop.wait_for(timeout=0.0)\n def times_out():\n return result\n\n self.assertRaises(TimeoutError, times_out)\n self.assertIsInstance(error[0].value, CancelledError)",
"def pytest_timeout_cancel_timer(item):",
"def handler(*args, **kwargs):\n raise TimeoutException(\"Test aborted due to timeout. Test was \" +\n \"expected to finish in less than {} second(s).\".format(time_limit))",
"def _timeout_cbk(proc):\n proc.kill()\n raise RuntimeError(\"Timeout popped.\")",
"def exception(self, timeout=None):\n start_time = time()\n with self._done_condition:\n res = self.__exception()\n if res:\n return res\n self._done_condition.wait(timeout)\n if timeout and start_time + timeout < time():\n raise TimeoutError()\n return self.__exception()",
"def assert_timeout(self) -> None:",
"def timeout(time: int) -> None:\n\n # Defines the signal handler\n def raise_timeout(signum, frame):\n raise TimeoutError\n\n # Register a function to raise a TimeoutError on the signal\n signal.signal(signal.SIGALRM, raise_timeout)\n # Schedule the signal to be sent after specified time\n signal.alarm(time)\n\n try:\n yield\n except TimeoutError:\n pass\n finally:\n # Unregister the signal so it won't be triggered if the timeout is not reached\n signal.signal(signal.SIGALRM, signal.SIG_IGN)",
"def _fail(self, exception):\n self.monitor_loop.stop()\n self._maintained.errback(exception)",
"def test_timeout_elapsed_no_exception(self):\n deadline = Deadline(-MS)\n timeout = deadline.timeout(raise_if_elapsed=False)\n self.assertGreater(timeout, -2 * MS)\n self.assertLess(timeout, -MS)",
"def test_timeout(self):\n start = time.time()\n dr = EventualResult(Deferred(), None)\n self.assertRaises(TimeoutError, dr.wait, timeout=0.03)\n # be a little lenient for slow computers:\n self.assertTrue(abs(time.time() - start) < 0.05)",
"def test_timeout_twice(self):\n dr = EventualResult(Deferred(), None)\n self.assertRaises(TimeoutError, dr.wait, timeout=0.01)\n self.assertRaises(TimeoutError, dr.wait, timeout=0.01)",
"async def timeout(self, failed: bool = False) -> None:\n raise NotImplementedError()",
"def on_timeout(self):\n logger.debug(\"on_timeout\")\n self.discard_env()\n self.transport.close()",
"def raise_exc(self, exctype):\n\t\t_async_raise(self._get_my_tid(), exctype)",
"def on_timeout(self):\n pass",
"def timer_object_timeout(seconds=5):\n def raise_timeout_exception():\n raise TimeoutReachedException(seconds=seconds)\n\n return Timer(seconds, raise_timeout_exception)",
"def raise_timeout_exception(self, _result=None, _timeout=None):\n raise RosTimeoutError(\"No service response received\")",
"def pytest_timeout_cancel_timer(item):\n tle.lib.cancel()\n return True",
"def timeout(time_limit):\n\n class TimeoutException(Exception):\n \"\"\" Subclass Exception to catch timer expiration during search \"\"\"\n pass\n\n def handler(*args, **kwargs):\n \"\"\" Generic handler to raise an exception when a timer expires \"\"\"\n raise TimeoutException(\"Test aborted due to timeout. Test was \" +\n \"expected to finish in less than {} second(s).\".format(time_limit))\n\n def wrapUnitTest(testcase):\n\n @wraps(testcase)\n def testWrapper(self, *args, **kwargs):\n\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(time_limit)\n\n try:\n return testcase(self, *args, **kwargs)\n finally:\n signal.alarm(0)\n\n return testWrapper\n\n return wrapUnitTest",
"def timeout_function(seconds=5):\n\n def signal_handler(signum, frame):\n raise TimeoutError(\"Timed out!\")\n\n signal.signal(signal.SIGALRM, signal_handler)\n signal.alarm(seconds)\n\n try:\n yield\n finally:\n signal.alarm(0)"
]
| [
"0.70439595",
"0.667859",
"0.66749775",
"0.665197",
"0.6643113",
"0.65210307",
"0.6512625",
"0.6384847",
"0.63573813",
"0.6299778",
"0.6114314",
"0.6095104",
"0.6008552",
"0.59938556",
"0.59831214",
"0.5930287",
"0.59205747",
"0.58640987",
"0.5854599",
"0.58519787",
"0.5781867",
"0.5722956",
"0.57157695",
"0.56523556",
"0.5633548",
"0.56137735",
"0.5608107",
"0.56002676",
"0.55749303",
"0.5551697"
]
| 0.707114 | 0 |
Dogstatsd should automatically use DD_ENV, DD_SERVICE, and DD_VERSION (if present) to set {env, service, version} as global tags for all metrics emitted. | def test_dogstatsd_initialization_with_dd_env_service_version(self):
cases = [
# Test various permutations of setting DD_* env vars, as well as other global tag configuration.
# An empty string signifies that the env var either isn't set or that it is explicitly set to empty string.
('', '', '', '', [], []),
('prod', '', '', '', [], ['env:prod']),
('prod', 'dog', '', '', [], ['env:prod', 'service:dog']),
('prod', 'dog', 'abc123', '', [], ['env:prod', 'service:dog', 'version:abc123']),
('prod', 'dog', 'abc123', 'env:prod,type:app', [], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),
('prod', 'dog', 'abc123', 'env:prod2,type:app', [], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),
('prod', 'dog', 'abc123', '', ['env:prod', 'type:app'], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),
('prod', 'dog', 'abc123', '', ['env:prod2', 'type:app'], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),
('prod', 'dog', 'abc123', 'env:prod3,custom_tag:cat', ['env:prod2', 'type:app'], ['custom_tag:cat', 'env:prod', 'env:prod2', 'env:prod3', 'service:dog', 'type:app', 'version:abc123']),
]
for case in cases:
dd_env, dd_service, dd_version, datadog_tags, constant_tags, global_tags = case
with EnvVars(
env_vars={
'DATADOG_TAGS': datadog_tags,
'DD_ENV': dd_env,
'DD_SERVICE': dd_service,
'DD_VERSION': dd_version,
}
):
dogstatsd = DogStatsd(constant_tags=constant_tags, telemetry_min_flush_interval=0)
dogstatsd.socket = FakeSocket()
# Guarantee consistent ordering, regardless of insertion order.
dogstatsd.constant_tags.sort()
self.assertEqual(global_tags, dogstatsd.constant_tags)
# Make call with no tags passed; only the globally configured tags will be used.
global_tags_str = ','.join([t for t in global_tags])
dogstatsd.gauge('gt', 123.4)
dogstatsd.flush()
# Protect against the no tags case.
metric = 'gt:123.4|g|#{}\n'.format(global_tags_str) if global_tags_str else 'gt:123.4|g\n'
self.assertEqual(metric, dogstatsd.socket.recv())
self.assertEqual(
telemetry_metrics(
tags=global_tags_str,
bytes_sent=len(metric)
),
dogstatsd.socket.recv(),
)
dogstatsd._reset_telemetry()
# Make another call with local tags passed.
passed_tags = ['env:prod', 'version:def456', 'custom_tag:toad']
all_tags_str = ','.join([t for t in passed_tags + global_tags])
dogstatsd.gauge('gt', 123.4, tags=passed_tags)
dogstatsd.flush()
metric = 'gt:123.4|g|#{}\n'.format(all_tags_str)
self.assertEqual(metric, dogstatsd.socket.recv())
self.assertEqual(
telemetry_metrics(
tags=global_tags_str,
bytes_sent=len(metric),
),
dogstatsd.socket.recv(),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dogstatsd_initialization_with_env_vars(self):\n # Setup\n with preserve_environment_variable('DD_AGENT_HOST'):\n os.environ['DD_AGENT_HOST'] = 'myenvvarhost'\n with preserve_environment_variable('DD_DOGSTATSD_PORT'):\n os.environ['DD_DOGSTATSD_PORT'] = '4321'\n dogstatsd = DogStatsd()\n\n # Assert\n self.assertEqual(dogstatsd.host, \"myenvvarhost\")\n self.assertEqual(dogstatsd.port, 4321)",
"def test_set_derived_metric_tags(self):\n pass",
"def _add_serve_metric_default_tags(default_tags: Dict[str, str]):\n if context.get_internal_replica_context() is None:\n return default_tags\n if DEPLOYMENT_TAG in default_tags:\n raise ValueError(f\"'{DEPLOYMENT_TAG}' tag is reserved for Ray Serve metrics\")\n if REPLICA_TAG in default_tags:\n raise ValueError(f\"'{REPLICA_TAG}' tag is reserved for Ray Serve metrics\")\n if APPLICATION_TAG in default_tags:\n raise ValueError(f\"'{APPLICATION_TAG}' tag is reserved for Ray Serve metrics\")\n replica_context = context.get_internal_replica_context()\n # TODO(zcin): use replica_context.deployment for deployment tag\n default_tags[DEPLOYMENT_TAG] = replica_context.deployment\n default_tags[REPLICA_TAG] = replica_context.replica_tag\n if replica_context.app_name:\n default_tags[APPLICATION_TAG] = replica_context.app_name\n return default_tags",
"def dd_environment():\n\n # specify couchbase container name\n env = {\n 'GITLAB_TEST_TOKEN': GITLAB_TEST_TOKEN,\n 'GITLAB_LOCAL_MASTER_PORT': str(GITLAB_LOCAL_MASTER_PORT),\n 'GITLAB_LOCAL_RUNNER_PORT': str(GITLAB_LOCAL_RUNNER_PORT),\n }\n compose_file = os.path.join(HERE, 'compose', 'docker-compose.yml')\n with docker_run(\n compose_file=compose_file,\n env_vars=env,\n conditions=[\n CheckDockerLogs(\n compose_file, ['Gitlab is up!', 'Configuration loaded', 'Metrics server listening'], wait=5\n ),\n CheckEndpoints(GITLAB_RUNNER_URL, attempts=180),\n ],\n ):\n yield CONFIG, E2E_METADATA",
"def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))",
"def set_global_vars():\n global_vars = {\"status\": False}\n try:\n global_vars[\"Owner\"] = \"Mystique\"\n global_vars[\"Environment\"] = \"Prod\"\n global_vars[\"aws_region\"] = \"us-east-1\"\n global_vars[\"tag_name\"] = \"serverless_cloudwatch_logs_exporter\"\n global_vars[\"retention_days\"] = 35\n global_vars[\"cw_logs_to_export\"] = [\"/aws/lambda/trending_news\"]\n #global_vars[\"cw_logs_to_export\"] = os.environ.get(\"cw_logs_to_export\").split(\",\")\n global_vars[\"log_dest_bkt\"] = \"cw-log-exports-01\"\n global_vars[\"time_out\"] = 300\n global_vars[\"tsk_back_off\"] = 2\n global_vars[\"status\"] = True\n except Exception as e:\n logger.error(\"Unable to set Global Environment variables. Exiting\")\n global_vars[\"error_message\"] = str(e)\n return global_vars",
"def populate_default_mdc(request):\n if MDC.get(\"instanceUUID\") is None:\n default_mdc()\n g.request_start = time.process_time()\n g.empty_value = \"EMPTY\"\n g.request_id = MDC.get(\"requestID\")\n MDC.put('serviceName', request.path)\n MDC.put('IPAddress', request.headers.get('X-Forwarded-For', request.remote_addr))",
"def set_global_attributes(ds):\n ds.title = \"LPDM CO2 Concentration Footprints\"\n ds.summary = (\"Gridded CO2 concentration footprints from the output \"\n \"of the Lagrangian Particle Dispersion model \"\n \"described in Uliasz 1994.\")\n ds.Conventions = \"CF-1.6 ACDD-1.3\"\n ds.history = (\"{date:{acdd_format}} {user:s} \"\n \"created by {progname:s}\").format(\n date=RUN_DATE, user=os.environ[\"USER\"],\n acdd_format=ACDD_DATE,\n progname=sys.argv[0])\n ds.source = (\"Gridded outputs from LPDM v?.?.? \"\n \"written by Uliasz et al. and modified by Lauvaux\")\n ds.standard_name_vocabulary = \"CF Standard Name Table v32\"\n ds.date_created = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.creator_name = \"Daniel Wesloh, Thomas Lauvaux\"\n ds.creator_institution = (\n \"The Pennsylvania State University \"\n \"Department of Meteorology and Atmospheric Science\")\n ds.date_modified = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.date_metadata_modified = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.product_version = \"Py_v1.0.0\"\n ds.references = \"\"\"Uliasz, M. 1994. Lagrangian particle dispersion modeling in mesoscale applications. Environ Model: Comput Methods and Softw for Simulat Environ Pollut and its Adverse Effects (CMP) 2 : 71-.\"\"\"\n\n ds.geospatial_vertical_min = 0\n ds.geospatial_vertical_max = CLOSE_TO_GROUND\n ds.geospatial_vertical_positive = \"up\"\n ds.geospatial_vertical_units = \"km AGL\"\n # Kind of a cross between Grid and Trajectory\n # Grid covers the first and last two axes;\n # trajectory covers third-to-last\n ds.cdm_data_type = \"Grid\"\n\n ds.institution = ds.creator_institution",
"def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def dk_influxdb(request):\n dk_cfg = request.getfuncargvalue('dk_config')\n\n service = DKInfluxDB(dk_cfg)\n service.setUp()\n request.addfinalizer(service.tearDown)\n\n return service",
"def default_mdc():\n MDC.put('instanceUUID', generate_uuid())\n MDC.put('InvocationID', generate_uuid())\n MDC.put('serviceName', 'OOF_OSDF')\n MDC.put('threadID', threading.currentThread().getName())\n default_server_info()\n MDC.put('requestID', generate_uuid())\n MDC.put('partnerName', 'N/A')\n MDC.put('entryTimestamp', get_time())",
"def init_statsd():\n statsd.init_statsd({\n 'STATSD_HOST': config.secrets.server('statsd.host'),\n 'STATSD_PORT': config.secrets.server('statsd.port'),\n 'STATSD_BUCKET_PREFIX': 'linkr',\n })",
"def _add_env_var_injector(tag: str = \"!ENV\") -> None:\n # pattern for global vars: look for ${word}\n pattern = re.compile(\".*?\\${([^}]+::[^}]*)}.*?\") # noqa: W605\n loader = yaml.SafeLoader\n\n # the tag will be used to mark where to start searching for the pattern\n # e.g. somekey: !ENV somestring${MYENVVAR}blah blah blah\n loader.add_implicit_resolver(tag, pattern, None) # type: ignore\n\n def constructor_env_variables(loader, node) -> Any: # type: ignore\n \"\"\"\n Extracts the environment variable from the node's value\n :param yaml.Loader loader: the yaml loader\n :param node: the current node in the yaml\n :return: the parsed string that contains the value of the environment\n variable\n \"\"\"\n value = loader.construct_scalar(node)\n match = pattern.findall(value) # to find all env variables in line\n if match:\n full_value = value\n for g in match:\n (env_var, default_val) = g.split(\"::\")\n value = os.environ.get(env_var, default_val)\n full_value = full_value.replace(f\"${{{g}}}\", value)\n if not full_value:\n full_value = None\n _logger.debug(f\"injected ENV parameter {env_var} resolved to {value}\")\n return full_value\n return value\n\n loader.add_constructor(tag, constructor_env_variables) # type: ignore",
"def build_env_vars(self):\n return _untag_env_vars(self._tagged_env_vars, build=True)",
"def __init__(self):\n super().__init__()\n self.metric = 'VARINFO'",
"def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())",
"def _get_metric_tags(self, request):\n return {'api_name': self.api_name or extract_api_name(request.url)}",
"def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)",
"def test_add_tag_to_derived_metric(self):\n pass",
"def main():\n if len(sys.argv) != 3:\n print USAGE.format(prog=sys.argv[0])\n sys.exit(1)\n\n prefix = sys.argv[1]\n metrics = sys.argv[2]\n\n print \"PREFIX: '{}'\".format(prefix)\n print \"METRICS: '{}'\".format(metrics)\n\n try:\n metrics_dict = json.loads(metrics)\n except ValueError:\n print \"Could not parse metrics '{}' as JSON\".format(metrics)\n sys.exit(1)\n\n print \"Configuring DataDog...\"\n dog_http_api.api_key = configure_datadog()\n\n print \"Reporting metrics to DataDog...\"\n report_metrics(prefix=prefix, metrics=metrics_dict)\n\n print \"Done.\"",
"def test_get_derived_metric_tags(self):\n pass",
"def prepare_env(args: argparse.Namespace) -> None:\n if os.environ.get(\"KATSDP_LOG_GELF_ADDRESS\"):\n extra = load_json_dict(os.environ.get(\"KATSDP_LOG_GELF_EXTRA\", \"{}\"))\n extra[\"subarray_product_id\"] = args.subarray_product_id\n os.environ[\"KATSDP_LOG_GELF_EXTRA\"] = json.dumps(extra)\n if not os.environ.get(\"KATSDP_LOG_GELF_LOCALNAME\"):\n os.environ[\"KATSDP_LOG_GELF_LOCALNAME\"] = args.external_hostname",
"def env_vars(self):\n return _untag_env_vars(self._tagged_env_vars, build=False)",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def log_environment_variables():\n\n LOGGER.info(f\"bucket: {BUCKET}\")\n LOGGER.info(f\"config_s3_key: {CONFIG_S3_KEY}\")\n LOGGER.info(f\"previously_valid_urls_s3_key: {PREVIOUSLY_VALID_URLS_S3_KEY}\")\n LOGGER.info(f\"logging_level: {LOGGING_LEVEL}\")\n LOGGER.info(f\"default_email_subject: {DEFAULT_EMAIL_SUBJECT}\")\n LOGGER.info(f\"default_from_email: {DEFAULT_FROM_EMAIL}\")\n LOGGER.info(f\"default_to_email: {DEFAULT_TO_EMAIL}\")\n LOGGER.info(f\"default_url: {DEFAULT_URL}\")\n LOGGER.info(f\"default_start_value: {DEFAULT_START_VALUE}\")\n LOGGER.info(f\"default_look_ahead: {DEFAULT_LOOK_AHEAD}\")\n LOGGER.info(f\"default_slide_window: {DEFAULT_SLIDE_WINDOW}\")"
]
| [
"0.581118",
"0.5363366",
"0.53107005",
"0.52418685",
"0.51974744",
"0.5170413",
"0.50802094",
"0.5078154",
"0.5077305",
"0.4975919",
"0.4950397",
"0.49243206",
"0.490046",
"0.48965698",
"0.48863626",
"0.48436457",
"0.48390573",
"0.48379213",
"0.4822794",
"0.48173887",
"0.48043746",
"0.47698957",
"0.476007",
"0.47540608",
"0.47540608",
"0.47540608",
"0.47540608",
"0.47540608",
"0.47540608",
"0.4737249"
]
| 0.74013835 | 0 |
The Eflo VPD ID. | def vpd_id(self) -> str:
return pulumi.get(self, "vpd_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def id(self):\n return self.vdu_info.vdu_id",
"def vat_id(self) -> str:\n return self._vat_id",
"def vat_id(self):\n return self._vat_id",
"def id(self):\n return int(self.__nvXxPr.cNvPr.get('id'))",
"def vm_id(self):\n return self.vm_info.get('id', 'Error retrieving ID')",
"def vmid(self):\n return self.raw[\"VMId\"]",
"def vid(self):\n return self._id",
"def get_idn(self):\n # not all IVVI racks support the version command, so return a dummy\n return -1\n\n idparts = ['QuTech', 'IVVI', 'None', self.version()]\n\n return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))",
"def getId(self):\n return self.__vmId",
"def ivoid(self):\n return self.get(\"identifier\")",
"def get_vm_id(self):\n return self.instance_metadata.vm_id",
"def vm_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vm_id\")",
"def VplsIdType(self):\n return self._get_attribute('vplsIdType')",
"def vcn_id(self):\n return self._vcn_id",
"def get_vdu(self):\n return self._vdu",
"def ivoid(self):\n return self.get(\"ivoid\", decode=True)",
"def vpd_name(self) -> str:\n return pulumi.get(self, \"vpd_name\")",
"def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")",
"def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")",
"def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")",
"def vswitch_id(self) -> str:\n return pulumi.get(self, \"vswitch_id\")",
"def video_id(self):\n # type: () -> string_types\n return self._video_id",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def VsiId(self):\n return self._get_attribute('vsiId')",
"def get_product_id(self):\n pid = \"%s-%s-%s-%s\" % (self.valid.strftime(\"%Y%m%d%H%M\"),\n self.source, self.wmo, self.afos)\n return pid.strip()",
"def device_id(self):\n data = fcntl.ioctl(self._fd, _EVIOCGID, '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n idbus, idvendor, idproduct, idversion = struct.unpack(\"hhhh\", data)\n return idbus, idvendor, idproduct, idversion",
"def get_PID(self):\n return self.PID",
"def vswitch_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vswitch_id\")",
"def vswitch_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vswitch_id\")",
"def vswitch_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vswitch_id\")"
]
| [
"0.7936278",
"0.7136131",
"0.6907432",
"0.6904606",
"0.6881559",
"0.68572456",
"0.6835941",
"0.6706659",
"0.6685424",
"0.6678652",
"0.6605415",
"0.66002005",
"0.6591701",
"0.65612185",
"0.65474844",
"0.6488551",
"0.64564127",
"0.6436533",
"0.6436533",
"0.6436533",
"0.6436533",
"0.64307183",
"0.6429964",
"0.6426046",
"0.6423675",
"0.64061767",
"0.63886964",
"0.63806933",
"0.63806933",
"0.63806933"
]
| 0.85733056 | 1 |
The id of the vpd. | def vpd_id(self) -> str:
return pulumi.get(self, "vpd_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def id(self):\n return self.vdu_info.vdu_id",
"def vat_id(self) -> str:\n return self._vat_id",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")",
"def id(self) -> str:\n return pulumi.get(self, \"id\")"
]
| [
"0.8721102",
"0.7705356",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009",
"0.7478009"
]
| 0.89650935 | 0 |
The Name of the VPD. | def vpd_name(self) -> str:
return pulumi.get(self, "vpd_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_name(self):\n return self.nvPublic.get_name()",
"def name(self):\n return self.__nvXxPr.cNvPr.get('name')",
"def get_name():\n return \"SVMd+\"",
"def getVhdlName(self):\n return self.name.replace(TOP_NODE_NAME + '.', '').replace('.', '_')",
"def virtual_name(self) -> Optional[str]:\n return pulumi.get(self, \"virtual_name\")",
"def virtual_name(self) -> Optional[str]:\n return pulumi.get(self, \"virtual_name\")",
"def get_name(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetName', self.handle)",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")"
]
| [
"0.77821183",
"0.7476595",
"0.73026973",
"0.7113421",
"0.7092685",
"0.7092685",
"0.7085843",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482",
"0.707482"
]
| 0.91165507 | 0 |
Sets the axis1 limits | def set_axis1_limits(self, start, end):
if start > end:
raise ValueError("Start point over end for this view.")
self.axis1_limits = start, end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n self.data['Ymax'] = Ylim[1]",
"def set_plot_limits(self) -> None:\n matplotlib.pyplot.xlim(0, self.imgsz[0])\n matplotlib.pyplot.ylim(self.imgsz[1], 0)",
"def _set_axes_limits(ax, parameter, axis=\"x\"):\n\n lims = list(ax.get_xlim()) if axis == \"x\" else list(ax.get_ylim())\n\n if \"low\" in DEFAULT_BOUNDS[parameter]:\n low = DEFAULT_BOUNDS[parameter][\"low\"]\n if lims[0] < low:\n lims[0] = DEFAULT_BOUNDS[parameter][\"low\"]\n if \"high\" in DEFAULT_BOUNDS[parameter]:\n high = DEFAULT_BOUNDS[parameter][\"high\"]\n if lims[1] > high:\n lims[1] = DEFAULT_BOUNDS[parameter][\"high\"]\n\n if axis == \"x\":\n ax.set_xlim(lims)\n else:\n ax.set_ylim(lims)",
"def set_limits(xlim=None, ylim=None, ax=None):\n if ax is None:\n ax = plt.gca()\n if ylim is not None:\n ax.set_ylim(ylim)\n if xlim is not None:\n ax.set_xlim(xlim)",
"def adjust_ylimits(self, ylim1, ylim2):\n self.axplot.set_ylim(ylim1, ylim2)\n self.fig.canvas.draw()\n return",
"def set_axis2_limits(self, start, end):\n if start > end:\n raise ValueError(\"Start point over end for this view.\")\n\n self.axis2_limits = start, end",
"def set_axis_limits(*args):\n robots = get_robot_roots()\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n current_tab = pm.tabLayout('limits_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'position_limits_tab':\n set_position_limits()\n elif current_tab == 'velocity_limits_tab':\n set_deriv_limits('Velocity')\n elif current_tab == 'accel_limits_tab':\n set_deriv_limits('Accel')\n elif current_tab == 'jerk_limits_tab':\n set_deriv_limits('Jerk')",
"def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])",
"def _zoom(self, x0, y0, x1, y1):\n # Store current zoom state in stack\n self.plot.getLimitsHistory().push()\n\n extents = self._getAxesExtent(x0, y0, x1, y1)\n self.plot.setLimits(\n extents.xmin,\n extents.xmax,\n extents.ymin,\n extents.ymax,\n extents.y2min,\n extents.y2max,\n )",
"def xlim(self, left=None, right=None):\r\n for ax in self._subaxes:\r\n ax.set_xlim(left, right)\r\n self.figure.canvas.draw()",
"def setX(ax1: Union[object, List], ax2: Union[object, List]):\n if type(ax1) is list:\n print(\"PlotHelpers: cannot use list as source to set Y axis\")\n return\n ax2 = _ax_tolist(ax2)\n # if type(ax2) is not list:\n # ax2 = [ax2]\n refx = ax1.get_xlim()\n for ax in ax2:\n ax.set_xlim(refx)",
"def adjust_axes(axis):\r\n x_lim = axis.get_xlim()\r\n y_lim = axis.get_ylim()\r\n new_lim = (min(x_lim[0], y_lim[0]), max(x_lim[1], y_lim[1]))\r\n axis.set_xlim(new_lim)\r\n axis.set_ylim(new_lim)\r\n axis.set_aspect('equal')",
"def apply_transforms(self):\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)",
"def update_limits(self):\n if len(self) == 0:\n self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])\n else:\n x_min, x_max = self.buf[self.rear][0], self.buf[self.front][0]\n y_min, y_max = self.slmm.get_minmax()\n self.limits = np.array([[x_min, y_min], [x_max, y_max]])",
"def set_axis_limit(axis_number, min_max):\n robots = get_robot_roots()\n\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n try:\n val = float(pm.textField('t_A{}{}'.format(axis_number, min_max),\n query=True,\n text=True))\n robot_list_str = ''\n for robot in robots:\n pm.setAttr(get_target_ctrl_path(robot)\n + '.axis{}{}'.format(axis_number, min_max),\n val)\n robot_list_str += robot + ' '\n except:\n pass\n\n pm.headsUpMessage('Axis Position Limits for {} set successfuly!'.format(robot_list_str))",
"def __init__(self, axis1, axis2=None, bins=100, same_scale=False,\n axis1_values=None, axis2_values=None, **kwargs):\n self.same_scale = same_scale\n\n self.axis1 = axis1\n self.axis1_limits = None\n\n if isinstance(axis1_values, (float, int)):\n axis1_values = [axis1_values]\n self.axis1_values = axis1_values\n\n self.axis2 = axis2\n self.axis2_limits = None\n if isinstance(axis2_values, (float, int)):\n axis2_values = [axis2_values]\n self.axis2_values = axis2_values\n\n self.bins = bins\n\n self.plot_options = kwargs",
"def setXYLimit(self, xmin=None, xmax=None, ymin=None, ymax=None):\n self._myCanvas.axes.set_xlim([xmin, xmax])\n self._myCanvas.axes.set_ylim([ymin, ymax])\n\n self._myCanvas.draw()\n\n return",
"def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])",
"def auto_adjust_axes(self, *args):\n\n xmin, xmax = self.axes.get_xlim()\n ymin, ymax = self.axes.get_ylim()\n self.adjust_axes(xmin, ymin, xmax, ymax)",
"def y1_max(self, y1_max):\n\n self._y1_max = y1_max",
"def xylim(xmin=None, xmax=None, ymin=None, ymax=None):\n plt.axis(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)",
"def set_xlim(self, left=None, right=None):\n if right is None and np.iterable(left):\n left, right = left\n\n if left is None or right is None:\n old_left, old_right = self.get_xlim()\n if left is None:\n left = old_left\n if right is None:\n right = old_right\n\n if left == right:\n warnings.warn(\n \"Attempting to set identical left == right == {} x-axis limits\".format(\n left\n ),\n stacklevel=2,\n )\n\n if left > right:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if right <= 0 and self._logx:\n warnings.warn(\n \"Attempting to set non-positive right xlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n right = self.get_xlim()[1]\n\n elif left <= 0 and self._logx:\n warnings.warn(\n \"Attempting to set non-positive left xlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n left = self.get_xlim()[0]\n\n if isinstance(self._frame, root.TH1F):\n self._frame.GetXaxis().SetLimits(left, right)\n else:\n self._frame.GetXaxis().SetRangeUser(left, right)\n\n self._pad.Modified() # Draw the updated axes\n\n return (left, right)",
"def reset_limits(self):\n self.autoscale = True\n self.pixels.autoscale()",
"def set_base_transforms(self):\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()",
"def reset_limits(self) -> None:\n if self.userDefinedLimits:\n if self.userMinimum is None or self.userMaximum is None:\n return\n self.setMinimum(self.userMinimum)\n self.setMaximum(self.userMaximum)\n else:\n if self._lower_ctrl_limit is None or self._upper_ctrl_limit is None:\n return\n self.setMinimum(self._lower_ctrl_limit)\n self.setMaximum(self._upper_ctrl_limit)",
"def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()",
"def xlim(left=None, right=None):\n impl.xlim(**locals())",
"def set_lim(x, y, **kws):\n per = kws['per']\n min_per = 50 - per/2\n max_per = per/2 + 50\n xper = np.nanpercentile(x,[min_per,max_per])\n yper = np.nanpercentile(y,[min_per,max_per])\n ax = plt.gca()\n ax.set_xlim(xper)\n ax.set_ylim(yper)",
"def set_xlim(self, xlim):\n # x coordinate of center of leftmost pixel\n self.xmin = xlim[0]\n # x coordinate of center of rightmost pixel\n self.xmax = xlim[1]\n self.delta_x = (self.xmax-self.xmin)/float(self.cols-1)",
"def set_limits_minmax(self, zmin, zmax):\n self.camera.set_clim(zmin, zmax)\n self.autoscale = False"
]
| [
"0.6964966",
"0.69501424",
"0.69050765",
"0.6849012",
"0.6782971",
"0.6751624",
"0.6689119",
"0.66625834",
"0.6655498",
"0.64951956",
"0.64660126",
"0.64273226",
"0.63873005",
"0.63656276",
"0.62677586",
"0.62336606",
"0.6226322",
"0.62159514",
"0.6192549",
"0.61510056",
"0.612542",
"0.6085064",
"0.60431147",
"0.6034502",
"0.60136306",
"0.5969499",
"0.58837235",
"0.5879814",
"0.5877497",
"0.5854542"
]
| 0.8218228 | 0 |
Sets the axis2 limits | def set_axis2_limits(self, start, end):
if start > end:
raise ValueError("Start point over end for this view.")
self.axis2_limits = start, end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def adjust_ylimits(self, ylim1, ylim2):\n self.axplot.set_ylim(ylim1, ylim2)\n self.fig.canvas.draw()\n return",
"def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n self.data['Ymax'] = Ylim[1]",
"def set_axis1_limits(self, start, end):\n if start > end:\n raise ValueError(\"Start point over end for this view.\")\n\n self.axis1_limits = start, end",
"def _set_axes_limits(ax, parameter, axis=\"x\"):\n\n lims = list(ax.get_xlim()) if axis == \"x\" else list(ax.get_ylim())\n\n if \"low\" in DEFAULT_BOUNDS[parameter]:\n low = DEFAULT_BOUNDS[parameter][\"low\"]\n if lims[0] < low:\n lims[0] = DEFAULT_BOUNDS[parameter][\"low\"]\n if \"high\" in DEFAULT_BOUNDS[parameter]:\n high = DEFAULT_BOUNDS[parameter][\"high\"]\n if lims[1] > high:\n lims[1] = DEFAULT_BOUNDS[parameter][\"high\"]\n\n if axis == \"x\":\n ax.set_xlim(lims)\n else:\n ax.set_ylim(lims)",
"def set_axis_limits(*args):\n robots = get_robot_roots()\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n current_tab = pm.tabLayout('limits_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'position_limits_tab':\n set_position_limits()\n elif current_tab == 'velocity_limits_tab':\n set_deriv_limits('Velocity')\n elif current_tab == 'accel_limits_tab':\n set_deriv_limits('Accel')\n elif current_tab == 'jerk_limits_tab':\n set_deriv_limits('Jerk')",
"def set_plot_limits(self) -> None:\n matplotlib.pyplot.xlim(0, self.imgsz[0])\n matplotlib.pyplot.ylim(self.imgsz[1], 0)",
"def update_limits(self):\n if len(self) == 0:\n self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])\n else:\n x_min, x_max = self.buf[self.rear][0], self.buf[self.front][0]\n y_min, y_max = self.slmm.get_minmax()\n self.limits = np.array([[x_min, y_min], [x_max, y_max]])",
"def set_limits(xlim=None, ylim=None, ax=None):\n if ax is None:\n ax = plt.gca()\n if ylim is not None:\n ax.set_ylim(ylim)\n if xlim is not None:\n ax.set_xlim(xlim)",
"def _verticalLimit_changed(self):\n self.masterContainer.range2d.y_range.high = self.verticalLimit",
"def set_axis_limit(axis_number, min_max):\n robots = get_robot_roots()\n\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n try:\n val = float(pm.textField('t_A{}{}'.format(axis_number, min_max),\n query=True,\n text=True))\n robot_list_str = ''\n for robot in robots:\n pm.setAttr(get_target_ctrl_path(robot)\n + '.axis{}{}'.format(axis_number, min_max),\n val)\n robot_list_str += robot + ' '\n except:\n pass\n\n pm.headsUpMessage('Axis Position Limits for {} set successfuly!'.format(robot_list_str))",
"def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])",
"def setY(ax1: Union[object, List], ax2: Union[object, List]):\n if type(ax1) is list:\n print(\"PlotHelpers: cannot use list as source to set Y axis\")\n return\n ax2 = _ax_tolist(ax2)\n # if type(ax2) is not list:\n # ax2 = [ax2]\n refy = ax1.get_ylim()\n for ax in ax2:\n ax.set_ylim(refy)",
"def SetYAxisRange(self, lower, upper):\n self.Y_AXIS_RANGE = (lower, upper)",
"def setX(ax1: Union[object, List], ax2: Union[object, List]):\n if type(ax1) is list:\n print(\"PlotHelpers: cannot use list as source to set Y axis\")\n return\n ax2 = _ax_tolist(ax2)\n # if type(ax2) is not list:\n # ax2 = [ax2]\n refx = ax1.get_xlim()\n for ax in ax2:\n ax.set_xlim(refx)",
"def set_lim(values, scale):\n\n v_min, v_max = min(values), max(values)\n margin = (v_max - v_min) * scale\n v_min, v_max = v_min - margin, v_max + margin\n\n return v_min, v_max",
"def ylim(self, bottom=None, top=None):\r\n for ax in self._subaxes:\r\n ax.set_ylim(bottom, top)\r\n self.figure.canvas.draw()",
"def set_lim(x, y, **kws):\n per = kws['per']\n min_per = 50 - per/2\n max_per = per/2 + 50\n xper = np.nanpercentile(x,[min_per,max_per])\n yper = np.nanpercentile(y,[min_per,max_per])\n ax = plt.gca()\n ax.set_xlim(xper)\n ax.set_ylim(yper)",
"def setXYLimit(self, xmin=None, xmax=None, ymin=None, ymax=None):\n self._myCanvas.axes.set_xlim([xmin, xmax])\n self._myCanvas.axes.set_ylim([ymin, ymax])\n\n self._myCanvas.draw()\n\n return",
"def _zoom(self, x0, y0, x1, y1):\n # Store current zoom state in stack\n self.plot.getLimitsHistory().push()\n\n extents = self._getAxesExtent(x0, y0, x1, y1)\n self.plot.setLimits(\n extents.xmin,\n extents.xmax,\n extents.ymin,\n extents.ymax,\n extents.y2min,\n extents.y2max,\n )",
"def ylim(bottom=None, top=None):\n impl.ylim(**locals())",
"def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y",
"def lim_and_line2(v1, v2, round_lims=True):\n\n vmin = np.min((v1.min(), v2.min()))\n vmax = np.max((v1.max(), v2.max()))\n\n if round_lims:\n vmin = np.floor(vmin)\n vmax = np.ceil(vmax)\n\n ax=pl.gca()\n pl.plot([vmin,vmax], [vmin,vmax], 'k:', linewidth=1)\n pl.plot([vmin,vmax], [0,0], 'k:', linewidth=1)\n pl.plot([0,0], [vmin,vmax], 'k:', linewidth=1)\n\n # Aarghh\n for i in range(2):\n ticks = ax.get_yticks()\n ax.set_xticks(ticks)\n\n ax.set_xlim(vmin, vmax)\n ax.set_ylim(vmin, vmax)",
"def set_ylim(self, bottom=None, top=None):\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_ylim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} y-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive top ylim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_ylim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom ylim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_ylim()[0]\n\n if isinstance(self._frame, root.TH1F):\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n else:\n self._frame.GetYaxis().SetRangeUser(bottom, top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)",
"def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])",
"def adjust_axes(axis):\r\n x_lim = axis.get_xlim()\r\n y_lim = axis.get_ylim()\r\n new_lim = (min(x_lim[0], y_lim[0]), max(x_lim[1], y_lim[1]))\r\n axis.set_xlim(new_lim)\r\n axis.set_ylim(new_lim)\r\n axis.set_aspect('equal')",
"def apply_transforms(self):\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)",
"def set_glidein_config_limits(self, limits_data):\n self.glidein_config_limits = limits_data",
"def __init__(self, axis1, axis2=None, bins=100, same_scale=False,\n axis1_values=None, axis2_values=None, **kwargs):\n self.same_scale = same_scale\n\n self.axis1 = axis1\n self.axis1_limits = None\n\n if isinstance(axis1_values, (float, int)):\n axis1_values = [axis1_values]\n self.axis1_values = axis1_values\n\n self.axis2 = axis2\n self.axis2_limits = None\n if isinstance(axis2_values, (float, int)):\n axis2_values = [axis2_values]\n self.axis2_values = axis2_values\n\n self.bins = bins\n\n self.plot_options = kwargs",
"def _set_plot_limits(self, dist):\n\n plt.xlim(self._Position.xPos - dist * 1000.0, self._Position.xPos + dist * 1000.0)\n plt.ylim(self._Position.zPos - dist * 1000.0, self._Position.zPos + dist * 1000.0)",
"def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()"
]
| [
"0.7679957",
"0.7137881",
"0.6968403",
"0.6968129",
"0.69276756",
"0.66304326",
"0.6625808",
"0.65875417",
"0.6522321",
"0.64404905",
"0.6419612",
"0.6401575",
"0.63996685",
"0.63885385",
"0.63736707",
"0.62931263",
"0.6214984",
"0.6184074",
"0.61167693",
"0.6107988",
"0.60540503",
"0.6053772",
"0.6039171",
"0.60296565",
"0.60074496",
"0.599238",
"0.59895927",
"0.5983104",
"0.59812194",
"0.59670174"
]
| 0.82910025 | 0 |
Creates a _Head for linear regression. | def _regression_head(label_name=None,
weight_column_name=None,
label_dimension=1,
enable_centered_bias=False,
head_name=None):
return _RegressionHead(
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,\n label_name,\n weight_column_name,\n label_dimension,\n enable_centered_bias,\n head_name,\n loss_fn=_mean_squared_loss):\n super(_RegressionHead, self).__init__(head_name=head_name)\n\n self._loss_fn = loss_fn\n self._logits_dimension = label_dimension\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LINEAR_REGRESSION",
"def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header",
"def create_head(self, head_L=0.5, head_dia=0.5, Ra=150.0, Cm=1.0):\n \n sec_name = 'spine_%d_head' % (self.id)\n head = h.Section(name=sec_name)\n \n head.nseg = 1\n head.L = head_L\n head.diam = head_dia\n head.Ra = Ra\n head.cm = 1.0\n \n for mech in [ 'pas', \\\n 'kir', \\\n 'cav32', \\\n 'cav33', \\\n 'car', \\\n 'cal12', \\\n 'cal13', \\\n 'cadyn', \\\n 'caldyn' ]:\n head.insert(mech)\n \n head(0.5).pbar_cav32 = 1e-7\n head(0.5).pbar_cav33 = 1e-8\n head(0.5).pbar_car = 1e-8\n head(0.5).pbar_cal12 = 1e-7\n head(0.5).pbar_cal13 = 1e-8\n head(0.5).gbar_kir = 1e-7\n \n head.g_pas = 1.25e-5\n head.e_pas = -70 \n \n return head",
"def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)",
"def create_compatible_head(\n self,\n n_features: int,\n device: Optional[str] = None,\n ):\n head = nn.Linear(n_features, self.n_classes)\n xavier_initialize(head)\n if device is not None:\n head = head.to(device)\n return head",
"def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()",
"def createHead(cls, difficulty):\n self = cls(difficulty)\n self.height = 0\n self.prevHash = \"0ff\"\n return self",
"def Linear(X, data_headers):\n X[data_headers] = pd.DataFrame(np.power(2, X[data_headers]))\n return X",
"def add_head(self):\n self.scenes[self.current_scene].add_object(Head())\n self.redraw()",
"def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))",
"def transformerXLLMHeadModel(*args, **kwargs):\n model = TransfoXLLMHeadModel.from_pretrained(*args, **kwargs)\n return model",
"def change_model_head(model, custom_head, **kwargs):\n model.head = custom_head(model.head_nf, model.c_out, model.seq_len, **kwargs)\n return model",
"def nnRegression(data):",
"def linear(self, X):\n return X",
"def make_head_line():\n with open(args.out_folder.strip() + \"/files/head_line.txt\", \"a\") as headLine:\n headLine.write(\"#Query ID\\t#Subject\\t#Subject accession\\t#Subject Taxonomy ID\\t#Identity percentage\\t#Coverage\\t#evalue\\t#bitscore\\n\")",
"def __init__(self, heads, loss_combiner):\n # TODO(zakaria): Keep _Head a pure interface.\n super(_MultiHead, self).__init__(head_name=None)\n self._logits_dimension = 0\n for head in heads:\n if not head.head_name:\n raise ValueError(\"Head must have a name.\")\n self._logits_dimension += head.logits_dimension\n\n self._heads = heads\n self._loss_combiner = loss_combiner",
"def __init__(self, add_intercept=True):\n self.add_intercept = add_intercept\n self.beta_hat = None\n return",
"def __init__(self, coefficients, degree=1, cross=False, reference_simulations=None, previous_statistics=None):\n self.coefficients = coefficients\n\n super(LinearTransformation, self).__init__(degree, cross, reference_simulations, previous_statistics)",
"def linear_regression(self):\n # Create dummies of the Types\n\n\n self.truth = pd.read_csv(\"AllTruth/AllTruth.csv\")\n self.false = pd.read_csv(\"AllLie/AllLie.csv\")\n\n self.truth[\"Type\"] = \"T\"\n self.false[\"Type\"] = \"F\"\n\n full = self.truth.append([self.false])\n\n\n\n encoder = LabelEncoder()\n encoder.fit(full[\"Type\"])\n encoded_Y = encoder.transform(full[\"Type\"])\n # convert integers to dummy variables (i.e. one hot encoded)\n dummy_y = np_utils.to_categorical(encoded_Y)\n\n y_data = full[\"Type\"]\n for label in ['Type', 'Unnamed: 0.1', 'Unnamed: 0', 'Unnamed: 0.1.1', 'Unnamed: 0.1.1.1']:\n if label in full.columns.values:\n full = full.drop([label], axis=1)\n #full = full.drop(columns=['136', '137', '138', '139', '140', '141', '142'])\n\n print(\"Nan = \", full.isnull().any())\n\n \"\"\"removeList = full.columns.values\n print(removeList)\n full = createCouples(full)\n full = full.drop(columns = removeList)\"\"\"\n\n self.labels = full.columns.values\n # Create train & test data\n #this.X_train, this.X_test, this.y_train, this.y_test = #train_test_split(np.array(full), np.array(y_data))\n X_train, y_train = full, dummy_y\n\n print(X_train, \"\\n\", y_train)\n\n # Learn the data and check for success\n #self.learner = NetLearner(\"test2\", 137)\n self.learner = LinearLearner()\n self.learner.learn(np.array(X_train), np.array(y_train))\n\n print(full, \"\\n\\nLearning Done.\")\n\n X_test_truth = pd.read_csv(\"TruthTest/TruthTest.csv\")\n X_test_lie = pd.read_csv(\"LieTest/LieTest.csv\")\n\n X_test_truth[\"Type\"] = \"T\"\n X_test_lie[\"Type\"] = \"F\"\n\n full_test = X_test_truth.append([X_test_lie])\n\n\n encoder = LabelEncoder()\n encoder.fit(full_test[\"Type\"])\n encoded_Y = encoder.transform(full_test[\"Type\"])\n # convert integers to dummy variables (i.e. 
one hot encoded)\n dummy_y_test = np_utils.to_categorical(encoded_Y)\n\n y_test = dummy_y_test#full_test[\"Type\"]\n for label in ['Type', 'Unnamed: 0.1', 'Unnamed: 0', 'Unnamed: 0.1.1', 'Unnamed: 0.1.1.1']:\n if label in full_test.columns.values:\n full_test = full_test.drop([label], axis=1)\n\n # self.learner.print_accuracy(full_test, dummy_y_test)\n\n y_test_predict = pd.DataFrame(self.learner.predict(full_test)).idxmax(axis=1)\n y_test = pd.DataFrame(y_test).idxmax(axis=1)\n #print(y_test, y_test_predict)\n result = np.array(y_test) - np.array(y_test_predict)\n\n total = len(full_test)\n curr = total - np.count_nonzero(result)\n\n # Print the success rate\n print(\"Total success rate: %.2f%%\" % (curr/total * 100))\n\n importance0 = pd.DataFrame(np.abs(self.learner.lm.coef_[0]),\n index = full_test.columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n importance1 = pd.DataFrame(np.abs(self.learner.lm.coef_[1]),\n index = full_test.columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n\n importance0 = np.array(importance0) / 2\n importance1 = np.array(importance1) / 2\n print(importance0, '\\n', importance1)\n with open(\"importance.txt\", \"w\") as file:\n file.write(\"///////Importance 0///////\\n\")\n file.write(str(importance0))\n file.write(\"\\n\\n///////Importance 1///////\\n\")\n file.write(str(importance1))\n \"\"\"\n\n # Merge PL+NL and PT+NT\n def group(n):\n if n in [0, 1]:\n return 0\n return 1\n\n # Get the predicted output for X_test and compare to\n # the expected output\n y_test_predict = pd.DataFrame(self.learner.predict(X_test)).idxmax(axis=1).apply(group)\n y_test = pd.DataFrame(y_test).idxmax(axis=1).apply(group)\n\n # Count number of failures\n result = np.array(y_test) - np.array(y_test_predict)\n\n total = len(X_test)\n curr = total - np.count_nonzero(result)\n\n # Print the success rate\n print(\"Total success rate: %.2f%%\" % (curr/total * 100))\n \"\"\"",
"def __init__(self, data_table, answers):\n\t\tBasicRegression.__init__(self,data_table,answers)\n\t\tself.add_intercept()",
"def mtr_lm_v1_h1_8():\n hparams = mtr_lm_v1()\n hparams.num_memory_heads = 1\n return hparams",
"def linearize(self, params, unknowns, resids):\n\n m = self.slope\n J = {}\n\n J['y', 'x'] = m\n return J",
"def __init__(self):\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False",
"def head(self) -> ComponentTableHead:\n return ComponentTableHead(\n self.wait_for_elements_by_tag_name('tr')[0])",
"def linear_regression(x_train, t_train, basis, bias,reg_lambda=0, degree=1, mu=0, s=1):\n \n # Construct the design matrix.\n # Pass the required parameters to this function\n \n phi = design_matrix(x_train,basis,degree,bias,mu,s) \n #print(x_train.shape) \n # Learning Coefficients\n if reg_lambda > 0:\n I=np.identity((phi.shape[1]),dtype=int)\n inv = np.linalg.inv((reg_lambda*I)+(phi.T@phi))\n w = inv@(phi.T@t_train) \n # regularized regression\n else:\n # no regularization \n w = np.linalg.pinv(phi)@t_train\n \n pred_train=phi@w\n train_err = np.sqrt((np.square(pred_train-t_train)).mean())\n return (w, train_err)",
"def linearReg(x,y):\n X=np.array(x).reshape(-1,1)\n Y=np.array(y).reshape(-1,1)\n x_shape = X.shape\n num_var = x_shape[1] \n yintercept = 0\n slope = 0\n progress = []\n #intialize the parameter\n weight_matrix = np.random.normal(-1,1,(num_var,1))\n yintercept = np.random.rand(1)\n #cost minmization\n for i in range(200):\n dcostdm = np.sum(np.multiply(((np.matmul(X,weight_matrix)+ yintercept)-Y),X))*2/x_shape[0] #w.r.t to the weight\n dcostdc = np.sum(((np.matmul(X,weight_matrix)+yintercept)-Y))*2/x_shape[0] #partial derivative of cost w.r.t the intercept\n weight_matrix -= 0.1*dcostdm \n #updating the weights with the calculated gradients\n yintercept -= 0.1*dcostdc #updating the weights with the calculated gradients\n progress.append(np.array((weight_matrix,yintercept)))\n slope = weight_matrix\n return (slope[-1],yintercept)",
"def __init__(self):\n\t\tself.theta = 0.8\t\t\t# Theta value, the constant of the line which x+y is.(1.2 is best)\n\t\tself.numberOfInput = 0\t\t# The number of Input\n\t\tself.weight = []\t\t\t# The list of weight.",
"def __init__(self, x_function, x_derivative, data_f, data_df, a):\n self.x_function = x_function\n self.x_derivative = x_derivative\n self.data_f = data_f\n self.data_df = data_df\n self.a = a\n self.linear_model = LinearModel(self.x_function, self.x_derivative)",
"def __init__(self, head=None):\n\n self.head = head",
"def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)"
]
| [
"0.684842",
"0.63585603",
"0.63347226",
"0.61810565",
"0.5857701",
"0.5773445",
"0.5767058",
"0.57526094",
"0.5734294",
"0.57188934",
"0.5590229",
"0.55242336",
"0.5514718",
"0.54834616",
"0.5425716",
"0.5410449",
"0.5330436",
"0.52927476",
"0.52814287",
"0.5274359",
"0.5250361",
"0.52455467",
"0.52321714",
"0.52293515",
"0.5228125",
"0.52247226",
"0.5223736",
"0.52071",
"0.51895493",
"0.518706"
]
| 0.7615701 | 0 |
Creates a _Head for multi class single label classification. The Head uses softmax cross entropy loss. | def _multi_class_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None):
if (n_classes is None) or (n_classes < 2):
raise ValueError("n_classes must be > 1 for classification: %s." %
n_classes)
if n_classes == 2:
if metric_class_ids:
raise ValueError("metric_class_ids invalid for n_classes==2.")
return _BinaryLogisticHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
return _MultiClassHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _multi_label_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if n_classes < 2:\n raise ValueError(\"n_classes must be > 1 for classification.\")\n return _MultiLabelHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)",
"def __init__(self, heads, loss_combiner):\n # TODO(zakaria): Keep _Head a pure interface.\n super(_MultiHead, self).__init__(head_name=None)\n self._logits_dimension = 0\n for head in heads:\n if not head.head_name:\n raise ValueError(\"Head must have a name.\")\n self._logits_dimension += head.logits_dimension\n\n self._heads = heads\n self._loss_combiner = loss_combiner",
"def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = HuggingFaceBertClassificationHead(\n self.args.embed_dim, # self.args.encoder_embed_dim,\n inner_dim or self.args.embed_dim,\n num_classes,\n self.args.pooler_activation_fn,\n self.args.pooler_dropout,\n self.args.quant_noise_pq,\n self.args.quant_noise_pq_block_size,\n )",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))",
"def __init__(self,\n label_name,\n weight_column_name,\n enable_centered_bias,\n head_name,\n loss_fn=_log_loss_with_two_classes,\n thresholds=None):\n super(_BinaryLogisticHead, self).__init__(head_name=head_name)\n self._thresholds = thresholds if thresholds else (.5,)\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._loss_fn = loss_fn\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LOGISTIC_REGRESSION",
"def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)",
"def __init__(self, channels, num_classes):\n super(AuxiliaryHead, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n # image size = 2 x 2\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),\n nn.Conv2d(channels, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True),\n )\n self.classifier = nn.Linear(768, num_classes)",
"def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()",
"def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None",
"def _multi_head(heads, loss_weights=None):\n if loss_weights:\n if len(loss_weights) != len(heads):\n raise ValueError(\"heads and loss_weights must have same size\")\n\n def _weighted_loss_combiner(losses):\n if loss_weights:\n if len(losses) != len(loss_weights):\n raise ValueError(\"losses and loss_weights must have same size\")\n weighted_losses = []\n for loss, weight in zip(losses, loss_weights):\n weighted_losses.append(math_ops.multiply(loss, weight))\n return math_ops.add_n(weighted_losses)\n else:\n return math_ops.add_n(losses)\n\n return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)",
"def create_heads(x, upsample_feature, num_classes, hg_id, num_channels):\n hg_name = 'hg' + str(hg_id)\n\n head = Conv2D(num_channels, kernel_size=(1, 1), activation='relu', padding='same', name=hg_name+'_conv_1x1_1')(upsample_feature)\n head = BatchNormalization()(head)\n\n # for head as intermediate supervision, use 'linear' as activation.\n head_predict = Conv2D(num_classes, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_predict')(head)\n\n # use linear activation\n head = Conv2D(num_channels, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_2')(head)\n head_m = Conv2D(num_channels, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_3')(head_predict)\n\n # merge heads for next stage\n head_next_stage = Add()([head, head_m, x])\n\n return head_next_stage, head_predict",
"def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000):\n super(RelabelPooledCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0",
"def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):\n return pred",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def yolo_head(features, anchors, num_classes, input_shape, calc_loss=False):\n\n if calc_loss:\n return yolo_head_base(features, anchors, num_classes, input_shape)\n else:\n return yolo_head_sigmoid(features, anchors, num_classes, input_shape)",
"def class2onehot(class_labels, seq_len, batchsize, num_task):\n\n\n one_hot = torch.FloatTensor(batchsize,seq_len,num_task)\n one_hot.zero_()\n one_hot = one_hot.scatter_(1, seq_len,class_labels, 1)\n\n return one_hot",
"def __init__(self,\n label_name,\n weight_column_name,\n label_dimension,\n enable_centered_bias,\n head_name,\n loss_fn=_mean_squared_loss):\n super(_RegressionHead, self).__init__(head_name=head_name)\n\n self._loss_fn = loss_fn\n self._logits_dimension = label_dimension\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LINEAR_REGRESSION",
"def _binary_svm_head(\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,):\n return _BinarySvmHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)",
"def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000, n_comn=2):\n super(RelabelCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.n_comn = n_comn\n self.dis_fn = nn.CosineSimilarity(dim=1)\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0",
"def to_one_hot(labels, num_classes):\n shape = labels.size()\n shape = shape + (num_classes,)\n one_hot = torch.FloatTensor(shape)\n one_hot.zero_()\n dim = 1 if len(shape) == 2 else 2\n one_hot.scatter_(dim, labels.unsqueeze(-1), 1)\n return one_hot",
"def _onehot_labels(self,\n labels,\n n_classes,\n axis=-1):\n onehot_labels = tf.one_hot(\n tf.squeeze(labels),\n depth=n_classes,\n axis=axis)\n return onehot_labels",
"def test_shape_of_classification_heads(batch_size: int, hidden_size: int, num_class: int):\n classification_head = ClassificationHead(num_class, False, 0.0)\n\n pooled_output = tf.random.uniform((batch_size, hidden_size))\n outputs = classification_head(pooled_output)\n\n assert outputs.shape == (batch_size, num_class)",
"def _regression_head(label_name=None,\n weight_column_name=None,\n label_dimension=1,\n enable_centered_bias=False,\n head_name=None):\n return _RegressionHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name)",
"def to_onehot(labels: torch.Tensor, num_classes: int) -> torch.Tensor:\n if len(labels.size()) == 1:\n return F.one_hot(labels, num_classes).float()\n return labels",
"def __init__(self, label_size=10):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 16, 5, padding=2)\n self.drop1 = nn.Dropout2d(p=0.3)\n self.pool1 = nn.MaxPool2d(2)\n self.conv2 = nn.Conv2d(16, 32, 5, padding=2)\n self.drop2 = nn.Dropout2d(p=0.3)\n self.pool2 = nn.MaxPool2d(2)\n self.head_linear = nn.Linear(1568, label_size)"
]
| [
"0.7676755",
"0.6761584",
"0.66569084",
"0.6371564",
"0.6371564",
"0.6371564",
"0.6289213",
"0.6183466",
"0.6155606",
"0.61344504",
"0.59884363",
"0.5926488",
"0.58939695",
"0.5892835",
"0.5892204",
"0.58319116",
"0.58294505",
"0.58045626",
"0.5804537",
"0.58013827",
"0.5789348",
"0.5782409",
"0.57642055",
"0.56709486",
"0.5666635",
"0.5664133",
"0.5645804",
"0.56385314",
"0.56224585",
"0.56126374"
]
| 0.7576061 | 1 |
Creates a `_Head` for binary classification with SVMs. The head uses binary hinge loss. | def _binary_svm_head(
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,):
return _BinarySvmHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,\n label_name,\n weight_column_name,\n enable_centered_bias,\n head_name,\n loss_fn=_log_loss_with_two_classes,\n thresholds=None):\n super(_BinaryLogisticHead, self).__init__(head_name=head_name)\n self._thresholds = thresholds if thresholds else (.5,)\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._loss_fn = loss_fn\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LOGISTIC_REGRESSION",
"def __init__(self,training_data,default_kernel=\"rbf\"):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVC(class_weight='auto',cache_size=DEFAULT_CACHE_SIZE, kernel=default_kernel)\n self.classifier.fit(X, Y)",
"def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))",
"def _regression_head(label_name=None,\n weight_column_name=None,\n label_dimension=1,\n enable_centered_bias=False,\n head_name=None):\n return _RegressionHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name)",
"def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = HuggingFaceBertClassificationHead(\n self.args.embed_dim, # self.args.encoder_embed_dim,\n inner_dim or self.args.embed_dim,\n num_classes,\n self.args.pooler_activation_fn,\n self.args.pooler_dropout,\n self.args.quant_noise_pq,\n self.args.quant_noise_pq_block_size,\n )",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self,\n label_name,\n weight_column_name,\n label_dimension,\n enable_centered_bias,\n head_name,\n loss_fn=_mean_squared_loss):\n super(_RegressionHead, self).__init__(head_name=head_name)\n\n self._loss_fn = loss_fn\n self._logits_dimension = label_dimension\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LINEAR_REGRESSION",
"def SVM():\n x1, x2 = generate_training_data_2D()\n Y = np.concatenate([np.zeros(x1.shape[0], dtype=np.int32),\n np.ones(x2.shape[0], dtype=np.int32)])\n X = np.concatenate([x1, x2], axis=0)\n rng = np.random.get_state()\n np.random.shuffle(X)\n # Set the random state back to previous to shuffle X & Y similarly\n np.random.set_state(rng)\n np.random.shuffle(Y)\n\n models, titles = get_fitted_svm(X, Y)\n\n plot_decision_boundary(X, Y, models, titles)",
"def _multi_class_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if (n_classes is None) or (n_classes < 2):\n raise ValueError(\"n_classes must be > 1 for classification: %s.\" %\n n_classes)\n\n if n_classes == 2:\n if metric_class_ids:\n raise ValueError(\"metric_class_ids invalid for n_classes==2.\")\n return _BinaryLogisticHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)\n\n return _MultiClassHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def svm():",
"def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.LinearSVC(class_weight='auto',C=1.0)\n self.classifier.fit(X, Y)",
"def create_heads(x, upsample_feature, num_classes, hg_id, num_channels):\n hg_name = 'hg' + str(hg_id)\n\n head = Conv2D(num_channels, kernel_size=(1, 1), activation='relu', padding='same', name=hg_name+'_conv_1x1_1')(upsample_feature)\n head = BatchNormalization()(head)\n\n # for head as intermediate supervision, use 'linear' as activation.\n head_predict = Conv2D(num_classes, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_predict')(head)\n\n # use linear activation\n head = Conv2D(num_channels, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_2')(head)\n head_m = Conv2D(num_channels, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_3')(head_predict)\n\n # merge heads for next stage\n head_next_stage = Add()([head, head_m, x])\n\n return head_next_stage, head_predict",
"def yolo_head_sigmoid(features, anchors, num_classes, input_shape):\n\n _, features, box_xy, box_wh = yolo_head_base(features, anchors, num_classes, input_shape)\n\n box_confidence = K.sigmoid(features[..., 4:5])\n box_class_probabilities = K.sigmoid(features[..., 5:])\n\n return box_xy, box_wh, box_confidence, box_class_probabilities",
"def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)",
"def predictSVM(w, x):\n \n # compute activation for test example and threshold the result\n a = np.dot(w, x);\n label = 1 if a > 0 else -1;\n \n return label;",
"def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n self.n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #pre-processing data\n X = preprocessing.scale(np.hsplit(my_data,[self.n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[self.n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[self.n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVR(kernel='linear', C=1e3, cache_size=DEFAULT_CACHE_SIZE)\n #self.classifier = svm.SVR(kernel='rbf', C=1e3, gamma=0.1, cache_size=DEFAULT_CACHE_SIZE)\n self.classifier.fit(X, Y)",
"def _multi_label_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if n_classes < 2:\n raise ValueError(\"n_classes must be > 1 for classification.\")\n return _MultiLabelHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)",
"def create_compatible_head(\n self,\n n_features: int,\n device: Optional[str] = None,\n ):\n head = nn.Linear(n_features, self.n_classes)\n xavier_initialize(head)\n if device is not None:\n head = head.to(device)\n return head",
"def yolo_head(features, anchors, num_classes, input_shape, calc_loss=False):\n\n if calc_loss:\n return yolo_head_base(features, anchors, num_classes, input_shape)\n else:\n return yolo_head_sigmoid(features, anchors, num_classes, input_shape)",
"def binary_svm_loss(theta, X, y, C):\n\n m, d = X.shape\n grad = np.zeros(theta.shape)\n J = 0\n\n ############################################################################\n # TODO #\n # Implement the binary SVM hinge loss function here #\n # 4 - 5 lines of vectorized code expected #\n ############################################################################\n J=0.5*np.sum(theta**2)/m\n J=J+C*np.sum(np.maximum(0,1-np.multiply(y,(np.dot(X,theta)))))/m\n \n grad=theta/m\n temp_1=np.dot(X,theta)\n temp_2=np.multiply(y,temp_1)\n\n temp_3=y[temp_2<1]\n temp_4=X[temp_2<1,:]\n temp_5=np.dot(temp_4.T,temp_3)\n grad=grad-temp_5*C/m\n\n\n# for j in range(d):\n# \tgrad[j]=float(theta[j]/m)\n# \tfor k in range(m):\n#\t \tif (y[k]*(np.dot(theta,X[k,:]))<1):\n#\t \t\tgrad[j]=grad[j]-float(C*y[k]*X[k,j]/m)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, grad",
"def __init__(self, classifierType, hyperparams, dimension, trainPath, testPath, addBias=False):\n \n # set classifier type\n if classifierType.upper() == 'SVM':\n self.type = self.ClassifierType.SVM;\n else:\n raise ValueError('Unknown classifier type: ' + classifierType); \n \n # store value ranges for classifier hyperparameters\n self.hpRanges = hyperparams;\n \n # set default learning parameters\n self.k = 5; # k-fold cross validation\n self.cvEpochs = 10; # training epochs for cross validation\n self.epochs = 20; # training epochs for inference\n \n # read in train and test data\n self.D = dimension;\n self.trainSet = self.readFile(trainPath, dimension, addBias);\n self.testSet = self.readFile(testPath, dimension, addBias);",
"def custom_training(nb_tweet_sample, randomised, equal_pos_neg, language, name_kernel, Resource, keep_null_vector):\n m_features, m_labels = get_characteristic_label_vectors(nb_tweet_sample, randomised, equal_pos_neg, Resource,\n keep_null_vector, language)\n\n kernel = Kernel.get_correct_kernel(name_kernel)\n custom_SVM = SVM(kernel)\n custom_SVM.fit(m_features, m_labels)\n\n return custom_SVM",
"def ex_1_b(x, y):\n ###########\n ## Add a point (4,0) with label 1 to the data set and then\n ## train an SVM with a linear kernel\n ## and plot the decision boundary and support vectors using 'plot_svm_decision_boundary' function\n ###########\n new_x = np.vstack((x, np.array([4,0])))\n new_y = np.hstack((y, np.array((1))))\n\n clf = svm.SVC(kernel='linear')\n clf.fit(new_x, new_y)\n plot_svm_decision_boundary(clf, new_x, new_y)\n pass",
"def binary_svm_loss(theta, X, y, C):\n\n m, d = X.shape\n grad = np.zeros(theta.shape)\n J = 0\n\n ############################################################################\n # TODO #\n # Implement the binary SVM hinge loss function here #\n # 4 - 5 lines of vectorized code expected #\n ############################################################################\n h = np.dot(X, theta)\n J = 1.0 / 2 / m * np.sum(theta**2) + 1.0 * C / m * np.sum(np.max([np.zeros(m), 1 - y * h], axis = 0))\n\n grad = 1.0 / m * theta + 1.0 * C / m * np.dot(X.T, -y * (y * h < 1))\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, grad",
"def multiclass_hinge(\n x: ArrayLike,\n y_true: ArrayLike,\n hinge: float = 1.0,\n *,\n constant: Optional[bool] = None\n) -> Tensor:\n return Tensor._op(MulticlassHinge, x, op_args=(y_true, hinge), constant=constant)",
"def train_SVM(data: np.array, labels: np.array)->None:\n print(\"SVM is not implemented yet!\")",
"def test_tsm_head():\n tsm_head = TSMHead(num_classes=4, in_channels=2048)\n tsm_head.init_weights()\n\n assert tsm_head.num_classes == 4\n assert tsm_head.dropout_ratio == 0.8\n assert tsm_head.in_channels == 2048\n assert tsm_head.init_std == 0.001\n assert tsm_head.consensus.dim == 1\n assert tsm_head.spatial_type == 'avg'\n\n assert isinstance(tsm_head.dropout, nn.Dropout)\n assert tsm_head.dropout.p == tsm_head.dropout_ratio\n\n assert isinstance(tsm_head.fc_cls, nn.Linear)\n assert tsm_head.fc_cls.in_features == tsm_head.in_channels\n assert tsm_head.fc_cls.out_features == tsm_head.num_classes\n\n assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d)\n assert tsm_head.avg_pool.output_size == 1\n\n input_shape = (8, 2048, 7, 7)\n feat = torch.rand(input_shape)\n\n # tsm head inference with no init\n num_segs = input_shape[0]\n cls_scores = tsm_head(feat, num_segs)\n assert cls_scores.shape == torch.Size([1, 4])\n\n # tsm head inference with init\n tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True)\n tsm_head.init_weights()\n cls_scores = tsm_head(feat, num_segs)\n assert cls_scores.shape == torch.Size([2, 4])"
]
| [
"0.6206049",
"0.5943907",
"0.59064436",
"0.5802742",
"0.5763362",
"0.5754058",
"0.5754058",
"0.5754058",
"0.5724703",
"0.57224053",
"0.5697456",
"0.5676167",
"0.5643779",
"0.5625094",
"0.56141084",
"0.558039",
"0.55719155",
"0.5535557",
"0.5532209",
"0.55216694",
"0.54993004",
"0.5488707",
"0.5458802",
"0.5458515",
"0.54551744",
"0.5452348",
"0.5414098",
"0.5411056",
"0.54092115",
"0.5396725"
]
| 0.7948433 | 0 |
Creates a _Head for multi label classification. The Head uses softmax cross entropy loss. | def _multi_label_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None):
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
return _MultiLabelHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _multi_class_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if (n_classes is None) or (n_classes < 2):\n raise ValueError(\"n_classes must be > 1 for classification: %s.\" %\n n_classes)\n\n if n_classes == 2:\n if metric_class_ids:\n raise ValueError(\"metric_class_ids invalid for n_classes==2.\")\n return _BinaryLogisticHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)\n\n return _MultiClassHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)",
"def __init__(self, heads, loss_combiner):\n # TODO(zakaria): Keep _Head a pure interface.\n super(_MultiHead, self).__init__(head_name=None)\n self._logits_dimension = 0\n for head in heads:\n if not head.head_name:\n raise ValueError(\"Head must have a name.\")\n self._logits_dimension += head.logits_dimension\n\n self._heads = heads\n self._loss_combiner = loss_combiner",
"def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = HuggingFaceBertClassificationHead(\n self.args.embed_dim, # self.args.encoder_embed_dim,\n inner_dim or self.args.embed_dim,\n num_classes,\n self.args.pooler_activation_fn,\n self.args.pooler_dropout,\n self.args.quant_noise_pq,\n self.args.quant_noise_pq_block_size,\n )",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def _multi_head(heads, loss_weights=None):\n if loss_weights:\n if len(loss_weights) != len(heads):\n raise ValueError(\"heads and loss_weights must have same size\")\n\n def _weighted_loss_combiner(losses):\n if loss_weights:\n if len(losses) != len(loss_weights):\n raise ValueError(\"losses and loss_weights must have same size\")\n weighted_losses = []\n for loss, weight in zip(losses, loss_weights):\n weighted_losses.append(math_ops.multiply(loss, weight))\n return math_ops.add_n(weighted_losses)\n else:\n return math_ops.add_n(losses)\n\n return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)",
"def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None",
"def __init__(self,\n label_name,\n weight_column_name,\n enable_centered_bias,\n head_name,\n loss_fn=_log_loss_with_two_classes,\n thresholds=None):\n super(_BinaryLogisticHead, self).__init__(head_name=head_name)\n self._thresholds = thresholds if thresholds else (.5,)\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._loss_fn = loss_fn\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LOGISTIC_REGRESSION",
"def __init__(self, channels, num_classes):\n super(AuxiliaryHead, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n # image size = 2 x 2\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),\n nn.Conv2d(channels, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True),\n )\n self.classifier = nn.Linear(768, num_classes)",
"def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()",
"def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))",
"def create_heads(x, upsample_feature, num_classes, hg_id, num_channels):\n hg_name = 'hg' + str(hg_id)\n\n head = Conv2D(num_channels, kernel_size=(1, 1), activation='relu', padding='same', name=hg_name+'_conv_1x1_1')(upsample_feature)\n head = BatchNormalization()(head)\n\n # for head as intermediate supervision, use 'linear' as activation.\n head_predict = Conv2D(num_classes, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_predict')(head)\n\n # use linear activation\n head = Conv2D(num_channels, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_2')(head)\n head_m = Conv2D(num_channels, kernel_size=(1, 1), activation='linear', padding='same',\n name=hg_name+'_conv_1x1_3')(head_predict)\n\n # merge heads for next stage\n head_next_stage = Add()([head, head_m, x])\n\n return head_next_stage, head_predict",
"def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000):\n super(RelabelPooledCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0",
"def __init__(self, num_heads: int, size: int, dropout: float = 0.1):\n super(MultiHeadedAttention, self).__init__()\n\n assert size % num_heads == 0\n\n self.head_size = head_size = size // num_heads\n self.model_size = size\n self.num_heads = num_heads\n\n self.k_layer = nn.Linear(size, num_heads * head_size)\n self.v_layer = nn.Linear(size, num_heads * head_size)\n self.q_layer = nn.Linear(size, num_heads * head_size)\n\n self.output_layer = nn.Linear(size, size)\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(dropout)",
"def __init__(self,\n label_name,\n weight_column_name,\n label_dimension,\n enable_centered_bias,\n head_name,\n loss_fn=_mean_squared_loss):\n super(_RegressionHead, self).__init__(head_name=head_name)\n\n self._loss_fn = loss_fn\n self._logits_dimension = label_dimension\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LINEAR_REGRESSION",
"def yolo_head(features, anchors, num_classes, input_shape, calc_loss=False):\n\n if calc_loss:\n return yolo_head_base(features, anchors, num_classes, input_shape)\n else:\n return yolo_head_sigmoid(features, anchors, num_classes, input_shape)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, label_size=10):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 16, 5, padding=2)\n self.drop1 = nn.Dropout2d(p=0.3)\n self.pool1 = nn.MaxPool2d(2)\n self.conv2 = nn.Conv2d(16, 32, 5, padding=2)\n self.drop2 = nn.Dropout2d(p=0.3)\n self.pool2 = nn.MaxPool2d(2)\n self.head_linear = nn.Linear(1568, label_size)",
"def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):\n return pred",
"def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000, n_comn=2):\n super(RelabelCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.n_comn = n_comn\n self.dis_fn = nn.CosineSimilarity(dim=1)\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0",
"def __init__(self, num_heads: int, size: int, size_v: int, dropout: float = 0.1):\n super(ContMultiHeadedAttention, self).__init__()\n\n assert size % num_heads == 0\n\n self.head_size = head_size = size // num_heads\n self.model_size = size\n self.num_heads = num_heads\n\n self.k_layer = nn.Linear(size, num_heads * head_size)\n self.v_layer = nn.Linear(size_v, num_heads * head_size)\n self.q_layer = nn.Linear(size, num_heads * head_size)\n\n self.output_layer = nn.Linear(size, size_v)\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(dropout)",
"def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.,\n bias=True,\n kdim=None,\n vdim=None,\n ):\n super(MultiheadAttention, self).__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n if self.head_dim * num_heads != self.embed_dim:\n raise ValueError('<embed_dim> must be divisible by <num_heads>.')\n if not self._qkv_same_embed_dim:\n self.q_proj_weight = Parameter(Tensor(embed_dim, embed_dim))\n self.k_proj_weight = Parameter(Tensor(embed_dim, self.kdim))\n self.v_proj_weight = Parameter(Tensor(embed_dim, self.vdim))\n self.register_parameter('in_proj_weight', None)\n else:\n self.in_proj_weight = Parameter(Tensor(3 * embed_dim, embed_dim))\n self.register_parameter('q_proj_weight', None)\n self.register_parameter('k_proj_weight', None)\n self.register_parameter('v_proj_weight', None)\n if bias:\n self.in_proj_bias = Parameter(Tensor(3 * embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = Linear(embed_dim, embed_dim, bias=bias)\n self.reset_parameters()",
"def _regression_head(label_name=None,\n weight_column_name=None,\n label_dimension=1,\n enable_centered_bias=False,\n head_name=None):\n return _RegressionHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name)",
"def _binary_svm_head(\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,):\n return _BinarySvmHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)",
"def create_compatible_head(\n self,\n n_features: int,\n device: Optional[str] = None,\n ):\n head = nn.Linear(n_features, self.n_classes)\n xavier_initialize(head)\n if device is not None:\n head = head.to(device)\n return head",
"def __init__(self, num_heads, D, hidden_layers, dropout=0.1, norm_eps=1e-12):\n super(TransformerBlock, self).__init__()\n # Attention\n self.attention = MultiHeadAttention(\n num_heads=num_heads, key_dim=D, dropout=dropout\n )\n self.norm_attention = LayerNormalization(epsilon=norm_eps)\n\n # MLP\n self.mlp = MLPBlock(hidden_layers, dropout)\n self.norm_mlp = LayerNormalization(epsilon=norm_eps)"
]
| [
"0.7336204",
"0.7173082",
"0.6579786",
"0.6405291",
"0.6405291",
"0.6405291",
"0.63451236",
"0.6327597",
"0.6323638",
"0.6302445",
"0.61321354",
"0.60909694",
"0.6047005",
"0.6039839",
"0.603225",
"0.59006226",
"0.5891783",
"0.58525467",
"0.58370924",
"0.5797899",
"0.5795075",
"0.57874465",
"0.5758207",
"0.5735806",
"0.5631108",
"0.5601545",
"0.5537957",
"0.5508757",
"0.55068487",
"0.55047315"
]
| 0.76452595 | 0 |
_Head to combine multiple _Head objects. | def __init__(self, heads, loss_combiner):
# TODO(zakaria): Keep _Head a pure interface.
super(_MultiHead, self).__init__(head_name=None)
self._logits_dimension = 0
for head in heads:
if not head.head_name:
raise ValueError("Head must have a name.")
self._logits_dimension += head.logits_dimension
self._heads = heads
self._loss_combiner = loss_combiner | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()",
"def headsofunion(h1, h2):\n res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)\n return {ctx.node() for ctx in res}",
"def head(self, head):\n\n self._head = head",
"def _multi_head(heads, loss_weights=None):\n if loss_weights:\n if len(loss_weights) != len(heads):\n raise ValueError(\"heads and loss_weights must have same size\")\n\n def _weighted_loss_combiner(losses):\n if loss_weights:\n if len(losses) != len(loss_weights):\n raise ValueError(\"losses and loss_weights must have same size\")\n weighted_losses = []\n for loss, weight in zip(losses, loss_weights):\n weighted_losses.append(math_ops.multiply(loss, weight))\n return math_ops.add_n(weighted_losses)\n else:\n return math_ops.add_n(losses)\n\n return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)",
"def build_head(self):\n stages = [f'stage{i}' for i in range(1, 7)]\n for stage in stages:\n block = getattr(self.arch, stage)\n PAF, CFM = block.keys()\n PAF = build_blocks(block[PAF], 'head')\n CFM = build_blocks(block[CFM], 'head')\n setattr(self, f\"{stage}_PAF\", PAF)\n setattr(self, f\"{stage}_CFM\", CFM)",
"def heads(self) -> \"IterableList[Head]\":\n return Head.list_items(self)",
"def add(hs):\n hNew = hs[0].Clone()\n if len(hs) > 1:\n for h in hs[1:]:\n hNew.Add(h)\n return hNew",
"def getHeadParts(self):\n return self.headParts",
"def get_3d_heads_intra():\n \n # Extract vertices and faces for the first head\n file_manager = ExitStack()\n atexit.register(file_manager.close)\n ref = importlib_resources.files('hypyp') / 'data/Basehead.obj'\n filename = file_manager.enter_context(importlib_resources.as_file(ref))\n\n mesh = meshio.read(Path(filename).resolve())\n zoom = 0.064\n interval = 0.5\n\n head1_v = mesh.points*zoom\n head1_f = mesh.cells[0].data\n\n # Copy the first head to create a second head\n head2_v = copy(mesh.points*zoom)\n head2_v[:, 0] = head2_v[:, 0] + interval\n\n # Use the same faces\n head2_f = copy(mesh.cells[0].data)\n\n # Concatenate the vertices\n vertices = np.concatenate((head1_v, head2_v))\n # Concatenate the faces, shift vertices indexes for second head\n faces = np.concatenate((head1_f, head2_f + len(head1_v)))\n return vertices, faces",
"def __init__(self, head=None):\n\n self.head = head",
"def __init__(self, head=None):\r\n self.head = head",
"def add_head(self):\n self.scenes[self.current_scene].add_object(Head())\n self.redraw()",
"def flatten(self):\n return merge(self.head.value, self.head.next.value)",
"def __init__(self, head):\n self.head = head",
"def __init__(self, head):\n self.head = head",
"def __init__(self, head):\n self.head = head",
"def __init__(self, head):\n self.head = head",
"def get_3d_heads_inter():\n \n # Extract vertices and faces for the first head\n file_manager = ExitStack()\n atexit.register(file_manager.close)\n ref = importlib_resources.files('hypyp') / 'data/Basehead.obj'\n filename = file_manager.enter_context(importlib_resources.as_file(ref))\n\n mesh = meshio.read(Path(filename).resolve())\n zoom = 0.064\n interval = 0.32\n\n head1_v = mesh.points*zoom\n head1_f = mesh.cells[0].data\n\n # Copy the first head to create a second head\n head2_v = copy(mesh.points*zoom)\n # Move the vertices by Y rotation and Z translation\n rotY = np.pi\n newX = head2_v[:, 0] * np.cos(rotY) - head2_v[:, 2] * np.sin(rotY)\n newZ = head2_v[:, 0] * np.sin(rotY) + head2_v[:, 2] * np.cos(rotY)\n head2_v[:, 0] = newX\n head2_v[:, 2] = newZ\n\n head1_v[:, 2] = head1_v[:, 2] - interval/2\n head2_v[:, 2] = head2_v[:, 2] + interval/2\n\n # Use the same faces\n head2_f = copy(mesh.cells[0].data)\n\n # Concatenate the vertices\n vertices = np.concatenate((head1_v, head2_v))\n # Concatenate the faces, shift vertices indexes for second head\n faces = np.concatenate((head1_f, head2_f + len(head1_v)))\n return vertices, faces",
"def merge(self, obj):\n pass",
"def head(self, *args):\n pass",
"def to_head(self, tag):\n\t\tself.head += tag + '\\n'",
"def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args",
"def mergeWith(self, others):",
"def concat_all(self):\n return self.merge(1)",
"def GetListHead(self, *args, **kwargs):\n pass",
"def _merge(self):\n raise NotImplementedError",
"def HeadList(self):\n return [(rname, repo.currenthead) for rname, repo in self.repos.items()\n ]",
"def init_mask_head(self, mask_roi_extractor, mask_head):\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if self.recursive:\n for i in range(self.num_stages):\n self.mask_head[i] = self.mask_head[0]",
"def concate(self, ll):\n if type(self) != type(ll):\n raise TypeError('Wrong type of ll')\n walk = self._head\n while walk._next is not None:\n walk = walk._next\n walk._next = ll._head",
"def concatenate(self, next_seq):\n return HSeq(self._elements + next_seq._elements)"
]
| [
"0.6166992",
"0.5967676",
"0.5889176",
"0.58571684",
"0.58249354",
"0.58173925",
"0.5694333",
"0.56684655",
"0.5642174",
"0.56208825",
"0.5578456",
"0.55694306",
"0.55612534",
"0.5547007",
"0.5547007",
"0.5547007",
"0.5547007",
"0.54943246",
"0.548427",
"0.5468281",
"0.5420097",
"0.54125005",
"0.5382945",
"0.5380683",
"0.5357014",
"0.5343887",
"0.53316915",
"0.5278235",
"0.5276123",
"0.52566504"
]
| 0.6410378 | 0 |
Splits logits for heads. | def _split_logits(self, logits):
all_logits = []
begin = 0
for head in self._heads:
current_logits_size = head.logits_dimension
current_logits = array_ops.slice(logits, [0, begin],
[-1, current_logits_size])
all_logits.append(current_logits)
begin += current_logits_size
return all_logits | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _split_heads(self, x: torch.Tensor) -> torch.Tensor:\n depth = x.size(-1)\n split_x = torch.reshape(x, (\n x.size(0), x.size(1),\n self._hparams.num_heads, depth // self._hparams.num_heads))\n return split_x.permute((0, 2, 1, 3))",
"def split_heads(x, num_heads):\n sz = x.size()\n # x -> [batch_size, length, heads, depth / num_heads]\n x = x.view(sz[0], sz[1], num_heads, sz[2] // num_heads)\n # [batch_size, length, 1, depth // num_heads] * \n heads = torch.chunk(x, num_heads, 2)\n x = []\n for i in range(num_heads):\n x.append(torch.squeeze(heads[i], 2))\n return x",
"def split_heads(self, tensor, batch_size):\n # (batch_size, seq_len, output_dim) -> (batch_size, seq_len, n_heads, head_depth)\n splitted_tensor = tensor.view(batch_size, -1, self.n_heads, self.head_depth)\n return splitted_tensor.transpose(1, 2) # (batch_size, n_heads, seq_len, head_depth)",
"def _split_heads(x, num_heads):\n\tshape_lst = bert_utils.get_shape_list(x)\n\tdepth = shape_lst[-1]\n\tbatch = shape_lst[0]\n\tseq = shape_lst[1]\n\t# print(x.get_shape(), \"===splitheads===\")\n\tsplitted_x = tf.reshape(x, [tf.shape(x)[0], tf.shape(x)[1], \\\n\t\tnum_heads, depth // num_heads])\n\treturn tf.transpose(splitted_x, [0, 2, 1, 3])",
"def _split_heads(self, x, is_picture):\n if is_picture is False:\n if len(x.shape) != 3:\n raise ValueError(\"x must have rank 3\")\n shape = x.shape\n return x.reshape(shape[0], shape[1], self.num_heads, shape[2]//self.num_heads).permute(0, 2, 1, 3).contiguous()\n else:\n if len(x.shape) != 5:\n raise ValueError(\"x must have rank 5\")\n shape = x.shape\n return x.reshape(shape[0], shape[1], shape[2], shape[3], self.num_heads, shape[4]//self.num_heads).permute(0, 4, 1, 2, 3, 5).contiguous()",
"def split_heads(x, num_heads):\n return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.h, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(x, batch_size, num_heads, depth):\n x = tf.reshape(x, (batch_size, -1, num_heads, depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_heads(self, x, batch_size): # noqa\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def get_heads(self):\n return self.heads[1:]",
"def _batch2heads(self, x: torch.Tensor) -> torch.Tensor:\n # num_heads = self.num_heads\n B, seq_len, head_out_channels = x.shape\n x = x.reshape(B // self.num_heads, self.num_heads, seq_len, head_out_channels)\n x = x.permute(0, 2, 1, 3).reshape(\n B // self.num_heads, seq_len, head_out_channels * self.num_heads\n )\n\n return x",
"def split_timeline(self, size: float, one_ts: bool) -> None:\n # Split log data\n splitter = ls.LogSplitter(self.log)\n train, test = splitter.split_log('timeline_contained', size, one_ts)\n total_events = len(self.log)\n # Check size and change time splitting method if necesary\n if len(test) < int(total_events*0.1):\n train, test = splitter.split_log('timeline_trace', size, one_ts)\n # Set splits\n key = 'end_timestamp' if one_ts else 'start_timestamp'\n test = pd.DataFrame(test)\n train = pd.DataFrame(train)\n self.log_test = (test.sort_values(key, ascending=True)\n .reset_index(drop=True))\n self.log_train = (train.sort_values(key, ascending=True)\n .reset_index(drop=True))",
"def make_mdn_heads(self, config):\n raise NotImplementedError",
"def __init__(self, heads, loss_combiner):\n # TODO(zakaria): Keep _Head a pure interface.\n super(_MultiHead, self).__init__(head_name=None)\n self._logits_dimension = 0\n for head in heads:\n if not head.head_name:\n raise ValueError(\"Head must have a name.\")\n self._logits_dimension += head.logits_dimension\n\n self._heads = heads\n self._loss_combiner = loss_combiner",
"def _merge_heads(self, x, is_picture):\n if is_picture is False:\n if len(x.shape) != 4:\n raise ValueError(\"x must have rank 4\")\n shape = x.shape\n return x.permute(0, 2, 1, 3).contiguous().reshape(shape[0], shape[2], shape[3]*self.num_heads)\n else:\n if len(x.shape) != 6:\n raise ValueError(\"x must have rank 6\")\n shape = x.shape\n return x.permute(0, 2, 3, 4, 1, 5).contiguous().reshape(shape[0], shape[2], shape[3], shape[4], shape[5]*self.num_heads)",
"def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads",
"def split_head(line, is_head=lambda line: line.startswith('>')):\n if is_head(line):\n return True\n else:\n return False",
"def _decode_head_forward_test(self, x, img_metas):\n seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)\n return seg_logits",
"def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor:\n # The EfficientFormerClsHead doesn't have other module, just return\n # after unpacking.\n return feats[-1]",
"def split_heads_2d(inputs, Nh):\n B, H, W, d = shape_list(inputs)\n ret_shape = [B, H, W, Nh, d // Nh]\n split = tf.reshape(inputs, ret_shape)\n return tf.transpose(split, [0, 3, 1, 2, 4])",
"def splitTrackingNums(_pack):\n multi = [ i.strip() for i in _pack[1].split(';') ]\n splits_ = [ [_pack[0], m] for m in multi ]\n return splits_",
"def _update_head_history(self):\n # pylint: disable=broad-except\n try:\n head = [h for h in self._git.heads if h.name == self.head][0]\n self.head_hash = head.commit.hexsha\n self.head_history = [\n {\n \"commit\": str(c.newhexsha),\n \"timestamp\": c.time[0],\n \"message\": c.message,\n \"author\": {\"name\": c.actor.name, \"email\": c.actor.email},\n }\n for c in head.log()[::-1]\n ]\n except Exception as err:\n self.log.warn(\"Git head update error, ignoring: %s\", err, exc_info=True)\n self.head_history = []",
"def gen_split_session(self, set_choice, split_threshold, history_length):\r\n return_seq = []\r\n chosen_set = self.split_data[set_choice]\r\n\r\n for user_index, group in chosen_set.groupby('user_index'):\r\n group = group.reset_index(drop=True)\r\n\r\n # Split full trajectories into sessions.\r\n # Any pair of consecutive records with time delta higher than split_threshold will be regard as splitting borders.\r\n dt_series = group['datetime']\r\n time_diff = (dt_series - dt_series.shift(1)).apply(lambda x: x.total_seconds()).fillna(0)\r\n split_indices = [0] + dt_series[time_diff > (60 * 60 * split_threshold)].index.tolist() + [dt_series.shape[0]]\r\n split_indices = np.array([split_indices[:-1], split_indices[1:]]).transpose()\r\n\r\n session_base_timestamp = []\r\n session_start_index = []\r\n sessions = []\r\n for i, split_index in enumerate(split_indices):\r\n session = group.iloc[split_index[0]:split_index[1]]\r\n this_base_timestamp = session.iloc[-1]['timestamp']\r\n session_base_timestamp.append(this_base_timestamp)\r\n sessions.append([user_index,\r\n session['poi_index'].to_list(),\r\n session['hour'].to_list()])\r\n this_start_index = np.where(np.array(session_base_timestamp) >= (this_base_timestamp - history_length * 60 * 60))[0][0]\r\n session_start_index.append(this_start_index)\r\n\r\n for i, session in enumerate(sessions):\r\n for j in range(1, len(session[1])):\r\n return_seq.append([user_index,\r\n [sessions[k][1] for k in range(session_start_index[i], i)] + [session[1][:j]],\r\n [sessions[k][2] for k in range(session_start_index[i], i)] + [session[2][:j]],\r\n session[1][j],\r\n i - session_start_index[i] + 1])\r\n return return_seq"
]
| [
"0.70507145",
"0.65164036",
"0.64181155",
"0.6412669",
"0.6331245",
"0.6179869",
"0.6152748",
"0.5997743",
"0.5997743",
"0.5997743",
"0.5997743",
"0.5997743",
"0.5997743",
"0.5997743",
"0.59784955",
"0.5914892",
"0.56974804",
"0.555019",
"0.55291426",
"0.5393336",
"0.530446",
"0.52594256",
"0.51606303",
"0.5028752",
"0.49880788",
"0.49104252",
"0.48676288",
"0.48649144",
"0.48300388",
"0.48111668"
]
| 0.75607777 | 0 |
Combines list of ModelFnOps for training. | def _combine_train(self, all_model_fn_ops, train_op_fn):
losses = []
additional_train_ops = []
for m in all_model_fn_ops:
losses.append(m.loss)
additional_train_ops.append(m.train_op)
loss = self._loss_combiner(losses)
train_op = train_op_fn(loss)
train_op = control_flow_ops.group(train_op, *additional_train_ops)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
loss=loss,
train_op=train_op) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _combine_eval(self, all_model_fn_ops):\n predictions = {}\n metrics = {}\n losses = []\n for head, m in zip(self._heads, all_model_fn_ops):\n losses.append(m.loss)\n head_name = head.head_name\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n for k, v in m.eval_metric_ops.items():\n # metrics[\"%s/%s\" % (k, head_name)] = v\n metrics[k] = v\n loss = self._loss_combiner(losses)\n\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=loss,\n eval_metric_ops=metrics)",
"def _combine_infer(self, all_model_fn_ops):\n predictions = {}\n output_alternatives = {}\n for head, m in zip(self._heads, all_model_fn_ops):\n head_name = head.head_name\n output_alternatives[head_name] = m.output_alternatives[head_name]\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.INFER,\n predictions=predictions,\n output_alternatives=output_alternatives)",
"def train_and_eval_all_models():\n\n clfShape, accShape = shape_symmetry_train_classifier()\n clfTexture, accTexture = texture_symmetry_train_classifier()\n clfFinal, accFinal = combined_symmetry_train_classifier()\n\n return accShape, accTexture, accFinal",
"def AddTrainingOperators(model, predict, label, value, value_label, base_lr=-0.003):\n xent = model.LabelCrossEntropy([predict, label], 'xent')\n # compute the expected loss\n loss1 = model.AveragedLoss(xent, \"loss1\")\n loss2 = model.Sub([value, value_label], \"loss2\")\n # track the accuracy of the model\n AddAccuracy(model, predict, label)\n # use the average loss we just computed to add gradient operators to the model\n model.AddGradientOperators([loss1, loss2])\n # do a simple stochastic gradient descent\n ITER = brew.iter(model, \"iter\")\n # set the learning rate schedule\n LR = model.LearningRate(ITER, \"LR\", base_lr=base_lr, policy=\"fixed\") # when policy=fixed, stepsize and gamma are ignored\n # ONE is a constant value that is used in the gradient update. We only need\n # to create it once, so it is explicitly placed in param_init_net.\n ONE = model.param_init_net.ConstantFill([], \"ONE\", shape=[1], value=1.0)\n # Now, for each parameter, we do the gradient updates.\n for param in model.params:\n # Note how we get the gradient of each parameter - ModelHelper keeps\n # track of that.\n param_grad = model.param_to_grad[param]\n # The update is a simple weighted sum: param = param + param_grad * LR\n model.WeightedSum([param, ONE, param_grad, LR], param)",
"def ops2alg(ops):\n return Model(cardinality=len(ops[0]), \n operations=dict([\"h\"+str(i),list(ops[i])] for i in range(len(ops))))",
"def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)",
"def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\t\ttf.logging.info(\"*** Features ***\")\n\t\tfor name in sorted(features.keys()):\n\t\t\ttf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n\t\tinput_ids=[]\n\t\tinput_mask=[]\n\t\tsegment_ids=[]\n\t\tmask_lm_info = []\n\t\tif is_training:\n\t\t\tinput_ids = [features[\"rewrite_query_ids\"], features[\"doc0_ids\"], features[\"doc1_ids\"], features[\"raw_query_ids\"]]\n\t\t\tinput_mask = [features[\"rewrite_query_mask\"], features[\"doc0_mask\"], features[\"doc1_mask\"], features[\"raw_query_mask\"]]\n\t\t\tsegment_ids = [features[\"rewrite_query_segment_ids\"], features[\"doc0_segment_ids\"], features[\"doc1_segment_ids\"], features[\"raw_query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_rewrite_query_mask\"], features[\"effective_doc0_mask\"], features[\"effective_doc1_mask\"], features[\"effective_raw_query_mask\"]]\n\t\telif is_eval:\n\t\t\tinput_ids = [features[\"query_ids\"], features[\"docx_ids\"], 0, features[\"query_ids\"]]\n\t\t\tinput_mask = [features[\"query_mask\"], features[\"docx_mask\"], 0, features[\"query_mask\"]]\n\t\t\tsegment_ids = [features[\"query_segment_ids\"], features[\"docx_segment_ids\"], 0, features[\"query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_query_mask\"], features[\"effective_docx_mask\"], 0, features[\"effective_query_mask\"]]\n\t\telif is_output:\n\t\t\tinput_ids=[features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"]]\n\t\t\tinput_mask = [features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"]]\n\t\t\tsegment_ids = [features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"]]\n\n\n\n\t\tlabel = features[\"label\"]\n\n\n\t\ttf.logging.info(\"Create model\")\n\t\tif (is_training) or (is_eval):\n\t\t\t(total_loss, score, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\t\telif is_output:\n\t\t\t(pooling_emb, emb, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\n\t\ttf.logging.info(\"Finish create model\")\n\t\ttvars = tf.trainable_variables()\n\n\t\tscaffold_fn = None\n\t\tif init_checkpoint:\n\t\t\t(assignment_map, initialized_variable_names)= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\t\t\t(assignment_map1, initialized_variable_names1) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, 'Student/', 'query_reformulator/')\n\t\t\tassignment_maps = [assignment_map, assignment_map1]\n\t\t\tinitialized_variable_names.update(initialized_variable_names1)\n\n\t\t\ttf.logging.info(\"**** Assignment Map ****\")\n\t\t\tif use_tpu:\n\t\t\t\tdef tpu_scaffold():\n\t\t\t\t\tfor assignment_map in assignment_maps:\n\t\t\t\t\t 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\t\t\t\treturn tf.train.Scaffold()\n\n\t\t\t\tscaffold_fn = tpu_scaffold\n\t\t\telse:\n\t\t\t\ttf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\ttf.logging.info(\"**** Trainable Variables ****\")\n\n\t\tfor var in tvars:\n\t\t\tinit_string = \"\"\n\t\t\tif var.name in initialized_variable_names:\n\t\t\t\tinit_string = \", *INIT_FROM_CKPT*\"\n\t\t\ttf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n\t\t\t\t\t\t\tinit_string)\n\n\t\toutput_spec = None\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\ttrain_op = optimization.create_optimizer(\n\t\t\t\t\t\ttotal_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, train_model)\n\n\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\tloss=total_loss,\n\t\t\t\t\t\ttrain_op=train_op,\n\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tif is_output:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"docid\": features['docid'],\n\t\t\t\t\t\t\t\t\t\"pooling_emb\":pooling_emb,\n\t\t\t\t\t\t\t\t\t\"emb\":emb,\n\t\t\t\t\t\t\t\t\t\"doc_length\":doc_length,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\t\t\telif is_eval:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"log_probs\": score,\n\t\t\t\t\t\t\t\t\t\"label_ids\": label,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\t\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n\t\treturn output_spec",
"def apply_operator_set(model, operator_set):\n field_part = []\n for operator in operator_set:\n field_part.append(apply_const_shift_operator(model, operator))\n field_part = torch.cat(field_part)\n return field_part",
"def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):",
"def create_setops_trainer(\r\n base_model,\r\n classifier,\r\n setops_model,\r\n optimizer,\r\n criterion1,\r\n criterion2,\r\n params_object,\r\n metrics={},\r\n device=None):\r\n if device:\r\n base_model.to(device)\r\n classifier.to(device)\r\n setops_model.to(device)\r\n\r\n def _update(engine, batch):\r\n\r\n if params_object.train_base:\r\n base_model.train()\r\n else:\r\n base_model.eval()\r\n\r\n classifier.train()\r\n setops_model.train()\r\n\r\n optimizer.zero_grad()\r\n\r\n input_a, input_b, target_a, target_b = _prepare_batch(batch, device=device)\r\n\r\n #\r\n # Apply the classification model\r\n #\r\n with conditional(not params_object.train_base, torch.no_grad()):\r\n embed_a = base_model(input_a)\r\n embed_b = base_model(input_b)\r\n\r\n output_a = classifier(embed_a)\r\n output_b = classifier(embed_b)\r\n\r\n #\r\n # Apply the setopt model.\r\n #\r\n outputs_setopt = setops_model(embed_a, embed_b)\r\n fake_a, fake_b, a_S_b, b_S_a, a_U_b, b_U_a, a_I_b, b_I_a, \\\r\n a_S_b_b, b_S_a_a, a_I_b_b, b_I_a_a, a_U_b_b, b_U_a_a, \\\r\n a_S_b_I_a, b_S_a_I_b, a_S_a_I_b, b_S_b_I_a = \\\r\n [classifier(o) for o in outputs_setopt]\r\n fake_a_em, fake_b_em, a_S_b_em, b_S_a_em, a_U_b_em, b_U_a_em, a_I_b_em, b_I_a_em, \\\r\n a_S_b_b_em, b_S_a_a_em, a_I_b_b_em, b_I_a_a_em, a_U_b_b_em, b_U_a_a_em, \\\r\n a_S_b_I_a_em, b_S_a_I_b_em, a_S_a_I_b_em, b_S_b_I_a_em = outputs_setopt\r\n\r\n loss_class = criterion1(output_a, target_a) + criterion1(output_b, target_b)\r\n loss_class_out = criterion1(fake_a, target_a) + criterion1(fake_b, target_b)\r\n if params_object.mc_toggle:\r\n loss_recon = criterion2(embed_a, fake_a_em) + criterion2(embed_b, fake_b_em)\r\n return_loss_recon = loss_recon.item()\r\n else:\r\n loss_recon = 0\r\n return_loss_recon = 0\r\n\r\n #\r\n # Calculate the target setopt operations\r\n #\r\n target_a = target_a.type(torch.cuda.ByteTensor)\r\n target_b = target_b.type(torch.cuda.ByteTensor)\r\n\r\n target_a_I_b = target_a & target_b\r\n target_a_U_b = target_a | target_b\r\n target_a_S_b = target_a & ~target_a_I_b\r\n target_b_S_a = target_b & ~target_a_I_b\r\n\r\n target_a_I_b = target_a_I_b.type(torch.cuda.FloatTensor)\r\n target_a_U_b = target_a_U_b.type(torch.cuda.FloatTensor)\r\n target_a_S_b = target_a_S_b.type(torch.cuda.FloatTensor)\r\n target_b_S_a = target_b_S_a.type(torch.cuda.FloatTensor)\r\n\r\n loss_class_S = criterion1(a_S_b, target_a_S_b) + criterion1(b_S_a, target_b_S_a)\r\n loss_class_U = criterion1(a_U_b, target_a_U_b)\r\n loss_class_I = criterion1(a_I_b, target_a_I_b)\r\n if params_object.tautology_class_toggle:\r\n loss_class_S += criterion1(a_S_b_b, target_a_S_b) + criterion1(b_S_a_a, target_b_S_a)\r\n loss_class_S += criterion1(a_S_a_I_b, target_a_S_b) + criterion1(b_S_a_I_b, target_b_S_a) +\\\r\n criterion1(b_S_b_I_a, target_b_S_a) + criterion1(a_S_b_I_a, target_a_S_b)\r\n loss_class_U += criterion1(a_U_b_b, target_a_U_b) + criterion1(b_U_a_a, target_a_U_b)\r\n loss_class_I += criterion1(a_I_b_b, target_a_I_b) + criterion1(b_I_a_a, target_a_I_b)\r\n\r\n if params_object.tautology_recon_toggle:\r\n loss_recon_S = criterion2(a_S_b_em, a_S_b_b_em) + criterion2(a_S_b_em, a_S_a_I_b_em) + \\\r\n criterion2(a_S_b_em, a_S_b_I_a_em)\r\n loss_recon_S += criterion2(b_S_a_em, b_S_a_a_em) + criterion2(b_S_a_em, b_S_a_I_b_em) + \\\r\n criterion2(b_S_a_em, b_S_b_I_a_em)\r\n return_recon_S = loss_recon_S.item()\r\n else:\r\n loss_recon_S = 0\r\n return_recon_S = 0\r\n\r\n if params_object.sym_class_toggle:\r\n loss_class_U += criterion1(b_U_a, target_a_U_b)\r\n 
loss_class_I += criterion1(b_I_a, target_a_I_b)\r\n\r\n if params_object.sym_recon_toggle:\r\n loss_recon_U = criterion2(a_U_b_em, b_U_a_em)\r\n loss_recon_I = criterion2(a_I_b_em, b_I_a_em)\r\n return_recon_U = loss_recon_U.item()\r\n return_recon_I = loss_recon_I.item()\r\n else:\r\n loss_recon_U = 0\r\n loss_recon_I = 0\r\n return_recon_U = 0\r\n return_recon_I = 0\r\n\r\n loss = loss_class\r\n loss += 0 if params_object.class_fake_loss_weight == 0 else params_object.class_fake_loss_weight * loss_class_out\r\n loss += 0 if (params_object.recon_loss_weight == 0) or (not loss_recon) else params_object.recon_loss_weight * loss_recon\r\n loss += 0 if params_object.class_S_loss_weight == 0 else params_object.class_S_loss_weight * loss_class_S\r\n loss += 0 if (params_object.recon_loss_weight == 0) or (not loss_recon_I) else params_object.recon_loss_weight * loss_recon_S\r\n loss += 0 if params_object.class_U_loss_weight == 0 else params_object.class_U_loss_weight * loss_class_U\r\n loss += 0 if (params_object.recon_loss_weight == 0) or (not loss_recon_U) else params_object.recon_loss_weight * loss_recon_U\r\n loss += 0 if params_object.class_I_loss_weight == 0 else params_object.class_I_loss_weight * loss_class_I\r\n loss += 0 if (params_object.recon_loss_weight == 0) or (not loss_recon_I) else params_object.recon_loss_weight * loss_recon_I\r\n\r\n loss.backward()\r\n optimizer.step()\r\n\r\n return {\r\n \"main\": loss.item(),\r\n \"real class\": loss_class.item(),\r\n \"fake class\": loss_class_out.item(),\r\n \"fake MSE\": return_loss_recon,\r\n \"S MSE\": return_recon_S,\r\n \"U MSE\": return_recon_U,\r\n \"I MSE\": return_recon_I,\r\n \"S class\": loss_class_S.item(),\r\n \"U class\": loss_class_U.item(),\r\n \"I class\": loss_class_I.item()\r\n }\r\n\r\n engine = Engine(_update)\r\n\r\n for name, metric in metrics.items():\r\n metric.attach(engine, name)\r\n\r\n return engine",
"def call(self, inputs, training=None, mask=None):\n res = inputs\n Bs, N, K, chs = inputs.shape\n inputs = tf.reshape(inputs, [Bs*N, K, chs])\n inputs = self.intra_former(inputs)\n inputs = tf.reshape(inputs, [Bs, N, K, chs]) + res\n res = inputs\n inputs = tf.reshape(tf.transpose(inputs, [0, 2, 1, 3]), [Bs*K, N, chs])\n inputs = self.inter_former(inputs)\n inputs = tf.transpose(tf.reshape(inputs, [Bs, K, N, chs]), [0, 2, 1, 3])\n return inputs + res",
"def _build_train_ops(train_params):\n global_step = tf.get_variable('global_step', shape=[], dtype='int32',\n initializer=tf.constant_initializer(0), trainable=False)\n #global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection(tf.GraphKeys.LOSSES)\n if len(loss) == 0:\n raise RuntimeError(\"No losses found in losses collection\")\n loss = tf.add_n(loss, name=\"loss\")\n\n if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:\n # Add any summaries client stored in SUMMARIES\n summary_tensor = tf.summary.merge([[tf.summary.tensor_summary(\"loss\", loss)] +\n tf.get_collection(tf.GraphKeys.SUMMARIES)])\n else:\n summary_tensor = tf.summary.tensor_summary(\"loss\", loss)\n\n train_objective = loss\n\n regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if len(regularizers) > 0:\n regularization_loss = tf.add_n(regularizers, name=\"regularization_loss\")\n if train_params.regularization_weight is not None:\n train_objective = train_objective + regularization_loss * train_params.regularization_weight\n else:\n train_objective = train_objective + regularization_loss\n else:\n regularization_loss = None\n\n opt = train_params.opt.get()\n opt = hvd.DistributedOptimizer(opt)\n #train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)\n train_opt = opt.minimize(train_objective, global_step=global_step)\n\n if train_params.ema is not None:\n ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)\n ema_op = ema.apply(tf.trainable_variables())\n with tf.control_dependencies([train_opt]):\n # Run the old training op, then update the averages.\n train_opt = tf.group(ema_op)\n else:\n ema = None\n\n # Any collections starting with \"monitor\" are also added as summaries\n to_monitor = {}\n for col in tf.get_default_graph().get_all_collection_keys():\n if col.startswith(\"monitor\"):\n v = tf.get_collection(col)\n if len(v) > 0:\n print(\"Monitoring: \" + col)\n v = tf.add_n(v)\n to_monitor[col] = v\n\n if len(to_monitor) > 0:\n monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name=\"MonitorEMA\",\n zero_debias=True)\n train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))\n summary_tensor = tf.summary.merge(\n [tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +\n [summary_tensor])\n\n # EMA for the loss and what we monitoring\n if train_params.loss_ema is not None:\n loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name=\"LossEMA\", zero_debias=True)\n\n if regularization_loss is None:\n ema_op = loss_ema.apply([loss])\n train_opt = tf.group(train_opt, ema_op)\n ema_var = loss_ema.average(loss)\n summary_tensor = tf.summary.merge([tf.summary.scalar(\"training-ema/loss\", ema_var), summary_tensor])\n else:\n to_track = [loss, train_objective, regularization_loss]\n ema_op = loss_ema.apply(to_track)\n train_opt = tf.group(train_opt, ema_op)\n tensor_vars = [\n tf.summary.scalar(\"training-ema/loss\", loss_ema.average(loss)),\n tf.summary.scalar(\"training-ema/objective\", loss_ema.average(train_objective)),\n tf.summary.scalar(\"training-ema/regularization-loss\",\n loss_ema.average(regularization_loss))\n ]\n summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])\n\n return loss, summary_tensor, train_opt, global_step, ema",
"def head_ops(self,\n features,\n labels,\n mode,\n train_op_fn,\n logits=None,\n logits_input=None,\n scope=None):\n raise NotImplementedError(\"Calling an abstract method.\")",
"def _extract_ops_from_onnx_model(model_files: typing.Iterable[pathlib.Path]):\n\n required_ops = {}\n\n for model_file in model_files:\n if not model_file.is_file():\n raise ValueError(f\"Path is not a file: '{model_file}'\")\n _process_onnx_model(model_file, required_ops)\n\n return required_ops",
"def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)",
"def __init__(self, input_model_dict, func_lib, x_list,\n par_add_dict_all={}, QuietMode=False, **kwargs):\n modelDict = OrderedDict()\n modelNameList = input_model_dict.keys()\n for modelName in modelNameList:\n funcName = input_model_dict[modelName][\"function\"]\n funcInfo = func_lib[funcName]\n xName = funcInfo[\"x_name\"]\n #-> Build up the parameter dictionaries\n parFitList = funcInfo[\"param_fit\"]\n parAddList = funcInfo[\"param_add\"]\n parFitDict = OrderedDict()\n parAddDict = {}\n for parName in parFitList:\n parFitDict[parName] = input_model_dict[modelName][parName]\n for parName in parAddList:\n par_add_iterm = par_add_dict_all.get(parName, \"No this parameter\")\n if par_add_iterm == \"No this parameter\":\n pass\n else:\n parAddDict[parName] = par_add_iterm\n #-> Check the consistency if the component is multiply\n multiList = input_model_dict[modelName].get(\"multiply\", None)\n if not multiList is None:\n #--> The \"*\" should be included in the operation list.\n assert \"*\" in funcInfo[\"operation\"]\n if not QuietMode:\n print \"[Model_Generator]: {0} is multiplied to {1}!\".format(modelName, multiList)\n #--> Check further the target models are not multiplicative.\n for tmn in multiList:\n f_mlt = input_model_dict[tmn].get(\"multiply\", None)\n if not f_mlt is None:\n raise ValueError(\"The multiList includes a multiplicative model ({0})!\".format(tmn))\n modelDict[modelName] = ModelFunction(funcName, xName, parFitDict, parAddDict, multiList)\n ModelCombiner.__init__(self, modelDict, x_list, np.complex_, **kwargs)",
"def multiple_input_model():\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"multiple_input_model\")(x)\n\n return outputs",
"def train_model(model, X_train, y_train, X_val, y_val, image_name):\n if MODEL == 1:\n return train_model_1(model, X_train, y_train, X_val, y_val, image_name)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return train_cv_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n else:\n return train_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n elif MODEL == 2:\n return train_model_2(model, X_train, y_train, X_val, y_val, image_name)\n else:\n # For models 4, 5 and 6\n return train_model_4(model, X_train, y_train, image_name)",
"def group(*ops):\n with tf.control_dependencies(ops):\n return tf.constant(0)",
"def concatenate_lists(*layers, **kwargs):\n ...",
"def _register_ops(self):\n ops = []\n ops.append(BatchAppsOps.register(\"shared.home\",\n \"Home\",\n self._home))\n ops.append(BatchAppsOps.register(\"shared.management_portal\",\n \"Management Portal\",\n self._management_portal))\n return ops",
"def _build_fetches(self, global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops, all_top_5_ops,\n phase_train):\n fetches = {'enqueue_ops': enqueue_ops}\n \n apply_gradient_devices, gradient_state = (\n self.variable_mgr.preprocess_device_grads(device_grads))\n\n training_ops = []\n for d, device in enumerate(apply_gradient_devices):\n with tf.device(device):\n total_loss = tf.reduce_mean(losses)\n avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state)\n\n gradient_clip = self.params.gradient_clip\n learning_rate = (\n self.params.learning_rate or\n self.model.get_learning_rate(global_step, self.batch_size))\n \n clipped_grads = avg_grads\n\n learning_rate = tf.identity(learning_rate, name='learning_rate')\n opt = tf.train.GradientDescentOptimizer(learning_rate)\n \n\n loss_scale_params = variable_mgr_util.AutoLossScaleParams(\n enable_auto_loss_scale=self.enable_auto_loss_scale,\n loss_scale=self.loss_scale,\n loss_scale_normal_steps=self.loss_scale_normal_steps,\n inc_loss_scale_every_n=self.params.fp16_inc_loss_scale_every_n,\n is_chief=not self.job_name or self.task_index == 0)\n\n self.variable_mgr.append_apply_gradients_ops(\n gradient_state, opt, clipped_grads, training_ops, loss_scale_params)\n train_op = tf.group(*(training_ops + update_ops))\n\n fetches['train_op'] = train_op\n fetches['total_loss'] = total_loss\n return fetches",
"def test_keras_model_functional_with_training_ops_get_op_product_graph(self):\n tf.compat.v1.reset_default_graph()\n _ = keras_model_functional_for_tf2()\n\n # add training ops\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3, name='Adam_new')\n _ = optimizer.minimize(loss=tf.compat.v1.get_default_graph().get_tensor_by_name('keras_model_functional/Softmax:0'),\n name='train_step_new')\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), [\"input_1\"],\n output_op_names=['keras_model_functional/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(0, conn_graph.branch_count)\n self.assertEqual(14, len(conn_graph.get_all_ops()))\n\n # 13 products from inter module connections\n # 22 products from parameters\n self.assertEqual(35, len(conn_graph.get_all_products()))",
"def experiment_models(train, test, train_target, test_target):\n # Linear models\n linear_models = [(LinearRegression, {\"n_jobs\": -1}),\n (Lasso, {\"alpha\": 3}),\n (Ridge, {\"alpha\": 3}),\n (LinearSVR, {\"random_state\": 0, \"tol\": 1e-5})]\n\n # Add polynomial features\n poly = preprocessing.PolynomialFeatures(2)\n\n # scaler\n scaler = preprocessing.StandardScaler().fit(train)\n\n print(\"Use linear models with linear features\")\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")\n\n print(\"Use linear models with polynomial features\")\n train = poly.fit_transform(train)\n test = poly.transform(test)\n scaler = preprocessing.StandardScaler().fit(train)\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")",
"def train(model, train_inputs, train_labels):\n print('Train starts: \\n')\n indices = tf.range(0, train_inputs.shape[0])\n indices = tf.random.shuffle(indices)\n train_inputs = tf.gather(train_inputs, indices)\n train_labels = tf.gather(train_labels, indices)\n\n N = train_inputs.shape[0] // model.batch_size\n for batch in range(N):\n start = batch * model.batch_size\n end = (batch + 1) * model.batch_size\n if (batch + 1) * model.batch_size > train_inputs.shape[0]:\n end = train_inputs.shape[0]\n inputs = train_inputs[start: end]\n labels = train_labels[start: end]\n\n with tf.GradientTape() as tape:\n probs = model.call(inputs)\n loss = model.loss_function(probs, labels)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n model.optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n # if batch % 100 == 0:\n # print('Batch {} starts:'.format(batch))\n print('\\r', \"training processing : {} %\".format((batch + 1) * 100 // N), end='')",
"def call(self, inputs, training=False):\n with tf.device('/GPU:0'):\n x1 = self.conv1_1(inputs)\n x1 = tf.nn.local_response_normalization(x1, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x1 = self.max_pool(x1)\n x1 = self.conv2_1(x1)\n x1 = tf.nn.local_response_normalization(x1, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x1 = self.max_pool(x1)\n\n with tf.device('/GPU:1'):\n x2 = self.conv1_2(inputs)\n x2 = tf.nn.local_response_normalization(x2, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x2 = self.max_pool(x2)\n x2 = self.conv2_2(x2)\n x2 = tf.nn.local_response_normalization(x2, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x2 = self.max_pool(x2)\n\n x = tf.keras.layers.concatenate([x1, x2])\n\n with tf.device('/GPU:0'):\n x1 = self.conv3_1(x)\n x1 = self.conv4_1(x1)\n x1 = self.conv5_1(x1)\n x1 = self.max_pool(x1)\n\n with tf.device('/GPU:1'):\n x2 = self.conv3_2(x)\n x2 = self.conv4_2(x2)\n x2 = self.conv5_2(x2)\n x2 = self.max_pool(x2)\n\n # Fully Connected Layers\n x = tf.keras.layers.concatenate([x1, x2])\n x = self.flatten(x)\n\n with tf.device('/GPU:0'):\n x1 = self.drop(self.fc1_1(x))\n x1 = self.drop(self.fc2_1(x1))\n\n with tf.device('/GPU:1'):\n x2 = self.drop(self.fc1_2(x))\n x2 = self.drop(self.fc2_2(x2))\n\n x = tf.keras.layers.concatenate([x1, x2])\n x = self.fc3(x)\n\n return x",
"def add_training_fetches(self, fetches):\n fetches[self._name] = {\n 'loss': self._dual.get_op('loss'), # the calculation of loss\n 'training': self._dual.get_op('training'), # the optimisation\n 'output': self._dual.get_op('output'), # the output value\n # debugging\n 'target': self._dual.get_op('target'),\n 'degraded': self._dual.get_op('degraded')\n }\n\n if self._hparams.use_batch_transformer:\n fetches[self._name]['bt_input'] = self._dual.get_op('bt_input')\n fetches[self._name]['bt_output'] = self._dual.get_op('bt_output')\n\n if self._summary_op is not None:\n fetches[self._name]['summaries'] = self._summary_op",
"def pretraining_functions(self, batch_size):\n\n index = T.lscalar('index') # index to a minibatch\n\n # beginning of a batch, given `index`\n batch_begin = index * batch_size\n # ending of a batch given `index`\n batch_end = batch_begin + batch_size\n\n forward_backward_step = []\n forward_step_fns = []\n i = 0\n for AE in self.AE_layers:\n\n # get the cost and the updates list\n cost = AE.get_cost_updates()\n\n params = AE.params\n shared_cost = theano.shared(np.float32(0.0))\n forward_step_fns.append(\n theano.function(\n [index], [],\n updates=[(shared_cost, cost)],\n givens={\n self.x: self.train_set_x[batch_begin: batch_end],\n }))\n grads_temp = T.grad(cost, params)\n\n # This is both forward and backward\n forward_backward_step.append(\n theano.function(\n [index], grads_temp,\n givens={\n self.x: self.train_set_x[batch_begin: batch_end],\n }))\n i += 1\n\n return forward_backward_step, forward_step_fns",
"def get_weight_ops(self, ops=None, skip_bias_op=False):\n if not ops:\n ops = self._graph.get_operations()\n\n ops_with_weights = []\n for op in ops:\n if self._is_op_with_weights(op):\n self._log.debug('Found op w/weights: %s', op.name)\n ops_with_weights.append(op)\n\n if not skip_bias_op and self._is_op_with_weights(op):\n for consumer in op.outputs[0].consumers():\n # Ignore Reshape as it can be placed between MatMul and BiasAdd on Dense layer of Transformer\n if consumer.type in ['Reshape'] and len(consumer.outputs[0].consumers()) == 1:\n consumer = consumer.outputs[0].consumers()[0]\n if consumer.type in _BIAS_TYPES:\n self._log.debug('Found op w/bias: %s', consumer.name+'('+consumer.type+')')\n ops_with_weights.append(consumer)\n\n reduced_list = [x for x in ops_with_weights if not x.name.startswith(tuple(self._ops_to_ignore))]\n return reduced_list",
"def trainModel( self, featureTrain, classTrain):"
]
| [
"0.7152099",
"0.681696",
"0.6271104",
"0.6034426",
"0.58629924",
"0.5860132",
"0.585599",
"0.58350027",
"0.57953465",
"0.56816924",
"0.56582206",
"0.5627848",
"0.5627384",
"0.5624815",
"0.5592285",
"0.5530983",
"0.5488082",
"0.5472284",
"0.5439261",
"0.54166454",
"0.5410787",
"0.53943104",
"0.53934443",
"0.5391907",
"0.537824",
"0.53688884",
"0.5360214",
"0.53476846",
"0.5333483",
"0.5327901"
]
| 0.82771045 | 0 |
Combines list of ModelFnOps for inference. | def _combine_infer(self, all_model_fn_ops):
predictions = {}
output_alternatives = {}
for head, m in zip(self._heads, all_model_fn_ops):
head_name = head.head_name
output_alternatives[head_name] = m.output_alternatives[head_name]
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
output_alternatives=output_alternatives) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _combine_eval(self, all_model_fn_ops):\n predictions = {}\n metrics = {}\n losses = []\n for head, m in zip(self._heads, all_model_fn_ops):\n losses.append(m.loss)\n head_name = head.head_name\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n for k, v in m.eval_metric_ops.items():\n # metrics[\"%s/%s\" % (k, head_name)] = v\n metrics[k] = v\n loss = self._loss_combiner(losses)\n\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=loss,\n eval_metric_ops=metrics)",
"def _combine_train(self, all_model_fn_ops, train_op_fn):\n losses = []\n additional_train_ops = []\n for m in all_model_fn_ops:\n losses.append(m.loss)\n additional_train_ops.append(m.train_op)\n loss = self._loss_combiner(losses)\n\n train_op = train_op_fn(loss)\n train_op = control_flow_ops.group(train_op, *additional_train_ops)\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.TRAIN,\n loss=loss,\n train_op=train_op)",
"def train_and_eval_all_models():\n\n clfShape, accShape = shape_symmetry_train_classifier()\n clfTexture, accTexture = texture_symmetry_train_classifier()\n clfFinal, accFinal = combined_symmetry_train_classifier()\n\n return accShape, accTexture, accFinal",
"def _extract_ops_from_onnx_model(model_files: typing.Iterable[pathlib.Path]):\n\n required_ops = {}\n\n for model_file in model_files:\n if not model_file.is_file():\n raise ValueError(f\"Path is not a file: '{model_file}'\")\n _process_onnx_model(model_file, required_ops)\n\n return required_ops",
"def multiple_input_model():\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"multiple_input_model\")(x)\n\n return outputs",
"def apply_operator_set(model, operator_set):\n field_part = []\n for operator in operator_set:\n field_part.append(apply_const_shift_operator(model, operator))\n field_part = torch.cat(field_part)\n return field_part",
"def ops2alg(ops):\n return Model(cardinality=len(ops[0]), \n operations=dict([\"h\"+str(i),list(ops[i])] for i in range(len(ops))))",
"def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\t\ttf.logging.info(\"*** Features ***\")\n\t\tfor name in sorted(features.keys()):\n\t\t\ttf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n\t\tinput_ids=[]\n\t\tinput_mask=[]\n\t\tsegment_ids=[]\n\t\tmask_lm_info = []\n\t\tif is_training:\n\t\t\tinput_ids = [features[\"rewrite_query_ids\"], features[\"doc0_ids\"], features[\"doc1_ids\"], features[\"raw_query_ids\"]]\n\t\t\tinput_mask = [features[\"rewrite_query_mask\"], features[\"doc0_mask\"], features[\"doc1_mask\"], features[\"raw_query_mask\"]]\n\t\t\tsegment_ids = [features[\"rewrite_query_segment_ids\"], features[\"doc0_segment_ids\"], features[\"doc1_segment_ids\"], features[\"raw_query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_rewrite_query_mask\"], features[\"effective_doc0_mask\"], features[\"effective_doc1_mask\"], features[\"effective_raw_query_mask\"]]\n\t\telif is_eval:\n\t\t\tinput_ids = [features[\"query_ids\"], features[\"docx_ids\"], 0, features[\"query_ids\"]]\n\t\t\tinput_mask = [features[\"query_mask\"], features[\"docx_mask\"], 0, features[\"query_mask\"]]\n\t\t\tsegment_ids = [features[\"query_segment_ids\"], features[\"docx_segment_ids\"], 0, features[\"query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_query_mask\"], features[\"effective_docx_mask\"], 0, features[\"effective_query_mask\"]]\n\t\telif is_output:\n\t\t\tinput_ids=[features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"]]\n\t\t\tinput_mask = [features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"]]\n\t\t\tsegment_ids = [features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"]]\n\n\n\n\t\tlabel = features[\"label\"]\n\n\n\t\ttf.logging.info(\"Create model\")\n\t\tif (is_training) or (is_eval):\n\t\t\t(total_loss, score, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\t\telif is_output:\n\t\t\t(pooling_emb, emb, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\n\t\ttf.logging.info(\"Finish create model\")\n\t\ttvars = tf.trainable_variables()\n\n\t\tscaffold_fn = None\n\t\tif init_checkpoint:\n\t\t\t(assignment_map, initialized_variable_names)= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\t\t\t(assignment_map1, initialized_variable_names1) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, 'Student/', 'query_reformulator/')\n\t\t\tassignment_maps = [assignment_map, assignment_map1]\n\t\t\tinitialized_variable_names.update(initialized_variable_names1)\n\n\t\t\ttf.logging.info(\"**** Assignment Map ****\")\n\t\t\tif use_tpu:\n\t\t\t\tdef tpu_scaffold():\n\t\t\t\t\tfor assignment_map in assignment_maps:\n\t\t\t\t\t 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\t\t\t\treturn tf.train.Scaffold()\n\n\t\t\t\tscaffold_fn = tpu_scaffold\n\t\t\telse:\n\t\t\t\ttf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\ttf.logging.info(\"**** Trainable Variables ****\")\n\n\t\tfor var in tvars:\n\t\t\tinit_string = \"\"\n\t\t\tif var.name in initialized_variable_names:\n\t\t\t\tinit_string = \", *INIT_FROM_CKPT*\"\n\t\t\ttf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n\t\t\t\t\t\t\tinit_string)\n\n\t\toutput_spec = None\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\ttrain_op = optimization.create_optimizer(\n\t\t\t\t\t\ttotal_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, train_model)\n\n\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\tloss=total_loss,\n\t\t\t\t\t\ttrain_op=train_op,\n\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tif is_output:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"docid\": features['docid'],\n\t\t\t\t\t\t\t\t\t\"pooling_emb\":pooling_emb,\n\t\t\t\t\t\t\t\t\t\"emb\":emb,\n\t\t\t\t\t\t\t\t\t\"doc_length\":doc_length,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\t\t\telif is_eval:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"log_probs\": score,\n\t\t\t\t\t\t\t\t\t\"label_ids\": label,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\t\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n\t\treturn output_spec",
"def _multi_image_op(self, image_files, ops, model=None):\n if len(set(ops).intersection(SUPPORTED_OPS)) != len(ops):\n raise Exception('Unsupported op: %s, ops available: %s' % (str(ops), str(SUPPORTED_OPS)))\n image_data = self._process_image_files(image_files)\n data = {'op': ','.join(ops)}\n if model:\n data['model'] = self._sanitize_param(model)\n elif self._model:\n data['model'] = self._model\n url = self._url_for_op(ops)\n raw_response = self._get_raw_response(self._get_multipart_headers,\n post_images_multipart, image_data, data, url)\n return self._parse_response(raw_response, ops)",
"def _model_fn(features, labels, mode):\n weights = None\n if weights_name and weights_name in features:\n weights = features.pop(weights_name)\n\n keys = None\n if keys_name and keys_name in features:\n keys = features.pop(keys_name)\n\n # If we're doing eval, optionally ignore device_assigner.\n # Also ignore device assigner if we're exporting (mode == INFER)\n dev_assn = device_assigner\n if (mode == model_fn_lib.ModeKeys.INFER or\n (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):\n dev_assn = None\n\n graph_builder = graph_builder_class(params,\n device_assigner=dev_assn)\n inference = {}\n output_alternatives = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.INFER):\n inference[eval_metrics.INFERENCE_PROB_NAME] = (\n graph_builder.inference_graph(features))\n\n if params.regression:\n predictions = {\n None: inference[eval_metrics.INFERENCE_PROB_NAME]}\n output_alternatives = {\n None: (constants.ProblemType.LINEAR_REGRESSION, predictions)}\n else:\n inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(\n inference[eval_metrics.INFERENCE_PROB_NAME], 1)\n\n predictions = {\n prediction_key.PredictionKey.PROBABILITIES:\n inference[eval_metrics.INFERENCE_PROB_NAME],\n prediction_key.PredictionKey.CLASSES:\n inference[eval_metrics.INFERENCE_PRED_NAME]}\n output_alternatives = {\n None: (constants.ProblemType.CLASSIFICATION, predictions)}\n\n if report_feature_importances:\n inference[eval_metrics.FEATURE_IMPORTANCE_NAME] = (\n graph_builder.feature_importances())\n\n if keys is not None:\n inference[keys_name] = keys\n\n # labels might be None if we're doing prediction (which brings up the\n # question of why we force everything to adhere to a single model_fn).\n loss_deps = []\n training_graph = None\n training_hooks = []\n scaffold = None\n if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:\n training_graph = control_flow_ops.group(\n graph_builder.training_graph(\n features, labels, input_weights=weights,\n num_trainers=num_trainers,\n trainer_id=trainer_id),\n state_ops.assign_add(contrib_framework.get_global_step(), 1))\n loss_deps.append(training_graph)\n if hasattr(graph_builder, 'finalize_training'):\n finalize_listener = EveryCheckpointPreSaveListener(\n graph_builder.finalize_training())\n scaffold = monitored_session.Scaffold()\n training_hooks.append(\n basic_session_run_hooks.CheckpointSaverHook(\n model_dir, save_secs=600, save_steps=None,\n scaffold=scaffold,\n listeners=[finalize_listener]))\n\n training_loss = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.TRAIN):\n with ops.control_dependencies(loss_deps):\n training_loss = graph_builder.training_loss(\n features, labels, name=LOSS_NAME)\n\n # Put weights back in\n if weights is not None:\n features[weights_name] = weights\n\n if early_stopping_rounds:\n training_hooks.append(TensorForestLossHook(early_stopping_rounds))\n\n return model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=inference,\n loss=training_loss,\n train_op=training_graph,\n training_hooks=training_hooks,\n scaffold=scaffold,\n output_alternatives=output_alternatives)",
"def compose(*fns):\n return functools.reduce(lambda f,g: lambda x: f(g(x)), fns)",
"def init_py_impls(self):\r\n def compose_impl(r):\r\n # this is not optimal at all eg in add(*1 -> mul(x, y), *1)\r\n # it will calculate *1 twice\r\n # it also doesn't follow fgraph.toposort but that's (presumably)\r\n # still correct since we only have scalar ops\r\n if r in self.fgraph.inputs:\r\n idx = self.fgraph.inputs.index(r)\r\n return lambda inputs: inputs[idx]\r\n elif r.owner is None: # in fgraph.orphans:\r\n return lambda inputs: r.data\r\n node = r.owner\r\n producers = [compose_impl(input) for input in node.inputs]\r\n return lambda inputs: node.op.impl(*[p(inputs) for p in producers])\r\n self._impls = [compose_impl(r) for r in self.fgraph.outputs]",
"def compose_many(*fs):\n return reduce(compose, fs)",
"def convert_elemwise(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.AddOptions import AddOptions\n from tflite.SubOptions import SubOptions\n from tflite.MulOptions import MulOptions\n from tflite.DivOptions import DivOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) == 2, \"input tensors length should be 2\"\n\n def get_input_nodes(tensor):\n if tensor.tensor_idx in self.tensor_tab:\n # In most cases, we can assume that TOCO fuses elemwise operators\n # with constants - it means both will be tensors.\n return self.tensor_tab[tensor.tensor_idx]\n else:\n # However, in some corner cases, the elemwise operator is not fused,\n # we can receive as constant.\n t_value = self.get_tensor_value(tensor)\n return self.nn_new_const(tensor, t_value)\n\n lhs_nodes = get_input_nodes(input_tensors[0])\n rhs_nodes = get_input_nodes(input_tensors[1])\n\n assert len(lhs_nodes) in [1, 3], \"Nodes list size should be 1 or 3\"\n assert len(lhs_nodes) == len(rhs_nodes), \"Left and right nodes list size should be equal\"\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n # Options (fused_activation_function)\n options = None\n if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:\n op_type = \"Add\"\n options = AddOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:\n op_type = \"Sub\"\n options = SubOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:\n op_type = \"Mul\"\n options = MulOptions()\n elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:\n op_type = \"Div\"\n options = DivOptions()\n\n if options is not None:\n op_options = op.BuiltinOptions()\n options.Init(op_options.Bytes, op_options.Pos)\n fused_activation_fn = options.FusedActivationFunction()\n # if we have activation fn\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Elemwise operators with fused activation are not supported yet.'\n\n out_nodes = self.nn_elemwise(lhs_nodes, rhs_nodes, op_type, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def model_fn(topology_lst, keep_prob):\n fn_list = list()\n if len(topology_lst) < 2:\n raise ValueError(\"Incompatible topology length.\") \n def drop(x):\n return tf.nn.dropout(tf.nn.relu(x), keep_prob=keep_prob)\n def compose_fn(f, g):\n return lambda x: g(f(x)) \n for c, (in_dim, out_dim) in enumerate(zip(topology_lst[: len(topology_lst) -1], \n topology_lst[1 :])):\n if c == (len(topology_lst) - 2):\n layer_name = 'out'\n fn_list.append(nn_layer_fn(in_dim, out_dim, layer_name, fn=tf.identity))\n else:\n layer_name = 'hl%d' %c\n fn_list.append(nn_layer_fn(in_dim, out_dim, layer_name, fn=drop))\n return functools.reduce(compose_fn, fn_list)",
"def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. 
The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)",
"def composition(func_list):\n return reduce(\n lambda (f1, args1), (f2, args2) : (lambda x : f1(f2(x, *args2), *args1)), \n func_list,\n lambda x : x\n )",
"def add_output_ops(self, graph, output):\n with graph.as_default():\n softmax = []\n with tf.name_scope('inference'):\n for i, logits in enumerate(output):\n softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))\n output = softmax\n return output",
"def compose(*fs) -> Callable:\n return lambda x: reduce(flip(funcall), reversed(fs), x)",
"def run_all(operations=ops):\n for operation in operations:\n run(operation)",
"def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass",
"def fanins(op):\n return {t.op for t in op.inputs}",
"def model_fn(features, labels, mode):\n\n # Build a Graph that computes predictions from the inference model.\n logits = inference(features, args.hidden1, args.hidden2)\n\n tensors = {}\n # Add to the Graph the Ops for loss calculation.\n if mode == ModeKeys.INFER:\n softmax = tf.nn.softmax(logits)\n tensors['digit'] = tf.argmax(softmax, 1)\n loss_op = None\n else:\n loss_op = loss(logits, labels)\n tensors['loss'] = loss_op\n tf.scalar_summary('loss', loss_op)\n\n # Add to the Graph the Ops for accuracy calculation.\n if mode == ModeKeys.EVAL:\n accuracy_op = evaluation(logits, labels)\n tensors['accuracy'] = accuracy_op\n tf.scalar_summary('training/hptuning/metric', accuracy_op)\n\n # Add to the Graph the Ops that calculate and apply gradients.\n if mode == ModeKeys.TRAIN:\n global_step = framework.get_global_step()\n # Create the gradient descent optimizer with the given learning rate.\n optimizer = tf.train.GradientDescentOptimizer(args.learning_rate)\n # Create a variable to track the global step.\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss_op, global_step=global_step)\n # Add streaming means.\n else:\n train_op = None\n\n return tensors, loss_op, train_op",
"def RunOperators(ops_def):\n RunOperatorsCC([_stringify_proto(op_def) for op_def in ops_def])",
"def run_inference(model: torch.nn.Module,\n model_inputs: Dict[str, torch.Tensor]) -> list:\n return model(**model_inputs, return_loss=False)",
"def call(self, inputs)->Any:\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n input_tensor = unpacked_inputs[0]\n attention_mask = unpacked_inputs[1]\n output_tensor = input_tensor\n\n all_layer_outputs = []\n for layer in self.layers:\n output_tensor, attention_scores = layer(output_tensor, attention_mask)\n all_layer_outputs.append((output_tensor, attention_scores))\n\n return all_layer_outputs",
"def get_ensemble_model_output_and_feature(\n models,\n batch_x,\n ensemble_method,\n temperature = 1.0,\n):\n batch_ensemble_output = 0\n batch_feature_list = []\n if ensemble_method == 'hard':\n num_classes = None\n for model in models:\n batch_output, batch_feature = model.get_output_and_feature(\n batch_x, training=False, temperature=temperature,\n )\n batch_feature_list.append(batch_feature)\n if ensemble_method == 'hard':\n batch_pred = tf.argmax(batch_output, axis=1)\n if num_classes is None:\n num_classes = batch_output.shape[1]\n batch_one_hot_output = tf.one_hot(batch_pred, num_classes)\n batch_ensemble_output += batch_one_hot_output\n elif ensemble_method == 'soft':\n batch_ensemble_output += batch_output\n else:\n raise ValueError(f'Not supported ensemble method: {ensemble_method}!')\n # Concatenates the features of the models in the ensemble.\n concat_batch_feature = tf.concat(batch_feature_list, axis=1)\n return batch_ensemble_output / len(models), concat_batch_feature",
"def compose(*ops):\n if len(ops) == 0:\n return [0, 1, 2, 3, 4, 5, 6, 7]\n if len(ops) == 1:\n return ops[0]\n if len(ops) == 2:\n op1, op2 = ops\n return [op2[op1[v]] for v in range(8)]\n op1 = ops[0]\n rest = ops[1:]\n return compose(op1, compose(*rest))",
"def FixInputsFC(model, inputs):\n first_input = K.constant(inputs[0])\n second_input = K.constant(inputs[1][:,:-1])\n\n Tensor_Input0 = Input(batch_shape = (model.input_shape[1][0], 1))\n\n n_input = keras.layers.Lambda(lambda x: K.concatenate([second_input,x],axis=-1))(Tensor_Input0)\n n2_input = keras.layers.Lambda(lambda x: [first_input, x])(n_input)\n Out1 = model(n2_input)\n# Out2 = keras.layers.Lambda(lambda x : x[:,:,0] - x[:,:,1])(Out1)\n Out2 = keras.layers.Lambda(lambda x : x)(Out1)\n M = keras.Model( Tensor_Input0, Out2 )\n return(M)",
"def _process_op_fetches(self, op_fetches):\n if op_fetches is None:\n return []\n\n if not isinstance(op_fetches, (list, tuple)):\n op_fetches = [op_fetches]\n\n fetches = []\n for fetch in op_fetches:\n if isinstance(fetch, ops.Operation):\n fetches.append(fetch)\n elif isinstance(fetch, tensor_lib.Tensor):\n fetches.append(fetch.op)\n else:\n logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %\n fetch)\n return fetches"
]
| [
"0.7424944",
"0.71752834",
"0.59878427",
"0.5931156",
"0.56635725",
"0.56063145",
"0.5599954",
"0.55916476",
"0.5555402",
"0.552452",
"0.53857994",
"0.5308501",
"0.5290959",
"0.5280084",
"0.5270434",
"0.5227811",
"0.5208972",
"0.5192557",
"0.5178041",
"0.51539606",
"0.5143707",
"0.5132286",
"0.511539",
"0.5111402",
"0.51050794",
"0.5083747",
"0.5078155",
"0.5056282",
"0.5050236",
"0.50459796"
]
| 0.7783812 | 0 |
Combines list of ModelFnOps for eval. | def _combine_eval(self, all_model_fn_ops):
predictions = {}
metrics = {}
losses = []
for head, m in zip(self._heads, all_model_fn_ops):
losses.append(m.loss)
head_name = head.head_name
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
for k, v in m.eval_metric_ops.items():
# metrics["%s/%s" % (k, head_name)] = v
metrics[k] = v
loss = self._loss_combiner(losses)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=loss,
eval_metric_ops=metrics) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _combine_train(self, all_model_fn_ops, train_op_fn):\n losses = []\n additional_train_ops = []\n for m in all_model_fn_ops:\n losses.append(m.loss)\n additional_train_ops.append(m.train_op)\n loss = self._loss_combiner(losses)\n\n train_op = train_op_fn(loss)\n train_op = control_flow_ops.group(train_op, *additional_train_ops)\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.TRAIN,\n loss=loss,\n train_op=train_op)",
"def _combine_infer(self, all_model_fn_ops):\n predictions = {}\n output_alternatives = {}\n for head, m in zip(self._heads, all_model_fn_ops):\n head_name = head.head_name\n output_alternatives[head_name] = m.output_alternatives[head_name]\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.INFER,\n predictions=predictions,\n output_alternatives=output_alternatives)",
"def apply_operator_set(model, operator_set):\n field_part = []\n for operator in operator_set:\n field_part.append(apply_const_shift_operator(model, operator))\n field_part = torch.cat(field_part)\n return field_part",
"def RunOperators(ops_def):\n RunOperatorsCC([_stringify_proto(op_def) for op_def in ops_def])",
"def ops2alg(ops):\n return Model(cardinality=len(ops[0]), \n operations=dict([\"h\"+str(i),list(ops[i])] for i in range(len(ops))))",
"def _extract_ops_from_onnx_model(model_files: typing.Iterable[pathlib.Path]):\n\n required_ops = {}\n\n for model_file in model_files:\n if not model_file.is_file():\n raise ValueError(f\"Path is not a file: '{model_file}'\")\n _process_onnx_model(model_file, required_ops)\n\n return required_ops",
"def create_operators(op_param_list, global_config=None):\n assert isinstance(op_param_list, dict), ('operator config should be a dict')\n ops = []\n for operator in op_param_list.items():\n assert isinstance(operator, tuple) and len(operator) == 2, \"yaml format error\"\n op_name = list(operator)[0]\n param = {} if operator[1] is None else operator[1]\n if global_config is not None:\n param.update(global_config)\n op = eval(op_name)(**param)\n ops.append(op)\n return ops",
"def _multi_image_op(self, image_files, ops, model=None):\n if len(set(ops).intersection(SUPPORTED_OPS)) != len(ops):\n raise Exception('Unsupported op: %s, ops available: %s' % (str(ops), str(SUPPORTED_OPS)))\n image_data = self._process_image_files(image_files)\n data = {'op': ','.join(ops)}\n if model:\n data['model'] = self._sanitize_param(model)\n elif self._model:\n data['model'] = self._model\n url = self._url_for_op(ops)\n raw_response = self._get_raw_response(self._get_multipart_headers,\n post_images_multipart, image_data, data, url)\n return self._parse_response(raw_response, ops)",
"def run_all(operations=ops):\n for operation in operations:\n run(operation)",
"def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\t\ttf.logging.info(\"*** Features ***\")\n\t\tfor name in sorted(features.keys()):\n\t\t\ttf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n\t\tinput_ids=[]\n\t\tinput_mask=[]\n\t\tsegment_ids=[]\n\t\tmask_lm_info = []\n\t\tif is_training:\n\t\t\tinput_ids = [features[\"rewrite_query_ids\"], features[\"doc0_ids\"], features[\"doc1_ids\"], features[\"raw_query_ids\"]]\n\t\t\tinput_mask = [features[\"rewrite_query_mask\"], features[\"doc0_mask\"], features[\"doc1_mask\"], features[\"raw_query_mask\"]]\n\t\t\tsegment_ids = [features[\"rewrite_query_segment_ids\"], features[\"doc0_segment_ids\"], features[\"doc1_segment_ids\"], features[\"raw_query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_rewrite_query_mask\"], features[\"effective_doc0_mask\"], features[\"effective_doc1_mask\"], features[\"effective_raw_query_mask\"]]\n\t\telif is_eval:\n\t\t\tinput_ids = [features[\"query_ids\"], features[\"docx_ids\"], 0, features[\"query_ids\"]]\n\t\t\tinput_mask = [features[\"query_mask\"], features[\"docx_mask\"], 0, features[\"query_mask\"]]\n\t\t\tsegment_ids = [features[\"query_segment_ids\"], features[\"docx_segment_ids\"], 0, features[\"query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_query_mask\"], features[\"effective_docx_mask\"], 0, features[\"effective_query_mask\"]]\n\t\telif is_output:\n\t\t\tinput_ids=[features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"]]\n\t\t\tinput_mask = [features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"]]\n\t\t\tsegment_ids = [features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"]]\n\n\n\n\t\tlabel = features[\"label\"]\n\n\n\t\ttf.logging.info(\"Create model\")\n\t\tif (is_training) or (is_eval):\n\t\t\t(total_loss, score, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\t\telif is_output:\n\t\t\t(pooling_emb, emb, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\n\t\ttf.logging.info(\"Finish create model\")\n\t\ttvars = tf.trainable_variables()\n\n\t\tscaffold_fn = None\n\t\tif init_checkpoint:\n\t\t\t(assignment_map, initialized_variable_names)= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\t\t\t(assignment_map1, initialized_variable_names1) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, 'Student/', 'query_reformulator/')\n\t\t\tassignment_maps = [assignment_map, assignment_map1]\n\t\t\tinitialized_variable_names.update(initialized_variable_names1)\n\n\t\t\ttf.logging.info(\"**** Assignment Map ****\")\n\t\t\tif use_tpu:\n\t\t\t\tdef tpu_scaffold():\n\t\t\t\t\tfor assignment_map in assignment_maps:\n\t\t\t\t\t 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\t\t\t\treturn tf.train.Scaffold()\n\n\t\t\t\tscaffold_fn = tpu_scaffold\n\t\t\telse:\n\t\t\t\ttf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\ttf.logging.info(\"**** Trainable Variables ****\")\n\n\t\tfor var in tvars:\n\t\t\tinit_string = \"\"\n\t\t\tif var.name in initialized_variable_names:\n\t\t\t\tinit_string = \", *INIT_FROM_CKPT*\"\n\t\t\ttf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n\t\t\t\t\t\t\tinit_string)\n\n\t\toutput_spec = None\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\ttrain_op = optimization.create_optimizer(\n\t\t\t\t\t\ttotal_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, train_model)\n\n\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\tloss=total_loss,\n\t\t\t\t\t\ttrain_op=train_op,\n\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tif is_output:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"docid\": features['docid'],\n\t\t\t\t\t\t\t\t\t\"pooling_emb\":pooling_emb,\n\t\t\t\t\t\t\t\t\t\"emb\":emb,\n\t\t\t\t\t\t\t\t\t\"doc_length\":doc_length,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\t\t\telif is_eval:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"log_probs\": score,\n\t\t\t\t\t\t\t\t\t\"label_ids\": label,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\t\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n\t\treturn output_spec",
"def train_and_eval_all_models():\n\n clfShape, accShape = shape_symmetry_train_classifier()\n clfTexture, accTexture = texture_symmetry_train_classifier()\n clfFinal, accFinal = combined_symmetry_train_classifier()\n\n return accShape, accTexture, accFinal",
"def get_ops (self, names):\n return operator.attrgetter(names)(self.core) if isinstance(names,str) else [\n operator.attrgetter(n)(self.core) for n in names ]",
"def get_ops(self, mode='train'):\r\n if not self._ready_to_write:\r\n self._prepare_for_write()\r\n if mode == 'test' or mode == 'full_test': # Always return all ops for test case\r\n return self._expensive_ops[mode]\r\n elif mode == 'train': # Select ops to evaluate based on defined frequency\r\n check_func = self._model.time.has_been_n_seconds_since_last\r\n if check_func('expensive_summaries_train', self._expensive_ops_every_n_secs):\r\n return self._expensive_ops[mode]\r\n elif check_func('cheap_summaries_train', self._cheap_ops_every_n_secs):\r\n return self._cheap_ops[mode]\r\n return {}",
"def eval_ops(opcodes):\n output = []\n for op in opcodes:\n if op in [\"+\", \"*\"]:\n b = output.pop(-1)\n a = output.pop(-1)\n value = ops[op](a, b)\n output.append(value)\n else:\n output.append(op)\n\n assert len(output) == 1\n return output[0]",
"def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]",
"def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net_' + name)\n net.eval()",
"def AddTrainingOperators(model, predict, label, value, value_label, base_lr=-0.003):\n xent = model.LabelCrossEntropy([predict, label], 'xent')\n # compute the expected loss\n loss1 = model.AveragedLoss(xent, \"loss1\")\n loss2 = model.Sub([value, value_label], \"loss2\")\n # track the accuracy of the model\n AddAccuracy(model, predict, label)\n # use the average loss we just computed to add gradient operators to the model\n model.AddGradientOperators([loss1, loss2])\n # do a simple stochastic gradient descent\n ITER = brew.iter(model, \"iter\")\n # set the learning rate schedule\n LR = model.LearningRate(ITER, \"LR\", base_lr=base_lr, policy=\"fixed\") # when policy=fixed, stepsize and gamma are ignored\n # ONE is a constant value that is used in the gradient update. We only need\n # to create it once, so it is explicitly placed in param_init_net.\n ONE = model.param_init_net.ConstantFill([], \"ONE\", shape=[1], value=1.0)\n # Now, for each parameter, we do the gradient updates.\n for param in model.params:\n # Note how we get the gradient of each parameter - ModelHelper keeps\n # track of that.\n param_grad = model.param_to_grad[param]\n # The update is a simple weighted sum: param = param + param_grad * LR\n model.WeightedSum([param, ONE, param_grad, LR], param)",
"def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()",
"def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()",
"def _multi_imageurl_op(self, image_urls, ops, model=None):\n if len(set(ops).intersection(SUPPORTED_OPS)) != len(ops):\n raise Exception('Unsupported op: %s, ops available: %s' % (str(ops), str(SUPPORTED_OPS)))\n if not isinstance(image_urls, list):\n image_urls = [image_urls]\n self._check_batch_size(image_urls)\n if not isinstance(image_urls[0], basestring):\n raise Exception(\"image_urls must be strings\")\n data = {'op': ','.join(ops),\n 'url': image_urls}\n if model:\n data['model'] = self._sanitize_param(model)\n elif self._model:\n data['model'] = self._model\n url = self._url_for_op(ops)\n raw_response = self._get_raw_response(self._get_json_headers,\n self._get_json_response, url, data)\n return self._parse_response(raw_response, ops)",
"def declare_operators(*op_list):\n operators.update({op.__name__:op for op in op_list})\n return operators",
"def operartors(self) -> List[Operator]:\n return list(self.__ops.keys())",
"def get_apply_gradients_ops_func():\n return [opt.apply_gradients(grads)]",
"def gen_funcs_combination(self):\n funcs = '\\n\\n;; Combination'\n funcs += '\\n(module'\n\n assert_template = ' (func (export \"{lane_type}.{op1}-{lane_type}.{op2}\") (param v128 v128 v128) (result v128) ' \\\n '({lane_type}.{op1} ({lane_type}.{op2} (local.get 0) (local.get 1))(local.get 2))' \\\n ')'\n\n binary_ops = list(self.BINARY_OPS)\n binary_ops.reverse()\n for op1 in self.BINARY_OPS:\n for op2 in binary_ops:\n funcs += '\\n' + assert_template.format(lane_type=self.LANE_TYPE, op1=op1, op2=op2)\n\n funcs += '\\n)'\n return funcs",
"def callables(self):\n \n if hasattr(self, \"_callables\"):\n return self._callables\n \n # build a list of all the Callable objects\n # The old backend processed all operations first\n # (FIXME: duplicate for the sake of easy checking)\n self._callables = []\n\n for c in self._node.callables():\n if isinstance(c, idlast.Operation):\n self._callables.append(call.operation(self, c))\n \n for c in self._node.callables():\n if isinstance(c, idlast.Attribute):\n self._callables = self._callables + call.read_attributes(self, c)\n if c.readonly(): continue\n self._callables = self._callables + call.write_attributes(self, c)\n \n return self._callables",
"def apply(self, collection, ops, **kwargs):\n validator = lambda obj: all(op(get_value(obj, self.name), val) for (op, val) in ops) # noqa\n return [o for o in collection if validator(o)]",
"def get_ops_list(model_data):\n model = schema_fb.Model.GetRootAsModel(model_data, 0)\n op_set = set()\n\n for subgraph_idx in range(model.SubgraphsLength()):\n subgraph = model.Subgraphs(subgraph_idx)\n for op_idx in range(subgraph.OperatorsLength()):\n op = subgraph.Operators(op_idx)\n opcode = model.OperatorCodes(op.OpcodeIndex())\n builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n if builtin_code == schema_fb.BuiltinOperator.CUSTOM:\n opname = opcode.CustomCode().decode(\"utf-8\")\n op_set.add(opname)\n else:\n op_set.add(visualize.BuiltinCodeToName(builtin_code))\n return op_set",
"def set_eval(self):\n for m in self.models.values():\n m.eval()",
"def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)",
"def import_ops(self):\n if self.is_training:\n self.lr = tf.get_collection_ref(\"lr\")[0]\n self.new_lr = tf.get_collection_ref(\"new_lr\")[0]\n self.lr_update = tf.get_collection_ref(\"lr_update\")[0]\n\n self.cost = tf.get_collection_ref(util.with_prefix(self.name, \"cost\"))[0]\n self.initial_state = util.import_state_tuples(\n self.initial_state, self.initial_state_name, self.name)\n self.final_state = util.import_state_tuples(\n self.final_state, self.final_state_name, self.name)"
]
| [
"0.7129016",
"0.6561746",
"0.6168162",
"0.59013474",
"0.58962",
"0.57510495",
"0.5645326",
"0.56403315",
"0.5626065",
"0.5615804",
"0.55982953",
"0.55878484",
"0.5579561",
"0.54772735",
"0.5472304",
"0.5467546",
"0.5465826",
"0.546536",
"0.546536",
"0.5436267",
"0.54085",
"0.54077184",
"0.5405241",
"0.5381983",
"0.53736705",
"0.5353772",
"0.53518295",
"0.53353876",
"0.53340006",
"0.5323525"
]
| 0.79966766 | 0 |
Returns a tuple of (loss, weighted_average_loss). | def _loss(loss_unweighted, weight, name):
with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
if weight is None:
loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
return loss, loss
loss_weighted = _weighted_loss(loss_unweighted, weight)
weighted_average_loss = math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight)),
name="weighted_average_loss")
loss = math_ops.reduce_mean(loss_weighted, name=name_scope)
return loss, weighted_average_loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_loss(combined_loss):\n loss_dict = {'localization': combined_loss[0].item(),\n 'confidence': combined_loss[1].item()}\n return combined_loss[0] + combined_loss[1], loss_dict",
"def _weighted_loss(loss, weight):\n with ops.name_scope(None, \"weighted_loss\", (loss, weight)) as name:\n return math_ops.multiply(\n array_ops.reshape(\n loss, shape=(-1,)),\n array_ops.reshape(\n weight, shape=(-1,)),\n name=name)",
"def get_loss(self, combined_loss):\n loss_dict = {'iou_loss': combined_loss[0].item(),\n 'emotion_loss': combined_loss[1].item()}\n self.iou = 1 - combined_loss[0].item()\n return combined_loss[0] + combined_loss[1], loss_dict",
"def _loss(W):\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss",
"def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss",
"def average_error_to_weight(error):\n return error ** (-2)",
"def get_loss(self):\n return self.loss / self.cnt",
"def average_error_to_weight(error):\r\n return error ** (-2)",
"def compute_loss(self, state_action_values, next_state_values, weights):\n\n absolute_error = torch.abs(next_state_values - state_action_values)\n td_error = self._loss(next_state_values, state_action_values)\n loss = torch.mean(weights * td_error)\n return loss, td_error.mean(), absolute_error",
"def get_loss(self, label, pred, sample_weight=None):\n\n loss = np.average((label - pred) ** 2, axis=0, weights=sample_weight)\n return loss",
"def _weighted_mean_absolute_percentage_error_update(preds: Tensor, target: Tensor) ->Tuple[Tensor, int]:\n _check_same_shape(preds, target)\n sum_abs_error = (preds - target).abs().sum()\n sum_scale = target.abs().sum()\n return sum_abs_error, sum_scale",
"def loss_weights(self):\n return None",
"def get_avg_loss(self):\n if self.n_batches > 0:\n avg_loss = self.loss / self.n_batches\n self.loss = 0\n self.n_batches = 0\n return avg_loss\n else:\n return 0",
"def envisaged_loss(self):\n loss = round(\n self.calcul_buy_nb_action() * self.stop_loss - self.investment_price(),\n 2,\n )\n percent_loss = round(loss * 100 / self.capital, 2)\n return loss, percent_loss",
"def get_loss(self, x, weights=1.0):\n input_dtype = x.dtype\n x = self.cast(x, mstype.float32)\n weights = self.cast(weights, mstype.float32)\n x = self.mul(weights, x)\n if self.reduce and self.average:\n x = self.reduce_mean(x, self.get_axis(x))\n if self.reduce and not self.average:\n x = self.reduce_sum(x, self.get_axis(x))\n x = self.cast(x, input_dtype)\n return x",
"def get_loss_stats(self, networks):\n total_loss = 0\n highest_loss = 0\n lowest_loss = 1e99\n best_scoring_network = None\n \n for network in networks:\n total_loss += network.loss\n if network.loss < lowest_loss:\n lowest_loss = network.loss\n best_scoring_network = network\n if network.loss > highest_loss:\n highest_loss = network.loss\n \n return total_loss / len(networks), highest_loss, lowest_loss, best_scoring_network",
"def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss",
"def loss(self):\n return self._get(\"loss\")",
"def weighted_avg_and_std(values, weights):\r\n average = np.average(values, weights=weights)\r\n variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise\r\n return (average, np.sqrt(variance))",
"def weighted_avg_and_std(values, weights):\n average = numpy.average(values, weights=weights)\n # Fast and numerically precise:\n variance = numpy.average((values-average)**2, weights=weights)\n return (average, math.sqrt(variance))",
"def importance_weighted_error(self):\n weighted_errors = self.i_s_weights * self.errors\n self.mean_error = tf.reduce_mean(weighted_errors, name=\"mean_error\")\n return(self.mean_error)",
"def _weightedAverage(list_):\n\n\t\taccum = [0, 0]\n\n\t\tfor point, weight in list_:\n\n\t\t\taccum[0] += point[0] * weight\n\t\t\taccum[1] += point[1] * weight\n\n\t\ttotalWeight = sum([weight for point, weight in list_])\n\n\n\t\tif totalWeight == 0:\n\t\t\t\n\t\t\treturn (0, 0)\n\n\n\t\taccum[0] /= float(totalWeight)\n\t\taccum[1] /= float(totalWeight)\n\n\t\treturn (accum[0], accum[1])",
"def weighted_avg_and_std(values, weights):\n average = np.average(values, weights=weights)\n # Fast and numerically precise\n variance = np.average((values - average)**2, weights=weights)\n return (average, math.sqrt(variance))",
"def weighted_avg_and_std(values, weights):\n average = np.average(values, weights=weights)\n variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise\n return (average, math.sqrt(variance))",
"def get_loss(self):\n raise NotImplementedError",
"def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)",
"def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)",
"def _avg(value1, value2, weight):\r\n if value1 is None:\r\n return value2\r\n if value2 is None:\r\n return value1\r\n return value2 * weight + value1 * (1 - weight)",
"def test_average_weight_loss(self):\n user_created = self.create_user()\n average_return = self.new_calculation.average_weight_loss(user_created)\n\n self.assertEqual(average_return, 5.0)\n self.assertEqual(type(average_return), float)",
"def weighted_avg_and_std(values, weights):\n average = np.average(values, weights=weights)\n variance = np.average((values-average)**2, weights=weights)\n return average, np.sqrt(variance)"
]
| [
"0.67525476",
"0.67277217",
"0.655962",
"0.6471539",
"0.64588875",
"0.64291614",
"0.6397769",
"0.6392674",
"0.6356757",
"0.635146",
"0.62811285",
"0.6252047",
"0.62353516",
"0.6232239",
"0.61814845",
"0.6181042",
"0.6164968",
"0.61537457",
"0.6151719",
"0.6144429",
"0.6137694",
"0.6121836",
"0.61190796",
"0.611424",
"0.6076427",
"0.6066709",
"0.60594916",
"0.60594916",
"0.6050235",
"0.6034221"
]
| 0.788702 | 0 |
Raises ValueError if the given mode is invalid. | def _check_mode_valid(mode):
if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and
mode != model_fn.ModeKeys.EVAL):
raise ValueError("mode=%s unrecognized." % str(mode)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _assert_valid_mode(mode:str):\n if not mode in [_TRAIN, _EVAL, _PREDICT]:\n raise ValueError(\"Invalid mode.\")",
"def _checkMode(mode):\n\n if not isinstance(mode, str):\n raise TypeError('The {0} should be a string. Given: {1!r}'.format(\"mode\", mode))\n\n if mode not in [MODE_RTU, MODE_ASCII]:\n raise ValueError(\"Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.\".format(mode))",
"def test_mode_invalid(mode):\n # Test errors on construction\n with pytest.raises(TypeConversionError):\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5,\n mode=mode)\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5)\n gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0}\n # Test errors on setting\n with pytest.raises(TypeConversionError):\n gay_berne.mode = mode",
"def validate(self, mode): # pragma: no cover\n pass",
"def _check_mode(mode, encoding, newline):\n if \"t\" in mode:\n if \"b\" in mode:\n raise ValueError(\"Invalid mode: %r\" % (mode,))\n else:\n if encoding is not None:\n raise ValueError(\"Argument 'encoding' not supported in binary mode\")\n if newline is not None:\n raise ValueError(\"Argument 'newline' not supported in binary mode\")",
"def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)",
"def test_mode_from_knx_wrong_value(self):\n with pytest.raises(ConversionError):\n DPTHVACMode.from_knx((1, 2))",
"def validate_mode(mode, operator_tag, is_sha_digest):\n version_supports_restricted = check_if_tag_supports_restricted(operator_tag, is_sha_digest)\n if mode == MODE_RESTRICTED and not version_supports_restricted:\n raise ValueError(\"{} is not supported for this version, please use {}\".format(MODE_RESTRICTED, MODE_ALL))",
"def test_handling_wrong_context(member, mode, arg, msg):\n with pytest.raises(TypeError) as excinfo:\n member.set_validate_mode(getattr(Validate, mode), arg)\n assert msg in excinfo.exconly()",
"def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...",
"def test_invalid_upload_mode(self):\n # verify mode doesn't exist\n\n mode = \"invalid_mode\"\n self.assertFalse(mode in UPLOAD_MODES)\n\n with self.assertRaises(Exception):\n upload_helpers.verify_upload_mode(mode)",
"def test_non_integer_mode(self, m, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"Mode must be of type int, not\"):\n bb = parse_input_mocked_metadata(\"MeasureFock() | {}\".format(m))",
"def test_mode_from_knx_wrong_code(self):\n with pytest.raises(CouldNotParseKNXIP):\n DPTHVACMode.from_knx((0x05,))",
"def _check_mode(self):\n if self.mode is None:\n raise RuntimeError(\"Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\")",
"def test_malformedModes(self):\n self.assertRaises(irc.IRCBadModes, irc.parseModes, \"foo\", [])\n self.assertRaises(irc.IRCBadModes, irc.parseModes, \"%\", [])",
"def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def validate_engine_mode(engine_mode):\n\n VALID_DB_ENGINE_MODES = (\n \"provisioned\",\n \"serverless\",\n \"parallelquery\",\n \"global\",\n \"multimaster\",\n )\n\n if engine_mode not in VALID_DB_ENGINE_MODES:\n raise ValueError(\n \"DBCluster EngineMode must be one of: %s\" % \", \".join(VALID_DB_ENGINE_MODES)\n )\n return engine_mode",
"def test_mode_to_knx_wrong_value(self):\n with pytest.raises(ConversionError):\n DPTHVACMode.to_knx(5)",
"def _verify_ccd_operation_mode(self, ccd_operation_mode):\n em_mode = ccd_operation_mode['em_mode']\n em_gain = ccd_operation_mode['em_gain']\n hss = ccd_operation_mode['hss']\n preamp = ccd_operation_mode['preamp']\n binn = ccd_operation_mode['binn']\n t_exp = ccd_operation_mode['t_exp']\n ccd_temp = ccd_operation_mode['ccd_temp']\n\n dic_keywords_list = [\n 'binn', 'ccd_temp', 'em_gain', 'em_mode', 'hss', 'preamp', 't_exp']\n\n for key in ccd_operation_mode.keys():\n if key not in dic_keywords_list:\n raise ValueError(\n f'The name provided is not a CCD parameter: {key}')\n\n if list(ccd_operation_mode.keys()).sort() != dic_keywords_list.sort():\n raise ValueError(\n 'There is a missing parameter of the CCD operation mode')\n\n if em_mode not in [0, 1]:\n raise ValueError(\n f'Invalid value for the EM mode: {em_mode}')\n if em_mode == 0:\n if em_gain != 1:\n raise ValueError(\n 'The EM Gain must be 1 for the Conventional'\n + f' Mode: {em_gain}')\n else:\n if em_gain not in [float, int]:\n raise ValueError(\n f'The EM gain must be a number: {em_gain}')\n elif em_gain < 2 or em_gain > 300:\n raise ValueError(\n f'EM gain out of range [2, 300]: {em_gain}')\n\n if preamp not in [1, 2]:\n raise ValueError(\n f'Invalid value for the pre-amplification: {preamp}')\n\n if hss not in [0.1, 1, 10, 20, 30]:\n raise ValueError(\n f'Invalid value for the Readout rate: {hss}')\n\n if binn not in [1, 2]:\n raise ValueError(\n f'Invalid value for the binning: {bin}')\n\n if type(t_exp) not in [float, int]:\n raise ValueError(\n f'The exposure time must be a number: {t_exp}')\n elif ccd_operation_mode['t_exp'] < 1e-5:\n raise ValueError(\n f'Invalid value for the exposure time: {t_exp}')\n\n if type(ccd_temp) not in [float, int]:\n raise ValueError(\n f'The CCD temperature must be a number: {ccd_temp}')\n if ccd_temp < -80 or ccd_temp > 20:\n raise ValueError(\n f'CCD temperature out of range [-80, 20]: {ccd_temp}')",
"def set_mode(self, mode):\n if mode == 'train':\n self.net.train()\n elif mode == 'eval':\n self.net.eval()\n else:\n raise ValueError(\n \"Got invalid mode '{}'. Valid options are 'train' and 'eval'.\".format(mode))",
"def validate_mode_9(self):\n\n if self.mode9:\n raise PermissionDenied('You cannot access this now!')",
"def test_wrong_mode(self):\n self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')",
"async def set_op_mode(self, mode):\n\n if mode not in self.op_modes:\n raise ValueError(f\"Invalid operating mode: {mode}\")\n keys = self._get_cmd_keys(CMD_STATE_OP_MODE)\n mode_value = self.model_info.enum_value(keys[2], DHumMode[mode].value)\n await self.set(keys[0], keys[1], key=keys[2], value=mode_value)",
"def set_mode(self, mode: str) -> None:\n # Not all programs are fully supported by the current\n # OpenInterface API version. The known restricitons are:\n # - The 'Calibration' and 'TightnessTest' programms cannot\n # be started through the API.\n # - The 'Dry' program does not expose all it's parameters\n # (see github.com/buchi-labortechnik-ag/openinterface_rotavapor/issues/1)\n return self.send(self.cmd.SET_MODE, mode)",
"def mode (self, mode) :\r\n self.mode_ = mode",
"def test_custom_validate(mode, factory):\n v = factory()\n assert type(v).v.validate_mode[0] == getattr(Validate, mode)\n assert v.v == (0 if mode != \"NoOp\" else None)\n if mode == \"NoOp\":\n return\n\n v.v = 1\n assert v.v == 1\n\n with pytest.raises(TypeError):\n v.v = None\n assert v.v == 1\n with pytest.raises(ValueError):\n v.v = 4\n\n v_member = type(factory()).v\n with pytest.raises(TypeError):\n v_member.do_validate(v, 1, None)\n assert v.v == 1\n with pytest.raises(ValueError):\n v_member.do_full_validate(v, 1, 4)\n\n with pytest.raises(TypeError) as excinfo:\n type(v).v.set_validate_mode(getattr(Validate, mode), 1)\n assert \"str\" in excinfo.exconly()",
"def test_validate_mode(self):\n with self.assertRaises(ValidationError):\n self.make_site(\n name='New site',\n url='http://example.com',\n mode='uGaj9eicQueib1th',\n )",
"def check_mode(args, accepted, functor=None, location=None, database=None, **kwdargs):\n for i, mode in enumerate(accepted):\n correct = True\n for a, t in zip(args, mode):\n name, test = mode_types[t]\n if not test(a):\n correct = False\n break\n if correct:\n return i\n if database and location:\n location = database.lineno(location)\n else:\n location = None\n raise CallModeError(functor, args, accepted, location=location)",
"def test_error_on_different_mode():\n metric = AUROC()\n # pass in multi-class data\n metric.update(torch.randn(10, 5).softmax(dim=-1), torch.randint(0, 5, (10, )))\n with pytest.raises(ValueError, match=r\"The mode of data.* should be constant.*\"):\n # pass in multi-label data\n metric.update(torch.rand(10, 5), torch.randint(0, 2, (10, 5)))",
"def mode(self, value):\n self._set_attr('mode', value)"
]
| [
"0.7941835",
"0.732205",
"0.7257453",
"0.6744612",
"0.66706824",
"0.64615804",
"0.64179987",
"0.6403857",
"0.62705153",
"0.6223311",
"0.6173096",
"0.6138948",
"0.6084667",
"0.6069799",
"0.5938012",
"0.59287703",
"0.5881334",
"0.58438796",
"0.582425",
"0.57956535",
"0.5769798",
"0.5685399",
"0.5666253",
"0.5634729",
"0.56239057",
"0.5607097",
"0.5606313",
"0.55971986",
"0.5588164",
"0.55840063"
]
| 0.7829693 | 1 |
Returns training loss tensor. Training loss is different from the loss reported on the tensorboard as we should respect the example weights when computing the gradient. L = sum_{i} w_{i} l_{i} / B where B is the number of examples in the batch, l_{i}, w_{i} are individual losses, and example weight. | def _training_loss(features,
labels,
logits,
loss_fn,
weight_column_name=None,
head_name=None):
with ops.name_scope(None, "training_loss",
tuple(six.itervalues(features)) +
(labels, logits)) as name:
loss, weighted_average_loss = _loss(
loss_fn(logits, labels),
_weight_tensor(features, weight_column_name),
name=name)
# The tag must be same as the tag for eval loss, so the losses will show up
# in the same graph in tensorboard.
logging_ops.scalar_summary(
_summary_key(head_name, "loss"), weighted_average_loss)
return loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_loss_weight(self) -> torch.Tensor:\n n_pos: torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)",
"def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss",
"def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss",
"def lfads_training_loss(params, lfads_hps, key, x_bxt, kl_scale, keep_rate):\n losses = lfads_losses(params, lfads_hps, key, x_bxt, kl_scale, keep_rate)\n return losses['total']",
"def kl_loss(self):\n return sum([p.kl_loss() for p in self.parameters])",
"def _loss(W):\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss",
"def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)",
"def loss(self, dataset=None, loss=None, training=None):\n # Recover the defaults, if missing\n dataset, loss = self._resolve_defaults(trainset=dataset, loss=loss)\n # Sample the train batch\n inputs, targets = dataset.sample(self._config)\n # Guess whether computation is for training, if necessary\n if training is None:\n training = torch.is_grad_enabled()\n # Forward pass\n return loss(self.run(inputs), targets, self._params)",
"def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))",
"def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits",
"def calculate_loss(y, tx, w):\n txw = np.matmul(tx, w)\n return np.logaddexp(0.0, txw).sum() - np.dot(y, txw)",
"def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss",
"def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. + self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)",
"def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)",
"def tower_loss(scope):\n\t# Get images and flows for Flownet.\n \timg1, img2, flo = flownet_input.inputs(False, FLAGS.data_dir, FLAGS.batch_size)\n\n\t# Build a Graph that computes predictions from the inference model.\n\tlogits = flowNet.inference(img1, img2, FLAGS.batch_size)\n\n\t# Add to the Graph the Ops for loss calculation.\n\t_ = flowNet.loss(logits, flo)\n\n\t# Assemble all of the losses for the current tower only.\n\tlosses = tf.get_collection('losses', scope)\n\n\t# Calculate the total loss for the current tower.\n\ttotal_loss = tf.add_n(losses, name='total_loss')\n\n\t# Attach a scalar summary to all individual losses and the total loss; do the\n\t# same for the averaged version of the losses.\n\tfor l in losses + [total_loss]:\n\t\t# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n\t\t# session. This helps the clarity of presentation on tensorboard.\n\t\tloss_name = re.sub('%s_[0-9]*/' % flowNet.TOWER_NAME, '', l.op.name)\n\t\ttf.summary.scalar(loss_name, l)\n\n\treturn total_loss",
"def loss_op(self):\n return self.loss",
"def _calc_loss(self, fvs, labels, w, b):\n\n loss = 0.5 * self.lda * (np.linalg.norm(w) ** 2)\n tmp = sum(map(lambda x, y: (x - y) ** 2, fvs.dot(w) + b, labels))\n loss += tmp / fvs.shape[0]\n\n return loss",
"def _get_ner_loss(self):\n # per example loss\n no_entity_id = self.config[\"model\"][\"ner\"][\"no_entity_id\"]\n logits_shape = tf.shape(self.ner_logits_train)\n labels_shape = logits_shape[:3]\n labels = get_dense_labels_from_indices(indices=self.ner_labels_ph, shape=labels_shape, no_label_id=no_entity_id)\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=self.ner_logits_train\n ) # [batch_size, num_tokens, num_tokens]\n\n # mask\n maxlen = logits_shape[1]\n span_mask = upper_triangular(maxlen, dtype=tf.float32)\n sequence_mask = tf.sequence_mask(self.num_tokens_ph, dtype=tf.float32) # [batch_size, num_tokens]\n mask = span_mask[None, :, :] * sequence_mask[:, None, :] * sequence_mask[:, :, None] # [batch_size, num_tokens, num_tokens]\n\n masked_per_example_loss = per_example_loss * mask\n total_loss = tf.reduce_sum(masked_per_example_loss)\n num_valid_spans = tf.cast(tf.reduce_sum(mask), tf.float32)\n loss = total_loss / num_valid_spans\n\n loss *= self.config[\"model\"][\"ner\"][\"loss_coef\"]\n return loss",
"def compute_loss(self,\n inputs: Union[\n # Tuple of (features, labels).\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor\n ],\n # Tuple of (features, labels, sample weights).\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor,\n Optional[tf.Tensor]\n ]\n ],\n training: bool = False) -> tf.Tensor:\n\n # We need to work around a bug in mypy - tuple narrowing\n # based on length checks doesn't work.\n # See https://github.com/python/mypy/issues/1178 for details.\n if len(inputs) == 2:\n inputs = cast(\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor\n ],\n inputs\n )\n features, labels = inputs\n sample_weight = None\n elif len(inputs) == 3:\n inputs = cast(\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor,\n Optional[tf.Tensor],\n ],\n inputs\n )\n features, labels, sample_weight = inputs\n else:\n raise ValueError(\n \"Inputs should either be a tuple of (features, labels), \"\n \"or a tuple of (features, labels, sample weights). \"\n \"Got a length {len(inputs)} tuple instead: {inputs}.\"\n )\n\n outputs = self(features, training=training)\n\n loss = self._task(labels, outputs, sample_weight=sample_weight)\n loss = tf.reduce_mean(loss)\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n return loss / tf.distribute.get_strategy().num_replicas_in_sync",
"def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)",
"def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost",
"def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)",
"def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss",
"def loss(self):\n return self._loss",
"def _get_loss(self, logit, label):\n logit = fluid.layers.transpose(logit, [0, 2, 3, 1])\n label = fluid.layers.transpose(label, [0, 2, 3, 1])\n mask = label != self.ignore_index\n mask = fluid.layers.cast(mask, 'float32')\n loss, probs = fluid.layers.softmax_with_cross_entropy(\n logit,\n label,\n ignore_index=self.ignore_index,\n return_softmax=True,\n axis=-1)\n\n loss = loss * mask\n avg_loss = fluid.layers.mean(loss) / (\n fluid.layers.mean(mask) + self.EPS)\n\n label.stop_gradient = True\n mask.stop_gradient = True\n return avg_loss",
"def kl_loss_batch(self):\n return sum([e for m in self.modules for e in m._kl_losses])",
"def loss(self, log_prob, C):\n W = self.W\n T = self.T\n average_log_loss = -C * log_prob\n W_norm = torch.sum(torch.tensor([(torch.norm(Wy.double())) ** 2 for Wy in W])) / 2\n T_norm = torch.sum(torch.tensor([torch.sum(torch.tensor([Tij ** 2 for Tij in row])) for row in T])) / 2\n loss = average_log_loss + W_norm + T_norm\n return loss",
"def loss(\n self,\n model_in: torch.Tensor,\n target: Optional[torch.Tensor] = None,\n idx=None,\n next_obs=None,\n eval=False,\n ) -> torch.Tensor:\n loss = self._vaml_loss(model_in, target, idx=idx, next_obs=next_obs, eval=False)\n if self.add_mse:\n loss += self._mse_loss(model_in, target).mean(-1, keepdim=True)\n return loss.mean()"
]
| [
"0.6821857",
"0.6805423",
"0.6794827",
"0.666859",
"0.6659326",
"0.66584104",
"0.6639398",
"0.6634321",
"0.66334075",
"0.6623661",
"0.6581316",
"0.6571407",
"0.65705365",
"0.65537775",
"0.6537383",
"0.6529755",
"0.6529278",
"0.6529041",
"0.65235376",
"0.65120804",
"0.6492314",
"0.6483883",
"0.64620626",
"0.6424367",
"0.6401764",
"0.63771975",
"0.6344502",
"0.6342215",
"0.63342875",
"0.6332996"
]
| 0.6987192 | 0 |
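The record above describes an example-weighted training loss. As a minimal, framework-free sketch of that formula (L = sum_i w_i l_i / B), independent of the TensorFlow code in the record, it could be written as:

# Hypothetical illustration of the weighted training loss described above:
# L = sum_i(w_i * l_i) / B, where B is the batch size.
def weighted_training_loss(per_example_losses, example_weights=None):
    """Average per-example losses, respecting optional example weights."""
    batch_size = len(per_example_losses)
    if example_weights is None:
        example_weights = [1.0] * batch_size
    weighted_sum = sum(w * l for w, l in zip(example_weights, per_example_losses))
    return weighted_sum / batch_size

# Example: two samples, the second weighted twice as heavily.
print(weighted_training_loss([0.3, 0.9], example_weights=[1.0, 2.0]))  # (0.3 + 1.8) / 2 = 1.05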
Tries to determine the libc version that the file executable (which defaults to the Python interpreter) is linked against. Returns a tuple of strings (lib, version), which default to the given parameters in case the lookup fails. Note that the function has intimate knowledge of how different libc versions add symbols to the executable and thus is probably only usable for executables compiled using gcc. The file is read and scanned in chunks of chunksize bytes. | def libc_ver(executable=None, lib='', version='', chunksize=16384):
if not executable:
try:
ver = os.confstr('CS_GNU_LIBC_VERSION')
# parse 'glibc 2.28' as ('glibc', '2.28')
parts = ver.split(maxsplit=1)
if len(parts) == 2:
return tuple(parts)
except (AttributeError, ValueError, OSError):
# os.confstr() or CS_GNU_LIBC_VERSION value not available
pass
executable = sys.executable
if not executable:
# sys.executable is not set.
return lib, version
V = _comparable_version
# We use os.path.realpath()
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
with open(executable, 'rb') as f:
binary = f.read(chunksize)
pos = 0
while pos < len(binary):
if b'libc' in binary or b'GLIBC' in binary:
m = _libc_search.search(binary, pos)
else:
m = None
if not m or m.end() == len(binary):
chunk = f.read(chunksize)
if chunk:
binary = binary[max(pos, len(binary) - 1000):] + chunk
pos = 0
continue
if not m:
break
libcinit, glibc, glibcversion, so, threads, soversion = [
s.decode('latin1') if s is not None else s
for s in m.groups()]
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif V(glibcversion) > V(version):
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and (not version or V(soversion) > V(version)):
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
return lib, version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_version(e):\n args = e.split()\n args += ['-shared', '-Wl,-t']\n p = subprocess.Popen(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n candidates = [x for x in p.stdout if 'libstdc++.so' in x]\n if not candidates:\n return ''\n assert len(candidates) == 1\n libstdcxx = parse_ld_line(candidates[-1])\n\n p = subprocess.Popen(['readelf', '-V', libstdcxx], stdout=subprocess.PIPE)\n versions = [parse_readelf_line(x)\n for x in p.stdout.readlines() if 'Name: GLIBCXX' in x]\n last_version = sorted(versions, cmp = cmp_ver)[-1]\n return encode_ver(last_version)",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read_text(), re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Failed to find version string\")",
"def get_gcc_ver(exe=\"gcc\"):\n cmd = [exe, '-v']\n major = -1\n minor = -1\n patch = -1\n raw = sub.check_output(cmd, stderr=sub.STDOUT).decode('ascii').lower().split('\\n')\n for line in raw:\n if line.startswith('gcc version'):\n tokens = line.split()\n # we obtain a version string such as \"5.4.0\"\n verstr = tokens[2].strip()\n vertup = verstr.split('.')\n major = int(vertup[0])\n minor = int(vertup[1])\n patch = int(vertup[2])\n ver = major, minor, patch\n return ver",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def _find_ld_version():\n if sys.platform == 'darwin':\n return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)\n else:\n return _find_exe_version('ld -v')",
"def find_version(fname):\n version = ''\n with open(fname, 'r') as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError('Cannot find version information')\n return version",
"def _find_clang_format():\n required_clang_format_major = 10\n\n def parse_version(bin_path):\n \"\"\"\n Get clang-format version string. Returns None if parsing fails.\n \"\"\"\n version_str = subprocess.check_output([bin_path, \"--version\"\n ]).decode(\"utf-8\").strip()\n match = re.match(\"^clang-format version ([0-9.]*).*$\", version_str)\n return match.group(1) if match else None\n\n def parse_version_major(bin_path):\n \"\"\"\n Get clang-format major version. Returns None if parsing fails.\n \"\"\"\n version = parse_version(bin_path)\n return int(version.split(\".\")[0]) if version else None\n\n def find_bin_by_name(bin_name):\n \"\"\"\n Returns bin path if found. Otherwise, returns None.\n \"\"\"\n bin_path = shutil.which(bin_name)\n if bin_path is None:\n return None\n else:\n major = parse_version_major(bin_path)\n return bin_path if major == required_clang_format_major else None\n\n bin_path = find_bin_by_name(\"clang-format\")\n if bin_path is not None:\n bin_version = parse_version(bin_path)\n return bin_path, bin_version\n\n bin_path = find_bin_by_name(f\"clang-format-{required_clang_format_major}\")\n if bin_path is not None:\n bin_version = parse_version(bin_path)\n return bin_path, bin_version\n\n raise RuntimeError(\n f\"clang-format version {required_clang_format_major} not found. \"\n \"See http://www.open3d.org/docs/release/contribute/styleguide.html#style-guide \"\n \"for help on clang-format installation.\")",
"def get_compiler_versions():\n gcc = _find_exe_version('gcc -dumpversion')\n ld = _find_ld_version()\n dllwrap = _find_exe_version('dllwrap --version')\n return gcc, ld, dllwrap",
"def find_version(*file_paths):\n with open(os.path.join(abs_base_dir, *file_paths), 'r') as fp:\n version_file = fp.read()\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def gcc_version(gcc):\n\tversion = \"\"\n\ttry:\n\t\tversion = os.popen(\"%s --version\" % gcc).readline().split()[-1]\n\texcept:\n\t\tpass\n\treturn version",
"def extract_gcc_binaries():\n patterns = [\n \"/opt/local/bin/g++-mp-[0-9]*.[0-9]*\",\n \"/opt/local/bin/g++-mp-[0-9]*\",\n \"/usr/local/bin/g++-[0-9]*.[0-9]*\",\n \"/usr/local/bin/g++-[0-9]*\",\n ]\n if platform.system() == \"Darwin\":\n gcc_binaries = []\n for pattern in patterns:\n gcc_binaries += glob.glob(pattern)\n gcc_binaries.sort()\n if gcc_binaries:\n _, gcc = os.path.split(gcc_binaries[-1])\n return gcc\n else:\n return None\n else:\n return None",
"def load_linux_so():\n shared_name = get_project_root() / \"build/libastyle.so\"\n\n shared = str(pl.Path(shared_name).absolute())\n # file_ = {f for f in pl.Path().iterdir() if f.name == shared_name}\n\n try:\n libc = cdll.LoadLibrary(shared)\n except OSError as err:\n # \"cannot open shared object file: No such file or directory\"\n print(err)\n raise FileNotFoundError(\"Cannot find \" + shared)\n return libc",
"def src_get_version():\n return ffi.string(_lib.src_get_version()).decode()",
"def _sys_version(sys_version=None):\n # Get the Python version\n if sys_version is None:\n sys_version = sys.version\n\n # Try the cache first\n result = _sys_version_cache.get(sys_version, None)\n if result is not None:\n return result\n\n # Parse it\n if 'IronPython' in sys_version:\n # IronPython\n name = 'IronPython'\n if sys_version.startswith('IronPython'):\n match = _ironpython_sys_version_parser.match(sys_version)\n else:\n match = _ironpython26_sys_version_parser.match(sys_version)\n\n if match is None:\n raise ValueError(\n 'failed to parse IronPython sys.version: %s' %\n repr(sys_version))\n\n version, alt_version, compiler = match.groups()\n buildno = ''\n builddate = ''\n\n elif sys.platform.startswith('java'):\n # Jython\n name = 'Jython'\n match = _sys_version_parser.match(sys_version)\n if match is None:\n raise ValueError(\n 'failed to parse Jython sys.version: %s' %\n repr(sys_version))\n version, buildno, builddate, buildtime, _ = match.groups()\n if builddate is None:\n builddate = ''\n compiler = sys.platform\n\n elif \"PyPy\" in sys_version:\n # PyPy\n name = \"PyPy\"\n match = _pypy_sys_version_parser.match(sys_version)\n if match is None:\n raise ValueError(\"failed to parse PyPy sys.version: %s\" %\n repr(sys_version))\n version, buildno, builddate, buildtime = match.groups()\n compiler = \"\"\n\n else:\n # CPython\n match = _sys_version_parser.match(sys_version)\n if match is None:\n raise ValueError(\n 'failed to parse CPython sys.version: %s' %\n repr(sys_version))\n version, buildno, builddate, buildtime, compiler = \\\n match.groups()\n\n # XXX: RUSTPYTHON support\n if \"rustc\" in sys_version:\n name = \"RustPython\"\n else:\n name = 'CPython'\n\n if builddate is None:\n builddate = ''\n elif buildtime:\n builddate = builddate + ' ' + buildtime\n\n if hasattr(sys, '_git'):\n _, branch, revision = sys._git\n elif hasattr(sys, '_mercurial'):\n _, branch, revision = sys._mercurial\n else:\n branch = ''\n revision = ''\n\n # Add the patchlevel version if missing\n l = version.split('.')\n if len(l) == 2:\n l.append('0')\n version = '.'.join(l)\n\n # Build and cache the result\n result = (name, version, branch, revision, buildno, builddate, compiler)\n _sys_version_cache[sys_version] = result\n return result",
"def get_version():\n version = \"unknown\"\n try:\n version_file = open(VERSIONFILE, \"r\")\n for line in version_file:\n if line.startswith('__version__'):\n version = line.split(\"'\")[1]\n break\n except EnvironmentError:\n pass # Okay, there is no version file.\n return version",
"def find_version(*file_paths):\n version_file = Path(__file__).parent.joinpath(*file_paths)\n with open(str(version_file), 'r') as openf:\n data = openf.read()\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n data,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")",
"def find_version(*file_paths):\n # Source: packaging.python.org/guides/single-sourcing-package-version\n\n def read(*parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *parts), 'r') as fp:\n return fp.read()\n\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def get_version(*file_paths):\n filename = os.path.join(os.path.dirname(__file__), *file_paths)\n version_file = open(filename).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def get_version(*file_paths):\n filename = os.path.join(os.path.dirname(__file__), *file_paths)\n version_file = open(filename).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def get_version(*file_paths):\n filename = os.path.join(os.path.dirname(__file__), *file_paths)\n version_file = open(filename).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError('Unable to find version string.')",
"def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version",
"def version(version_file=default_version_file, osp_package=default_osp_package):\n\n if os.path.exists(version_file):\n (version_string, version_name) = version_from_file(version_file)\n\n else:\n package_info = get_package_info(osp_package)\n repo_name = get_package_repo_name(package_info)\n version_string = get_version_from_repo_name(repo_name)\n\n if version_string == None:\n version_string = \"unknown\"\n \n return version_string",
"def version_from_file(version_filename):\n\n version_pattern = \"Red Hat OpenStack Platform release ([\\d.]+) \\((.*)\\)\"\n version_re = re.compile(version_pattern)\n \n # should try and check. A single line with a newline\n \n version_file = open(version_filename)\n release_string = version_file.read().strip()\n version_file.close()\n \n release_match = version_re.match(release_string)\n\n if release_match == None:\n return (None,None)\n\n return release_match.groups()",
"def get_linked_libpython():\n if is_windows():\n return\n libdl = ctypes.CDLL(ctypes.util.find_library(\"dl\"))\n libdl.dladdr.argtypes = [ctypes.c_void_p, ctypes.POINTER(_Dl_info)]\n libdl.dladdr.restype = ctypes.c_int\n\n dlinfo = _Dl_info()\n retcode = libdl.dladdr(\n ctypes.cast(ctypes.pythonapi.Py_GetVersion, ctypes.c_void_p),\n ctypes.pointer(dlinfo))\n if retcode == 0: # means error\n return\n path = os.path.realpath(dlinfo.dli_fname.decode())\n if path == os.path.realpath(sys.executable):\n return\n return path",
"def find_version():\n regex = r\"^ATRAM_VERSION = ['\\\"]v?([^'\\\"]*)['\\\"]\"\n with open(\"./lib/db.py\", 'r') as f:\n match = re.search(regex, f.read(), re.M)\n if match:\n return match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")",
"def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)",
"def read_version():\n # code parts were taken from here https://stackoverflow.com/a/67692\n\n path2setup = os.path.dirname(__file__)\n version_file = os.path.abspath(\n os.path.join(path2setup, \"diffusion_maps\", \"version.py\"))\n\n spec = importlib.util.spec_from_file_location(\"version\", version_file)\n version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(version)\n return version.version.v_short"
]
| [
"0.67426795",
"0.57934326",
"0.55663675",
"0.555489",
"0.55024654",
"0.54960775",
"0.5471877",
"0.5459558",
"0.54391474",
"0.54132146",
"0.53969854",
"0.53964067",
"0.5367232",
"0.5360286",
"0.52679116",
"0.52131796",
"0.5212079",
"0.51907736",
"0.51898694",
"0.5185536",
"0.5179402",
"0.5179402",
"0.5179402",
"0.51713204",
"0.5157648",
"0.5155151",
"0.51551145",
"0.51289254",
"0.51214063",
"0.5116686"
]
| 0.7944829 | 0 |
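For the libc_ver record above, the same (lib, version) pair is available through the public platform.libc_ver() API; the printed values below are only illustrative of what a glibc-based system might report:

import platform, sys

# Inspect the libc that the running interpreter is linked against.
print(platform.libc_ver())                       # e.g. ('glibc', '2.28') -- value is illustrative

# The same lookup can be pointed at any ELF executable on disk;
# on non-ELF platforms the defaults ('', '') are returned.
print(platform.libc_ver(executable=sys.executable))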
Normalize the version and build strings and return a single version string using the format major.minor.build (or patchlevel). | def _norm_version(version, build=''):
l = version.split('.')
if build:
l.append(build)
try:
strings = list(map(str, map(int, l)))
except ValueError:
strings = l
version = '.'.join(strings[:3])
return version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version_string():\n major, minor, micro, patch, tag, relnum, is_release = VERSION\n\n version = '%s.%s' % (major, minor)\n\n if micro or patch:\n version += '.%s' % micro\n\n if patch:\n version += '.%s' % patch\n\n if tag != 'final':\n if tag == 'rc':\n version += ' RC'\n else:\n version += ' %s ' % tag\n\n version += '%s' % relnum\n\n if not is_release:\n version += ' (dev)'\n\n return version",
"def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()",
"def versionstr():\n return \"%d.%d.%d%s\" % (version[0], version[1], version[2],\n '-' + gitstr() if gitstr() else '')",
"def _build_version(self, version, num_of_digits):\n version = \"{}\".format(version).replace(\".\", \"\").replace(\" \", \"\").strip()\n num_of_digits_to_add = (num_of_digits - len(version))\n version += (\"0\" * num_of_digits_to_add)\n version = int(version)\n return version",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub",
"def format_release_version(version, build_id_to_inject):\n subs = version.split(\".\")\n subs[-1] = build_id_to_inject\n return '.'.join(subs)",
"def get_major_dot_minor_version(version):\n return '.'.join([str(v) for v in version[:2]])",
"def __buildVersion(version, versionPattern):\n patternParts = __splitVersionPattern(versionPattern)\n return patternParts['prefix'] + str(version).zfill(len(patternParts['padding'])) + patternParts['suffix']",
"def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def get_version():\n return '.'.join(map(str, VERSION))",
"def get_package_version():\n major, minor, micro, patch, tag, relnum = __version_info__\n\n version = '%s.%s' % (major, minor)\n\n if micro or patch:\n version += '.%s' % micro\n\n if patch:\n version += '.%s' % patch\n\n if tag != 'final':\n version += '%s%s' % (\n {\n 'alpha': 'a',\n 'beta': 'b',\n }.get(tag, tag),\n relnum)\n\n return version",
"def _canonicalize_version(_version: str) -> str:\n\n try:\n version = PythonVersion(_version)\n except InvalidVersion:\n return _version\n\n parts = []\n\n # Epoch\n if version.epoch != 0:\n parts.append(\"{0}!\".format(version.epoch))\n\n # Release segment\n # NB: This strips trailing '.0's to normalize\n parts.append(re.sub(r\"(\\.0)+$\", \"\", \".\".join(\n str(x) for x in version.release\n )))\n\n # Pre-release\n if version.pre is not None:\n parts.append(\"\".join(str(x) for x in version.pre))\n\n # Post-release\n if version.post is not None:\n parts.append(\".post{0}\".format(version.post))\n\n # Development release\n if version.dev is not None:\n parts.append(\".dev{0}\".format(version.dev))\n\n # Local version segment\n if version.local is not None:\n parts.append(\"+{0}\".format(version.local))\n\n return \"\".join(parts)",
"def make_version_string(version_info):\n\n version_info = list(version_info)\n\n numbers = []\n while version_info and isinstance(version_info[0], int):\n numbers.append(str(version_info.pop(0)))\n version_str = '.'.join(numbers)\n\n if not version_info:\n return version_str\n\n assert len(version_info) % 2 == 0\n while version_info:\n suffix_type = version_info.pop(0)\n suffix_number = version_info.pop(0)\n\n if suffix_type in {'a', 'b', 'rc'}:\n suffix = f'{suffix_type}{suffix_number}'\n elif suffix_type in {'dev', 'post'}:\n suffix = f'.{suffix_type}{suffix_number}'\n else:\n raise ValueError(f\"Unknown suffix type '{suffix_type}'\")\n version_str += suffix\n\n return version_str",
"def GetVersionStr(version):\n if isinstance(version, str):\n version = int(version, 0)\n major = ((version>>24) & 0xff)\n minor = ((version>>16) & 0xff)\n release = ((version>> 8) & 0xff)\n subrelease = (version & 0xff)\n if major >= 6:\n if subrelease > 0:\n subreleasestr = str(subrelease)\n else:\n subreleasestr = ''\n else:\n if subrelease > 0:\n subreleasestr = str(chr(subrelease+ord('a')-1))\n else:\n subreleasestr = ''\n return \"{:d}.{:d}.{:d}{}{}\".format( major, minor, release, '.' if (major >= 6 and subreleasestr != '') else '', subreleasestr)",
"def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))",
"def semantic_version(self) -> str:\n\n version_core = f\"{self.major_version}.{self.minor_version}.{self.patch_version}\"\n sep = \"-\" if self.pre_release != \"\" else \"\"\n\n return f\"{version_core}{sep}{self.pre_release}\"",
"def get_major_version(version):\n return str(check_version(version)[0])",
"def get_version():\n return '%d.%d.%d' % version_info",
"def generate_version(parts):\n\n version = '.'.join([str(part) for part in parts[0:2]])\n if len(parts) > 3:\n version += parts[3]\n if len(parts) > 4:\n version += str(parts[4])\n return version",
"def _convert_version(tup):\n ret_val = str(tup[0]) # first is always digit\n next_sep = \".\" # separator for next extension, can be \"\" or \".\"\n for x in tup[1:]:\n if isinstance(x, int):\n ret_val += next_sep + str(x)\n next_sep = '.'\n continue\n first_letter = x[0].lower()\n next_sep = ''\n if first_letter in 'abcr':\n ret_val += 'rc' if first_letter == 'r' else first_letter\n elif first_letter in 'pd':\n ret_val += '.post' if first_letter == 'p' else '.dev'\n return ret_val",
"def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring",
"def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])",
"def _version_to_shorthand(version):\n parts = version.split('.')\n if len(parts) != 2 and len(parts) != 3:\n tmpl = 'Version string must be like X.Y or X.Y.Z, not `{}`'\n raise ValueError(tmpl.format(version))\n return parts[0] + parts[1]",
"def __format_golang_version(self, version):\n if '.' in version and version[0].isdigit():\n version = 'v' + version\n return version",
"def get_version(version=VERSION, date=DATE):\n return \"JoMRS v{} Modular Rigging System | last update {}\".format(\n \".\".join([i for i in version]), \"/\".join([x for x in date])\n )",
"def get_version(version_tuple):\n return \".\".join(map(str, version_tuple))",
"def version_major_minor(version_string):\n return '.'.join(version_string.split('.')[0:2])",
"def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])"
]
| [
"0.721763",
"0.71875566",
"0.7102432",
"0.7024088",
"0.70059144",
"0.68267673",
"0.6767354",
"0.6754383",
"0.6735615",
"0.67306393",
"0.67295843",
"0.66853565",
"0.66841066",
"0.6682962",
"0.6672119",
"0.66486835",
"0.6642322",
"0.65846986",
"0.65199375",
"0.64731044",
"0.6456186",
"0.6434641",
"0.6379373",
"0.6360189",
"0.6359126",
"0.6333992",
"0.62949264",
"0.62829846",
"0.62657404",
"0.6238582"
]
| 0.764916 | 0 |
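The _norm_version record above boils down to "keep at most three components, numeric where possible". Since the helper is private, a hedged re-statement of that behaviour (mirroring the code in the record rather than importing it) is:

def norm_version(version, build=''):
    """Mirror of the private helper above: join at most three components."""
    parts = version.split('.')
    if build:
        parts.append(build)
    try:
        parts = [str(int(p)) for p in parts]
    except ValueError:
        pass  # non-numeric components are kept verbatim, as in the original
    return '.'.join(parts[:3])

print(norm_version('6.1', build='7601'))   # '6.1.7601'
print(norm_version('10.00.19041.0'))       # '10.0.19041'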
Tries to figure out the OS version in use and returns a tuple (system, release, version). It uses the "ver" shell command for this, which is known to exist on Windows and DOS (and possibly other platforms). In case this fails, the given parameters are used as defaults. | def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32', 'win16', 'dos')):
if sys.platform not in supported_platforms:
return system, release, version
# Try some common cmd strings
import subprocess
for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
try:
info = subprocess.check_output(cmd,
stdin=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
text=True,
encoding="locale",
shell=True)
except (OSError, subprocess.CalledProcessError) as why:
#print('Command %s failed: %s' % (cmd, why))
continue
else:
break
else:
return system, release, version
# Parse the output
info = info.strip()
m = _ver_output.match(info)
if m is not None:
system, release, version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system, release, version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):\n # Import the needed APIs\n try:\n import java.lang\n except ImportError:\n return release, vendor, vminfo, osinfo\n\n vendor = _java_getprop('java.vendor', vendor)\n release = _java_getprop('java.version', release)\n vm_name, vm_release, vm_vendor = vminfo\n vm_name = _java_getprop('java.vm.name', vm_name)\n vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)\n vm_release = _java_getprop('java.vm.version', vm_release)\n vminfo = vm_name, vm_release, vm_vendor\n os_name, os_version, os_arch = osinfo\n os_arch = _java_getprop('java.os.arch', os_arch)\n os_name = _java_getprop('java.os.name', os_name)\n os_version = _java_getprop('java.os.version', os_version)\n osinfo = os_name, os_version, os_arch\n\n return release, vendor, vminfo, osinfo",
"def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]",
"def osversion():\n return platform()",
"def mac_ver(release='', versioninfo=('', '', ''), machine=''):\n\n # First try reading the information from an XML file which should\n # always be present\n info = _mac_ver_xml()\n if info is not None:\n return info\n\n # If that also doesn't work return the default values\n return release, versioninfo, machine",
"def systemversionstr():\n return platform.uname().system",
"def _get_release_infos():\n \n # support RHEL or CentOS, we don't care about the rest...\n with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):\n infos = run('cat /etc/redhat-release')\n \n m = _lsb_release_version.match(infos)\n if m is not None:\n return tuple(m.groups())\n else:\n abort('OS not supported.')",
"def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetOsVersion', self.handle)",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)",
"async def osversion(self):\n\n await self.bot.say(box(release(), 'Bash'))",
"def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions",
"def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"",
"def system_alias(system, release, version):\n if system == 'SunOS':\n # Sun's OS\n if release < '5':\n # These releases use the old name SunOS\n return system, release, version\n # Modify release (marketing release = SunOS release - 3)\n l = release.split('.')\n if l:\n try:\n major = int(l[0])\n except ValueError:\n pass\n else:\n major = major - 3\n l[0] = str(major)\n release = '.'.join(l)\n if release < '6':\n system = 'Solaris'\n else:\n # XXX Whatever the new SunOS marketing name is...\n system = 'Solaris'\n\n elif system in ('win32', 'win16'):\n # In case one of the other tricks\n system = 'Windows'\n\n # bpo-35516: Don't replace Darwin with macOS since input release and\n # version arguments can be different than the currently running version.\n\n return system, release, version",
"def getwindowsversion(): # real signature unknown; restored from __doc__\n pass",
"def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' % component)",
"def __getOracleVersion(self):\n linuxVendor = \"Oracle\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/oracle-release | sed 's/^Oracle Linux Server release /OL/' | sed 's/[ .].*//' \") # El8\n return linuxVendor.strip(), linuxRelease.strip() # strip()删除开头结尾的空格",
"def uname():\n global _uname_cache\n\n if _uname_cache is not None:\n return _uname_cache\n\n # Get some infos from the builtin os.uname API...\n try:\n system, node, release, version, machine = infos = os.uname()\n except AttributeError:\n system = sys.platform\n node = _node()\n release = version = machine = ''\n infos = ()\n\n if not any(infos):\n # uname is not available\n\n # Try win32_ver() on win32 platforms\n if system == 'win32':\n release, version, csd, ptype = win32_ver()\n machine = machine or _get_machine_win32()\n\n # Try the 'ver' system command available on some\n # platforms\n if not (release and version):\n system, release, version = _syscmd_ver(system)\n # Normalize system to what win32_ver() normally returns\n # (_syscmd_ver() tends to return the vendor name as well)\n if system == 'Microsoft Windows':\n system = 'Windows'\n elif system == 'Microsoft' and release == 'Windows':\n # Under Windows Vista and Windows Server 2008,\n # Microsoft changed the output of the ver command. The\n # release is no longer printed. This causes the\n # system and release to be misidentified.\n system = 'Windows'\n if '6.0' == version[:3]:\n release = 'Vista'\n else:\n release = ''\n\n # In case we still don't know anything useful, we'll try to\n # help ourselves\n if system in ('win32', 'win16'):\n if not version:\n if system == 'win32':\n version = '32bit'\n else:\n version = '16bit'\n system = 'Windows'\n\n elif system[:4] == 'java':\n release, vendor, vminfo, osinfo = java_ver()\n system = 'Java'\n version = ', '.join(vminfo)\n if not version:\n version = vendor\n\n # System specific extensions\n if system == 'OpenVMS':\n # OpenVMS seems to have release and version mixed up\n if not release or release == '0':\n release = version\n version = ''\n\n # normalize name\n if system == 'Microsoft' and release == 'Windows':\n system = 'Windows'\n release = 'Vista'\n\n vals = system, node, release, version, machine\n # Replace 'unknown' values with the more portable ''\n _uname_cache = uname_result(*map(_unknown_as_blank, vals))\n return _uname_cache",
"def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()",
"def tesseract_version():\n result = None\n\n try:\n command = _config.command\n\n if \" \" in command:\n command = _escape_path(command)\n\n command += \" --version\"\n status, output, err_string = _proc_exec_wait(command, True)\n\n if status == 0:\n result = LooseVersion(output.split()[1].lstrip(string.printable[10:]))\n except Exception as e:\n _warn(\n \"tesseract_version: Unable to retrieve Tesseract version. \"\n \"Error: {0}\".format(e)\n )\n\n return result",
"def get_kernel_version():\r\n try:\r\n return utils.run('uname -r').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1",
"def __getRedhatVersion(self):\n result, resultErr = self.ksp_ssh.ssh_execute_command('cat /etc/redhat-release')\n if \"Red\" in result:\n linuxVendor = \"RedHat\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/redhat-release | sed 's/^Red Hat Enterprise Linux.* release /EL/' | sed 's/[ .].*//'\")\n elif \"CentOS\" in result:\n linuxVendor = \"CentOS\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/os-release | grep -w \\\"VERSION\\\"| sed 's/VERSION=\\\"/EL/' | sed 's/[ .].*//'\")\n elif \"Cloud\" in result:\n linuxVendor = \"CloudLinux\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/redhat-release | sed 's/^CloudLinux.*release //' | sed 's/[ .].*//'\")\n else:\n linuxVendor = \"unknownVendor\"\n linuxRelease = \"unknownRelease\"\n return linuxVendor.strip(), linuxRelease.strip()",
"def os_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_version\")",
"def _sys_version(sys_version=None):\n # Get the Python version\n if sys_version is None:\n sys_version = sys.version\n\n # Try the cache first\n result = _sys_version_cache.get(sys_version, None)\n if result is not None:\n return result\n\n # Parse it\n if 'IronPython' in sys_version:\n # IronPython\n name = 'IronPython'\n if sys_version.startswith('IronPython'):\n match = _ironpython_sys_version_parser.match(sys_version)\n else:\n match = _ironpython26_sys_version_parser.match(sys_version)\n\n if match is None:\n raise ValueError(\n 'failed to parse IronPython sys.version: %s' %\n repr(sys_version))\n\n version, alt_version, compiler = match.groups()\n buildno = ''\n builddate = ''\n\n elif sys.platform.startswith('java'):\n # Jython\n name = 'Jython'\n match = _sys_version_parser.match(sys_version)\n if match is None:\n raise ValueError(\n 'failed to parse Jython sys.version: %s' %\n repr(sys_version))\n version, buildno, builddate, buildtime, _ = match.groups()\n if builddate is None:\n builddate = ''\n compiler = sys.platform\n\n elif \"PyPy\" in sys_version:\n # PyPy\n name = \"PyPy\"\n match = _pypy_sys_version_parser.match(sys_version)\n if match is None:\n raise ValueError(\"failed to parse PyPy sys.version: %s\" %\n repr(sys_version))\n version, buildno, builddate, buildtime = match.groups()\n compiler = \"\"\n\n else:\n # CPython\n match = _sys_version_parser.match(sys_version)\n if match is None:\n raise ValueError(\n 'failed to parse CPython sys.version: %s' %\n repr(sys_version))\n version, buildno, builddate, buildtime, compiler = \\\n match.groups()\n\n # XXX: RUSTPYTHON support\n if \"rustc\" in sys_version:\n name = \"RustPython\"\n else:\n name = 'CPython'\n\n if builddate is None:\n builddate = ''\n elif buildtime:\n builddate = builddate + ' ' + buildtime\n\n if hasattr(sys, '_git'):\n _, branch, revision = sys._git\n elif hasattr(sys, '_mercurial'):\n _, branch, revision = sys._mercurial\n else:\n branch = ''\n revision = ''\n\n # Add the patchlevel version if missing\n l = version.split('.')\n if len(l) == 2:\n l.append('0')\n version = '.'.join(l)\n\n # Build and cache the result\n result = (name, version, branch, revision, buildno, builddate, compiler)\n _sys_version_cache[sys_version] = result\n return result",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def version():\n return uname().version",
"def version():\n return uname().version",
"def check_os_version():\n if not version.is_supported_version():\n supported_releases = []\n for rel in version.SUPPORTED_VERSIONS:\n for ver in version.SUPPORTED_VERSIONS[rel]:\n supported_releases.append(rel.upper() + ' ' + ver)\n reporting.create_report([\n reporting.Title(\n 'The installed OS version is not supported for the in-place upgrade to the target RHEL version'\n ),\n reporting.Summary(\n 'The supported OS releases for the upgrade process:\\n'\n ' {}'.format('\\n'.join(supported_releases))\n ),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS),\n reporting.Groups([reporting.Groups.INHIBITOR]),\n # we want to set a static Key here because of different Title per path\n reporting.Key('1c7a98849a747ec9890f04bf4321de7280970715')\n ] + related)",
"def get_host_os_version(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetHostOsVersion', self.handle)"
]
| [
"0.75740063",
"0.7208389",
"0.7052544",
"0.69534546",
"0.6946279",
"0.6852539",
"0.6808164",
"0.6719663",
"0.6674075",
"0.66512173",
"0.6601551",
"0.6572575",
"0.6549043",
"0.6530099",
"0.6431139",
"0.6426023",
"0.6416867",
"0.6409583",
"0.6381106",
"0.6374783",
"0.6352063",
"0.6346869",
"0.63315773",
"0.6302122",
"0.62954307",
"0.6289827",
"0.6211736",
"0.6211736",
"0.6184875",
"0.61811155"
]
| 0.8433543 | 0 |
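The _syscmd_ver helper above is private, but the same kind of (system, release, version) triple is exposed through public platform calls; a small, platform-agnostic illustration (output values are hypothetical):

import platform

# Public counterparts of the (system, release, version) triple parsed above.
print(platform.system())    # e.g. 'Windows', 'Linux', 'Darwin'
print(platform.release())   # e.g. '10' or '5.15.0-91-generic'
print(platform.version())   # e.g. '10.0.19045'

# On Windows, win32_ver() gives the richer (release, version, csd, ptype) tuple
# that uname() consults before falling back to the 'ver' shell command.
if platform.system() == 'Windows':
    print(platform.win32_ver())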
Get macOS version information and return it as a tuple (release, versioninfo, machine), with versioninfo itself being a tuple (version, dev_stage, non_release_version). Entries which cannot be determined are set to the parameter values, which default to ''. All tuple entries are strings. | def mac_ver(release='', versioninfo=('', '', ''), machine=''):
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that also doesn't work return the default values
return release, versioninfo, machine | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):\n # Import the needed APIs\n try:\n import java.lang\n except ImportError:\n return release, vendor, vminfo, osinfo\n\n vendor = _java_getprop('java.vendor', vendor)\n release = _java_getprop('java.version', release)\n vm_name, vm_release, vm_vendor = vminfo\n vm_name = _java_getprop('java.vm.name', vm_name)\n vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)\n vm_release = _java_getprop('java.vm.version', vm_release)\n vminfo = vm_name, vm_release, vm_vendor\n os_name, os_version, os_arch = osinfo\n os_arch = _java_getprop('java.os.arch', os_arch)\n os_name = _java_getprop('java.os.name', os_name)\n os_version = _java_getprop('java.os.version', os_version)\n osinfo = os_name, os_version, os_arch\n\n return release, vendor, vminfo, osinfo",
"def get_version_info() -> Tuple[Text, Text]:",
"def _get_release_infos():\n \n # support RHEL or CentOS, we don't care about the rest...\n with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):\n infos = run('cat /etc/redhat-release')\n \n m = _lsb_release_version.match(infos)\n if m is not None:\n return tuple(m.groups())\n else:\n abort('OS not supported.')",
"def _syscmd_ver(system='', release='', version='',\n\n supported_platforms=('win32', 'win16', 'dos')):\n if sys.platform not in supported_platforms:\n return system, release, version\n\n # Try some common cmd strings\n import subprocess\n for cmd in ('ver', 'command /c ver', 'cmd /c ver'):\n try:\n info = subprocess.check_output(cmd,\n stdin=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n text=True,\n encoding=\"locale\",\n shell=True)\n except (OSError, subprocess.CalledProcessError) as why:\n #print('Command %s failed: %s' % (cmd, why))\n continue\n else:\n break\n else:\n return system, release, version\n\n # Parse the output\n info = info.strip()\n m = _ver_output.match(info)\n if m is not None:\n system, release, version = m.groups()\n # Strip trailing dots from version and release\n if release[-1] == '.':\n release = release[:-1]\n if version[-1] == '.':\n version = version[:-1]\n # Normalize the version and build strings (eliminating additional\n # zeros)\n version = _norm_version(version)\n return system, release, version",
"def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out",
"def version_info():\r\n return tuple(map(int, __version__.split('.')))",
"def macOS(self) -> str:\n\n result = subprocess.run([\"sw_vers\", \"-productVersion\"], stdout=subprocess.PIPE)\n\n return result.stdout.decode(\"utf-8\").strip()",
"def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers",
"def get_software_version(device, return_tuple:bool=False):\n try:\n out = device.parse('show version')\n except SubCommandFailure:\n log.error('Could not get device version information')\n return None\n\n # Version is the first token of software/system_version when split with ' '\n ver:str = out.q.contains('software').get_values('system_version', 0) \\\n .split(' ')[0]\n if return_tuple :\n # Tokenize system version into a list with delimiters '.', '(' and ')'\n ver:list = [ch for ch in re.split('\\.|\\(|\\)', ver) if ch != '']\n # Convert to int whereever possible\n for i in range(len(ver)):\n try:\n ver[i] = int(ver[i])\n except ValueError:\n pass\n return tuple(ver)\n else:\n return ver",
"def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date",
"def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'",
"def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' % component)",
"def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])",
"def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']",
"def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]",
"def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos",
"def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)",
"def get_version_info(self, key_name='ver_sw_release'):\n if key_name in self._msg_info_dict:\n val = self._msg_info_dict[key_name]\n return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)\n return None",
"def version() -> typing.Tuple[int, ...]:\n cmd = [DOT_BINARY, '-V']\n log.debug('run %r', cmd)\n proc = run_check(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='ascii')\n\n ma = re.search(r'graphviz version'\n r' '\n r'(\\d+)\\.(\\d+)'\n r'(?:\\.(\\d+)'\n r'(?:'\n r'~dev\\.\\d{8}\\.\\d{4}'\n r'|'\n r'\\.(\\d+)'\n r')?'\n r')?'\n r' ', proc.stdout)\n if ma is None:\n raise RuntimeError(f'cannot parse {cmd!r} output: {proc.stdout!r}')\n\n return tuple(int(d) for d in ma.groups() if d is not None)",
"def pyzmq_version_info():\n import re\n parts = re.findall('[0-9]+', __version__)\n parts = [ int(p) for p in parts ]\n if 'dev' in __version__:\n parts.append(float('inf'))\n return tuple(parts)",
"def get_release_info():\n major_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\" \"firefox_history_major_releases.json\"\n )\n if major_info is None:\n raise Exception(\"Failed to fetch major version info\")\n minor_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\"\n \"firefox_history_stability_releases.json\"\n )\n if minor_info is None:\n raise Exception(\"Failed to fetch minor version info\")\n\n return {\"major\": major_info, \"minor\": minor_info}",
"def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"",
"def os_release() -> str:\n\tassert(platform.system() == 'Darwin')\n\n\tver, _, _ = platform.mac_ver()\n\n\treturn ver",
"def get_version() -> Tuple[List[int], str]:\n global _SINGULARITY_VERSION # pylint: disable=global-statement\n global _SINGULARITY_FLAVOR # pylint: disable=global-statement\n if _SINGULARITY_VERSION is None:\n version_output = check_output( # nosec\n [\"singularity\", \"--version\"], universal_newlines=True\n ).strip()\n\n version_match = re.match(r\"(.+) version ([0-9\\.]+)\", version_output)\n if version_match is None:\n raise RuntimeError(\"Output of 'singularity --version' not recognized.\")\n\n version_string = version_match.group(2)\n _SINGULARITY_VERSION = [int(i) for i in version_string.split(\".\")]\n _SINGULARITY_FLAVOR = version_match.group(1)\n\n _logger.debug(\n f\"Singularity version: {version_string}\" \" ({_SINGULARITY_FLAVOR}.\"\n )\n return (_SINGULARITY_VERSION, _SINGULARITY_FLAVOR)",
"def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')",
"def version(self, *args, **kwargs):\n\n stdout, stderr = self.ctx.execute((self.exe, '--version'), quieter=1)\n\n m = re.match(\n r'(?:Apple clang .* \\(based on LLVM (\\S+)\\))'\n r'|'\n r'(?:clang version (\\S+))', stdout.decode())\n if m:\n if m.group(1):\n return m.group(1)\n else:\n return m.group(2)\n\n return None",
"def get_version(self):\n verxml = self._ncc.nxoscli('show version')\n self.logger.debug(verxml)\n verparsed = _begin_parse(verxml)\n sysmgrclischema = parse_get_nsmap(verparsed)\n self.logger.debug(\"NSMAP: {}\".format(sysmgrclischema))\n showversion = find_element(['sys_ver_str', 'chassis_id', 'host_name', 'loader_ver_str'], sysmgrclischema,\n verparsed)\n self.logger.debug(str(showversion))\n self.hostname = showversion['host_name']\n self.chassis_id = showversion['chassis_id']\n self.system_version = showversion['sys_ver_str']"
]
| [
"0.70257735",
"0.69399524",
"0.66601026",
"0.64289784",
"0.63986456",
"0.6331269",
"0.61766344",
"0.61554945",
"0.6056711",
"0.6012014",
"0.59970677",
"0.59970117",
"0.5963331",
"0.5931367",
"0.5928057",
"0.5925719",
"0.59226507",
"0.591708",
"0.5905139",
"0.5883812",
"0.58399284",
"0.57854193",
"0.576293",
"0.57582784",
"0.57378066",
"0.57208097",
"0.5713514",
"0.57133996",
"0.56768",
"0.56154585"
]
| 0.8445862 | 0 |
Version interface for Jython. Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being a tuple (vm_name, vm_release, vm_vendor) and osinfo being a tuple (os_name, os_version, os_arch). Values which cannot be determined are set to the defaults given as parameters (which all default to ''). | def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):
# Import the needed APIs
try:
import java.lang
except ImportError:
return release, vendor, vminfo, osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def version(self):\r\n return self._get_version(self.java)",
"def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)",
"def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out",
"def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')",
"def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value",
"def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def get_version_info() -> Tuple[Text, Text]:",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)",
"def os_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_version\")",
"def mac_ver(release='', versioninfo=('', '', ''), machine=''):\n\n # First try reading the information from an XML file which should\n # always be present\n info = _mac_ver_xml()\n if info is not None:\n return info\n\n # If that also doesn't work return the default values\n return release, versioninfo, machine",
"def _get_version(self):\n version = self.job_config.get(\"os_version\")\n if not version:\n version = DEFAULT_OS_VERSION.get(self.os_type)\n\n return str(version)",
"def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring",
"def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]",
"def getVersionInfo(cls):\n\n return __version__ + \"\\n\"",
"def do_version(self):\n return \"1.0.0\", True",
"def to_interpreter_version(self) -> Optional[Tuple[int, int]]:\n if self.value is None:\n return None\n mo = cast(Match, re.match(self.PYTHON_RUNTIME_REGEX, self.value))\n return int(mo.group(\"major\")), int(mo.group(\"minor\"))",
"def version_info():\r\n return tuple(map(int, __version__.split('.')))",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"",
"def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions",
"def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def to_interpreter_version(self) -> Tuple[int, int]:\n mo = cast(Match, re.match(self.PYTHON_RUNTIME_REGEX, self.value))\n return int(mo.group(\"major\")), int(mo.group(\"minor\"))",
"def _syscmd_ver(system='', release='', version='',\n\n supported_platforms=('win32', 'win16', 'dos')):\n if sys.platform not in supported_platforms:\n return system, release, version\n\n # Try some common cmd strings\n import subprocess\n for cmd in ('ver', 'command /c ver', 'cmd /c ver'):\n try:\n info = subprocess.check_output(cmd,\n stdin=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n text=True,\n encoding=\"locale\",\n shell=True)\n except (OSError, subprocess.CalledProcessError) as why:\n #print('Command %s failed: %s' % (cmd, why))\n continue\n else:\n break\n else:\n return system, release, version\n\n # Parse the output\n info = info.strip()\n m = _ver_output.match(info)\n if m is not None:\n system, release, version = m.groups()\n # Strip trailing dots from version and release\n if release[-1] == '.':\n release = release[:-1]\n if version[-1] == '.':\n version = version[:-1]\n # Normalize the version and build strings (eliminating additional\n # zeros)\n version = _norm_version(version)\n return system, release, version",
"def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"",
"def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)",
"def do_get_version(self, arg):\n arg = arg\n print(self.phil.if_version)",
"def get_version(version=VERSION, date=DATE):\n return \"JoMRS v{} Modular Rigging System | last update {}\".format(\n \".\".join([i for i in version]), \"/\".join([x for x in date])\n )",
"def version(self) -> Union[int, str]:"
]
| [
"0.6518461",
"0.6503223",
"0.6397304",
"0.6328363",
"0.6327358",
"0.63007367",
"0.62272507",
"0.62220067",
"0.6184409",
"0.6115367",
"0.6099069",
"0.6094439",
"0.6087738",
"0.6084212",
"0.60287714",
"0.6018853",
"0.6005753",
"0.5993766",
"0.5981113",
"0.5972475",
"0.5962731",
"0.59287983",
"0.5918777",
"0.5908015",
"0.5904553",
"0.5880349",
"0.5878832",
"0.58710915",
"0.5851658",
"0.58353865"
]
| 0.79008067 | 0 |
Returns (system, release, version) aliased to common marketing names used for some systems. It also does some reordering of the information in some cases where it would otherwise cause confusion. | def system_alias(system, release, version):
if system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system, release, version
# Modify release (marketing release = SunOS release - 3)
l = release.split('.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = '.'.join(l)
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system in ('win32', 'win16'):
# In case one of the other tricks
system = 'Windows'
# bpo-35516: Don't replace Darwin with macOS since input release and
# version arguments can be different than the currently running version.
return system, release, version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )",
"def systemversionstr():\n return platform.uname().system",
"def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"",
"def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out",
"def display_name(self) -> str:\n if self.is_verified:\n return f\"Verified Package {self.csharp_version}\"\n elif self.is_main:\n return \"main (unstable)\"\n else:\n return self.release_tag.replace(\"_\", \" \").title()",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}",
"def uname():\n global _uname_cache\n\n if _uname_cache is not None:\n return _uname_cache\n\n # Get some infos from the builtin os.uname API...\n try:\n system, node, release, version, machine = infos = os.uname()\n except AttributeError:\n system = sys.platform\n node = _node()\n release = version = machine = ''\n infos = ()\n\n if not any(infos):\n # uname is not available\n\n # Try win32_ver() on win32 platforms\n if system == 'win32':\n release, version, csd, ptype = win32_ver()\n machine = machine or _get_machine_win32()\n\n # Try the 'ver' system command available on some\n # platforms\n if not (release and version):\n system, release, version = _syscmd_ver(system)\n # Normalize system to what win32_ver() normally returns\n # (_syscmd_ver() tends to return the vendor name as well)\n if system == 'Microsoft Windows':\n system = 'Windows'\n elif system == 'Microsoft' and release == 'Windows':\n # Under Windows Vista and Windows Server 2008,\n # Microsoft changed the output of the ver command. The\n # release is no longer printed. This causes the\n # system and release to be misidentified.\n system = 'Windows'\n if '6.0' == version[:3]:\n release = 'Vista'\n else:\n release = ''\n\n # In case we still don't know anything useful, we'll try to\n # help ourselves\n if system in ('win32', 'win16'):\n if not version:\n if system == 'win32':\n version = '32bit'\n else:\n version = '16bit'\n system = 'Windows'\n\n elif system[:4] == 'java':\n release, vendor, vminfo, osinfo = java_ver()\n system = 'Java'\n version = ', '.join(vminfo)\n if not version:\n version = vendor\n\n # System specific extensions\n if system == 'OpenVMS':\n # OpenVMS seems to have release and version mixed up\n if not release or release == '0':\n release = version\n version = ''\n\n # normalize name\n if system == 'Microsoft' and release == 'Windows':\n system = 'Windows'\n release = 'Vista'\n\n vals = system, node, release, version, machine\n # Replace 'unknown' values with the more portable ''\n _uname_cache = uname_result(*map(_unknown_as_blank, vals))\n return _uname_cache",
"def _syscmd_ver(system='', release='', version='',\n\n supported_platforms=('win32', 'win16', 'dos')):\n if sys.platform not in supported_platforms:\n return system, release, version\n\n # Try some common cmd strings\n import subprocess\n for cmd in ('ver', 'command /c ver', 'cmd /c ver'):\n try:\n info = subprocess.check_output(cmd,\n stdin=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n text=True,\n encoding=\"locale\",\n shell=True)\n except (OSError, subprocess.CalledProcessError) as why:\n #print('Command %s failed: %s' % (cmd, why))\n continue\n else:\n break\n else:\n return system, release, version\n\n # Parse the output\n info = info.strip()\n m = _ver_output.match(info)\n if m is not None:\n system, release, version = m.groups()\n # Strip trailing dots from version and release\n if release[-1] == '.':\n release = release[:-1]\n if version[-1] == '.':\n version = version[:-1]\n # Normalize the version and build strings (eliminating additional\n # zeros)\n version = _norm_version(version)\n return system, release, version",
"def map_release_label():\n release = \"\".join(map(lambda x: x.lower(), sh.lsb_release(\"-irs\").split()))\n return OS_MAPPING[next(k for k in OS_MAPPING if re.search(k, release))]",
"def get_version_info() -> Tuple[Text, Text]:",
"def getSlavename():",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def describe_operating_systems():\n pass",
"def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"",
"def human_version(self):\n return _('Latest Stable') if self.version == 'latest_stable' else 'OpenRAVE %s'%self.version",
"def system_info():\n requirements = get_requirements(\"sunpy\")\n groups = get_keys_list(requirements)\n extra_groups = get_extra_groups(groups, ['all', 'dev'])\n base_reqs = get_keys_list(requirements['required'])\n extra_reqs = get_keys_list(requirements['all'])\n missing_packages, installed_packages = find_dependencies(package=\"sunpy\", extras=extra_groups)\n extra_prop = {\"System\": platform.system(),\n \"Arch\": f\"{platform.architecture()[0]}, ({platform.processor()})\",\n \"Python\": platform.python_version(),\n \"sunpy\": version(\"sunpy\")}\n sys_prop = {**installed_packages, **missing_packages, **extra_prop}\n print(\"==============================\")\n print(\"sunpy Installation Information\")\n print(\"==============================\")\n print()\n print(\"General\")\n print(\"#######\")\n if sys_prop['System'] == \"Linux\":\n print(f\"OS: {distro.name()} ({distro.version()}, Linux {platform.release()})\")\n elif sys_prop['System'] == \"Darwin\":\n print(f\"OS: Mac OS {platform.mac_ver()[0]}\")\n elif sys_prop['System'] == \"Windows\":\n print(f\"OS: Windows {platform.release()} {platform.version()}\")\n else:\n print(\"Unknown OS\")\n for sys_info in ['Arch', 'sunpy']:\n print(f'{sys_info}: {sys_prop[sys_info]}')\n print(f'Installation path: {distribution(\"sunpy\")._path}')\n print()\n print(\"Required Dependencies\")\n print(\"#####################\")\n for req in base_reqs:\n print(f'{req}: {sys_prop[req]}')\n print()\n print(\"Optional Dependencies\")\n print(\"#####################\")\n for extra_req in extra_reqs:\n print(f'{extra_req}: {sys_prop[extra_req]}')",
"def get_system_name_mappings(column_data):\n system_name_to_display_name = {}\n display_name_to_system_name = {}\n\n for key, entry in column_data.items():\n if entry.get(\"system_name\", None) is not None:\n system_name = entry.get(\"system_name\")\n else:\n system_name = key\n\n system_name_to_display_name[system_name] = key\n display_name_to_system_name[key] = system_name\n\n return system_name_to_display_name, display_name_to_system_name",
"def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()",
"def get_product_name_and_arch(self, package_name):\n architectures = ['.x86_64', '.noarch', '.i686']\n for arch in architectures:\n if package_name.endswith(arch):\n return package_name[:-len(arch)], arch\n return package_name, None",
"def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name",
"def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def getInfo(self, formatted=False):\n\n\t\tinfo = {}\n\t\tinfo['Python'] = \"%d.%d.%d\" %(sys.version_info[0], sys.version_info[1], sys.version_info[2])\n\t\tinfo[__binding__] = __binding_version__\n\t\tinfo['Qt'] = QtCore.qVersion()\n\t\tinfo['OS'] = platform.system()\n\t\tinfo['Environment'] = HOST\n\n\t\tif formatted:\n\t\t\tinfo_ls = []\n\t\t\tfor key, value in info.items():\n\t\t\t\tinfo_ls.append(\"{} {}\".format(key, value))\n\t\t\tinfo_str = \" | \".join(info_ls)\n\t\t\treturn info_str\n\n\t\telse:\n\t\t\treturn info",
"def get_system_spec():\n import pkg_resources\n import platform\n\n if sys.platform == 'darwin':\n system_info = 'macOS {} {}'.format(\n platform.mac_ver()[0],\n platform.architecture()[0],\n )\n else:\n system_info = '{} {} {} {}'.format(\n platform.system(),\n '_'.join(platform.architecture()),\n platform.release(),\n platform.machine(),\n )\n\n system_spec = dict(\n raiden=pkg_resources.require(raiden.__name__)[0].version,\n python_implementation=platform.python_implementation(),\n python_version=platform.python_version(),\n system=system_info,\n )\n return system_spec",
"def software(s):\n try:\n import maya.mel as mel\n version = mel.eval(\"$tmp = getApplicationVersionAsFloat();\")\n return \"Maya, %s\" % version\n except ImportError:\n pass\n return \"Unknown software.\"",
"def __getAamazonVersion(self):\n ret, resultErr = self.ksp_ssh.ssh_execute_command('cat /etc/system-release')\n linuxVendor = \"amzn\"\n # print(ret)\n if 'AMI' in ret:\n linuxRelease = '1'\n else:\n linuxRelease = '2'\n\n return linuxVendor.strip(), linuxRelease.strip()",
"def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname",
"def get_sys_name(self):\n\t\treturn call_sdk_function('PrlVmDev_GetSysName', self.handle)",
"def _generate_os_code(self, name, version, bits, extra_info):\r\n name = name.replace(' Linux', '')\r\n name = name.replace('Enterprise', '')\r\n name = name.replace('GNU/Linux', '')\r\n\r\n os_code = name.strip().replace(' ', '_').upper()\r\n\r\n if os_code.startswith('RED_HAT'):\r\n os_code = 'REDHAT'\r\n\r\n if 'UBUNTU' in os_code:\r\n version = re.sub(r'\\.\\d+', '', version)\r\n\r\n os_code += '_' + version.replace('.0', '')\r\n\r\n if bits:\r\n os_code += '_' + bits\r\n\r\n if extra_info:\r\n garbage = ['Install', '(32 bit)', '(64 bit)']\r\n\r\n for obj in garbage:\r\n extra_info = extra_info.replace(obj, '')\r\n\r\n os_code += '_' + extra_info.strip().replace(' ', '_').upper()\r\n\r\n return os_code"
]
| [
"0.6215419",
"0.6108677",
"0.59298706",
"0.59254426",
"0.586801",
"0.5762022",
"0.5750569",
"0.574225",
"0.5742147",
"0.5718789",
"0.57069814",
"0.5694989",
"0.5692988",
"0.5684342",
"0.5640696",
"0.562362",
"0.56204915",
"0.556907",
"0.5566248",
"0.55369824",
"0.5528952",
"0.5528952",
"0.5521391",
"0.5516373",
"0.5508521",
"0.5505182",
"0.5502501",
"0.54983646",
"0.5495619",
"0.5469169"
]
| 0.69576734 | 0 |
Helper to format the platform string in a filename compatible format e.g. "system-version-machine". | def _platform(*args):
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ', '_')
platform = platform.replace('/', '-')
platform = platform.replace('\\', '-')
platform = platform.replace(':', '-')
platform = platform.replace(';', '-')
platform = platform.replace('"', '-')
platform = platform.replace('(', '-')
platform = platform.replace(')', '-')
# No need to report 'unknown' information...
platform = platform.replace('unknown', '')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--', '-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def platform_to_str(self, platform_in):\n parsed_platform = self.parse_platform(platform_in)\n if parsed_platform[2]:\n return \"%s/%s/%s\" % parsed_platform\n if parsed_platform[1]:\n return \"%s/%s\" % parsed_platform[0:2]\n return parsed_platform[0]",
"def _format_platform(platform, release, architecture=None):\n rep = f\"{_PLATFORMS[platform]} {release}\"\n if architecture is None or architecture == default.architecture:\n return rep\n return f\"{rep} ({architecture})\"",
"def _format_filename(filename: str) -> str:\n stdlib = (\n f\"{sys.prefix}/lib/python{sys.version_info.major}.{sys.version_info.minor}/\"\n )\n site_pkg = f\"{sys.prefix}/lib/python{sys.version_info.major}.{sys.version_info.minor}/site-packages/\"\n home = f\"{Path.home()}/\"\n cwd = f\"{Path.cwd()}/\"\n if filename.startswith(site_pkg):\n return \"<sitepkg>/\" + filename[len(site_pkg) :]\n if filename.startswith(stdlib):\n return \"<stdlib>/\" + filename[len(stdlib) :]\n if filename.startswith(cwd):\n return \"<cwd>/\" + filename[len(cwd) :]\n if filename.startswith(home):\n return \"<home>/\" + filename[len(home) :]\n return filename",
"def _format_architecture(architecture):\n return f\"{architecture} architecture\"",
"def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)",
"def systemversionstr():\n return platform.uname().system",
"def platform():\n return \"micaz\"",
"def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]",
"def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'",
"def platform(self, return_str=True):\n architecture = self.arch(\"docker\")\n host_platform = self.osversion() + \"/\" + architecture\n if return_str:\n return host_platform.lower()\n return self.parse_platform(host_platform)",
"def platform_extension_formatting(info):\n return ('\\n'+' '*(platform_maxwidth+3)).join(info.split())",
"def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname",
"def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name",
"def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name",
"def GetOSName():\n return Config.osName_",
"def tsv_sheet_germline_platform_name():\n f = io.StringIO(\n textwrap.dedent(\n \"\"\"\n patientName\\tfatherName\\tmotherName\\tsex\\tisAffected\\tlibraryType\\tfolderName\\thpoTerms\\tseqPlatform\n 12_347\\t.\\t.\\tF\\tN\\tWGS\\t12_347\\t.\\tIllumina\n 12_347\\t.\\t.\\tF\\tN\\tWGS\\t12_347\\t.\\tPacBio\n \"\"\".lstrip()\n )\n )\n return f",
"def getSlavename():",
"def _generate_windows_code(self, description):\r\n version_check = re.search(r'Windows Server (\\d+)', description)\r\n version = version_check.group(1)\r\n\r\n os_code = 'WIN_' + version\r\n\r\n if 'Datacenter' in description:\r\n os_code += '-DC'\r\n elif 'Enterprise' in description:\r\n os_code += '-ENT'\r\n else:\r\n os_code += '-STD'\r\n\r\n if 'ith R2' in description:\r\n os_code += '-R2'\r\n elif 'ith Hyper-V' in description:\r\n os_code += '-HYPERV'\r\n\r\n bit_check = re.search(r'\\((\\d+)\\s*bit', description)\r\n if bit_check:\r\n os_code += '_' + bit_check.group(1)\r\n\r\n return os_code",
"def platform_distro():\n distro = platform_information()[0] or ''\n return distro.strip().lower()",
"def format_image_filename(device_image):\n return \"{}-{}-{}-{}.bit\".format(device_image.bitstream_type,\n device_image.pci_vendor,\n device_image.pci_device,\n device_image.uuid)",
"def get_filename(self):\n return self.get_package_name() + '-' + self.os + '-' + self.arch + GPPKG_EXTENSION",
"def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"",
"def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())",
"def php_uname(space, mode=\"a\"):\n t = os.uname()\n return space.newstr(' '.join([t[0], t[1], t[2], t[3], t[4]]))",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def _format_environment_value(value):\n value = str(value)\n if platform.system() == \"Windows\":\n # Split on semicolons first\n components = value.split(os.pathsep)\n\n # On each component, replace anything that looks like\n # a drive letter with a unix-like drive path.\n components = [re.sub(r\"^([A-Za-z]):\\\\\",\n r\"\\\\\\1\\\\\",\n c) for c in components]\n\n return \":\".join(components).replace(\"\\\\\", \"/\")\n\n return value",
"def get_platform():\n system_name = platform.system()\n if system_name == \"Linux\":\n # Previously we'd use either \"-gnu\" or \"-musl\" indicate which version\n # of libc we were built against. We now default to musl since it\n # reliably works on all platforms.\n return \"unknown-linux-musl\"\n elif system_name == \"Darwin\":\n return \"apple-darwin\"\n else:\n return \"unknown\"",
"def current_platform() -> str:\n if sys.platform.startswith('linux'):\n return 'linux'\n elif sys.platform.startswith('darwin'):\n return 'mac'\n elif (sys.platform.startswith('win') or\n sys.platform.startswith('msys') or\n sys.platform.startswith('cyg')):\n if sys.maxsize > 2 ** 31 - 1:\n return 'win64'\n return 'win32'\n else:\n print('Error: DO NOT SUPPORT OS', file=sys.stderr)\n sys.exit(1)",
"def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")",
"def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name"
]
| [
"0.73320854",
"0.7321266",
"0.69390446",
"0.6885955",
"0.67918944",
"0.6570802",
"0.6493723",
"0.64319324",
"0.64065176",
"0.6392717",
"0.63766414",
"0.6262217",
"0.6238464",
"0.6238464",
"0.6237757",
"0.62260294",
"0.6212253",
"0.62072617",
"0.62022316",
"0.61901605",
"0.6181173",
"0.6165045",
"0.61420745",
"0.6100829",
"0.60904664",
"0.60840255",
"0.60816205",
"0.6064317",
"0.60536855",
"0.603774"
]
| 0.746321 | 0 |
In case filepath is a symlink, follow it until a real file is reached. | def _follow_symlinks(filepath):
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath), os.readlink(filepath)))
return filepath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def make_link(self, filepath):\n # Check file exists. It may have been deleted but still in manifest\n if not os.path.exists(self.fullpath(filepath)):\n print('File not found: {filepath}'.format(\n filepath=self.fullpath(filepath)))\n if self.contains(filepath):\n print('removing from manifest')\n self.delete(filepath)\n self.needsync = True\n self.existing_filepaths.discard(filepath)\n else:\n try:\n destdir = os.path.dirname(filepath)\n # Make destination directory if not already exists\n # Necessary because sometimes this is called before\n # individual model setup\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n if self.copy_file(filepath):\n shutil.copy(self.fullpath(filepath), filepath)\n perm = (stat.S_IRUSR | stat.S_IRGRP\n | stat.S_IROTH | stat.S_IWUSR)\n os.chmod(filepath, perm)\n else:\n make_symlink(self.fullpath(filepath), filepath)\n except Exception:\n action = 'copying' if self.copy_file else 'linking'\n print('payu: error: {action} orig: {orig} '\n 'local: {local}'.format(action=action,\n orig=self.fullpath(filepath),\n local=filepath))\n raise\n finally:\n self.existing_filepaths.discard(filepath)",
"def symlink(self, req, link, parent, name):\r\n self.reply_err(req, EROFS)",
"def getLinkTarget(filename):\n is_link = False\n while os.path.exists(filename) and os.path.islink(filename):\n link_target = os.readlink(filename)\n\n filename = os.path.join(os.path.dirname(filename), link_target)\n is_link = True\n\n return is_link, filename",
"def ensure_symlink_exists(symlink_path, file_path):\n\n if not (os.path.islink(symlink_path) or (os.path.realpath(symlink_path) != os.path.realpath(file_path))):\n # This is bad.\n raise CronException(\"Path {0} is not a symlink or does not point where expected.\".format(symlink_path))",
"def is_broken_link(path):\r\n path = os.readlink(path)\r\n return not os.path.exists(path)",
"def relink(f):\n if os.path.islink(f):\n linkto = os.path.join(NEW_LINK_BASE, os.path.basename(os.readlink(f)))\n #print 'Relinking %s-> %s from \\n %s' % (f, linkto, os.readlink(f))\n #print 'removing %s' % f\n os.remove(f)\n os.symlink(linkto, f)",
"def test_create_symlink_file(self):\n pass",
"def open_file_link_manager(file):\n pass",
"def _safe_setup_link(link_filename, real_filename):\r\n real_filename = os.path.relpath(real_filename, os.path.dirname(link_filename))\r\n\r\n if os.path.exists(link_filename):\r\n try:\r\n os.unlink(link_filename)\r\n except OSError:\r\n pass\r\n try:\r\n os.symlink(real_filename, link_filename)\r\n except OSError as e:\r\n # Typically permission denied.\r\n pass",
"def copy_and_link(file_name):\n if os.path.normpath(output_path) != os.getcwd():\n write_to_runner(f\"mv {file_name} {output_path} \\n\")\n write_to_runner(f\"ln -s {output_path}/{file_name} . \\n\")",
"def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )\n return None",
"def testIsSymlink(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingValidSymlinkPath=P(self.existingValidSymlinkFilePathStr)\r\n existingInvalidSymlinkPath=P(self.existingInvalidSymlinkFilePathStr)\r\n nonExistingSymlinkPath=P(self.nonExistingSymlinkPathStr)\r\n\r\n # 1\r\n self.assertEquals(existingValidSymlinkPath.isSymlink(),True,\r\n 'Symlink %r exists'\r\n %str(existingValidSymlinkPath))\r\n\r\n # 2\r\n self.assertEquals(existingInvalidSymlinkPath.isSymlink(),True,\r\n 'Symlink %r exists'\r\n %str(existingInvalidSymlinkPath))\r\n\r\n # 3\r\n self.assertEquals(nonExistingSymlinkPath.isSymlink(),False,\r\n 'Symlink %r does not exist'\r\n %str(nonExistingSymlinkPath))",
"def attempt_symlink_to(path: str, to_path: str) -> None:\n try:\n Path(path).symlink_to(Path(to_path))\n except OSError:\n pytest.skip(\"could not create symbolic link\")",
"def _so_symlinks(path):\n if not os.path.isdir(path):\n assert AssertionError(\"Failed to make so symlinks: path '%s' is not a directory.\", path)\n for dirent in os.listdir(path):\n fname = os.path.join(path, dirent)\n if os.path.isdir(fname) or os.path.islink(fname):\n continue\n m = re.match(r'(.+\\.so)\\.(\\d+)\\.(\\d+)\\.(\\d+)$', fname)\n if m:\n so,x,y,z = m.groups()\n symlink(fname, \"%s.%s.%s\" % (so, x, y))\n symlink(fname, \"%s.%s\" % (so, x))\n symlink(fname, so)",
"def do_tail(filename, lines, follow, func=handle_line):\n fd = open(filename, 'r')\n\n for line in tail_lines(fd, lines):\n func(line + \"\\n\")\n\n if not follow:\n return\n\n while True:\n where = fd.tell()\n line = fd.readline()\n if not line:\n fd_results = os.fstat(fd.fileno())\n try:\n st_results = os.stat(filename)\n except OSError:\n st_results = fd_results\n\n if st_results[1] == fd_results[1]:\n time.sleep(1)\n fd.seek(where)\n else:\n print \"%s changed inode numbers from %d to %d\" % (filename, fd_results[1], st_results[1])\n fd = open(filename, 'r')\n else:\n func(line)",
"def IsSymlink(info):\n return (info.external_attr >> 16) == 0120777",
"def _symlink_file_on_disk(source, link_name):\n link_dir = os.path.dirname(link_name)\n\n # create intermediate dirs if they do not already exist\n if not os.path.isdir(link_dir):\n try:\n os.makedirs(link_dir)\n except OSError as exc:\n logger.error(\"Error creating directory '%s': %s\", link_dir, exc)\n return False\n\n # create symbolic link\n try:\n os.symlink(source, link_name)\n except OSError as exc:\n logger.error(\"Error creating symlink '%s': %s\", link_name, exc)\n return False\n\n logger.debug(\"Created symlink '%s' to '%s'\", link_name, source)\n return True",
"def relink(path, Arg = (None, True, False)):\n if not os.path.islink(path): return\n\n exps = Arg[0]\n debuginfo = Arg[1]\n v = Arg[2]\n\n path = os.path.normpath(path)\n s = os.readlink(path)\n snorm = os.path.normpath(s)\n p = os.path.join(PROJ_SRC, path)\n hatpath = os.path.join(PROJ_HAT, path)\n\n if snorm.startswith(PROJ_SRC + os.sep):\n srcpath = snorm[len(PROJ_SRC + os.sep):]\n\n pathl = path.split(os.sep)\n srcpathl = srcpath.split(os.sep)\n head = commonhead(pathl, srcpathl)\n\n if len(pathl) > len(head) + 1 or \\\n len(pathl) == len(head) + 1 and len(srcpathl) > len(head):\n # pathl: o o o a b # pathl: o o o a\n # srcpathl: o o o c d e # srcpathl: o o o c d e\n # head: o o o or # head: o o o\n # --------------------- # ---------------------\n # src: ../c/d/e # src: c/d/e\n srcl = [os.pardir for i in xrange(len(pathl) - 1 - len(head))] + srcpathl[len(head):]\n src = os.path.join(*srcl)\n elif len(pathl) == len(head) + 1 and len(srcpathl) == len(head):\n # pathl: o o o a\n # srcpathl: o o o\n # head: o o o\n # ---------------------\n # src: .\n src = os.curdir\n if v: print >> sys.stderr, 'detected symlink to current directory', `hatpath`, '->', `src`\n elif len(pathl) == len(head):\n src = os.path.join(*srcpathl[len(head) - 1:])\n if len(srcpathl) == len(head):\n # pathl: o o a\n # srcpathl: o o a\n # ---------------------\n # src: a\n if v: print >> sys.stderr, 'detected symlink to itself', `hatpath`, '->', `src`\n else:\n # pathl: o o a\n # srcpathl: o o a c\n # ---------------------\n # src: a/c\n if v: print >> sys.stderr, 'detected too many levels of symlinks', `hatpath`, '->', `src`\n else:\n print >> sys.stderr, 'detected UNFORESEEN', `path`, '->', `srcpath`\n return\n\n _srcpath = os.path.normpath(os.path.join(os.path.dirname(path), src))\n assert srcpath == _srcpath, '%s:\\n%s not equal to %s' % (path, `srcpath`, `_srcpath`)\n\n os.remove(path)\n if os.path.isfile(srcpath) or os.path.isdir(srcpath):\n try:\n os.symlink(src, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot symlink %s -> %s: %s' % (`hatpath`, `src`, str(why))\n else:\n if v: print 'symlinked', `hatpath`, '->', `src`\n else:\n if os.path.isfile(s):\n print >> sys.stderr, 'missing:', hatpath, '->', src\n try:\n shutil.copy2(s, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot copy %s -> %s: %s' % (`s`, `hatpath`, str(why))\n else:\n if v: print >> sys.stderr, 'copied', `s`, '->', `hatpath`\n elif os.path.isdir(s):\n print >> sys.stderr, 'missing:', hatpath, '->', src\n try:\n os.makedirs(srcpath)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot create directory %s: %s' % (`os.path.join(PROJ_HAT, srcpath)`, str(why))\n else:\n if v: print >> sys.stderr, 'created directory', `os.path.join(PROJ_HAT, srcpath)`\n try:\n os.symlink(src, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot symlink %s -> %s: %s' % (`hatpath`, `src`, str(why))\n else:\n if v: print 'symlinked', `hatpath`, '->', `src`\n else:\n print >> sys.stderr, 'dangling:', p, '->', s\n if v: print >> sys.stderr, 'removed', `hatpath`\n# elif os.path.normpath(os.path.join(os.path.dirname(p), s)).startswith(PROJ_SRC + os.sep):\n else:\n srcpath = os.path.normpath(os.path.join(os.path.dirname(p), s))\n# os.path.normpath(os.path.join(os.path.dirname(p), s)).startswith(PROJ_SRC + os.sep):\n if srcpath.startswith(PROJ_SRC + os.sep):\n if os.path.isfile(path) or os.path.isdir(path):\n if v: print 'relative:', hatpath, '->', s\n else:\n if os.path.isfile(p) or 
os.path.isdir(p):\n print >> sys.stderr, 'missing:', hatpath, '->', s\n else:\n print >> sys.stderr, 'dangling:', p, '->', s\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`\n else:\n if os.path.isfile(p) or os.path.isdir(p):\n if exps:\n dst = exps.destination(srcpath)\n if dst:\n os.remove(path)\n if not dst[1] or debuginfo:\n # if not dst[1] or DEBUGINFO == 'yes' or MODE == 'dbg':\n upl = [os.pardir for i in xrange(len(hatpath.split(os.sep)) - 1)]\n src = os.path.join(os.path.join(*upl), dst[0])\n try:\n os.symlink(src, path)\n except (IOError, os.error), why:\n print >> sys.stderr, 'Cannot symlink %s -> %s: %s' % (`hatpath`, `src`, str(why))\n else:\n if v: print 'symlinked', `hatpath`, '->', `src`\n else:\n print 'debuginfo:', hatpath, '->', s\n if v: print 'removed', `hatpath`\n else:\n print >> sys.stderr, 'not_exported:', srcpath\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`, '->', `s`\n else:\n print >> sys.stderr, 'external:', hatpath, '->', s\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`\n else:\n print >> sys.stderr, 'dangling:', p, '->', s\n os.remove(path);\n if v: print >> sys.stderr, 'removed', `hatpath`",
"def file_checker(file_name):\n if os.path.islink(file_name):\n print \"Crypto device Symlink %s exists\" % file_name\n return True\n else: \n try:\n with open(file_name):\n print \"File %s exists\" % file_name\n return True\n except IOError:\n print \"File %s does not exists\" % file_name\n return False",
"def _islink(path):\n if not os.path.isdir(path):\n return False\n\n if not isinstance(path, str):\n path = str(path)\n\n attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n if attributes == INVALID_FILE_ATTRIBUTES:\n return False\n\n return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0",
"def _find_ref_fname(fname, ref_fname):\n curr_dir = \"\"\n next_dir = os.path.dirname(os.path.abspath(fname))\n while next_dir != curr_dir:\n curr_dir = next_dir\n rcfile = os.path.join(curr_dir, ref_fname)\n if os.path.exists(rcfile):\n return rcfile\n next_dir = os.path.dirname(curr_dir)\n return \"\"",
"def is_linkto_file(host, fqpath):\n command = 'file %s' % fqpath\n rcode, rout, _ = g.run(host, command)\n\n if rcode == 0:\n if 'sticky empty' in rout.strip():\n stat = get_file_stat(host, fqpath)\n if int(stat['size']) == 0:\n # xattr = get_fattr(host, fqpath,\n # 'trusted.glusterfs.dht.linkto')\n xattr = get_dht_linkto_xattr(host, fqpath)\n if xattr is not None:\n return True\n\n return False",
"def _sync_symlink(self, binary_name, link_to):\n\n # The symlink we are creating:\n link_path = os.path.join(self.bin_dir, binary_name)\n\n # The expected file we should be linking to:\n link_dest = os.path.join(self.bin_dir, link_to)\n\n if not os.path.exists(link_path) or \\\n not os.path.islink(link_path) or \\\n os.path.realpath(link_path) != os.path.realpath(link_dest):\n if os.path.exists(link_path):\n os.remove(link_path)\n os.symlink(link_to, os.path.join(self.bin_dir, binary_name))\n self.output.append(\"Symlinked %s to %s.\" % (link_path, link_dest))\n self.changed = True",
"def _real_stat(self, path, _exception_for_missing_path=True):\n # Save for error message.\n original_path = path\n # Most code in this method is used to detect recursive link structures.\n visited_paths = set()\n while True:\n # Stat the link if it is one, else the file/directory.\n lstat_result = self._real_lstat(path, _exception_for_missing_path)\n if lstat_result is None:\n return None\n # If the file is not a link, the `stat` result is the same as the\n # `lstat` result.\n if not stat.S_ISLNK(lstat_result.st_mode):\n return lstat_result\n # If we stat'ed a link, calculate a normalized path for the file\n # the link points to.\n dirname, _ = self._path.split(path)\n path = self._path.join(dirname, lstat_result._st_target)\n path = self._path.abspath(self._path.normpath(path))\n # Check for cyclic structure.\n if path in visited_paths:\n # We had seen this path already.\n raise ftputil.error.RecursiveLinksError(\n \"recursive link structure detected for remote path '{}'\".format(\n original_path\n )\n )\n # Remember the path we have encountered.\n visited_paths.add(path)",
"def EnumeratePaths(args, paths):\n for fn in paths:\n try:\n # 3 - for ftp://, 4 for http://, 5 for https://\n if fn.find(\"://\") in (3,4,5):\n yield fn\n if os.path.islink(fn) and args.skiplinks:\n pass\n elif os.path.isdir(fn) and args.recurse:\n for f in DirEnumerator(args, fn):\n yield f\n elif os.path.isfile(fn):\n yield fn\n except Exception as e:\n print(\"EXCEPTION %s accessing %s\" % (e, fn))",
"def config_symlink(ipydir, profile):\n fexist = []\n for fpath in profile_files(profile):\n filename = osp.basename(fpath)\n dest_file = osp.join(ipydir, 'profile_' + profile, 'startup',\n filename)\n if osp.islink(dest_file) or osp.isfile(dest_file):\n fexist.append(dest_file)\n else:\n os.symlink(fpath, dest_file)\n logger.info(\"Files '%s' for profile '%s' exists.\",\n ', '.join(sorted(fexist)), profile)",
"def add_filepath(self, manifest, filepath, fullpath, copy=False):\n filepath = os.path.normpath(filepath)\n if self.manifests[manifest].add_filepath(\n filepath=filepath,\n fullpath=fullpath,\n hashes=self.fast_hashes + self.full_hashes,\n copy=copy):\n # Only link if filepath was added\n self.manifests[manifest].make_link(filepath)",
"def __links(self, fichier):\n f = fichier\n\n # un lien comprend une source et une destination\n src = self.files[f]['rcsdirname']\n dst = self.files[f]['rcslinkname']\n\n # on verifie si le lien existe et si il est valide\n lexist = self.files[f]['rcslinkexist']\n lok = self.files[f]['rcslinkisok']\n\n if f not in self.skips:\n if lexist:\n self.info('[lnk] %s, %s-->%s existe' % (f, src, dst))\n\n if lok:\n if (src, dst) not in self.linksok:\n self.info('[lnk] %s, %s-->%s lien valide' % (f, src, dst))\n self.linksok.append((src, dst))\n else:\n if (src, dst) not in self.linksko:\n self.error('[lnk] %s, %s-->%s lien non valide' % (f, src, dst))\n self.linksko.append((src, dst))\n else:\n if (src, dst) not in self.linksko:\n self.warn('[lnk] %s, %s-->%s innexistant' % (f, src, dst))\n self.linksko.append((src, dst))\n return None"
]
| [
"0.59758955",
"0.59758955",
"0.58667785",
"0.5801294",
"0.5766194",
"0.5745428",
"0.5708963",
"0.5690824",
"0.5634242",
"0.56276107",
"0.55910045",
"0.557269",
"0.5566692",
"0.55402976",
"0.5536394",
"0.5475052",
"0.54657185",
"0.5448645",
"0.54439133",
"0.5421889",
"0.5400893",
"0.5373602",
"0.53641224",
"0.5338699",
"0.53275585",
"0.53087443",
"0.53049105",
"0.53011733",
"0.52988786",
"0.5280453"
]
| 0.7632888 | 0 |
Interface to the system's file command. The function uses the -b option of the file command to have it omit the filename in its output. Symlinks are followed. It returns default in case the command should fail. | def _syscmd_file(target, default=''):
if sys.platform in ('dos', 'win32', 'win16'):
# XXX Others too ?
return default
try:
import subprocess
except ImportError:
return default
target = _follow_symlinks(target)
# "file" output is locale dependent: force the usage of the C locale
# to get deterministic behavior.
env = dict(os.environ, LC_ALL='C')
try:
# -b: do not prepend filenames to output lines (brief mode)
output = subprocess.check_output(['file', '-b', target],
stderr=subprocess.DEVNULL,
env=env)
except (OSError, subprocess.CalledProcessError):
return default
if not output:
return default
# With the C locale, the output should be mostly ASCII-compatible.
# Decode from Latin-1 to prevent Unicode decode error.
return output.decode('latin-1') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )\n return None",
"def open_file_link_manager(file):\n pass",
"def openf(self, parameter_s=''):\n\n parameter_s = parameter_s.strip()\n #bkms = self.shell.persist.get(\"bookmarks\",{})\n\n oldcwd = os.getcwd()\n numcd = re.match(r'(-)(\\d+)$', parameter_s)\n # jump in directory history by number\n if numcd:\n nn = int(numcd.group(2))\n try:\n ps = ip.ev('_sh[%d]' % nn)\n except IndexError:\n print('The requested directory does not exist in history.')\n return\n else:\n opts = {}\n# elif parameter_s.startswith('--'):\n# ps = None\n# fallback = None\n# pat = parameter_s[2:]\n# dh = self.shell.user_ns['_sh']\n# # first search only by basename (last component)\n# for ent in reversed(dh):\n# if pat in os.path.basename(ent) and os.path.isdir(ent):\n# ps = ent\n# break\n#\n# if fallback is None and pat in ent and os.path.isdir(ent):\n# fallback = ent\n#\n# # if we have no last part match, pick the first full path match\n# if ps is None:\n# ps = fallback\n#\n# if ps is None:\n# print \"No matching entry in directory history\"\n# return\n# else:\n# opts = {}\n\n else:\n # turn all non-space-escaping backslashes to slashes,\n # for c:\\windows\\directory\\names\\\n parameter_s = re.sub(r'\\\\(?! )', '/', parameter_s)\n opts, ps = self.parse_options(parameter_s, 'qb', mode='string')\n\n # jump to previous\n if ps == '-':\n try:\n ps = ip.ev('_sh[-2]' % nn)\n except IndexError:\n raise UsageError('%cd -: No previous directory to change to.')\n# # jump to bookmark if needed\n# else:\n# if not os.path.exists(ps) or opts.has_key('b'):\n# bkms = self.db.get('bookmarks', {})\n#\n# if bkms.has_key(ps):\n# target = bkms[ps]\n# print '(bookmark:%s) -> %s' % (ps,target)\n# ps = target\n# else:\n# if opts.has_key('b'):\n# raise UsageError(\"Bookmark '%s' not found. \"\n# \"Use '%%bookmark -l' to see your bookmarks.\" % ps)\n\n # at this point ps should point to the target dir\n if ps:\n ip.ex('openFile(\"%s\", f=1)' % ps)",
"def readlink(self) -> str:\n # https://github.com/python/mypy/issues/12278\n return error.checked_call(os.readlink, self.strpath) # type: ignore[arg-type,return-value]",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def file_flag():\n\n return '-f' if random.randint(0, 1) else '--file'",
"def symlink(self, req, link, parent, name):\r\n self.reply_err(req, EROFS)",
"def test_create_symlink_file(self):\n pass",
"def find_binary(binary: str, paths=None, fallback=None) -> str:\n\n if os.path.isabs(binary):\n if not (os.path.isfile(binary) and access(binary, os.X_OK)):\n raise CommandNotFound(binary)\n return binary\n\n if paths is None:\n paths = os.environ.get(\"PATH\", \"\").split(\":\")\n\n for path in paths:\n filename = os.path.join(os.path.abspath(path), binary)\n if access(filename, os.X_OK) and os.path.isfile(filename):\n return filename\n\n if fallback is not None:\n return fallback\n\n raise CommandNotFound(binary)",
"def copy_and_link(file_name):\n if os.path.normpath(output_path) != os.getcwd():\n write_to_runner(f\"mv {file_name} {output_path} \\n\")\n write_to_runner(f\"ln -s {output_path}/{file_name} . \\n\")",
"def execute_file(self, files, **kw):\n\n mode = kw['mode'] if 'mode' in kw else 0\n\n # ranger can act as a file chooser when running with --choosefile=...\n if mode == 0 and 'label' not in kw:\n if ranger.args.choosefile:\n open(ranger.args.choosefile, 'w').write(self.fm.thisfile.path)\n\n if ranger.args.choosefiles:\n paths = []\n for hist in self.fm.thistab.history:\n for fobj in hist.files:\n if fobj.marked and fobj.path not in paths:\n paths += [fobj.path]\n paths += [f.path for f in self.fm.thistab.get_selection() if f.path not in paths]\n\n with open(ranger.args.choosefiles, 'w') as fobj:\n fobj.write('\\n'.join(paths) + '\\n')\n\n if ranger.args.choosefile or ranger.args.choosefiles:\n raise SystemExit\n\n if isinstance(files, set):\n files = list(files)\n elif not isinstance(files, (list, tuple)):\n files = [files]\n\n flags = kw.get('flags', '')\n if 'c' in squash_flags(flags):\n files = [self.fm.thisfile]\n\n self.signal_emit('execute.before', keywords=kw)\n filenames = [f.path for f in files]\n label = kw.get('label', kw.get('app', None))\n try:\n return self.rifle.execute(filenames, mode, label, flags, None)\n finally:\n self.signal_emit('execute.after')",
"def get_filename_as_agrv_if_no_ask(prompt):\n Found = False\n ln = len(sys.argv)\n while not Found:\n if ln < 2:\n file = input( prompt)\n else:\n file = sys.argv[1]\n try:\n RFH = open(file)\n Found = True\n except FileNotFoundError:\n print(\"%%Error! File not found!\")\n ln = 1\n# break\n return RFH",
"def do_GET(self):\n\n path = self.file_path\n\n if os.path.exists(path):\n # Symbolic link judgement.\n # Paths with denied symbolic links will pretend to be 404 errors.\n if args[TITLE_LOCAL_LINKS] and not (\"%s/\" % os.path.realpath(path)).startswith(os.getcwd() + \"/\"):\n return self.send_error(404, \"File not found\")\n elif args[TITLE_NO_LINKS]:\n # If all symbolic links are banned, then we must trace our\n # way down an existing path to make sure that no symbolic link exists\n curr = path\n while True:\n if os.path.islink(curr):\n return self.send_error(404, \"File not found\")\n if curr == path:\n break\n curr = os.path.dirname(path);\n\n f = None\n if os.path.isdir(path):\n\n if not getattr(self, common.ATTR_PATH, \"\").endswith(\"/\"):\n return self.send_redirect(\"%s/\" % getattr(self, common.ATTR_PATH, \"\"))\n\n for index in [\"index.html\", \"index.htm\"]:\n index = os.path.join(path, index)\n if os.path.exists(index):\n path = index\n break\n if path == self.file_path:\n return self.list_directory(path)\n\n return self.serve_file(path)",
"def link(prog_path: str, o_files: List[File]) -> File:\n print(\"linking\")\n os.system(\"gcc -o {prog_path} {o_files}\".format(\n prog_path=prog_path,\n o_files=' '.join(o_file.path for o_file in o_files),\n ))\n return File(prog_path)",
"def _sync_symlink(self, binary_name, link_to):\n\n # The symlink we are creating:\n link_path = os.path.join(self.bin_dir, binary_name)\n\n # The expected file we should be linking to:\n link_dest = os.path.join(self.bin_dir, link_to)\n\n if not os.path.exists(link_path) or \\\n not os.path.islink(link_path) or \\\n os.path.realpath(link_path) != os.path.realpath(link_dest):\n if os.path.exists(link_path):\n os.remove(link_path)\n os.symlink(link_to, os.path.join(self.bin_dir, binary_name))\n self.output.append(\"Symlinked %s to %s.\" % (link_path, link_dest))\n self.changed = True",
"def link(self, fname):\n return fname",
"def fileoutput(cd, bin, args=[]):\n cmd = \"cd %s; ./%s %s\" % (cd, bin, ' '.join(args))\n logger.info(cmd)\n\n return exec_command(cmd)",
"def _symlink_or_copy(src, dst):\n # try to symlink file\n try:\n os.symlink(src, dst)\n print('Creating symlink \"%s\" pointing to \"%s\"' % (dst, src))\n except Exception as ex_symlink:\n # try to copy file\n try:\n shutil.copyfile(src, dst)\n print('Copying file from \"%s\" to \"%s\"' % (src, dst))\n except Exception as ex_copy:\n raise RuntimeError('Could neither symlink nor copy file \"%s\" to \"%s\":\\n- %s\\n- %s' % (src, dst, str(ex_symlink), str(ex_copy)))",
"def testLink(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertEqual(results[0], b'')\n self.assertTrue(results[1].startswith(b'l'), 'link failed')\n return self.runCommand('rm testLink')\n\n d = self.runScript('ln testLink testfile1', 'ls -l testLink')\n d.addCallback(_check)\n d.addCallback(self.assertEqual, b'')\n return d",
"def _follow_symlinks(filepath):\n filepath = os.path.abspath(filepath)\n while os.path.islink(filepath):\n filepath = os.path.normpath(\n os.path.join(os.path.dirname(filepath), os.readlink(filepath)))\n return filepath",
"def lookup(file, category='undefined'):\n path = os.path.join(self.base_path, doc, file)\n existing_path = os.path.exists(path) and path\n link = doc+'/'+file\n self.log.debug(' %s file %s' % (category, existing_path or\n path+\" (not found)\"))\n return existing_path, link",
"def file_operation(path, command):\n with ChDir(path):\n subprocess.check_call(command)",
"def main(argv=None):\n\tif argv is None:\n\t\targv = sys.argv\n\ttry:\n\t\topts, args = getopt.getopt(sys.argv[1:], \"\", [\"help\",\"ftype=\",\"path=\"])\n\texcept getopt.error, msg:\n\t\tprint msg\n\t\tshow_help()\n\t\tsys.exit(2)\n\topts = dict(opts)\n\tif opts.has_key(\"--help\"):\n\t\tshow_help()\n\t\tsys.exit(2)\n\ttry:\n\t\tpath = get_path(opts['--path'])\n\texcept:\n\t\tprint \"Error in Path\"\n\t\tshow_help()\n\t\tsys.exit(2)\n\ttry:\n\t\tif(opts['--ftype']==\"\"):\n\t\t\tprint \"No filetype specified\"\n\t\t\tshow_help()\n\t\t\tsys.exit(2)\n\t\telse:\n\t\t\tftype = opts['--ftype']\n\texcept KeyError:\n\t\tprint \"No filetype specified\"\n\t\tshow_help()\n\t\tsys.exit(2)\n\tfilelist = subprocess.Popen(\"find \" + path + \" -name \\\"*.\"+ftype+\"\\\"\" , shell=True,stdout=subprocess.PIPE, cwd=None).stdout.read().strip('\\n').split('\\n')\n\tj=1\n\tfor i in filelist:\n\t\ttry:\n\t\t\tstatinfo = os.stat(i)\n\t\texcept OSError,msg:\n\t\t\tprint msg\n\t\t\tshow_help()\n\t\t\tsys.exit(2)\n\t\tnewname= i.rsplit('/',1)[0] +'/'+ time.strftime(\"%Y-%m-%d %H.%M.%S\", time.localtime(statinfo.st_mtime))\n\t\ttempnewname = newname + \".%s\"%(ftype)\n\t\tprint tempnewname\n\t\tif(not os.path.exists(tempnewname)):\n\t\t\tos.renames(i,tempnewname)\n\t\t\tj=1\n\t\telse:\n\t\t\ttempnewname = newname + str(j) + \".%s\"%(sys.argv[2])\n\t\t\tos.renames(i,tempnewname)\n\t\t\tj+=1",
"def filepath(p):\n if os.path.isfile(p):\n return os.path.realpath(p)\n else:\n raise ArgumentTypeError('{} is not a file.'.format(p))",
"def _real_stat(self, path, _exception_for_missing_path=True):\n # Save for error message.\n original_path = path\n # Most code in this method is used to detect recursive link structures.\n visited_paths = set()\n while True:\n # Stat the link if it is one, else the file/directory.\n lstat_result = self._real_lstat(path, _exception_for_missing_path)\n if lstat_result is None:\n return None\n # If the file is not a link, the `stat` result is the same as the\n # `lstat` result.\n if not stat.S_ISLNK(lstat_result.st_mode):\n return lstat_result\n # If we stat'ed a link, calculate a normalized path for the file\n # the link points to.\n dirname, _ = self._path.split(path)\n path = self._path.join(dirname, lstat_result._st_target)\n path = self._path.abspath(self._path.normpath(path))\n # Check for cyclic structure.\n if path in visited_paths:\n # We had seen this path already.\n raise ftputil.error.RecursiveLinksError(\n \"recursive link structure detected for remote path '{}'\".format(\n original_path\n )\n )\n # Remember the path we have encountered.\n visited_paths.add(path)",
"def unix_find(pathin):\n return [os.path.join(path, file)\n for (path, dirs, files) in os.walk(pathin, followlinks=False)\n for file in files]",
"def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode) and\n not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None",
"def fileopen(file):\n return _posixfile_().fileopen(file)",
"def _find_histfile_var(file_list, default=None):\n for f in file_list:\n f = expanduser_abs_path(f)\n if not os.path.isfile(f):\n continue\n with open(f, 'r') as rc_file:\n for line in rc_file:\n if line.startswith('HISTFILE='):\n hist_file = line.split('=', 1)[1].strip('\\'\"\\n')\n hist_file = expanduser_abs_path(hist_file)\n if os.path.isfile(hist_file):\n return hist_file\n else:\n if default:\n default = expanduser_abs_path(default)\n if os.path.isfile(default):\n return default"
]
| [
"0.57181937",
"0.54270643",
"0.52126133",
"0.52120095",
"0.52064097",
"0.52064097",
"0.5199541",
"0.518135",
"0.5115018",
"0.5114934",
"0.5024936",
"0.50143844",
"0.4999971",
"0.4974326",
"0.49650243",
"0.4924294",
"0.49133918",
"0.48827857",
"0.4861022",
"0.48501983",
"0.4835268",
"0.48319307",
"0.4815478",
"0.47859177",
"0.47753623",
"0.47732663",
"0.47571602",
"0.47394228",
"0.4706415",
"0.4699278"
]
| 0.5453112 | 1 |
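A minimal usage sketch of the helper in the row above, assuming a Unix host with the file(1) utility on PATH; it simply mirrors the subprocess call from the document field rather than defining anything new.

import os
import subprocess
import sys

# Ask file(1) for a brief (-b) description of the running interpreter,
# forcing the C locale so the wording of the output is deterministic.
env = dict(os.environ, LC_ALL='C')
try:
    out = subprocess.check_output(['file', '-b', sys.executable],
                                  stderr=subprocess.DEVNULL, env=env)
    print(out.decode('latin-1').strip())   # e.g. "ELF 64-bit LSB executable, ..."
except (OSError, subprocess.CalledProcessError):
    print('')  # _syscmd_file() would return its default argument here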
Queries the given executable (defaults to the Python interpreter binary) for various architecture information. Returns a tuple (bits, linkage) which contains information about the bit architecture and the linkage format used for the executable. Both values are returned as strings. Values that cannot be determined are returned as given by the parameter presets. If bits is given as '', the sizeof(pointer) (or sizeof(long) on Python version < 1.5.2) is used as indicator for the supported pointer size. The function relies on the system's "file" command to do the actual work. This is available on most if not all Unix platforms. On some non-Unix platforms where the "file" command does not exist and the executable is set to the Python interpreter binary, defaults from _default_architecture are used. | def architecture(executable=sys.executable, bits='', linkage=''):
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
size = struct.calcsize('P')
bits = str(size * 8) + 'bit'
# Get data from the 'file' system command
if executable:
fileout = _syscmd_file(executable, '')
else:
fileout = ''
if not fileout and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b, l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits, linkage
if 'executable' not in fileout and 'shared object' not in fileout:
# Format not supported
return bits, linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits, linkage | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def architecture(executable=None, bits='', linkage=''): ###\n # Use the sizeof(pointer) as default number of bits if nothing\n # else is given as default.\n if not bits:\n import struct\n try:\n size = struct.calcsize('P')\n except ValueError: ###\n # Older installations can only query longs\n size = struct.calcsize('l')\n bits = str(size*8) + 'bit'\n\n return bits, linkage",
"def bitness():\n # see https://docs.python.org/2/library/platform.html#platform.architecture\n return '64-bit' if sys.maxsize > 2**32 else '32-bit'",
"def get_arch(self, elf_file_path):\n try:\n output = subprocess.check_output([self.readelf_path, '-h', elf_file_path])\n if output.find('AArch64') != -1:\n return 'arm64'\n if output.find('ARM') != -1:\n return 'arm'\n if output.find('X86-64') != -1:\n return 'x86_64'\n if output.find('80386') != -1:\n return 'x86'\n except subprocess.CalledProcessError:\n pass\n return 'unknown'",
"def architecture():\n if is_darwin:\n # Darwin's platform.architecture() is buggy and always\n # returns \"64bit\" event for the 32bit version of Python's\n # universal binary. So we roll out our own (that works\n # on Darwin).\n if sys.maxsize > 2 ** 32:\n return '64bit'\n else:\n return '32bit'\n else:\n return platform.architecture()[0]",
"def architecture(self):\n return self.random.choice([\n 'x86_64', \n 'x86'\n ])",
"def get_target_platform(target_binary):\n file_type_string = os.popen(f\"file {target_binary}\").read()\n if FILE_TYPE_STRING_ELF16 in file_type_string:\n platform = PLATFORM_LINUX\n architecture = ARCH_16_BIT\n elif FILE_TYPE_STRING_ELF32 in file_type_string:\n platform = PLATFORM_LINUX\n architecture = ARCH_32_BIT\n elif FILE_TYPE_STRING_ELF64 in file_type_string:\n platform = PLATFORM_LINUX\n architecture = ARCH_64_BIT\n elif FILE_TYPE_STRING_PE16 in file_type_string:\n platform = PLATFORM_WINDOWS\n architecture = ARCH_16_BIT\n elif FILE_TYPE_STRING_PE32 in file_type_string:\n platform = PLATFORM_WINDOWS\n architecture = ARCH_32_BIT\n elif FILE_TYPE_STRING_PE64 in file_type_string:\n platform = PLATFORM_WINDOWS\n architecture = ARCH_64_BIT\n else:\n log(\"unable to detect binary type\")\n is_linux_bin = prompt_yn(\"is this a linux binary?\")\n if is_linux_bin:\n platform = PLATFORM_LINUX\n else:\n platform = PLATFORM_WINDOWS\n architecture = prompt_list(\"select the architecture\", [ARCH_16_BIT, ARCH_32_BIT, ARCH_64_BIT])\n log(f\"platform is {architecture} {platform}\")\n return (platform, architecture)",
"def matchDLLArch(filename):\n # TODO: check machine type on other platforms?\n if not is_win:\n return True\n\n global _exe_machine_type\n try:\n if _exe_machine_type is None:\n pefilename = sys.executable # for exception handling\n exe_pe = pefile.PE(sys.executable, fast_load=True)\n _exe_machine_type = exe_pe.FILE_HEADER.Machine\n exe_pe.close()\n\n pefilename = filename # for exception handling\n pe = pefile.PE(filename, fast_load=True)\n match_arch = pe.FILE_HEADER.Machine == _exe_machine_type\n pe.close()\n except pefile.PEFormatError as exc:\n raise SystemExit('Can not get architecture from file: %s\\n'\n ' Reason: %s' % (pefilename, exc))\n return match_arch",
"def get_arch():\n with settings(hide('running', 'stdout')):\n arch = run('uname -m')\n return arch",
"def find_current_arch():\n \n filetype = idaapi.get_file_type_name()\n \n if '386' in filetype:\n print 'Architecture: 32-bit intel.'\n return (ir.IR_INTEL_x86, ir.intel.ir_intel_x86, intel.disassembler)\n elif 'x86-64' in filetype:\n print 'Architecture: 64-bit intel.'\n return (ir.IR_INTEL_x64, ir.intel.ir_intel_x64, intel.disassembler)\n \n raise RuntimeError(\"Don't know which arch to choose for %s\" % (repr(filetype), ))",
"def _get_binaries(**kwargs):\n binfiles = list()\n Nbytes = list()\n filenames = kwargs.get(\"filenames\")\n for filename in filenames:\n _, ext = os.path.splitext(filename) # Getting file extension\n if ext == \".H\": # SEPlib file\n binfiles.append(sep.get_binary(filename))\n Nbytes.append(os.path.getsize(binfiles[-1]))\n elif ext == \".h5\":\n raise NotImplementedError(\"ERROR! h5 files not supported yet.\")\n else:\n raise ValueError(\"ERROR! Unknown format for file %s\" % filename)\n return binfiles, Nbytes",
"def get_platform_architecture() -> None:\n global _PLATFORM, _ARCHITECTURE, _COMPRESSION\n\n x86_64 = {\"x86_64\", \"amd64\", \"AMD64\", \"64bit\"}\n i386 = {\"i386\", \"i486\", \"i586\", \"i686\", \"386\", \"x86\", \"32bit\"}\n\n system = platform.system()\n if system == \"Windows\":\n machine = platform.machine()\n else:\n machine = os.uname().machine\n\n if system == \"Linux\":\n _PLATFORM = \"linux\"\n if machine in x86_64:\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n _ARCHITECTURE = \"32\"\n else:\n _ARCHITECTURE = \"other\"\n\n elif system in {\"OpenBSD\", \"NetBSD\", \"FreeBSD\"}:\n _PLATFORM = \"bsd\"\n _ARCHITECTURE = \"other\"\n if system == \"FreeBSD\":\n if machine in x86_64:\n if detect_freebsd_linux_compatibility(\"64\"):\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n if detect_freebsd_linux_compatibility(\"32\"):\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"32\"\n\n elif system in {\"Haiku\", \"Hurd\"}:\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"other\"\n\n elif system == \"Darwin\":\n _PLATFORM = \"mac\"\n _ARCHITECTURE = \"os\"\n elif system == \"Windows\":\n _PLATFORM = \"win\"\n if machine in x86_64:\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n _ARCHITECTURE = \"32\"\n if not all([_PLATFORM, _ARCHITECTURE]):\n raise PlatformError(f\"Failed to detect appropriate platform. {system} {machine}\")\n\n if _PLATFORM == \"win\":\n _COMPRESSION = \"zip\"\n else:\n _COMPRESSION = \"tar.gz\"",
"def mac_gcc_architecture():\n # Darwin's platform.architecture() is buggy and always\n # returns \"64bit\" event for the 32bit version of Python's\n # universal binary. So we roll out our own (that works\n # on Darwin).\n if sys.maxint > 2L ** 32:\n # 64bit\n return 'x86_64'\n else:\n # 32bit\n return 'i386'",
"def diassemble(self,filename, bits='32bit'):\n mode = bits.replace(\"bit\",\"\")\n diasm = subprocess.check_output(['lib/ZydisDisasm',\"-\"+mode, filename])\n return diasm.decode(\"utf-8\")",
"def _get_pkg_arch(metadata_dir):\n def _parse_march(flags_file):\n value = None\n with open(flags_file, 'r') as fcflags:\n cflags = fcflags.read()\n match = _RE_ARCH_VARIANT.search(cflags)\n if match is not None:\n value = match.group(2)\n return value\n with open(os.path.join(metadata_dir, 'CHOST'), 'r') as fchost:\n arch = fchost.readline().strip().split('-', 1)[0]\n variant = None\n for flag_file in ['CFLAGS', 'CXXFLAGS']:\n variant = _parse_march(os.path.join(metadata_dir, flag_file))\n if variant is not None:\n break\n return arch, variant",
"def determine_package_architecture(self, has_shared_object_files):\n logger.debug(\"Checking package architecture ..\")\n if has_shared_object_files:\n logger.debug(\"Package contains shared object files, tagging with %s architecture.\",\n self.converter.debian_architecture)\n return self.converter.debian_architecture\n else:\n logger.debug(\"Package doesn't contain shared object files, dealing with a portable package.\")\n return 'all'",
"def architecture(cls):\n\n bits, _ = platform.architecture()\n machine = platform.machine()\n\n # Check for ARM machine\n if bits == '32bit' and machine.startswith('armv'):\n return cls.arm(machine)\n\n # Check (bits, machine) map\n machine_key = (bits, machine)\n\n if machine_key in MACHINE_MAP:\n return MACHINE_MAP[machine_key]\n\n # Check (bits) map\n if bits in BITS_MAP:\n return BITS_MAP[bits]\n\n log.error('Unable to determine system architecture - bits: %r, machine: %r', bits, machine)\n return None",
"def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"",
"def zipfile_by_bitsize(binaries_url, headers, zipfile_regex, bitsize):\n # this is used by ccx and gmsh\n res = requests.get(binaries_url, headers=headers)\n html = res.text\n urls = re.findall(r'href=[\\'\"]?([^\\'\" >]+)', html)\n pattern = re.compile(zipfile_regex)\n urls = [url for url in urls if pattern.match(url)]\n urls = urls[-2:]\n url_choices = {32: urls[0], 64: urls[1]}\n if 'win32' in urls[1] or 'Windows32' in urls[1]:\n url_choices = {32: urls[1], 64: urls[0]}\n return url_choices[bitsize]",
"def osarch_is_amd64():\n return osarch_match(\"amd64\")",
"def get_arch():\n arch = platform.machine()\n if arch == \"i686\":\n return \"i686\"\n elif arch == \"x86_64\":\n return \"x86_64\"\n elif arch == \"aarch64\":\n return \"aarch64\"\n else:\n return \"unknown\"",
"def test_wrong_architecture(tmp_path, host_python, build_python, get_resource):\n\n crossenv = make_crossenv(tmp_path, host_python, build_python,\n '--cc=/usr/bin/gcc')\n for line in crossenv.creation_log.splitlines():\n if re.match(r'WARNING:.*architecture', line):\n return\n assert False, \"Crossenv did not detect wrong architecture\"",
"def arch(self):\n if self.method in ('buildArch', 'createdistrepo', 'livecd'):\n return self.params[2]\n if self.method in ('createrepo', 'runroot'):\n return self.params[1]\n if self.method == 'createImage':\n return self.params[3]\n if self.method == 'indirectionimage':\n return self.params[0]['arch']",
"def win_or_linux():\n\n if sys.platform =='win32':\n file = windows_arguments()\n return file\n\n if sys.platform =='linux2':\n file = linux_arguments()\n return file",
"def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir",
"def is_64_windows():\n return 'PROGRAMFILES(X86)' in os.environ",
"def analyze_local_binary_get_target_addresses(target_binary, target_platform, target_architecture, target_type, target_port, target_prefix, target_offset):\n binaries = [target_binary]\n\n if target_platform == PLATFORM_WINDOWS:\n additional_binaries = prompt_base(\"are there any dlls associated with this binary? (separate with a space)\")\n binaries.extend([os.path.abspath(binary) for binary in additional_binaries.split(\" \")])\n\n log(\"locating targetable jump instructions\")\n\n all_targetable_jumps = []\n\n for binary in binaries:\n # todo: rewrite to be more graceful\n objdump = subprocess.Popen([\"objdump\", \"-D\", binary],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n grepjmp = subprocess.Popen([\"grep\", \"jmp\"], stdin=objdump.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n grepesp = subprocess.Popen([\"grep\", \"esp\"], stdin=grepjmp.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n results = grepesp.stdout.readlines()\n\n start_address = get_binary_start_address(binary)\n binary_short_name = os.path.basename(binary)\n\n if results is not None:\n for line in results:\n instruction = line.decode().strip()\n all_targetable_jumps.append([instruction, binary_short_name, start_address])\n \n if len(all_targetable_jumps) > 1:\n target_instruction = prompt_table(\"select an instruction to target.\", all_targetable_jumps)\n elif len(all_targetable_jumps) == 1:\n target_instruction = all_targetable_jumps[0]\n else:\n log_error(\"no targetable addresses found\")\n\n\n target_instruction_address = target_instruction[0][:8]\n target_source_file = target_instruction[1]\n target_base_address = target_instruction[2][-8:]\n\n target_instruction_offset_distance = int(target_instruction_address, 16) - int(target_base_address, 16)\n\n log(f\"selected the instruction in {target_source_file} at 0x{target_instruction_address} (0x{target_base_address} + {target_instruction_offset_distance}\")\n \n return (target_source_file, target_base_address, target_instruction_address, target_instruction_offset_distance)",
"def osarch_is_64_bit():\n return osarch_match(\"64-bit\")",
"def get_device_arch(self):\n if not self.is_adb_available():\n return None\n\n return self._do_adb_command('shell getprop ro.product.cpu.abi')",
"def architectures(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"architectures\")",
"def CompiledBinary(\n makefile=None,\n compiler=\"gcc\",\n sources=None,\n binary_name=None,\n is_32_bit=True,\n executable_stack=True,\n no_stack_protector=True,\n aslr=False,\n compiler_flags=None,\n flag_file=None,\n static_flag=None,\n share_source=False,\n remote=False,\n):\n\n if compiler_flags is None:\n compiler_flags = []\n\n if is_32_bit and \"-m32\" not in compiler_flags:\n compiler_flags.append(\"-m32\")\n if executable_stack and \"-zexecstack\" not in compiler_flags:\n compiler_flags.append(\"-zexecstack\")\n if no_stack_protector and \"-fno-stack-protector\" not in compiler_flags:\n compiler_flags.append(\"-fno-stack-protector\")\n if no_stack_protector and \"-D_FORTIFY_SOURCE=0\" not in compiler_flags:\n compiler_flags.append(\"-D_FORTIFY_SOURCE=0\")\n\n if makefile is None and sources is None:\n assert False, \"You must provide either a makefile or a sources list\"\n\n if sources is None:\n assert (\n binary_name is not None\n ), \"You must provide the binary name if you use a makefile\"\n\n if flag_file is None:\n flag_file = \"flag.txt\"\n\n base_classes = [Compiled]\n if remote:\n base_classes.append(Remote)\n\n class Problem(*base_classes):\n files = copy([])\n\n remove_aslr = not aslr\n\n if share_source:\n files = copy([File(source) for source in sources])\n\n if binary_name is not None:\n program_name = binary_name\n else:\n program_name = os.path.splitext(sources[0])[0]\n\n def __init__(self):\n self.makefile = makefile\n self.compiler = compiler\n self.compiler_sources = sources\n self.compiler_flags = compiler_flags\n\n if not os.path.isfile(flag_file):\n with open(flag_file, \"w\") as f:\n f.write(\"{{flag}}\\n\")\n\n if static_flag is not None:\n self.generate_flag = lambda random: static_flag\n\n self.files.append(ProtectedFile(flag_file))\n\n return Problem"
]
| [
"0.79984057",
"0.605365",
"0.598074",
"0.5809001",
"0.5670192",
"0.5661787",
"0.5592954",
"0.5417459",
"0.5406665",
"0.53129387",
"0.5306204",
"0.5304704",
"0.5247234",
"0.5214229",
"0.51679796",
"0.51365",
"0.51007396",
"0.50877565",
"0.5084175",
"0.5070319",
"0.50638765",
"0.50329584",
"0.50159186",
"0.5003878",
"0.49871045",
"0.49433064",
"0.4919749",
"0.48911384",
"0.48866844",
"0.4882991"
]
| 0.8240533 | 0 |
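For context, the same (bits, linkage) pair computed above is exposed through the public platform.architecture() API; the sketch below just calls it, so the printed values depend on the interpreter running it.

import platform
import struct

bits, linkage = platform.architecture()      # e.g. ('64bit', 'ELF') on Linux
print(bits, linkage)

# The pointer-size fallback used when file(1) is unavailable:
print(str(struct.calcsize('P') * 8) + 'bit')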
Fairly portable uname interface. Returns a tuple of strings (system, node, release, version, machine, processor) identifying the underlying platform. Note that unlike the os.uname function this also returns possible processor information as an additional tuple entry. Entries which cannot be determined are set to ''. | def uname():
global _uname_cache
if _uname_cache is not None:
return _uname_cache
# Get some infos from the builtin os.uname API...
try:
system, node, release, version, machine = infos = os.uname()
except AttributeError:
system = sys.platform
node = _node()
release = version = machine = ''
infos = ()
if not any(infos):
# uname is not available
# Try win32_ver() on win32 platforms
if system == 'win32':
release, version, csd, ptype = win32_ver()
machine = machine or _get_machine_win32()
# Try the 'ver' system command available on some
# platforms
if not (release and version):
system, release, version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32', 'win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release, vendor, vminfo, osinfo = java_ver()
system = 'Java'
version = ', '.join(vminfo)
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
vals = system, node, release, version, machine
# Replace 'unknown' values with the more portable ''
_uname_cache = uname_result(*map(_unknown_as_blank, vals))
return _uname_cache | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uname():\n global _uname_cache\n no_os_uname = 0\n\n if _uname_cache is not None:\n return _uname_cache\n\n processor = ''\n\n # Get some infos from the builtin os.uname API...\n try:\n system, node, release, version, machine = os.uname()\n except AttributeError:\n no_os_uname = 1\n\n #If any unknowns still exist, replace them with ''s, which are more portable\n if system == 'unknown':\n system = ''\n if node == 'unknown':\n node = ''\n if release == 'unknown':\n release = ''\n if version == 'unknown':\n version = ''\n if machine == 'unknown':\n machine = ''\n if processor == 'unknown':\n processor = ''\n\n _uname_cache = uname_result(system, node, release, version,\n machine, processor)\n return _uname_cache",
"def kern():\n return platform.uname()",
"def platform_info(self):\n return platform.uname()._asdict()",
"def systemversionstr():\n return platform.uname().system",
"def get_platform():\n system_name = platform.system()\n if system_name == \"Linux\":\n # Previously we'd use either \"-gnu\" or \"-musl\" indicate which version\n # of libc we were built against. We now default to musl since it\n # reliably works on all platforms.\n return \"unknown-linux-musl\"\n elif system_name == \"Darwin\":\n return \"apple-darwin\"\n else:\n return \"unknown\"",
"def get_arch():\n with settings(hide('running', 'stdout')):\n arch = run('uname -m')\n return arch",
"def get_arch():\n arch = platform.machine()\n if arch == \"i686\":\n return \"i686\"\n elif arch == \"x86_64\":\n return \"x86_64\"\n elif arch == \"aarch64\":\n return \"aarch64\"\n else:\n return \"unknown\"",
"def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]",
"def get_current_kernel_arch():\r\n try:\r\n return os.popen('uname -m').read().rstrip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1",
"def processor():\n return uname().processor",
"def processor():\n return uname().processor",
"def machine():\n return uname().machine",
"def machine():\n return uname().machine",
"def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}",
"def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name",
"def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name",
"def osversion():\n return platform()",
"def identify_system() -> str:\n system = platform.system()\n if system not in [\"Linux\", \"Darwin\"]:\n raise ValueError(f\"Unsupported system {system}\")\n return system",
"def known_os_type():\n return 'Linux'",
"def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'",
"def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )",
"def php_uname(space, mode=\"a\"):\n t = os.uname()\n return space.newstr(' '.join([t[0], t[1], t[2], t[3], t[4]]))",
"def get_platform_architecture() -> None:\n global _PLATFORM, _ARCHITECTURE, _COMPRESSION\n\n x86_64 = {\"x86_64\", \"amd64\", \"AMD64\", \"64bit\"}\n i386 = {\"i386\", \"i486\", \"i586\", \"i686\", \"386\", \"x86\", \"32bit\"}\n\n system = platform.system()\n if system == \"Windows\":\n machine = platform.machine()\n else:\n machine = os.uname().machine\n\n if system == \"Linux\":\n _PLATFORM = \"linux\"\n if machine in x86_64:\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n _ARCHITECTURE = \"32\"\n else:\n _ARCHITECTURE = \"other\"\n\n elif system in {\"OpenBSD\", \"NetBSD\", \"FreeBSD\"}:\n _PLATFORM = \"bsd\"\n _ARCHITECTURE = \"other\"\n if system == \"FreeBSD\":\n if machine in x86_64:\n if detect_freebsd_linux_compatibility(\"64\"):\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n if detect_freebsd_linux_compatibility(\"32\"):\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"32\"\n\n elif system in {\"Haiku\", \"Hurd\"}:\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"other\"\n\n elif system == \"Darwin\":\n _PLATFORM = \"mac\"\n _ARCHITECTURE = \"os\"\n elif system == \"Windows\":\n _PLATFORM = \"win\"\n if machine in x86_64:\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n _ARCHITECTURE = \"32\"\n if not all([_PLATFORM, _ARCHITECTURE]):\n raise PlatformError(f\"Failed to detect appropriate platform. {system} {machine}\")\n\n if _PLATFORM == \"win\":\n _COMPRESSION = \"zip\"\n else:\n _COMPRESSION = \"tar.gz\"",
"def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname",
"def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"",
"def getplatform():\n\n # Return the system platform\n return sys.platform",
"def grr_uname(line: Text) -> Text:\n args = grr_uname.parser.parse_args(shlex.split(line))\n return magics_impl.grr_uname_impl(args.machine, args.kernel_release)",
"def local_info():\n local('uname -a')",
"def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool",
"def system():\n return uname().system"
]
| [
"0.8784979",
"0.7336015",
"0.73255605",
"0.7301558",
"0.7273519",
"0.72542316",
"0.7228414",
"0.7146747",
"0.7137755",
"0.70993286",
"0.70993286",
"0.70929116",
"0.70929116",
"0.70900655",
"0.70837486",
"0.70837486",
"0.70675766",
"0.7037415",
"0.7003389",
"0.69323856",
"0.6914318",
"0.68676376",
"0.68665826",
"0.6836765",
"0.67934734",
"0.6725068",
"0.6712171",
"0.6669607",
"0.6668311",
"0.66656375"
]
| 0.8507206 | 1 |
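A short usage sketch for the function in the row above: platform.uname() returns a named tuple, so the six fields can be read by name; the concrete values naturally vary per machine.

import platform

info = platform.uname()                          # system, node, release, version,
print(info.system, info.release, info.machine)   # machine, processor
print(repr(info.processor))                      # '' when it cannot be determined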
Returns the (true) processor name, e.g. 'amdk6'. An empty string is returned if the value cannot be determined. Note that many platforms do not provide this information or simply return the same value as for machine(), e.g. NetBSD does this. | def processor():
return uname().processor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_processor_name() -> bytes:\n return collective.get_processor_name().encode()",
"def _detect_name(self):\n\n if 'Model name' in self.cpu_info:\n return self.cpu_info['Model name']\n\n # CPUs C/S Nodes Sockets\n # D03 16 4 1 4 (likely to change in the future)\n # D05 64 32 4 2\n # Amber 46-92 46 1 1-2\n # Tx2 28~224 28 2 1-2\n elif int(self.cpu_info['CPU(s)']) == 16 and \\\n int(self.cpu_info['Socket(s)']) == 4:\n return \"D03\"\n\n elif int(self.cpu_info['CPU(s)']) == 64 and \\\n int(self.cpu_info['Socket(s)']) == 2 and \\\n int(self.cpu_info['NUMA node(s)']) == 4:\n return \"D05\"\n\n elif int(self.cpu_info['Core(s) per socket']) == 46 and \\\n int(self.cpu_info['NUMA node(s)']) == 1:\n return \"Amberwing\"\n\n elif int(self.cpu_info['Core(s) per socket']) == 28 and \\\n int(self.cpu_info['NUMA node(s)']) == 2:\n return \"ThunderX2\"",
"def machine():\n return uname().machine",
"def machine():\n return uname().machine",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def machine():\n mach = platform.machine()\n if mach.startswith('arm'):\n return 'arm'\n else:\n # Assume x86/x86_64 machine.\n return None",
"def _get_machine_name(self):\n self.machine = platform.uname().node\n return self.machine",
"def cpu(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cpu\")",
"def get_cpuinfo() -> str:\n\n # Read /proc/cpuinfo\n try:\n with open('/proc/cpuinfo', 'r') as f:\n return f.read()\n except IOError:\n print('Error: Could not read /proc/cpuinfo', file = sys.stderr)\n return ''",
"def default_processor_version(self) -> str:\n return pulumi.get(self, \"default_processor_version\")",
"def get_arch():\n arch = platform.machine()\n if arch == \"i686\":\n return \"i686\"\n elif arch == \"x86_64\":\n return \"x86_64\"\n elif arch == \"aarch64\":\n return \"aarch64\"\n else:\n return \"unknown\"",
"def uname():\n global _uname_cache\n no_os_uname = 0\n\n if _uname_cache is not None:\n return _uname_cache\n\n processor = ''\n\n # Get some infos from the builtin os.uname API...\n try:\n system, node, release, version, machine = os.uname()\n except AttributeError:\n no_os_uname = 1\n\n #If any unknowns still exist, replace them with ''s, which are more portable\n if system == 'unknown':\n system = ''\n if node == 'unknown':\n node = ''\n if release == 'unknown':\n release = ''\n if version == 'unknown':\n version = ''\n if machine == 'unknown':\n machine = ''\n if processor == 'unknown':\n processor = ''\n\n _uname_cache = uname_result(system, node, release, version,\n machine, processor)\n return _uname_cache",
"def kern():\n return platform.uname()",
"def get_arch():\n with settings(hide('running', 'stdout')):\n arch = run('uname -m')\n return arch",
"def get_platform():\n system_name = platform.system()\n if system_name == \"Linux\":\n # Previously we'd use either \"-gnu\" or \"-musl\" indicate which version\n # of libc we were built against. We now default to musl since it\n # reliably works on all platforms.\n return \"unknown-linux-musl\"\n elif system_name == \"Darwin\":\n return \"apple-darwin\"\n else:\n return \"unknown\"",
"def systemversionstr():\n return platform.uname().system",
"def machine_name(self) -> str:\n return pulumi.get(self, \"machine_name\")",
"def machine_name(self) -> str:\n return pulumi.get(self, \"machine_name\")",
"def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")",
"def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]",
"def get_isolate_cpus(self):\n\n command = \"cat /proc/cpuinfo | grep processor | awk '{print $NF}'\"\n out = run_and_getout(command)\n str_out = out.decode(self.default_code).replace('\\n', ' ').strip()\n str_out = str(str_out)\n if str_out[0] == \"0\":\n return str_out[2:]\n else:\n return str_out",
"def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch",
"def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'",
"def machine_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"machine_name\")",
"def machine_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"machine_name\")",
"def identify_system() -> str:\n system = platform.system()\n if system not in [\"Linux\", \"Darwin\"]:\n raise ValueError(f\"Unsupported system {system}\")\n return system",
"def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")"
]
| [
"0.7343616",
"0.7283618",
"0.6868559",
"0.6868559",
"0.6843412",
"0.6774932",
"0.67210054",
"0.6705206",
"0.6705206",
"0.6705206",
"0.668735",
"0.6650386",
"0.656623",
"0.6536417",
"0.6442935",
"0.64179313",
"0.6400207",
"0.6389308",
"0.63062775",
"0.63062775",
"0.62691104",
"0.6259101",
"0.62427133",
"0.6231939",
"0.622072",
"0.62111557",
"0.62111557",
"0.62093174",
"0.6182439",
"0.6182439"
]
| 0.7920648 | 0 |
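The one-liner above just forwards to the cached uname() result; a quick check of what it yields on the current host (frequently '' on Linux, as the docstring warns):

import platform

print(repr(platform.processor()))   # e.g. 'x86_64', 'arm', or '' if unknown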
Returns a parsed version of Python's sys.version as tuple (name, version, branch, revision, buildno, builddate, compiler) referring to the Python implementation name, version, branch, revision, build number, build date/time as string and the compiler identification string. Note that unlike the Python sys.version, the returned value for the Python version will always include the patchlevel (it defaults to '.0'). The function returns empty strings for tuple entries that cannot be determined. sys_version may be given to parse an alternative version string, e.g. if the version was read from a different Python interpreter. | def _sys_version(sys_version=None):
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
# XXX: RUSTPYTHON support
if "rustc" in sys_version:
name = "RustPython"
else:
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_git'):
_, branch, revision = sys._git
elif hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def python_version_tuple():\n return tuple(_sys_version()[1].split('.'))",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def py_version_str() -> str:\n return str(sys.version_info.major) + '.' + str(sys.version_info.minor)",
"def version():\n return '%d.%d' % (sys.version_info[0], sys.version_info[1])",
"def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())",
"def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring",
"def get_python_version():\r\n return \"py%i.%i\" % (sys.version_info[0], sys.version_info[1])",
"def get_python_version():\n return sys.version.split(\" \")[0]",
"def pyversion(part=None):\n if part is None:\n return sys.version_info\n return sys.version_info[part]",
"def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )",
"def _pyVersion(self):\n return sys.version",
"def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version",
"def python_version():\n return _sys_version()[1]",
"def python_build():\n return _sys_version()[4:6]",
"def version_info():\r\n return tuple(map(int, __version__.split('.')))",
"def _pyVersion(self): \n return sys.version",
"def python_version(self):\n return sys.version.replace(\"\\n\", \"\")",
"def get_gcc_ver(exe=\"gcc\"):\n cmd = [exe, '-v']\n major = -1\n minor = -1\n patch = -1\n raw = sub.check_output(cmd, stderr=sub.STDOUT).decode('ascii').lower().split('\\n')\n for line in raw:\n if line.startswith('gcc version'):\n tokens = line.split()\n # we obtain a version string such as \"5.4.0\"\n verstr = tokens[2].strip()\n vertup = verstr.split('.')\n major = int(vertup[0])\n minor = int(vertup[1])\n patch = int(vertup[2])\n ver = major, minor, patch\n return ver",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def python_revision():\n return _sys_version()[3]",
"def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])",
"def systemversionstr():\n return platform.uname().system",
"def python_implementation():\n return _sys_version()[0]",
"def get_version():\n return '%d.%d.%d' % version_info",
"def pyzmq_version_info():\n import re\n parts = re.findall('[0-9]+', __version__)\n parts = [ int(p) for p in parts ]\n if 'dev' in __version__:\n parts.append(float('inf'))\n return tuple(parts)",
"def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value",
"def python_branch():\n\n return _sys_version()[2]",
"def get_version():\n from colubrid import __version__\n from sys import version\n return '%s - Python %s' % (__version__, version.split('\\n')[0].strip())"
]
| [
"0.69661736",
"0.6936045",
"0.66326684",
"0.6534963",
"0.652174",
"0.6477283",
"0.6472309",
"0.64619017",
"0.6445718",
"0.64044285",
"0.6347685",
"0.63387734",
"0.6316219",
"0.62552184",
"0.624645",
"0.61655927",
"0.6143049",
"0.6101995",
"0.6096731",
"0.6026529",
"0.59860563",
"0.5955456",
"0.59344494",
"0.5929632",
"0.59286374",
"0.59045875",
"0.58937925",
"0.5870913",
"0.5861705",
"0.5859013"
]
| 0.7935132 | 0 |
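The tuple parsed above backs the public helpers in the platform module; the sketch below prints a few of them for the running interpreter, so the exact strings will differ between installations.

import platform

print(platform.python_implementation())   # 'CPython', 'PyPy', 'Jython', 'IronPython'
print(platform.python_version())          # always carries a patchlevel, e.g. '3.11.4'
print(platform.python_build())            # (buildno, builddate)
print(platform.python_compiler())         # e.g. 'GCC 12.2.0'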
Returns a string identifying the Python implementation. | def python_implementation():
return _sys_version()[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )",
"def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())",
"def module_name(self):\n return \"py{0:s}\".format(self.library_name[3:])",
"def get_python_version():\r\n return \"py%i.%i\" % (sys.version_info[0], sys.version_info[1])",
"def py_version_str() -> str:\n return str(sys.version_info.major) + '.' + str(sys.version_info.minor)",
"def from_interpreter_version(cls, py_major: int, py_minor: int) -> str:",
"def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring",
"def get_python_exe():\n\n py = str(sc.sticky[\"PythonExe\"])\n\n return py",
"def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"",
"def python_compiler():\n return _sys_version()[6]",
"def protocol(self) -> str:\n return __name__",
"def get_python_version():\n return sys.version.split(\" \")[0]",
"def get_lib_extension():\r\n if sys.platform == 'win32':\r\n return 'pyd'\r\n else:\r\n return 'so'",
"def systemversionstr():\n return platform.uname().system",
"def python_build():\n return _sys_version()[4:6]",
"def versionstring():\n return \"%i.%i.%i\" % __version__",
"def versionstring():\n return \"%i.%i.%i\" % __version__",
"def get_version():\n from colubrid import __version__\n from sys import version\n return '%s - Python %s' % (__version__, version.split('\\n')[0].strip())",
"def show_python_executable():\n print(\"PYTHON:\", sys.executable)",
"def get_platform():\n system_name = platform.system()\n if system_name == \"Linux\":\n # Previously we'd use either \"-gnu\" or \"-musl\" indicate which version\n # of libc we were built against. We now default to musl since it\n # reliably works on all platforms.\n return \"unknown-linux-musl\"\n elif system_name == \"Darwin\":\n return \"apple-darwin\"\n else:\n return \"unknown\"",
"def get_implementation(self):\n return self.__capabilities[\"IMPLEMENTATION\"]",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def get_python():\n if sys.platform == 'win32':\n python = path.join(VE_ROOT, 'Scripts', 'python.exe')\n else:\n python = path.join(VE_ROOT, 'bin', 'python')\n return python",
"def get_version() -> str:\n return __version__",
"def python_version(self):\n return sys.version.replace(\"\\n\", \"\")",
"def getPythonCommand():\n\tthepython = \"exit 1 ; #\"\n\ttry:\n\t\timport sys\n\t\tif sys.__name__ is None:\n\t\t\traise ImportError(\"Failed to import system. WTF?!!\")\n\t\tthepython = checkPythonCommand([\"which\", \"coverage\"])\n\t\tif (str(\"/coverage\") in str(thepython)) and (sys.version_info >= (3, 3)):\n\t\t\tthepython = str(\"coverage run -p\")\n\t\telse:\n\t\t\tthepython = checkPythonCommand([\"which\", \"python3\"])\n\t\t\tif (str(\"/python3\") not in str(thepython)) or (sys.version_info <= (3, 2)):\n\t\t\t\tthepython = \"python3\"\n\texcept Exception:\n\t\tthepython = \"exit 1 ; #\"\n\t\ttry:\n\t\t\tthepython = checkPythonCommand([\"which\", \"python\"])\n\t\t\tif (str(\"/python\") in str(thepython)):\n\t\t\t\tthepython = \"python\"\n\t\texcept Exception:\n\t\t\tthepython = \"exit 1 ; #\"\n\treturn str(thepython)",
"def _pyVersion(self):\n return sys.version",
"def python_version():\n return _sys_version()[1]",
"def _pyVersion(self): \n return sys.version",
"def current_platform() -> str:\n if sys.platform.startswith('linux'):\n return 'linux'\n elif sys.platform.startswith('darwin'):\n return 'mac'\n elif (sys.platform.startswith('win') or\n sys.platform.startswith('msys') or\n sys.platform.startswith('cyg')):\n if sys.maxsize > 2 ** 31 - 1:\n return 'win64'\n return 'win32'\n else:\n print('Error: DO NOT SUPPORT OS', file=sys.stderr)\n sys.exit(1)"
]
| [
"0.7299703",
"0.7070567",
"0.6981937",
"0.680866",
"0.6634425",
"0.64393586",
"0.64343274",
"0.6358839",
"0.6286373",
"0.6282533",
"0.62478",
"0.61986905",
"0.6157437",
"0.6141968",
"0.60680294",
"0.60430735",
"0.60430735",
"0.5982926",
"0.59810865",
"0.5969606",
"0.596542",
"0.59465325",
"0.5942174",
"0.5911124",
"0.5859035",
"0.584272",
"0.5833092",
"0.57918143",
"0.5790727",
"0.57786965"
]
| 0.7831082 | 0 |
Returns the Python version as tuple (major, minor, patchlevel) of strings. Note that unlike the Python sys.version, the returned value will always include the patchlevel (it defaults to 0). | def python_version_tuple():
return tuple(_sys_version()[1].split('.')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def version_info():\r\n return tuple(map(int, __version__.split('.')))",
"def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())",
"def py_version_str() -> str:\n return str(sys.version_info.major) + '.' + str(sys.version_info.minor)",
"def get_python_version():\r\n return \"py%i.%i\" % (sys.version_info[0], sys.version_info[1])",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def get_python_version():\n return sys.version.split(\" \")[0]",
"def python_version():\n return _sys_version()[1]",
"def to_interpreter_version(self) -> Tuple[int, int]:\n mo = cast(Match, re.match(self.PYTHON_RUNTIME_REGEX, self.value))\n return int(mo.group(\"major\")), int(mo.group(\"minor\"))",
"def pyversion(part=None):\n if part is None:\n return sys.version_info\n return sys.version_info[part]",
"def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )",
"def version():\n return '%d.%d' % (sys.version_info[0], sys.version_info[1])",
"def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring",
"def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value",
"def get_python_version():\n return sys.version[:3]",
"def py_version(self):\n note_version = self.language_version or \"0\"\n if note_version == \"unknown\":\n note_version = \".\".join(map(str, sys.version_info[:3]))\n return version_string_to_list(note_version)",
"def python_version(self):\n return sys.version.replace(\"\\n\", \"\")",
"def to_interpreter_version(self) -> Optional[Tuple[int, int]]:\n if self.value is None:\n return None\n mo = cast(Match, re.match(self.PYTHON_RUNTIME_REGEX, self.value))\n return int(mo.group(\"major\")), int(mo.group(\"minor\"))",
"def _pyVersion(self):\n return sys.version",
"def python_revision():\n return _sys_version()[3]",
"def pyversion(ref=None):\n import platform\n ver = platform.python_version()\n if ref:\n return [\n int(x) for x in ver.split(\".\")[:2]\n ] >= [\n int(x) for x in ref.split(\".\")[:2]\n ]\n else: return ver",
"def pyzmq_version_info():\n import re\n parts = re.findall('[0-9]+', __version__)\n parts = [ int(p) for p in parts ]\n if 'dev' in __version__:\n parts.append(float('inf'))\n return tuple(parts)",
"def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)",
"def _pyVersion(self): \n return sys.version",
"def get_major_version(version):\n return str(check_version(version)[0])",
"def geomdl_version():\n return tuple(__version__.split('.'))",
"def python_build():\n return _sys_version()[4:6]",
"def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version",
"def minor_version(self):\n return self.unpack_dword(0x18)",
"def get_major_dot_minor_version(version):\n return '.'.join([str(v) for v in version[:2]])"
]
| [
"0.72485656",
"0.7154039",
"0.70956457",
"0.69873315",
"0.6877759",
"0.6847924",
"0.6845456",
"0.6787605",
"0.6748689",
"0.6733849",
"0.6701764",
"0.6690922",
"0.6680496",
"0.6669175",
"0.6648375",
"0.662438",
"0.6623637",
"0.65420294",
"0.6529781",
"0.65242064",
"0.6508047",
"0.6453394",
"0.6436778",
"0.642955",
"0.6426766",
"0.64064544",
"0.6352454",
"0.6336116",
"0.62904763",
"0.6273142"
]
| 0.78322214 | 0 |
Returns a string identifying the Python implementation branch. For CPython this is the SCM branch from which the Python binary was built. If not available, an empty string is returned. | def python_branch():
return _sys_version()[2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scm_branch(self):\n return self._data.get('scm_branch')",
"def branch(self):\n return os.popen('git rev-parse --abbrev-ref HEAD').read().strip()",
"def get_branch(project_root: str) -> str:\n if os.path.isfile(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION'):\n with open(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION') as f:\n return f.read().replace('\\n', '')\n\n child = subprocess.Popen('cd {0} && git rev-parse --abbrev-ref HEAD'.format(project_root),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n exit_code = child.wait()\n branch = child.stdout.read().decode()\n if len(branch) != 0:\n branch = branch.replace('\\n', '')\n else:\n return 'unknown'\n if exit_code == 0 and branch != 'HEAD':\n return branch\n else:\n return 'unknown'",
"def GetBranch():\n m = BRANCH_REGEX.match(RCS_FILE)\n if m:\n return m.group(2)\n return DEFAULT_BRANCH",
"def get_branch():\n if os.getenv('GIT_BRANCH'):\n # Travis\n branch = os.getenv('GIT_BRANCH')\n elif os.getenv('BRANCH_NAME'):\n # Jenkins 2\n branch = os.getenv('BRANCH_NAME')\n else:\n branch = check_output(\n \"git rev-parse --abbrev-ref HEAD\".split(\" \")\n ).decode('utf-8').strip()\n\n return branch.replace(\"/\", \"_\")",
"def get_git_branch():\n branch = \"\"\n try:\n # git > 2.22 could do 'git branch --show-current'\n branch = check_output(\n ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n\n # No git installed or project downloaded as a .zip\n except Exception:\n pass\n\n return branch.strip()",
"def branch(self) -> Optional[str]:\n return pulumi.get(self, \"branch\")",
"def GetCommonBranch():\n if GetPrefix().startswith(DEFAULT_DEPOT):\n return COMMON_BRANCH\n return DEFAULT_BRANCH",
"def current_branch():\n return subprocess.check_output('git branch --show-current'.split()).decode().strip()",
"def get_branch():\n command = [\"git\", \"branch\", \"--show-current\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n branch_str = proc.stdout.readline()\n return branch_str.decode(\"utf-8\").rstrip()",
"def identify_branch(self, gecko_branch):\n\n # Retrieve the name of the repository\n branch = re.search('.*/([\\S\\.]+$)', gecko_branch).group(1)\n\n # Supported branches: mozilla-aurora, mozilla-beta, mozilla-release, mozilla-esr*\n # All other branches (mozilla-central, mozilla-inbound, birch, elm, oak etc.) should fallback to the 'default' branch\n # This will work with Firefox and Thunderbird\n if not re.match(r'.*/releases/', gecko_branch):\n branch = \"default\"\n\n return branch",
"def branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"branch\")",
"def branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"branch\")",
"def branch(self):\n return self._changeset.get('branch', None)",
"def get_git_branch(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n return repo.active_branch.name\n except Exception:\n return None",
"def git_branch():\n result, output = popen('git branch', False, False)\n branch = None\n for line in output:\n if line.startswith('*'):\n branch = line.split('*')[-1].strip()\n break\n return branch",
"def default_branch(self) -> str:\n return pulumi.get(self, \"default_branch\")",
"def svn_branch():\n return svn_url().split('/')[-1]",
"def production_branch(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"production_branch\")",
"def version(self) -> str:\n self.__verify_repo_initialized()\n res = vcompat.get_repository_software_version_spec(self._env.branchenv)\n return str(res)",
"def get_branch_and_root():\n repo = get_repo()\n root = get_project_root(repo)\n return repo.active_branch.name, root",
"def git_branch():\n git_branch = None\n branches = git(['branch']).split('\\n')\n pattern = re.compile(r'^\\*[ ]+(?P<branch>.*)$')\n for branch in branches:\n matches = pattern.match(branch)\n if matches:\n git_branch = matches.group('branch')\n return git_branch, '.', '.'",
"def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"",
"def get_branch(self):\n if self._repository:\n return self._repository.dirstate.branch()",
"def get_current_branch_name(self):\n # type: () -> Optional[str]\n branch = self.get_current_branch()\n if branch:\n return branch.name\n return None",
"def getBranchName(directory):\n return subprocess.check_output([\"git\",\"rev-parse\",\"--abbrev-ref\",\"HEAD\"],cwd=directory).strip()",
"def python_implementation():\n return _sys_version()[0]",
"def _branch(self):\n printer = Printer(None)\n ci_manager = CIManager(printer)\n return ci_manager.get_branch()",
"def git_implementation(self) -> Optional[pulumi.Input[Union[str, 'GitImplementation']]]:\n return pulumi.get(self, \"git_implementation\")",
"def production_branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"production_branch\")"
]
| [
"0.7205967",
"0.7202124",
"0.7188247",
"0.71171105",
"0.7113601",
"0.70824313",
"0.6852207",
"0.66365796",
"0.66342664",
"0.64464325",
"0.6439272",
"0.6393707",
"0.6393707",
"0.6392835",
"0.63432646",
"0.6337903",
"0.6326181",
"0.6315394",
"0.6314967",
"0.6265553",
"0.6242711",
"0.62311774",
"0.6178093",
"0.6171352",
"0.6169644",
"0.61449134",
"0.61205494",
"0.61197287",
"0.6117952",
"0.60570353"
]
| 0.76201206 | 0 |
Returns a string identifying the Python implementation revision. For CPython this is the SCM revision from which the Python binary was built. If not available, an empty string is returned. | def python_revision():
return _sys_version()[3] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scm_revision(self):\n return self._data.get('scm_revision')",
"def get_revision(self) -> str:\n try:\n return self.cmd.rev_parse(verify=True, args=\"HEAD\", check_returncode=True)\n except exc.CommandError:\n return \"initial\"",
"def git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n return GIT_REVISION",
"def get_reversion():\n return to_str(backend.get().af_get_revision())",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version",
"def get_revision(self) -> str:\n raise NotImplementedError",
"def version(self) -> str:\n self.__verify_repo_initialized()\n res = vcompat.get_repository_software_version_spec(self._env.branchenv)\n return str(res)",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )",
"def python_implementation():\n return _sys_version()[0]",
"def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())",
"def get_version() -> str:\n return __version__",
"def get_version():\n init = read(\"src\", \"{{cookiecutter.module_name}}\", \"__init__.py\")\n return VERSION_RE.search(init).group(1)",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")",
"def python_build():\n return _sys_version()[4:6]",
"def get_version():\n init_py = open(os.path.join(PACKAGE_NAME, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)",
"def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring",
"def get_revision(self):\n vers = self.send(\"?R\", recv=True)\n # Verify its a valid version\n # ? why was this commented out\n float(vers)\n # But return as string to avoid precision issues\n return vers",
"def get_python_version():\r\n return \"py%i.%i\" % (sys.version_info[0], sys.version_info[1])",
"def get_version():\n with io.open(os.path.join(SCRIPT_DIR, 'oasislmf', '__init__.py'), encoding='utf-8') as init_py:\n return re.search('__version__ = [\\'\"]([^\\'\"]+)[\\'\"]', init_py.read()).group(1)",
"def vcs_revision(self):\n filename = os.path.join(self.requirement.source_directory, '.hg_archival.txt')\n if os.path.isfile(filename):\n with open(filename) as handle:\n for line in handle:\n name, _, value = line.partition(':')\n if name.strip() == 'node':\n return value.strip()",
"def py_version_str() -> str:\n return str(sys.version_info.major) + '.' + str(sys.version_info.minor)",
"def detect_version(self):\n\n version = self.scm_object.detect_version(self.args.__dict__).strip()\n logging.debug(\"VERSION(auto): %s\", version)\n return version",
"def get_version():\n init_py = open(path.join(here, 'silverstrike', '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)",
"def version():\n\n version = None\n output = gitopen(['--version'])\n m = re.search(br\" version ([\\d\\.A-Za-z]+)\", output)\n if m is not None:\n version = m.group(1).decode('utf-8')\n return version",
"def get_svn_version():\n return crds.__version__",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read_text(), re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Failed to find version string\")",
"def get_version():\n from colubrid import __version__\n from sys import version\n return '%s - Python %s' % (__version__, version.split('\\n')[0].strip())"
]
| [
"0.70552576",
"0.68873155",
"0.68783283",
"0.6868426",
"0.6786601",
"0.6774839",
"0.67549443",
"0.6749165",
"0.6732668",
"0.67236096",
"0.6641747",
"0.66417205",
"0.6628212",
"0.6615963",
"0.6615776",
"0.66124874",
"0.65684795",
"0.6500381",
"0.64956903",
"0.64826936",
"0.64748824",
"0.6441919",
"0.643287",
"0.6409207",
"0.6400125",
"0.63915384",
"0.63720524",
"0.6368819",
"0.63686603",
"0.636254"
]
| 0.76246005 | 0 |
Returns a string identifying the compiler used for compiling Python. | def python_compiler():
return _sys_version()[6] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compiler(self, target):\n self._check_target(target)\n return target.compiler or self._default_compiler",
"def CC(self):\n return os.path.join(self._compiler_dir, 'gcc')",
"def dump_compiler(input_bytes):\n return dump_from_release(input_bytes, \"compiler\")",
"def is_cxx_compiler():\n\n wrapper_command = os.path.basename(sys.argv[0])\n return re.match(r'(.+)c\\+\\+(.*)', wrapper_command)",
"def get_compiler() -> EBNFCompiler:\n THREAD_LOCALS = access_thread_locals()\n try:\n compiler = THREAD_LOCALS.EBNF_00000001_compiler_singleton\n except AttributeError:\n THREAD_LOCALS.EBNF_00000001_compiler_singleton = EBNFCompiler()\n compiler = THREAD_LOCALS.EBNF_00000001_compiler_singleton\n return compiler",
"def get_compiler() -> XMLSnippetCompiler:\n THREAD_LOCALS = access_thread_locals()\n try:\n compiler = THREAD_LOCALS.XMLSnippet_00000001_compiler_singleton\n except AttributeError:\n THREAD_LOCALS.XMLSnippet_00000001_compiler_singleton = XMLSnippetCompiler()\n compiler = THREAD_LOCALS.XMLSnippet_00000001_compiler_singleton\n return compiler",
"def getCompilerError():",
"def CXX(self):\n return os.path.join(self._compiler_dir, 'g++')",
"def build(self, progname):\n self.run_programm(self.COMPILED[self.progtype][0], \"%s %s %s\" %\\\n (progname, self.COMPILED[self.progtype][1], COMPILED_FILENAME ))\n\n compiled_progname=COMPILED_FILENAME\n return compiled_progname",
"def get_compiler() -> XMLCompiler:\n THREAD_LOCALS = access_thread_locals()\n try:\n compiler = THREAD_LOCALS.XML_00000001_compiler_singleton\n except AttributeError:\n THREAD_LOCALS.XML_00000001_compiler_singleton = XMLCompiler()\n compiler = THREAD_LOCALS.XML_00000001_compiler_singleton\n return compiler",
"def gen_build_str_dec():\n\t#Get name of person building firmware\n\t#git config --get-all user.name\n\t#Get repo revision\n\t#git log | head -1 | cut -d \" \" -f 2\n\t#Get branch\n\t#git branch | grep \"\\*\" | cut -d \" \" -f 2\n\t#Get modified status\n\t#Date, time, gcc version (__VERSION__)\n\ts = \"Miniboard Firmware rev \"\n\treturn \"\"",
"def compile(self) -> str:\n compiled_command = (\n f\"{PUMP_ADDRESS[self.target_pump_num]}\"\n f\"{self.target_syringe}\"\n f\"{self.command}{self.command_value}\"\n )\n\n if self.parameter_value:\n compiled_command += f\"{self.optional_parameter}{self.parameter_value}\"\n\n return compiled_command + self.execution_command",
"def compile(self):\n return None # pragma: no cover",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def compile_code():\n pyc_cmd = \"python3 ../course/common/compiler.py \"\n\n with open('log.out', 'w+', encoding=\"utf-8\") as f:\n subprocess.call(shlex.split(pyc_cmd + './student/'), universal_newlines=True, stderr=f)\n f.seek(0)\n out_student = f.read()\n\n if out_student != \"\":\n rawhtml = rst.get_codeblock(\"\", out_student)\n feedback.set_global_result('failed')\n feedback.set_global_feedback(_(\"Your program does not compile: \\n \") + rawhtml + \"\\n\")\n sys.exit(0)",
"def _compile(self, filename, source):\n \n if source and source[-1] != '\\n':\n source = source + '\\n'\n code = __builtin__.compile(source, filename.cStr(), 'exec')\n\n # try to cache the compiled code\n pycFilename = Filename(filename)\n pycFilename.setExtension(pycExtension)\n try:\n f = open(pycFilename, 'wb')\n except IOError:\n pass\n else:\n f.write('\\0\\0\\0\\0')\n f.write(struct.pack('<I', self.timestamp))\n f.write(marshal.dumps(code))\n f.flush()\n f.seek(0, 0)\n f.write(imp.get_magic())\n f.close()\n\n return code",
"def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore",
"def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())",
"def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )",
"def customize_compiler(compiler):\n if compiler.compiler_type == \"unix\":\n if sys.platform == \"darwin\":\n # Perform first-time customization of compiler-related\n # config vars on OS X now that we know we need a compiler.\n # This is primarily to support Pythons from binary\n # installers. The kind and paths to build tools on\n # the user system may vary significantly from the system\n # that Python itself was built on. Also the user OS\n # version and build tools may not support the same set\n # of CPU architectures for universal builds.\n global _config_vars\n # Use get_config_var() to ensure _config_vars is initialized.\n if not get_config_var('CUSTOMIZED_OSX_COMPILER'):\n import _osx_support\n _osx_support.customize_compiler(_config_vars)\n _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'\n\n (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \\\n get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',\n 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')\n\n if 'CC' in os.environ:\n newcc = os.environ['CC']\n if (sys.platform == 'darwin'\n and 'LDSHARED' not in os.environ\n and ldshared.startswith(cc)):\n # On OS X, if CC is overridden, use that as the default\n # command for LDSHARED as well\n ldshared = newcc + ldshared[len(cc):]\n cc = newcc\n if 'CXX' in os.environ:\n cxx = os.environ['CXX']\n if 'LDSHARED' in os.environ:\n ldshared = os.environ['LDSHARED']\n if 'CPP' in os.environ:\n cpp = os.environ['CPP']\n else:\n cpp = cc + \" -E\" # not always\n if 'LDFLAGS' in os.environ:\n ldshared = ldshared + ' ' + os.environ['LDFLAGS']\n if 'CFLAGS' in os.environ:\n cflags = opt + ' ' + os.environ['CFLAGS']\n ldshared = ldshared + ' ' + os.environ['CFLAGS']\n if 'CPPFLAGS' in os.environ:\n cpp = cpp + ' ' + os.environ['CPPFLAGS']\n cflags = cflags + ' ' + os.environ['CPPFLAGS']\n ldshared = ldshared + ' ' + os.environ['CPPFLAGS']\n if 'AR' in os.environ:\n ar = os.environ['AR']\n if 'ARFLAGS' in os.environ:\n archiver = ar + ' ' + os.environ['ARFLAGS']\n else:\n archiver = ar + ' ' + ar_flags\n\n cc_cmd = cc + ' ' + cflags\n compiler.set_executables(\n preprocessor=cpp,\n compiler=cc_cmd,\n compiler_so=cc_cmd + ' ' + ccshared,\n compiler_cxx=cxx,\n linker_so=ldshared,\n linker_exe=cc,\n archiver=archiver)\n\n compiler.shared_lib_extension = shlib_suffix",
"def python_build():\n return _sys_version()[4:6]",
"def customize_compiler(compiler):\n if compiler.compiler_type == \"unix\":\n (cc, cxx, opt, basecflags, ccshared, ldshared, so_ext) = \\\n distutils.sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', 'CCSHARED', 'LDSHARED', 'SO')\n\n if os.environ.has_key('CC'):\n cc = os.environ['CC']\n if os.environ.has_key('CXX'):\n cxx = os.environ['CXX']\n if os.environ.has_key('LDSHARED'):\n ldshared = os.environ['LDSHARED']\n if os.environ.has_key('CPP'):\n cpp = os.environ['CPP']\n else:\n cpp = cc + \" -E\" # not always\n if os.environ.has_key('LDFLAGS'):\n ldshared = ldshared + ' ' + os.environ['LDFLAGS']\n if basecflags:\n opt = basecflags + ' ' + opt\n if os.environ.has_key('CFLAGS'):\n opt = opt + ' ' + os.environ['CFLAGS']\n ldshared = ldshared + ' ' + os.environ['CFLAGS']\n if os.environ.has_key('CPPFLAGS'):\n cpp = cpp + ' ' + os.environ['CPPFLAGS']\n opt = opt + ' ' + os.environ['CPPFLAGS']\n ldshared = ldshared + ' ' + os.environ['CPPFLAGS']\n\n cc_cmd = cc + ' ' + opt\n compiler.set_executables(\n preprocessor=cpp,\n compiler=cc_cmd,\n compiler_so=cc_cmd + ' ' + ccshared,\n compiler_cxx=cxx,\n linker_so=ldshared,\n linker_exe=cc)\n\n compiler.shared_lib_extension = so_ext",
"def getCompiled(self):\n if self.isCompiled():\n return self.program\n else:\n raise Exception(\"el programa no ha sido compilado aun\")",
"def get_exe_language(code_file):\n extension = code_file.split('.')[-1]\n if extension == 'py':\n return 'python'\n elif extension == 'm':\n return 'matlab'\n elif extension == 'sh':\n return 'bash'\n elif extension == 'rb':\n return 'ruby'\n else:\n print(\"Warning: file %s don't have any known extension \\\n(.py/.m/.sh/.rb)\" % code_file)\n return None",
"def _make_source(name, init, body):\n code = \"\"\"\n #include <Python.h>\n\n %(body)s\n\n PyMODINIT_FUNC\n PyInit_%(name)s(void) {\n %(init)s\n }\n \"\"\" % dict(\n name=name, init=init, body=body,\n )\n return code",
"def get_code (self, name):\n containment = self.containments.get (name)\n if containment is None:\n raise ImportError ('No such module: \\'{}\\''.format (name))\n return compile (containment [0], containment [1], 'exec')",
"def get_jdk_in_path():\n return get_java_binary_version('javac')",
"def gcc_version(gcc):\n\tversion = \"\"\n\ttry:\n\t\tversion = os.popen(\"%s --version\" % gcc).readline().split()[-1]\n\texcept:\n\t\tpass\n\treturn version",
"def getPythonCommand():\n\tthepython = \"exit 1 ; #\"\n\ttry:\n\t\timport sys\n\t\tif sys.__name__ is None:\n\t\t\traise ImportError(\"Failed to import system. WTF?!!\")\n\t\tthepython = checkPythonCommand([\"which\", \"coverage\"])\n\t\tif (str(\"/coverage\") in str(thepython)) and (sys.version_info >= (3, 3)):\n\t\t\tthepython = str(\"coverage run -p\")\n\t\telse:\n\t\t\tthepython = checkPythonCommand([\"which\", \"python3\"])\n\t\t\tif (str(\"/python3\") not in str(thepython)) or (sys.version_info <= (3, 2)):\n\t\t\t\tthepython = \"python3\"\n\texcept Exception:\n\t\tthepython = \"exit 1 ; #\"\n\t\ttry:\n\t\t\tthepython = checkPythonCommand([\"which\", \"python\"])\n\t\t\tif (str(\"/python\") in str(thepython)):\n\t\t\t\tthepython = \"python\"\n\t\texcept Exception:\n\t\t\tthepython = \"exit 1 ; #\"\n\treturn str(thepython)",
"def pyo():\n local('python -O -m compileall .')"
]
| [
"0.7020128",
"0.66713786",
"0.6648044",
"0.63955796",
"0.63104576",
"0.61867356",
"0.6165967",
"0.61243194",
"0.6005208",
"0.59626865",
"0.59392613",
"0.5927113",
"0.5848496",
"0.58295643",
"0.58110297",
"0.5800708",
"0.57865334",
"0.5751744",
"0.57448256",
"0.57179636",
"0.56994355",
"0.5627376",
"0.56193006",
"0.56124026",
"0.5608583",
"0.5581416",
"0.5563011",
"0.55485946",
"0.5501748",
"0.54990625"
]
| 0.7652477 | 0 |
Function to find the source and target of floating edges. For now it only works for edges that support the relation between two other edges; it can be extended to other cases. | def find_missing_source_target(property_restrictions, object_properties, sources_targets):
# Under this scope restrictions are all relations that indicate relationships
# between other object properties
for restriction in property_restrictions:
child = restriction["xml_object"]
geom_property = child[0]
for elem in geom_property:
if elem.attrib["as"] == "sourcePoint":
x_source, y_source = float(elem.attrib["x"]), float(elem.attrib["y"])
elif elem.attrib["as"] == "targetPoint":
x_target, y_target = float(elem.attrib["x"]), float(elem.attrib["y"])
# Iteration to look for other edges as possibles sources
for property in object_properties:
child2 = property["xml_object"]
#for child2 in root:
# We are considering the simple scenario in which the supporting or
# reference edges have source and target, however this is not always the situation
#source_child2 = child2.attrib["source"]
#target_child2 = child2.attrib["target"]
source_child2 = property["source"]
target_child2 = property["target"]
# Look for the source object and extract its geometry
for shape in sources_targets:
#for child3 in root:
child3 = shape["xml_object"]
if source_child2 == shape["id"]:
s_shape_x, s_shape_y = child3[0].attrib["x"], child3[0].attrib["y"]
s_shape_width, s_shape_height = child3[0].attrib["width"], child3[0].attrib["height"]
break
# Now compute the geometry of the initial source point of the edge
exitX = child2.attrib["style"].split("exitX=")[1].split(";")[0]
exitY = child2.attrib["style"].split("exitY=")[1].split(";")[0]
x_source_ref = float(s_shape_x) + float(s_shape_width) * float(exitX)
y_source_ref = float(s_shape_y) + float(s_shape_height) * float(exitY)
source_point_ref = [x_source_ref, y_source_ref]
# Look for the target object and extract its geometry
for shape in sources_targets:
#for child3 in root:
                child3 = shape["xml_object"]  # bind the XML element of the candidate target shape
                if target_child2 == shape["id"]:
t_shape_x, t_shape_y = child3[0].attrib["x"], child3[0].attrib["y"]
t_shape_width, t_shape_height = child3[0].attrib["width"], child3[0].attrib["height"]
break
            # Now compute the geometry of the final target point of the edge
entryX = child2.attrib["style"].split("entryX=")[1].split(";")[0]
entryY = child2.attrib["style"].split("entryY=")[1].split(";")[0]
x_target_ref = float(t_shape_x) + float(t_shape_width) * float(entryX)
y_target_ref = float(t_shape_y) + float(t_shape_height) * float(entryY)
target_point_ref = [x_target_ref, y_target_ref]
            # We have to determine how many inflexion points it has; however, sometimes this is
            # not indicated explicitly and has to be derived from the associated shapes.
# Try to iter over the mxGeom elements, if they exist
elem = [i for i in child2[0] if i.attrib["as"] == "points"]
# if you found an element with the attribute "points", Eureka!
if len(elem) != 0:
#for elem in child2[0]:
#if elem.attrib["as"] == "points":
elem = elem[0]
points_ref = []
for mxPoint in elem:
point = [float(mxPoint.attrib["x"]), float(mxPoint.attrib["y"])]
points_ref.append(point)
points_ref.insert(0, source_point_ref)
points_ref.append(target_point_ref)
# At this point you already have all the points for a candidate line
# We are going to evaluate each segment of the candidate line
for index in range(len(points_ref) - 1):
point_A = points_ref[index]
point_B = points_ref[index + 1]
if point_A[0] > point_B[0]:
min_x, max_x = point_B[0], point_A[0]
else:
min_x, max_x = point_A[0], point_B[0]
if point_A[1] > point_B[1]:
min_y, max_y = point_B[1], point_A[1]
else:
min_y, max_y = point_A[1], point_B[1]
if restriction["source"] is None:
if x_source > min_x - 5 and x_source < max_x + 5:
x_within_limit = True
else:
x_within_limit = False
if y_source > min_y - 5 and y_source < max_y + 5:
y_within_limit = True
else:
y_within_limit = False
if x_within_limit and y_within_limit:
restriction["source"] = property["id"]
break
if restriction["target"] is None:
if x_target > min_x - 5 and x_target < max_x + 5:
x_within_limit = True
else:
x_within_limit = False
if y_target > min_y - 5 and y_target < max_y + 5:
y_within_limit = True
else:
y_within_limit = False
if x_within_limit and y_within_limit:
restriction["target"] = property["id"]
break
if restriction["source"] is not None and restriction["target"] is not None:
break
else:
# Sometimes we have straight lines and the process is simple to evaluate
if x_source_ref == x_target_ref or y_source_ref == y_target_ref:
point_A = [x_source_ref, y_source_ref]
point_B = [x_target_ref, y_target_ref]
if point_A[0] > point_B[0]:
min_x, max_x = point_B[0], point_A[0]
else:
min_x, max_x = point_A[0], point_B[0]
if point_A[1] > point_B[1]:
min_y, max_y = point_B[1], point_A[1]
else:
min_y, max_y = point_A[1], point_B[1]
if restriction["source"] is None:
if x_source > min_x - 5 and x_source < max_x + 5:
x_within_limit = True
else:
x_within_limit = False
if y_source > min_y - 5 and y_source < max_y + 5:
y_within_limit = True
else:
y_within_limit = False
if x_within_limit and y_within_limit:
restriction["source"] = property["id"]
break
if restriction["target"] is None:
if x_target > min_x - 5 and x_target < max_x + 5:
x_within_limit = True
else:
x_within_limit = False
if y_target > min_y - 5 and y_target < max_y + 5:
y_within_limit = True
else:
y_within_limit = False
if x_within_limit and y_within_limit:
restriction["target"] = property["id"]
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getEdge(source: LNode, target: LNode) -> LEdge:\n for edge in source.getConnectedEdges():\n # [TODO] or is suspicious\n if (edge.dstNode is target) or (edge.srcNode is target):\n return edge\n\n return None",
"def referenceEdge(u,v):\n v1 = u\n v2 = v\n\n e1 = u.getEdge().getPrev()\n e2 = v.getEdge().getPrev()\n\n aux = None #aux is an half-edge incident to u \n while aux != e1:\n if aux is None: aux = e1\n aux2 = None #aux2 is an half-edge incident to v\n\n while aux2 != e2:\n if aux2 is None: aux2 = e2\n if aux.getFace() == aux2.getFace(): return aux\n aux2 = aux2.getNext().getTwin() \n\n aux = aux.getNext().getTwin()\n\n return e1",
"def find_edges(starting_point, max_dist, hi, lo, bgArray):\n try:\n b = fetch_val(bgArray, starting_point)\n except IndexError:\n return None\n offsets = [(0,1), (1,0), (0,-1), (-1,0)]\n edgePoints = []\n for offset in offsets:\n first_result = find_edge(starting_point, offset, max_dist, hi, lo, bgArray)\n if first_result is not None:\n edgePoints.append(first_result[0])\n if b < lo or b > hi:\n # Try to find second point, since starting click was outside threshold\n second_result = find_edge(first_result[0], offset, max_dist - first_result[1], hi, lo, bgArray)\n if second_result is not None:\n edgePoints.append(second_result[0])\n return edgePoints",
"def adjacentTo()\r\n\r\n def fitsWith(self, other):\r\n # TODO: Can use list comprehesion??\r\n for i, selfEdge in enumerate(self.getEdges()):\r\n for j, otherEdge in other.getEdges():\r\n if selfEdge.fitsWith(otherEdge):\r\n return i, j\r\n return False # made it here, so no edge fits together\r",
"def find_flows(flows,\n source_query,\n target_query,\n flow_query=None,\n ignore_edges=None):\n if flow_query is not None:\n flows = flows[eval_selection(flows, '', flow_query)]\n\n if source_query is None and target_query is None:\n raise ValueError('source_query and target_query cannot both be None')\n\n elif source_query is None and target_query is not None:\n qt = eval_selection(flows, 'target', target_query)\n qs = (~eval_selection(flows, 'source', target_query) &\n ~flows.index.isin(ignore_edges or []))\n\n elif source_query is not None and target_query is None:\n qs = eval_selection(flows, 'source', source_query)\n qt = (~eval_selection(flows, 'target', source_query) &\n ~flows.index.isin(ignore_edges or []))\n\n else:\n qs = eval_selection(flows, 'source', source_query)\n qt = eval_selection(flows, 'target', target_query)\n\n f = flows[qs & qt]\n if source_query is None:\n internal_source = None\n else:\n internal_source = flows[qs & eval_selection(flows, 'target',\n source_query)]\n if target_query is None:\n internal_target = None\n else:\n internal_target = flows[qt & eval_selection(flows, 'source',\n target_query)]\n\n return f, internal_source, internal_target",
"def get_edge(self, from_, to):\n pass",
"def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 
\r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)",
"def edge_from_aif(\n obj: aif.Edge, nodes: t.Mapping[str, AbstractNode], config: Config\n) -> t.Optional[Edge]:\n source_id = obj.get(\"fromID\")\n target_id = obj.get(\"toID\")\n\n if source_id in nodes and target_id in nodes:\n return config.EdgeClass(\n id=obj[\"edgeID\"],\n source=nodes[source_id],\n target=nodes[target_id],\n )\n else:\n warn_missing_nodes(obj[\"edgeID\"], source_id, target_id)\n\n return None",
"def path(most_important_up, most_important_down, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return source_to_source(most_important_up, most_important_down), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]",
"def __filterEdges(self):",
"def find_boundary(edges):\n\n inputs = set([x[0] for x in edges])\n outputs = set([x[1] for x in edges])\n for e in edges:\n inputs.discard(e[1])\n outputs.discard(e[0])\n return inputs, outputs",
"def Features(src, dest): \n # find common nodes of x and y\n common_out_x = out_edges.get(src,set())\n common_out_y = out_edges.get(dest, set())\n common_in_x = in_edges.get(src, set())\n common_in_y = in_edges.get(dest, set())\n\n\n intersection = (common_in_x|common_out_x) & \\\n (common_in_y|common_out_y)\n union = (common_in_x|common_out_x) | \\\n (common_in_y|common_out_y)\n\n # cosine similarity\n connected_x = common_out_x | common_in_x\n connected_y = common_out_y | common_in_y\n prod_xy = float(len(connected_x)*len(connected_y))\n cosine = len(intersection) / prod_xy if prod_xy != 0.0 else 0\n\n # jaccard\n jaccard = float(len(intersection)) / len(union) \\\n if len(union) > 0 else 0\n\n jaccard_mutate = Jaccard(dest, out_edges.get(src))\n\n #\n # compute the adamic/adar value of source node and\n # destination node\n adar_set = 0\n for z in intersection:\n degrees = 0\n # degrees of a common node\n if out_edges.get(z):\n degrees += len(out_edges[z]) \n if in_edges.get(z):\n degrees += len(in_edges[z]) \n\n if degrees != 0:\n adar_set += 1 / log(degrees)\n\n # preferential attachment\n pref_attach = len(connected_x) * len(connected_y)\n\n # kn1\n w_src_out = 1 / sqrt(1+len(common_out_x))\n w_dest_in = 1 / sqrt(1+len(common_in_y))\n w_kn1 = w_src_out * w_dest_in\n\n\n #return [len(common_out_x), len(common_in_y), jaccard, adar_set]\n return [src,\n dest,\n len(common_out_x),\n len(common_in_x),\n len(common_out_y),\n len(common_in_y), \n len(intersection),\n cosine,\n jaccard,\n jaccard_mutate,\n adar_set,\n pref_attach,\n w_kn1]",
"def edge_direction(a, b):\n if a[0] == b[0]:\n return -1, 1\n elif a[0] == b[1]:\n return -1, -1\n elif a[1] == b[0]:\n return 1, 1\n elif a[1] == b[1]:\n return 1, -1\n else:\n constants.log.debug('\\n'.join([\n 'edges not connected!',\n 'vertex path %s',\n 'entity path: %s',\n 'entity[a]: %s,',\n 'entity[b]: %s']),\n vertex_path,\n entity_path,\n entities[ea].points,\n entities[eb].points)\n\n return None, None",
"def edges(self):\n return self.dovetails + self.containments + self.internals",
"def _edge(u, v):\n return (u, v) if u < v else (v, u)",
"def get_edge(self, source: Node, target: Node) -> Optional[Edge]:\r\n return self.get_edge_by_index(source.index, target.index)",
"def _append_source_and_target(self, graph):\n graph.add_node( \"source\" )\n graph.add_node( \"target\" )\n \n for leave in (n for n,d in graph.out_degree_iter() if d==0):\n if leave is not \"source\" and leave is not \"target\":\n graph.add_edge( leave, \"target\" )\n \n for root in (n for n,d in graph.in_degree_iter() if d==0):\n if root is not \"source\" and root is not \"target\": \n graph.add_edge( \"source\", root )",
"def edge_ground(X):\n gradient_x = img_conv(X, kernel_sobel_x)\n gradient_y = img_conv(X, kernel_sobel_x.transpose())\n mag = (gradient_x ** 2.0 + gradient_y ** 2.0) ** 0.5\n is_edge = mag > 1.0\n return is_edge.astype('f')",
"def onImageEdge(self, tolerance=1):\n fs = FeatureSet()\n for f in self:\n if(f.onImageEdge(tolerance)):\n fs.append(f)\n return fs",
"def find_next(to_nodes, from_node):\n for to_node in to_nodes:\n interaction = (from_node, to_node)\n if interaction not in used_edges:\n return interaction\n return None",
"def test_graph_factory_with_ambiguous_edges(\n gdcmodels: FakeModels,\n gdcdictionary: models.FakeDictionary,\n src_id: str,\n dst_id: str,\n edge_label: str,\n circle_1_to_2: str,\n circle_2_to_1: str,\n) -> None:\n gf = GraphFactory(gdcmodels, gdcdictionary)\n\n nodes = [\n {\"label\": \"circle_1\", \"node_id\": UUID1},\n {\"label\": \"circle_2\", \"node_id\": UUID2},\n ]\n\n edges = [\n {\"src\": src_id, \"dst\": dst_id, \"label\": edge_label},\n ]\n\n created_nodes = gf.create_from_nodes_and_edges(nodes=nodes, edges=edges, unique_key=\"node_id\")\n\n assert len(created_nodes) == 2\n\n circle_1s = [n for n in created_nodes if n.label == \"circle_1\"]\n assert len(circle_1s) == 1\n circle_1 = circle_1s[0]\n circle_1_to_2_assic = getattr(circle_1, circle_1_to_2)\n assert len(circle_1_to_2_assic) == 1\n circle_2s = [n for n in created_nodes if n.label == \"circle_2\"]\n assert len(circle_2s) == 1\n circle_2 = circle_2s[0]\n circle_2_to_1_assoc = getattr(circle_2, circle_2_to_1)\n assert len(circle_2_to_1_assoc) == 1\n assert circle_1_to_2_assic[0] == circle_2\n assert circle_2_to_1_assoc[0] == circle_1\n\n assert len(circle_1.edges_out + circle_1.edges_in) == 1\n assert len(circle_2.edges_out + circle_2.edges_in) == 1",
"def dfs_edges_generator(graph, source, reverse=...):\n ...",
"def bfs(self, source, target):\n source.color = TriColor.WHITE\n target.color = TriColor.WHITE\n\n Q = deque()\n Q.append(source)\n\n while len(Q) > 0:\n v = Q.popleft()\n if v.color == TriColor.BLACK:\n # a previously finished vertex\n # used when graph vertices (e.g. `self.neighbors_of()` is calculated dynamically)\n continue\n else:\n v.color = TriColor.BLACK # mark finished\n if v == target:\n # re-assign `target` in case `Vertex.__eq__` has been overridden\n target = v\n break\n\n for w, _ in self.neighbors_of(v, color=TriColor.WHITE):\n w.color = TriColor.GRAY # mark discovered\n w.bfs_parent = v\n Q.append(w)\n\n S = [] # holds the shortest path, or empty if None\n u = target\n if u.color == TriColor.BLACK:\n while u is not None:\n S.append(u)\n u = u.bfs_parent\n\n if len(S) > 0:\n path = S[::-1]\n distance = len(path)\n else:\n path = None\n distance = None\n return path, distance",
"def fXYs(self) -> Dict[Tuple[int, ...], Optional[float]]:\n return {tuple(es.targets): es.fXY for es in self.edges_specs}",
"def edge_sig(T, source=[], target=[], type_='out'):\n case_cnt = sum([v[0] for v in T['start'].values()])\n S = dict()\n for a_i in source:\n S[a_i] = dict()\n target_ = T if type_ != 'out' else T[a_i]\n for a_j in target_:\n if (a_i == a_j) | (a_j not in target): continue\n if type_ != 'out':\n if a_i in T[a_j]: S[a_i][a_j] = T[a_j][a_i][1] / case_cnt\n else: S[a_i][a_j] = T[a_i][a_j][1] / case_cnt\n return S",
"def min_path(vs, es, source, target):\n dijkstra(vs, es, source, stop = target)\n test = target\n result = []\n while test != source:\n e = test._ss_edge\n result.append(e)\n test = e.v1 if e.v1 != test else e.v2\n assert test == source and test._ss_edge is None\n return result[::-1]",
"def bfs_edges_generator(graph, source, reverse=...):\n ...",
"def get_connection_directions(self, source: JuncNode, target: JuncNode) \\\n -> Tuple[JuncConnDirection, JuncConnDirection]:\n for node in source.all_nodes:\n for conn in node.get_connections():\n other_node = conn.other\n other_junc = self.get_junc_from_node(other_node)\n if other_junc == target:\n return source.side_of_node(node), target.side_of_node(other_node)\n raise Exception(f\"these juncs are not connected.\\n{source}\\n{target}\")",
"def e(src, dst):\n edge = pydot.Edge(src, dst)\n graph.add_edge(edge)",
"def find_edge(point, offset, max_dist, hi, lo, bgArray):\n for i in range(1, max_dist):\n next = (point[0] + i * offset[0], point[1] + i * offset[1])\n if is_edge(next, hi, lo, bgArray):\n return (next, i)\n return None"
]
| [
"0.6024941",
"0.58484167",
"0.5752038",
"0.5737474",
"0.57083946",
"0.56946784",
"0.56158674",
"0.5598349",
"0.55899394",
"0.55752385",
"0.55166656",
"0.5508613",
"0.5493426",
"0.5487787",
"0.5427421",
"0.5421382",
"0.5415151",
"0.5399748",
"0.5381808",
"0.53680706",
"0.5365186",
"0.5363713",
"0.53420854",
"0.5327927",
"0.53208584",
"0.53146666",
"0.5310788",
"0.53054523",
"0.53053737",
"0.53008825"
]
| 0.6372637 | 0 |
This function keeps the album table clean, without any empty albums. Empty albums are albums that have no picture associated with them. This is necessary when uploading SenseCam images, which are uploaded into a temporary album at the beginning; a temporary album is created for this purpose. | def remove_empty_albums(aid):
print "aid"
print aid
if aid is None:
return
con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')
with con:
query = "SELECT count(*) from fileuploader_picture WHERE album_id=%s" % (aid)
cur = con.cursor()
cur.execute(query)
data = cur.fetchall()
# there is no picture in this album
print "len(data)"
print len(data)
        # count(*) returns a single row, so check the count value rather than the row count
        if data[0][0] == 0:
query = "DELETE from fileuploader_album WHERE id=%s" % (aid)
print query
cur = con.cursor()
cur.execute(query) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_up(self, graph):\n # Delete albums associated with place\n if len(self.albums) != 0:\n for album in self.albums:\n album.clean_up()\n album.delete(graph)",
"def test_single_track_blank_album(self):\n self.add_mp3(set_album=True, album='')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no album tag', status)\n self.assertEqual(self.get_album_count(), 0)",
"def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()",
"def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)",
"def appendAlbum(song):\n\tsql = []\n\tsql.append(\"INSERT INTO ALBUM ('name') VALUES ('\" \n\t+ '/'.join(song.album) + \"');\")\n\t\n\tsql.append(\"INSERT INTO songs_album ('songs_id', 'album_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from album where name = '\" + '/'.join(song.album) + \"'));\")\n\tsql.append(\"INSERT INTO artist_album ('artist_id', 'album_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from album where name = '\" + '/'.join(song.album) + \"'));\")\n\t\n\treturn sql",
"def test_single_track_no_album(self):\n self.add_mp3(set_album=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no album tag', status)\n self.assertEqual(self.get_album_count(), 0)",
"def test_get_all_need_transform_no_albums_matched(self):\n orig_album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120, last_transform=1)\n pk = orig_album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n\n self.assertEqual(Album.get_all_need_transform(self.app.curs, 1), [])",
"def test_get_all_need_transform_one_album(self):\n orig_album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = orig_album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n\n albums = Album.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(albums), 1)\n self.assertEqual(albums[0].pk, pk)",
"def save_all(self):\n data = []\n for key, albums in self.albums_to_update.items():\n self.albums_to_save[key] += albums\n with wait_cursor(self._parent):\n for artist, albumdata in self.albums_to_save.items():\n if not albumdata:\n continue\n artistid = self.artist_map[artist]\n data = []\n for name, year, key, is_live, tracks in albumdata:\n if key == 'X':\n key = 0\n data.append((key, name, year, is_live, tracks))\n albums = dmla.update_albums_by_artist(artistid, data)\n albums_map_lookup = {build_album_name(x): x.id for x in albums}\n for c_name, value in self.albums_map[artist].items():\n a_name, id = value\n try:\n test = albums_map_lookup[a_name]\n except KeyError:\n continue\n if id != test:\n self.albums_map[artist][c_name] = (a_name, test)\n self.albums_to_save.clear()\n self.albums_to_update.clear()\n self._parent.albums_map = self.albums_map\n self._parent.albums_map.update({x: {} for x, y in self.albums_map.items()\n if not y})\n ## self.last_handled = None\n save_appdata([self._parent.artist_map, self._parent.albums_map])\n self.refresh_screen(self.artist_list.currentIndex())",
"def clear_unique_audio(self):\n self.top_unique_audio_entry.delete(0, END)\n self.top_unique_audio_box.delete(0, END)\n self.unique_audio_found = False\n self.missing_files_label.grid_remove()",
"def test_get_all_need_transform_no_albums(self):\n self.assertEqual(Album.get_all_need_transform(self.app.curs, 1), [])",
"def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()",
"def test_track_without_association(self):\n track = Track(artist='Artist', album='Album')\n pk = track.insert(self.app.db, self.app.curs,\n 'xmms',\n datetime.datetime.now())\n\n for line in self.app.associate_albums():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['album_id'], 0)",
"def test_get_all_unassociated_single_track_with_album(self):\n track = Track(artist='Artist', album='Album', title='Title')\n track.insert(self.app.db, self.app.curs,\n 'xmms', datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(len(tracks), 1)\n self.assertEqual(tracks[0].artist, 'Artist')\n self.assertEqual(tracks[0].title, 'Title')\n self.assertEqual(tracks[0].album_id, 0)",
"def test_no_tracks(self):\n (added, status) = self.app.add_album([])\n self.assertEqual(added, False)\n self.assertIn('No files', status)\n self.assertEqual(self.get_album_count(), 0)",
"def clean():\n\n tracks = []\n removed_playlists = 0\n for playlist in PlaylistManager.find():\n\n if len(playlist.tracks) == 0:\n PlaylistManager.remove(playlist.id)\n removed_playlists += 1\n else:\n tracks += playlist.tracks\n\n tracks = list(set(tracks))\n removed_tracks = 0\n for track in TrackManager.find():\n if track.id not in tracks:\n TrackManager.remove(track.id)\n removed_tracks += 1\n\n click.secho(\"Cleanup removed:\", bold=True)\n click.secho(\n tabulate( # type: ignore\n [\n (magenta(\"Tracks:\"), removed_tracks),\n (magenta(\"Playlists:\"), removed_playlists),\n ],\n tablefmt=\"plain\",\n colalign=(\"right\", \"left\"),\n )\n )",
"def clean_up(self):\n cleaning_sqls = [\"delete from meter where id not in (select meter_id from hymn)\",\n \"delete from author where id not in (select author_id from hymn)\",\n \"delete from category where id not in (select category_id from hymn)\"]\n for sql in cleaning_sqls:\n self.cur.execute(sql)\n self.conn.commit()",
"def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')",
"def reset(self):\n reset_query = \"DELETE * FROM %s\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(reset_query)",
"def clear_thumbnails(self):",
"def clear_images(self):\r\n\r\n with translate_errors():\r\n self.audio.clear_pictures()\r\n self.audio.save()\r\n\r\n super().clear_images()",
"def tracked_albums():\n print('Your Google Photos Albums ([X] = tracked):')\n albums = get_albums(service)\n for i, a in enumerate(albums):\n check = 'X' if a.id in library.get_album_ids() else ' '\n print('[{}] {}. {}'.format(check, i+1, a.title))\n return albums",
"def song_clear():\r\n try:\r\n # Drop all tables then recreate them.\r\n Base.metadata.drop_all(bind=engine)\r\n print colored.red(\"Database cleared successfully.\", bold=12)\r\n Base.metadata.create_all(bind=engine)\r\n except:\r\n session.rollback()",
"def clear_all(self):\n self._item_picture = None\n self._bar_picutures.clear()\n self._bar_result.clear()\n self.update()",
"def clear_table(self):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"TRUNCATE TABLE film;\"\n cur.execute(sql)\n db.commit()\n except:\n return\n\n db.close()",
"def test_delete_album_all_fails(self):\n\n web.app.config['READONLY'] = False\n\n # Delete all albums\n response = self.client.delete('/album/')\n self.assertEqual(response.status_code, 405)\n\n # Note: if this fails, all albums have gone and rest of\n # tests will fail!",
"def clean(self):\n cursor = self.cnx.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS FileNameFilter_scores\")\n cursor.execute(\"DROP TABLE IF EXISTS FileNameFilter_unique_name\")\n self.cnx.commit()\n cursor.close()",
"def upload_album(self, hash, girl=True):\n images = self._imgur.album_images(hash)['data']\n for image in images:\n doc = {\n '_id': image['id'],\n 'rating': 0.0,\n 'count': 0,\n 'link': image['link']\n }\n\n try:\n if girl:\n self._db.girls.insert_one(doc)\n else:\n self._db.boys.insert_one(doc)\n except pymongo.errors.DuplicateKeyError:\n continue",
"def test_shared_albums(self):\n with ts.SetupDbAndCredentials() as s:\n args = [\"--skip-files\"]\n s.test_setup(\n \"test_shared_albums\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n t = (\n TestAccount.album_image_count\n + TestAccount.album_shared_image_count\n + TestAccount.shared_album_image_count\n + TestAccount.shared_album_shared_image_count\n )\n\n with LocalData(s.root) as db:\n db.cur.execute(\"SELECT COUNT() FROM AlbumFiles\")\n count = db.cur.fetchone()\n self.assertEqual(\n t,\n count[0],\n \"expected {} files in all albums including shared\".format(t),\n )\n\n with ts.SetupDbAndCredentials() as s:\n args = [\"--skip-files\", \"--skip-shared-albums\"]\n s.test_setup(\n \"test_shared_albums\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n # note that unless we use --no-album-index the shared files in the\n # visible album will show up here\n t = (\n TestAccount.album_image_count + TestAccount.album_shared_image_count\n ) # see above\n with LocalData(s.root) as db:\n db.cur.execute(\"SELECT COUNT() FROM AlbumFiles\")\n count = db.cur.fetchone()\n self.assertEqual(\n t,\n count[0],\n \"expected {} files in all albums excluding shared\".format(t),\n )",
"def cleanTable(self):\n self.currentGroup = None"
]
| [
"0.6798063",
"0.58578044",
"0.5783619",
"0.5745405",
"0.5689042",
"0.56451136",
"0.55336326",
"0.547788",
"0.5440784",
"0.5437586",
"0.54365647",
"0.538309",
"0.53422564",
"0.53341514",
"0.5314252",
"0.53092307",
"0.5274518",
"0.52541596",
"0.5244834",
"0.5214889",
"0.51779515",
"0.51679724",
"0.516582",
"0.5160183",
"0.5149715",
"0.51378566",
"0.50867915",
"0.5067683",
"0.5044793",
"0.5041042"
]
| 0.7484433 | 0 |
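As a side note on the cleanup logic in the record above: the following is a minimal sketch of the same idea written as a single guarded DELETE with driver-side parameter binding. The table and column names (fileuploader_album, fileuploader_picture, album_id) and the connection settings are taken from the snippet above; the function name remove_album_if_empty is hypothetical, and this is an illustrative sketch rather than the project's actual implementation.

import MySQLdb as mdb

def remove_album_if_empty(aid):
    """Delete the album row only when no picture references it (illustrative sketch)."""
    if aid is None:
        return
    con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')
    with con:
        cur = con.cursor()
        # One guarded DELETE replaces the separate COUNT(*) round trip, and the
        # %s placeholders are bound by the driver instead of string formatting.
        cur.execute(
            "DELETE FROM fileuploader_album "
            "WHERE id = %s "
            "AND NOT EXISTS (SELECT 1 FROM fileuploader_picture WHERE album_id = %s)",
            (aid, aid),
        )

Whether the single-statement form is preferable depends on how the upload path batches its cleanup; for a single album id it is equivalent to the COUNT-then-DELETE steps shown in the record above.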
This function retrieves the sensor type id corresponding to a given abbreviation | def get_sensor_type_id(abbreviation):
    if abbreviation is None:
        return None
    con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')
    with con:
        # a parameterized query lets the driver quote and escape the string abbreviation
        query = "SELECT id from fileuploader_sensortype WHERE abbreviation=%s"
        cur = con.cursor()
        cur.execute(query, (abbreviation,))
        data = cur.fetchall()
        print("data")
        print(data)
        if len(data) > 0:
            # each fetched row is a 1-tuple, so return the id value itself
            return data[0][0]
    return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sensor_type_id(sensor_type_name):\n query = db.session.query(\n TypeClass.id,\n ).filter(TypeClass.sensor_type == sensor_type_name)\n sensor_id = db.session.execute(query).fetchone()\n if isinstance(sensor_id, Iterable):\n sensor_id = sensor_id[0]\n return sensor_id",
"def get_id(type_: Dict[str, str]) -> int:\n return int(type_[f'{type_name}_id'])",
"def get_sensor_type_name(sensor_type_id):\n query = db.session.query(\n TypeClass.sensor_type,\n ).filter(TypeClass.id == sensor_type_id)\n sensor_name = db.session.execute(query).fetchone()\n if isinstance(sensor_name, Iterable):\n sensor_name = sensor_name[0]\n return sensor_name",
"def _get_id(self, zone=0, sensor_type=None):\n if str(zone) == \"0\":\n return self._name + HUB_SENSOR_NAME\n else:\n if sensor_type:\n return self._name + \"_\" + str(zone) + \"_\" + sensor_type\n else:\n _LOGGER.error(\n \"Hub: Get ID: Not allowed to create an entity_id without type, unless zone == 0.\"\n )",
"def getUnitType(self, id):\n self.send(\"#5\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAP800\"\n self.send(\"#7\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAP400\"\n self.send(\"#4\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"PSR1212\"\n self.send(\"#6\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAPTH2\"\n return \"No Device Found\"",
"def get_sensor_type(self):\n return self.data[1][:-1]",
"def get_type_id(type_url):\n # TODO\n return type_url",
"def get_default_sensor_type():\n return get_sensor_type_id(DEFAULT_SENSOR_TYPE)",
"def street_type():\r\n\r\n cursor.execute('SELECT * FROM street_types \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def getTypeID(self) -> int:\n ...",
"def get_device_type(device_type_id):\n netAdminToolDB = app.config['DATABASE']\n device_type = netAdminToolDB.get_device_type(device_type_id)\n\n if device_type == None:\n return jsonify({'error': 'device_type_id not found'}), 404\n\n return jsonify({'device_type': {\n 'id': device_type.id,\n 'make': device_type.make,\n 'model': device_type.model,\n 'code': device_type.code\n }\n\n\n })",
"def get_alarm_type_name(self, a_types, alarm_type_id):\r\n\r\n for a_type in a_types['rows']:\r\n\r\n if a_type['alarm_type_id'] == alarm_type_id:\r\n\r\n return a_type['alarm_type']\r\n\r\n return 0",
"def sensor_id(raft, ccd):\n return 'R%s%s_S%s%s' % (raft[2], raft[4], ccd[2], ccd[4])",
"def wrap_asdu_type(self, asdu_type):\n if not type(asdu_type) is str:\n return \"ERROR: The ASDU type has to be a string.\"\n if asdu_type == 'M_BO_NA_1':\n type_id = M_BO_NA_1\n elif asdu_type == 'M_ME_NC_1':\n type_id = M_ME_NC_1\n elif asdu_type == 'C_SC_NA_1':\n type_id = C_SC_NA_1\n elif asdu_type == 'C_IC_NA_1':\n type_id = C_IC_NA_1\n elif asdu_type == 'C_RD_NA_1':\n type_id = C_RD_NA_1\n else:\n return \"ERROR: The ASDU type was not recognized.\"\n return type_id",
"def _type(self):\n return self._id[1]",
"def _device_type_returner(self, symbol):\n if self.names.get_name_string(symbol.id) == \"AND\":\n return self.devices.AND\n if self.names.get_name_string(symbol.id) == \"OR\":\n return self.devices.OR\n if self.names.get_name_string(symbol.id) == \"NAND\":\n return self.devices.NAND\n if self.names.get_name_string(symbol.id) == \"NOR\":\n return self.devices.NOR\n if self.names.get_name_string(symbol.id) == \"XOR\":\n return self.devices.XOR\n if self.names.get_name_string(symbol.id) == \"CLOCK\":\n return self.devices.CLOCK\n if self.names.get_name_string(symbol.id) == \"SWITCH\":\n return self.devices.SWITCH\n if self.names.get_name_string(symbol.id) == \"DTYPE\":\n return self.devices.D_TYPE\n if self.names.get_name_string(symbol.id) == \"SIGGEN\":\n return self.devices.SIGGEN",
"def _ecc_id(self, ecc_type_name):\n return self.ecc_lookup[ecc_type_name]",
"def drive_type():",
"def sensor_types():\n sensors = FetchandStore.get_data(\"https://tie.digitraffic.fi/api/v1/data/weather-data\")\n sensornames = [sensor[\"name\"] for sensor in sensors ]\n sensornames = list(set(sensornames))\n for index, sensorname in enumerate(sorted(sensornames)):\n print(index, sensorname)",
"def typeToName(type: int) -> unicode:\n ...",
"def guess_part_type(self, data):\n if 'administrativEnhet' in data or 'saksbehandler' in data:\n typename = 'intern'\n elif 'kontaktperson' in data \\\n or -1 != data['navn'].find(' AS'):\n typename = 'enhet'\n else:\n typename = 'person'\n return typename",
"def get_type_id(self):\n\n raise Exception(\"Not implemented!\"+self.__class__)",
"def unique_id(self):\n return self.device_id + '_' + self._sensor_type",
"def get_device_type_by_name(name):\n\n type_id = None\n # try:\n for link in NetworkAdapter.LINKS:\n if link['name'] == name:\n type_id = link['id']\n break\n # except IndexError:\n # raise Exception('Parent category does not exist')\n if not type_id:\n raise Exception('Wrong type network adapter')\n\n return type_id",
"def __get_device_type_name(self, mps_db_session, device_type_id):\n device_type = mps_db_session.query(models.DeviceType).filter(models.DeviceType.id==device_type_id).all()\n\n if len(device_type) == 1:\n return device_type[0].name\n elif len(device_type) == 0:\n raise ValueError(\"Function \\\"__get_device_type_name(device_type_id={}). Not fault was found.\\\"\"\n .format(device_type_id))\n else:\n raise ValueError(\"Function \\\"__get_device_type_name(device_type_id={}). More than one device matches.\\\"\"\n .format(device_type_id))",
"def type_id(self):\n return self._device.type",
"def getId(self):\n return _libsbml.SpeciesType_getId(self)",
"def _get_sensor_type(self) -> list[str | None]:\n pres = self.gateway.const.Presentation\n set_req = self.gateway.const.SetReq\n\n _sensor_type = SENSORS.get(set_req(self.value_type).name, [None, None, None])\n if isinstance(_sensor_type, dict):\n sensor_type = _sensor_type.get(\n pres(self.child_type).name, [None, None, None]\n )\n else:\n sensor_type = _sensor_type\n return sensor_type",
"def _get_type_name(self, st_type):\n if st_type <= 2045: return 'str' + str(st_type)\n return self._type_names[st_type]",
"def get_type_s(self, type):\r\n\r\n return HTTP2_NAMES.get(type, None)"
]
| [
"0.69486004",
"0.68731916",
"0.66719717",
"0.6295486",
"0.6116762",
"0.6110051",
"0.6105108",
"0.6088619",
"0.6078984",
"0.6076802",
"0.6066289",
"0.6006523",
"0.5961502",
"0.588481",
"0.5851543",
"0.58258665",
"0.5825055",
"0.5802354",
"0.5762437",
"0.5751909",
"0.57310176",
"0.57282513",
"0.570619",
"0.5691793",
"0.5685748",
"0.56845325",
"0.5678976",
"0.56713533",
"0.5669413",
"0.56509686"
]
| 0.8284104 | 0 |
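For the sensor-type lookup in the record above, here is a minimal usage-style sketch under the same assumptions (the same MySQL schema and connection settings as in that record; the helper name lookup_sensor_type_id is hypothetical). It shows the fetchone() variant of the same query, which is sufficient when an abbreviation identifies at most one row.

import MySQLdb as mdb

def lookup_sensor_type_id(abbreviation):
    """Return the sensor type id for an abbreviation, or None when it is unknown (sketch)."""
    if abbreviation is None:
        return None
    con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')
    with con:
        cur = con.cursor()
        # fetchone() is enough here because abbreviations are expected to be unique
        cur.execute(
            "SELECT id FROM fileuploader_sensortype WHERE abbreviation = %s",
            (abbreviation,),
        )
        row = cur.fetchone()
        return row[0] if row is not None else None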
Plot a performance metric vs. forecast horizon from cross validation. Cross validation produces a collection of outofsample model predictions that can be compared to actual values, at a range of different horizons (distance from the cutoff). This computes a specified performance metric for each prediction, and aggregated over a rolling window with horizon. This uses fbprophet.diagnostics.performance_metrics to compute the metrics. Valid values of metric are 'mse', 'rmse', 'mae', 'mape', and 'coverage'. rolling_window is the proportion of data included in the rolling window of aggregation. The default value of 0.1 means 10% of data are included in the aggregation for computing the metric. As a concrete example, if metric='mse', then this plot will show the squared error for each cross validation prediction, along with the MSE averaged over rolling windows of 10% of the data. | def plot_cross_validation_metric(
df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)
):
if ax is None:
fig = plt.figure(facecolor='w', figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
# Get the metric at the level of individual predictions, and with the rolling window.
df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)
df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)
# Some work because matplotlib does not handle timedelta
# Target ~10 ticks.
tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.
# Find the largest time resolution that has <1 unit per bin.
dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']
dt_names = [
'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',
'nanoseconds'
]
dt_conversions = [
24 * 60 * 60 * 10 ** 9,
60 * 60 * 10 ** 9,
60 * 10 ** 9,
10 ** 9,
10 ** 6,
10 ** 3,
1.,
]
for i, dt in enumerate(dts):
if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):
break
x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])
x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])
ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')
ax.plot(x_plt_h, df_h[metric], '-', c='b')
ax.grid(True)
ax.set_xlabel('Horizon ({})'.format(dt_names[i]))
ax.set_ylabel(metric)
return fig | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_training_history(history, metric):\n \n val_metric = 'val_'+metric\n acc = history.history[metric]\n val_acc = history.history[val_metric]\n \n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n epochs_range = history.epoch\n \n plt.figure(figsize=(8, 8))\n plt.subplot(2, 1, 1)\n plt.plot(epochs_range, acc, label='Training Acc.')\n plt.plot(epochs_range, val_acc, label='Validation Acc.')\n plt.legend(loc='best',)\n plt.title('Training and Validation Accuracy')\n \n plt.subplot(2, 1, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='best')\n plt.title('Training and Validation Loss')\n plt.show()",
"def plot_metrics(self, title=None, fig_size=None, col_count=3):\n metric_names = self.tracked_metrics()\n metric_vals = {metric_name: self.metrics_history[metric_name][\"epoch_vals\"] for metric_name in metric_names}\n if self.include_val_metrics:\n # also has validation metrics\n for metric_name in metric_names:\n metric_vals[f\"val_{metric_name}\"] = self.metrics_history[f\"val_{metric_name}\"][\"epoch_vals\"]\n\n # we will plot a max of 3 metrics per row\n MAX_COL_COUNT = col_count\n col_count = MAX_COL_COUNT if len(metric_names) > MAX_COL_COUNT else len(metric_names)\n row_count = len(metric_names) // MAX_COL_COUNT\n row_count += 1 if len(metric_names) % MAX_COL_COUNT != 0 else 0\n # we'll always have \"loss\" metric in list, so safest to pick!\n x_vals = np.arange(1, len(metric_vals[\"loss\"]) + 1)\n\n with sns.axes_style(\"darkgrid\"):\n sns.set_context(\"notebook\") # , font_scale = 1.2)\n sns.set_style(\n {\"font.sans-serif\": [\"Segoe UI\", \"Calibri\", \"SF Pro Display\", \"Arial\", \"DejaVu Sans\", \"Sans\"]}\n )\n fig_size = (16, 5) if fig_size is None else fig_size\n\n if len(metric_names) == 1:\n # only loss\n plt.figure(figsize=fig_size)\n plt.plot(x_vals, metric_vals[\"loss\"], lw=2, markersize=7, color=\"steelblue\", marker=\"o\")\n if self.include_val_metrics:\n plt.plot(x_vals, metric_vals[\"val_loss\"], lw=2, markersize=7, color=\"firebrick\", marker=\"o\")\n legend = [\"train\", \"valid\"] if self.include_val_metrics else [\"train\"]\n plt_title = (\n f\"Training & Cross-validation Loss vs Epochs\" if len(legend) == 2 else f\"Training Loss vs Epochs\"\n )\n plt.title(plt_title)\n plt.legend(legend, loc=\"best\")\n else:\n f, ax = plt.subplots(row_count, col_count, figsize=fig_size)\n for r in range(row_count):\n for c in range(col_count):\n index = r * (col_count - 1) + c\n if index < len(metric_names):\n metric_name = metric_names[index]\n if row_count == 1:\n ax[c].plot(x_vals, metric_vals[metric_name], lw=2, markersize=7, marker=\"o\")\n else:\n ax[r, c].plot(x_vals, metric_vals[metric_name], lw=2, markersize=7, marker=\"o\")\n if self.include_val_metrics:\n if row_count == 1:\n ax[c].plot(\n x_vals, metric_vals[f\"val_{metric_name}\"], lw=2, markersize=7, marker=\"o\"\n )\n else:\n ax[r, c].plot(\n x_vals, metric_vals[f\"val_{metric_name}\"], lw=2, markersize=7, marker=\"o\"\n )\n legend = [\"train\", \"valid\"] if self.include_val_metrics else [\"train\"]\n ax_title = (\n f\"Training & Cross-validation '{metric_name}' vs Epochs\"\n if len(legend) == 2\n else f\"Training '{metric_name}' vs Epochs\"\n )\n if row_count == 1:\n ax[c].legend(legend, loc=\"best\")\n ax[c].set_title(ax_title)\n else:\n ax[r, c].legend(legend, loc=\"best\")\n ax[r, c].set_title(ax_title)\n\n if title is not None:\n plt.suptitle(title)\n\n plt.show()",
"def plot_metrics_over_iterations(metrics, epoch):\n total_elbo, total_cond_log_like, total_kl = metrics\n legend = ['ELBO', 'log p(x | z)']\n\n for level in range(len(total_kl)):\n legend.append('KL Divergence, Level ' + str(level))\n\n nans = np.zeros((1, 2 + len(total_kl)))\n nans.fill(np.nan)\n indices = np.ones((1, 2 + len(total_kl)))\n\n handle = plot_line(nans, indices, legend=legend,\n title='Average Metrics During Inference Iterations, Epoch ' + str(epoch),\n xlabel='Inference Iterations', ylabel='Metrics (Nats)')\n\n iterations = np.arange(0, total_elbo.shape[1]).astype(int)\n\n ave_elbo = np.mean(total_elbo, axis=0)\n update_trace(ave_elbo, iterations, win=handle, name='ELBO')\n\n ave_recon = np.mean(total_cond_log_like, axis=0)\n update_trace(ave_recon, iterations, win=handle, name='log p(x | z)')\n\n for level in range(len(total_kl)):\n ave_kl = np.mean(total_kl[level], axis=0)\n update_trace(ave_kl, iterations, win=handle, name='KL Divergence, Level ' + str(level))",
"def plot(df, scaling=\"probit\", metric=\"accuracy\"):\n test_sets = df.test_set.unique()\n shift_sets = df.shift_set.unique()\n assert len(test_sets) == 1\n assert len(shift_sets) == 1\n test_set = test_sets[0]\n shift_set = shift_sets[0]\n\n title = f\"{test_set} vs. {shift_set} {metric}\"\n fig = make_subplots(\n rows=1, cols=1, subplot_titles=((f\"{title} ({scaling} scaling)\"), ),\n )\n traces = []\n for label, color in COLOR_MAP.items():\n traces.append(\n go.Scatter(x=[None], y=[None], mode='markers',\n marker=dict(size=8, color=color),\n showlegend=True, name=label))\n\n def map_colors(row):\n family = row.model_family\n hparams = row.hyperparameters\n if family in [\"AdaBoost\", \"KNN\", \"RandomFeatures\", \"RandomForest\"]:\n return COLOR_MAP[family]\n if family in [\"LogisticRegression\", \"RidgeClassifier\", \"SVM\", \"SGDClassifier\"]:\n return COLOR_MAP[\"Linear Model\"]\n if \"pretrained\" in hparams and hparams[\"pretrained\"]:\n return COLOR_MAP[\"ImageNet Pretrained Network\"]\n return COLOR_MAP[\"Neural Network\"]\n \n def get_name(row):\n model_name = row.model_family + \"<br>\"\n model_name += \"<br>\".join([f\"{key}={val}\" for key, val in row.hyperparameters.items()])\n return model_name\n\n # Generate the main scatter plot\n traces.extend(\n scatter_plot(\n xs=df[f\"test_{metric}\"],\n x_errs=list(df[f\"test_{metric}_ci\"].values),\n ys=df[f\"shift_{metric}\"],\n y_errs=list(df[f\"shift_{metric}_ci\"].values),\n model_names=list(df.apply(get_name, axis=1)),\n scaling=scaling,\n colors=df.model_type.apply(lambda x: COLOR_MAP[x]),\n )\n )\n \n metric_min, metric_max = 0.01, 0.99 # Avoid numerical issues\n traces.append(\n go.Scatter(\n mode=\"lines\",\n x=rescale(np.arange(metric_min, metric_max + 0.01, 0.01), scaling),\n y=rescale(np.arange(metric_min, metric_max + 0.01, 0.01), scaling),\n name=\"y=x\",\n line=dict(color=\"black\", dash=\"dashdot\")\n )\n )\n\n for trace in traces:\n fig.add_trace(trace, row=1, col=1)\n\n ax_range = [rescale(metric_min, scaling), rescale(metric_max, scaling)]\n fig.update_xaxes(title_text=f\"{test_set} {metric}\", range=ax_range, row=1, col=1)\n fig.update_yaxes(title_text=f\"{shift_set} {metric}\", range=ax_range, row=1, col=1)\n tickmarks = np.array([0.1, 0.25, 0.5, 0.7, 0.8, 0.9, 0.95, metric_max])\n ticks = dict(\n tickmode=\"array\",\n tickvals=rescale(tickmarks, scaling),\n ticktext=[f\"{mark:.2f}\" for mark in tickmarks],\n )\n fig.update_layout(width=1000, height=700, xaxis=ticks, yaxis=ticks)\n return fig",
"def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \n green = '#72C29B'\n orange = '#FFA577'\n \n with plt.xkcd():\n # plot model loss\n fig, ax1 = plt.subplots()\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n # plot model accuracy\n fig, ax2 = plt.subplots()\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n plt.show()",
"def plot_metrics(history):\n\n pyplot.plot(history.history['loss'], label='loss')\n\n pyplot.plot(history.history['val_loss'], label='val_loss')\n\n pyplot.legend()\n\n pyplot.show()",
"def plot_performance(\n\t\tid:int\n\t\t, max_loss:float=None\n\t\t, min_accuracy:float=None\n\t\t, min_r2:float=None\n\t):\n\t\tbatch = Batch.get_by_id(id)\n\t\tanalysis_type = batch.algorithm.analysis_type\n\n\t\t# Now we need to filter the df based on the specified criteria.\n\t\tif (\"classification\" in analysis_type):\n\t\t\tif (min_r2 is not None):\n\t\t\t\traise ValueError(\"\\nYikes - Cannot use argument `min_r2` if `'classification' in batch.analysis_type`.\\n\")\n\t\t\tif (min_accuracy is None):\n\t\t\t\tmin_accuracy = 0.0\n\t\t\tmin_metric_2 = min_accuracy\n\t\t\tname_metric_2 = \"accuracy\"\n\t\telif (analysis_type == 'regression'):\n\t\t\tif (min_accuracy is not None):\n\t\t\t\traise ValueError(\"\\nYikes - Cannot use argument `min_accuracy` if `batch.analysis_type='regression'`.\\n\")\n\t\t\tif (min_r2 is None):\n\t\t\t\tmin_r2 = -1.0\n\t\t\tmin_metric_2 = min_r2\n\t\t\tname_metric_2 = \"r2\"\n\n\t\tif (max_loss is None):\n\t\t\tmax_loss = float('inf')\n\t\t\t\n\t\tdf = batch.metrics_to_pandas()\n\t\tif (df is None):\n\t\t\t# Warning message handled by `metrics_to_pandas() above`.\n\t\t\treturn None\n\t\tqry_str = \"(loss >= {}) | ({} <= {})\".format(max_loss, name_metric_2, min_metric_2)\n\t\tfailed = df.query(qry_str)\n\t\tfailed_runs = failed['result_id'].to_list()\n\t\tfailed_runs_unique = list(set(failed_runs))\n\t\t# Here the `~` inverts it to mean `.isNotIn()`\n\t\tdf_passed = df[~df['result_id'].isin(failed_runs_unique)]\n\t\tdf_passed = df_passed.round(3)\n\t\tdataframe = df_passed[['result_id', 'split', 'loss', name_metric_2]]\n\n\t\tif dataframe.empty:\n\t\t\tprint(\"Yikes - There are no models that met the criteria specified.\")\n\t\telse:\n\t\t\tPlot.performance(dataframe=dataframe)",
"def plot_metric(history_name, metric_name='accuracy', axis_label=None, graph_title=None, file_name=\"\", dpi=100,\n xaxis_tick_label=None):\n metric = history_name.history[metric_name]\n validation_metric = history_name.history['val_' + metric_name]\n epochs = range(1, len(metric) + 1)\n plt.figure(figsize=plt.figaspect(1.), dpi=dpi)\n plt.plot(epochs, metric, 'bo', label='Training ' + metric_name.capitalize())\n plt.plot(epochs, validation_metric, 'r', label='Validation ' + metric_name.capitalize())\n if axis_label is None:\n axis_label = ['Epochs', 'met']\n plt.xlabel(axis_label[0])\n plt.ylabel(axis_label[1])\n if graph_title is None:\n graph_title = metric_name.capitalize()\n plt.title(graph_title)\n if xaxis_tick_label:\n plt.xticks(epochs, xaxis_tick_label, rotation=90)\n plt.tight_layout()\n plt.legend()\n plt.grid()\n if file_name:\n plt.savefig(file_name, bbox_inches='tight')\n plt.show()\n return",
"def evaluation(self, model, forecast_horizon, training_period=None, prediction_period=None):\n\n # Passing periods to string with Timedelta format\n initial = pd.Timedelta(training_period, unit=self.freq)\n horizon = pd.Timedelta(forecast_horizon, unit=self.freq)\n\n # Cross validation\n from fbprophet.diagnostics import cross_validation\n df_cv = cross_validation(model, initial=initial, period=prediction_period, horizon=horizon)\n print(df_cv['cutoff'].unique())\n from fbprophet.diagnostics import performance_metrics\n df_p = performance_metrics(df_cv)\n print(df_p.head())\n\n # WAPE per fold\n def ewm_error(fold):\n ape = np.abs(fold.y - fold.yhat) / fold.y\n ape.replace(to_replace=float('inf'), value=1, inplace=True)\n wape = sum(ape * fold.yhat) / sum(fold.yhat)\n return wape\n errors_fold = df_cv.groupby('cutoff').apply(ewm_error)\n\n # Weighted mean of fold errors\n mean_wape = errors_fold.ewm(alpha=0.1, adjust=True).mean()[-1:].iloc[0]\n\n std = np.std(errors_fold)\n\n return mean_wape",
"def calc_params_train_val(history):\r\n # Metric of each feature\r\n acc_params = ['covid_severity_output_categorical_accuracy',\r\n 'pleural_regular_output_binary_accuracy',\r\n 'consolidation_output_binary_accuracy']\r\n precision_params = ['covid_severity_output_precision',\r\n 'pleural_regular_output_precision_1',\r\n 'consolidation_output_precision_2']\r\n recall_params = ['covid_severity_output_recall',\r\n 'pleural_regular_output_recall_1',\r\n 'consolidation_output_recall_2']\r\n AUC_params = ['covid_severity_output_auc',\r\n 'pleural_regular_output_auc_1',\r\n 'consolidation_output_auc_2']\r\n mae_params = ['covid_severity_output_mae',\r\n 'pleural_regular_output_mae',\r\n 'consolidation_output_mae']\r\n\r\n # Accuracy\r\n for acc_param in acc_params:\r\n plt.clf()\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(\r\n y=history.history[acc_param],\r\n name='Train'))\r\n val_acc_param = 'val_' + acc_param\r\n fig.add_trace(go.Scatter(\r\n y=history.history[val_acc_param],\r\n name='Validation'))\r\n title_acc_param = 'Accuracy for ' + acc_param + ' feature'\r\n fig.update_layout(height=500,\r\n width=700,\r\n title=title_acc_param,\r\n xaxis_title='Epoch',\r\n yaxis_title='Accuracy')\r\n fig.show()\r\n\r\n # Precision\r\n for precision_param in precision_params:\r\n plt.clf()\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(\r\n y=history.history[precision_param],\r\n name='Train'))\r\n val_precision_param = 'val_' + precision_param\r\n fig.add_trace(go.Scatter(\r\n y=history.history[val_precision_param],\r\n name='Validation'))\r\n title_precision_param = 'Precision for ' + precision_param + ' feature'\r\n fig.update_layout(height=500,\r\n width=700,\r\n title=title_precision_param,\r\n xaxis_title='Epoch',\r\n yaxis_title='Precision')\r\n fig.show()\r\n\r\n # Recall\r\n for recall_param in recall_params:\r\n plt.clf()\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(\r\n y=history.history[recall_param],\r\n name='Train'))\r\n val_recall_param = 'val_' + recall_param\r\n fig.add_trace(go.Scatter(\r\n y=history.history[val_recall_param],\r\n name='Validation'))\r\n title_recall_param = 'Recall for ' + recall_param + ' feature'\r\n fig.update_layout(height=500,\r\n width=700,\r\n title=title_recall_param,\r\n xaxis_title='Epoch',\r\n yaxis_title='Recall')\r\n fig.show()\r\n\r\n # AUC\r\n for AUC_param in AUC_params:\r\n plt.clf()\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(\r\n y=history.history[AUC_param],\r\n name='Train'))\r\n val_AUC_param = 'val_' + AUC_param\r\n fig.add_trace(go.Scatter(\r\n y=history.history[val_AUC_param],\r\n name='Validation'))\r\n title_AUC_param = 'AUC for ' + AUC_param + ' feature'\r\n fig.update_layout(height=500,\r\n width=700,\r\n title=title_AUC_param,\r\n xaxis_title='Epoch',\r\n yaxis_title='AUC')\r\n fig.show()\r\n\r\n # MAE\r\n for mae_param in mae_params:\r\n plt.clf()\r\n fig = go.Figure()\r\n fig.add_trace(go.Scatter(\r\n y=history.history[mae_param],\r\n name='Train'))\r\n val_mae_param = 'val_' + mae_param\r\n fig.add_trace(go.Scatter(\r\n y=history.history[val_mae_param],\r\n name='Validation'))\r\n title_mae_param = 'mae for ' + mae_param + ' feature'\r\n fig.update_layout(height=500,\r\n width=700,\r\n title=title_mae_param,\r\n xaxis_title='Epoch',\r\n yaxis_title='mae')\r\n fig.show()",
"def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)",
"def plot_train_results(metrics2record, loss_metric,\n train_metrics, test_metrics):\n pyplot.figure(figsize=(10, 5))\n min_, max_ = np.min(loss_metric), np.max(loss_metric)\n lg, = pyplot.plot(loss_metric)\n pyplot.yticks(min_ + np.arange(5) * (max_ - min_))\n # if learning_rate is not None:\n # lg, = pyplot.plot(learning_rate)\n pyplot.title('Loss')\n pyplot.xlabel('Epoch')\n pyplot.yscale('log')\n pyplot.show()\n\n for prm in basic_metrics:\n if prm in metrics2record:\n leg = []\n met_idx = metrics2record.index(prm)\n pyplot.figure(figsize=(10, 5))\n lg, = pyplot.plot(train_metrics[:, met_idx], label=('train'))\n leg.append(lg)\n lg, = pyplot.plot(test_metrics[:, met_idx], label=('test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title(prm)\n pyplot.xlabel('Epoch')\n pyplot.show()\n\n has_prf = any([(prm in PRF_metrics) for prm in metrics2record])\n if has_prf:\n pyplot.figure(figsize=(10, 5))\n leg = []\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(train_metrics[:, met_idx],\n label=(prm + ':train'))\n leg.append(lg)\n\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(test_metrics[:, met_idx],\n label=(prm + ':test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title('Precision / Recall')\n pyplot.xlabel('Epoch')\n pyplot.show()",
"def performance_metrics(self, y, y_predicted, type='mse'):\n if type== 'mse':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum((y_predicted-y)**2, axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum((y_predicted-y)**2)/(len(y))\n metric = mean_over_output_elements\n\n elif type == 'mae':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum(np.abs(y_predicted-y), axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum(np.abs(y_predicted-y))/(len(y))\n metric = mean_over_output_elements\n\n elif type == 'msle':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum((np.log(1 +y_predicted)-np.log(1+y))**2, axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum((np.log(1 +y_predicted)-np.log(1+y))**2)/(len(y))\n metric = mean_over_output_elements\n\n elif type == 'mape':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum(np.abs(y_predicted-y)/np.maximum(1e-8,np.abs(y)), axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum(np.abs(y_predicted-y)/np.maximum(1e-8,np.abs(y)))/(len(y))\n metric = mean_over_output_elements \n \n elif type == 'r2':\n if y.ndim > 1:\n n = np.shape(y)[0] #number of samples\n m = np.shape(y)[1] #number of output elements\n y_mean_over_output_elements = np.sum(y, axis=0)/m\n y_mean = y_mean_over_output_elements\n r2_over_output_elements = (np.sum((y-y_predicted)**2, axis=0))/((np.sum((y-y_mean)**2, axis=0)))\n r2_over_output_elements = np.sum(r2_over_output_elements)/n\n metric = 1 - r2_over_output_elements\n else: \n m = 1 #number of samples\n n = np.shape(y)[0] #number of output elements\n y_mean_over_output_elements = np.sum(y, axis=0)/n\n y_mean = y_mean_over_output_elements\n r2_over_output_elements = (np.sum((y-y_predicted)**2, axis=0))/(np.sum((y-y_mean)**2, axis=0))\n r2_over_output_elements = np.sum(r2_over_output_elements)\n metric = 1 - r2_over_output_elements\n elif type == 'rmse':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum((y_predicted-y)**2, axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets**(1/2)\n else: \n mean_over_output_elements = np.sum((y_predicted-y)**2)/(len(y))\n metric = mean_over_output_elements**(1/2)\n else:\n raise ValueError(\"undefined metric\")\n return metric",
"def linear_metric_explore(df, metrics):\n print df.describe()\n context = ramp.DataContext(data=df)\n config = ramp.Configuration(target=\"target\", metrics=[ramp.metrics.AUC()])\n models = [sklearn.svm.SVC(kernel=\"linear\", C=100.0)]\n for sub_metrics in [metrics] + list(_get_metrics_groups(metrics)):\n print \"==>\", sub_metrics\n factory = ramp.ConfigFactory(config, model=models,\n features=[[ramp.BaseFeature(x) for x in sub_metrics]])\n for x in factory:\n ramp.models.cv(x, context, folds=5, repeat=2,\n print_results=True)",
"def plot_accuracy_and_loss(histories=None):\n fig = subplots.make_subplots(rows=2, cols=2, subplot_titles=('Training accuracy', 'Validation accuracy',\n 'Training loss ', 'Validation loss'))\n\n def append_trace(model_name, acc, val_acc, loss, val_loss, epochs):\n e = list(range(epochs))\n color = random.choice(hex_colors_only)\n trace_ta = create_trace(e, acc, model_name, color)\n trace_va = create_trace(e, val_acc, model_name, color)\n trace_tl = create_trace(e, loss, model_name, color)\n trace_vl = create_trace(e, val_loss, model_name, color)\n\n fig.append_trace(trace_ta, 1, 1)\n fig.append_trace(trace_va, 1, 2)\n fig.append_trace(trace_tl, 2, 1)\n fig.append_trace(trace_vl, 2, 2)\n\n if histories is None:\n df_accuracies, df_losses = get_tensorboard_scalars()\n for model_name in df_accuracies.model_name.unique():\n df_acc = df_accuracies.loc[df_accuracies.model_name == model_name]\n df_l = df_losses.loc[df_losses.model_name == model_name]\n\n acc = df_acc.loc[df_acc.result_of == 'train'].accuracy.values.tolist()\n val_acc = df_acc.loc[df_acc.result_of == 'validation'].accuracy.values.tolist()\n loss = df_l.loc[df_l.result_of == 'train'].loss.values.tolist()\n val_loss = df_l.loc[df_l.result_of == 'validation'].loss.values.tolist()\n epochs = len(df_acc)\n\n append_trace(model_name, acc, val_acc, loss, val_loss, epochs)\n\n else:\n for model_name, history in histories.items():\n acc = history['accuracy']\n val_acc = history['val_accuracy']\n loss = history['loss']\n val_loss = history['val_loss']\n epochs = list(range(1, len(acc) + 1))\n\n append_trace(model_name, acc, val_acc, loss, val_loss, epochs)\n fig['layout']['xaxis'].update(title='Epoch')\n fig['layout']['xaxis2'].update(title='Epoch')\n fig['layout']['yaxis'].update(title='Accuracy', range=[0, 1])\n fig['layout']['yaxis2'].update(title='Loss', range=[0, 1])\n\n iplot(fig, filename='accuracies-losses')",
"def plot_fit_history(fit_history_obj):\r\n plt.plot(fit_history_obj.history['loss'])\r\n plt.plot(fit_history_obj.history['val_loss'])\r\n plt.title('model mean squared error loss')\r\n plt.ylabel('mean squared error loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['training set', 'validation set'], loc='upper right')\r\n plt.show()",
"def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):\n\n green = '#72C29B'\n orange = '#FFA577'\n\n with plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Model loss through #epochs', fontweight='bold')\n\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Model accuracy through #epochs', fontweight='bold')\n\n plt.tight_layout()\n plt.show()\n fig.savefig(save_figure_path)\n plt.close(fig)",
"def check_model_performances(X,Y, model,show=False):\n #model.fit(X, Y)\n predictions = model.predict(X)\n \n predictions = predictions#.reshape(-1,1)\n \n # ######## Computes MSE ####### \n MSE = mean_squared_error(Y, predictions)\n print(f'\\nMSE : {MSE}')\n \n # ######## Computes R2 ####### \n R2 = r2_score(Y, predictions)\n print(f'R2 : {R2}')\n \n # ######## Plot Model predictions vs. target ####### \n if show:\n fig = go.Figure()\n \n fig.add_trace(go.Scatter(y=Y,\n mode='lines',\n name='target'))\n fig.add_trace(go.Scatter(y=predictions\n ,\n mode='lines',\n name='predictions'))\n \n fig.show()",
"def plot_loss_metrics(history_file):\n history = pickle.load(open(history_file, \"rb\"))\n loss, metric, val_loss, val_metric = islice(history.keys(), 4)\n n_epochs = len(history[loss])\n\n plt.style.use(\"ggplot\")\n fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(13, 8))\n\n ax1.set_title(loss)\n ax1.plot(np.arange(1, n_epochs + 1), history[loss], label='train')\n ax1.plot(np.arange(1, n_epochs + 1), history[val_loss], label='test')\n ax1.legend()\n\n ax2.set_title(metric)\n ax2.plot(np.arange(1, n_epochs + 1), history[metric], label='train')\n ax2.plot(np.arange(1, n_epochs + 1), history[val_metric], label='test')\n ax2.set_xlabel('Epochs')\n ax2.set_xlim((1, n_epochs + 1))\n xa = ax2.get_xaxis()\n xa.set_major_locator(MaxNLocator(integer=True))\n ax2.legend()\n plt.savefig(history_file + '.png')\n plt.show()",
"def plot_actor_critic_results(algorithm_results_list, threshold=None, window_len=100, \n plt_title=None):\n\n # extract data\n scores_list = []\n pol_loss_list = []\n val_loss_list = []\n clipped_L_list= []\n entropy_list = []\n alg_titles = []\n\n for alg_res in algorithm_results_list:\n if isinstance(alg_res, str):\n # load from file\n alg_titles.append(alg_res)\n data = pickle.load(open(alg_res, 'rb'))\n scores_list.append(data['scores'])\n pol_loss_list.append(data['policy_loss'])\n val_loss_list.append(data['value_loss'])\n clipped_L_list.append(data['clipped_surrogate'])\n entropy_list.append(data['entropy'])\n\n # plot scores\n fig = plt.figure(\"scores\")\n ax = fig.add_subplot(111)\n\n for scores in scores_list:\n \n # compute moving average and standard deviation\n mv_avg = np.asarray([np.mean(scores[max(0, i-window_len):i]) for i in range(len(scores))])\n # mv_std = np.asarray([np.std(scores[max(0, i-window_len):i]) for i in range(len(scores))])\n mv_q16 = np.asarray([np.quantile(scores[max(0, i-window_len):i], 0.16) for i in range(1,len(scores))])\n mv_q84 = np.asarray([np.quantile(scores[max(0, i-window_len):i], 0.84) for i in range(1,len(scores))])\n mv_q16 = np.insert(mv_q16, 0, 0.0)\n mv_q84 = np.insert(mv_q84, 0, 0.0)\n\n\n # plot\n ax.plot(np.arange(len(scores)), mv_avg)\n # ax.fill_between(np.arange(len(scores)), mv_avg-mv_std, mv_avg+mv_std, alpha=0.3)\n ax.fill_between(np.arange(len(scores)), mv_q16, mv_q84, alpha=0.3)\n\n # plot success threshold\n if threshold is not None:\n plt.hlines(threshold, 0, len(scores), colors='r', linestyles='dashed')\n plt.title(plt_title)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.legend(alg_titles)\n\n\n # plot losses\n fig = plt.figure(\"losses\")\n ax = fig.add_subplot(111)\n for pol_losses in pol_loss_list:\n \n # # compute moving average and standard deviation\n # mv_avg = np.asarray([np.mean(pol_losses[max(0, i-window_len):i]) for i in range(len(pol_losses))])\n # # mv_std = np.asarray([np.std(pol_losses[max(0, i-window_len):i]) for i in range(len(pol_losses))])\n # mv_q16 = np.asarray([np.quantile(pol_losses[max(0, i-window_len):i], 0.16) for i in range(1,len(pol_losses))])\n # mv_q84 = np.asarray([np.quantile(pol_losses[max(0, i-window_len):i], 0.84) for i in range(1,len(pol_losses))])\n # mv_q16 = np.insert(mv_q16, 0, 0.0)\n # mv_q84 = np.insert(mv_q84, 0, 0.0)\n\n\n # plot\n ax.plot(np.arange(len(pol_losses)), pol_losses)\n # ax.fill_between(np.arange(len(pol_losses)), mv_avg-mv_std, mv_avg+mv_std, alpha=0.3)\n # ax.fill_between(np.arange(len(pol_losses)), mv_q16, mv_q84, alpha=0.3)\n\n for val_losses in val_loss_list:\n \n # plot\n ax.plot(np.arange(len(val_losses)), val_losses)\n\n for clipped_L in clipped_L_list:\n \n # plot\n ax.plot(np.arange(len(clipped_L)), clipped_L)\n\n for entropy in entropy_list:\n ax.plot(np.arange(len(entropy)), entropy)\n\n\n\n # plot success threshold\n if plt_title is not None:\n plt.title(plt_title + \": losses\")\n plt.ylabel('Losses')\n plt.xlabel('Training Iteration #')\n plt.legend(['policy loss', 'value loss', 'clipped surrogat', 'entropy'])\n\n # open plots\n plt.show()",
"def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")",
"def calculate_performance(controls_f, df, pop_type, neg, output):\n\n print('\\n Calculating performance...')\n log_write(output['log'], '\\n\\nPerformance based on results per %s\\n\\n' % pop_type)\n\n # Remove strains with missing penetrance and few cell count\n df = df.iloc[df['Penetrance'].isna().values == 0, :]\n df = df[df['Num_cells'] >= 15].reset_index(drop=True)\n\n # Plot penetrance distribution\n plt.figure(figsize=(6, 6))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n sns.kdeplot(df['Penetrance'].values, color='mediumblue', shade=True)\n plt.xlabel('Penetrance (%)')\n mean_penetrance = df['Penetrance'].mean()\n plt.title('Mean penetrance: %.4f' % mean_penetrance)\n fig = plt.gcf()\n fig.savefig(output['Penetrance'].replace('.', '_%s.' % pop_type), dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)\n log_write(output['log'], 'Mean penetrance: %.2f\\n' % mean_penetrance)\n log_write(output['log'], 'Mean WT penetrance: %.2f\\n' % df[df['Strain ID'].isin(neg)]['Penetrance'].mean())\n\n # Plot WT percentile distribution\n plt.figure(figsize=(6, 6))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n sns.kdeplot(df['WT_percentile_at_threshold'].values, color='mediumblue', shade=True)\n plt.xlabel('WT Percentile at the score of maximum difference')\n mean_percentile = df['WT_percentile_at_threshold'].mean()\n plt.title('Mean percentile: %.4f' % mean_percentile)\n fig = plt.gcf()\n fig.savefig(output['WT_Percentile'].replace('.', '_%s.' % pop_type), dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)\n log_write(output['log'], 'Mean WT percentile at threshold: %.2f\\n' % mean_percentile)\n\n # Plot correlation\n plt.figure(figsize=(6, 6))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n limits = [-0.1, 100.1]\n ticks = [0, 20, 40, 60, 80, 100]\n sns.lineplot(x=limits, y=limits, color='k', dashes=True, linewidth=1)\n sns.scatterplot(x='Penetrance', y='KS_Penetrance', color='mediumblue', data=df, s=60, alpha=0.5, linewidth=0)\n plt.xlim(limits)\n plt.xticks(ticks)\n plt.ylim(limits)\n plt.yticks(ticks)\n plt.xlabel('Penetrance')\n plt.ylabel('KS Penetrance')\n correlation = stats.pearsonr(df['Penetrance'], df['KS_Penetrance'])[0]\n plt.title('Correlation: %.4f' % correlation)\n fig = plt.gcf()\n fig.savefig(output['KS_Correlation'].replace('.', '_%s.' % pop_type), dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)\n log_write(output['log'], 'KS correlation: %.4f\\n' % correlation)\n\n # Get positive controls\n if controls_f:\n df_cont = pd.read_csv(controls_f, low_memory=False)\n pos = df_cont['Strain ID'].values\n\n # Calculate performance with maximum difference\n aupr, aupr_b, auroc = calculate_auc(df, neg, pos)\n correlation = plot_penetrance_agreement(df, df_cont, neg,\n output['PeneAgreement'].replace('.', '_%s.' 
% pop_type))\n log_write(output['log'], 'AUPR: %.4f\\n' % aupr)\n log_write(output['log'], 'AUPR balanced: %.4f\\n' % aupr_b)\n log_write(output['log'], 'AUROC: %.4f\\n' % auroc)\n log_write(output['log'], 'Correlation: %.4f\\n' % correlation)\n\n # Plot penetrance of controls\n plt.figure(figsize=(12, 4))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n plt.scatter(x=df[df['Strain ID'].isin(neg)].index.values,\n y=df[df['Strain ID'].isin(neg)]['Penetrance'].values,\n color='dodgerblue', alpha=0.3, label='Negative control', s=20)\n plt.scatter(x=df[df['Strain ID'].isin(pos)].index.values,\n y=df[df['Strain ID'].isin(pos)]['Penetrance'].values,\n color='red', alpha=0.7, label='Positive control', s=20)\n plt.xticks([])\n plt.yticks([0, 25, 50, 75, 100])\n plt.xlabel('Genes')\n plt.ylabel('Penetrance (%)')\n plt.legend(loc='upper right')\n plt.savefig(output['PenetranceControls'].replace('.', '_%s.' % pop_type),\n dpi=150, bbox_inches='tight')",
"def plot_logs(experiments: List[Summary],\n smooth_factor: float = 0,\n ignore_metrics: Optional[Set[str]] = None,\n pretty_names: bool = False,\n include_metrics: Optional[Set[str]] = None) -> FigureFE:\n # Sort to keep same colors between multiple runs of visualization\n experiments = humansorted(to_list(experiments), lambda exp: exp.name)\n n_experiments = len(experiments)\n if n_experiments == 0:\n return FigureFE.from_figure(make_subplots())\n\n ignore_keys = ignore_metrics or set()\n ignore_keys = to_set(ignore_keys)\n ignore_keys |= {'epoch'}\n include_keys = to_set(include_metrics)\n # TODO: epoch should be indicated on the axis (top x axis?). Problem - different epochs per experiment.\n # TODO: figure out how ignore_metrics should interact with mode\n # TODO: when ds_id switches during training, prevent old id from connecting with new one (break every epoch?)\n ds_ids = set()\n metric_histories = defaultdict(_MetricGroup) # metric: MetricGroup\n for idx, experiment in enumerate(experiments):\n history = experiment.history\n # Since python dicts remember insertion order, sort the history so that train mode is always plotted on bottom\n for mode, metrics in sorted(history.items(),\n key=lambda x: 0 if x[0] == 'train' else 1 if x[0] == 'eval' else 2 if x[0] == 'test'\n else 3 if x[0] == 'infer' else 4):\n for metric, step_val in metrics.items():\n base_metric, ds_id, *_ = f'{metric}|'.split('|') # Plot acc|ds1 and acc|ds2 on same acc graph\n if len(step_val) == 0:\n continue # Ignore empty metrics\n if metric in ignore_keys or base_metric in ignore_keys:\n continue\n # Here we intentionally check against metric and not base_metric. If user wants to display per-ds they\n # can specify that in their include list: --include mcc 'mcc|usps'\n if include_keys and metric not in include_keys:\n continue\n metric_histories[base_metric].add(idx, mode, ds_id, step_val)\n ds_ids.add(ds_id)\n\n metric_list = list(sorted(metric_histories.keys()))\n if len(metric_list) == 0:\n return FigureFE.from_figure(make_subplots())\n ds_ids = humansorted(ds_ids) # Sort them to have consistent ordering (and thus symbols) between plot runs\n n_plots = len(metric_list)\n if len(ds_ids) > 9: # 9 b/c None is included\n warn(\"Plotting more than 8 different datasets isn't well supported. Symbols will be reused.\")\n\n # Non-Shared legends aren't supported yet. If they get supported then maybe can have that feature here too.\n # https://github.com/plotly/plotly.js/issues/5099\n # https://github.com/plotly/plotly.js/issues/5098\n\n # map the metrics into an n x n grid, then remove any extra columns. 
Final grid will be n x m with m <= n\n n_rows = math.ceil(math.sqrt(n_plots))\n n_cols = math.ceil(n_plots / n_rows)\n metric_grid_location = {}\n nd1_metrics = []\n idx = 0\n for metric in metric_list:\n if metric_histories[metric].ndim() == 1:\n # Delay placement of the 1D plots until the end\n nd1_metrics.append(metric)\n else:\n metric_grid_location[metric] = (idx // n_cols, idx % n_cols)\n idx += 1\n for metric in nd1_metrics:\n metric_grid_location[metric] = (idx // n_cols, idx % n_cols)\n idx += 1\n titles = [k for k, v in sorted(list(metric_grid_location.items()), key=lambda e: e[1][0] * n_cols + e[1][1])]\n if pretty_names:\n titles = [prettify_metric_name(title) for title in titles]\n\n fig = make_subplots(rows=n_rows, cols=n_cols, subplot_titles=titles, shared_xaxes='all')\n fig.update_layout({\n 'plot_bgcolor': '#FFF',\n 'hovermode': 'closest',\n 'margin': {\n 't': 50\n },\n 'modebar': {\n 'add': ['hoverclosest', 'hovercompare'], 'remove': ['select2d', 'lasso2d']\n },\n 'legend': {\n 'tracegroupgap': 5, 'font': {\n 'size': 11\n }\n }\n })\n\n # Set x-labels\n for idx, metric in enumerate(titles, start=1):\n plotly_idx = idx if idx > 1 else \"\"\n x_axis_name = f'xaxis{plotly_idx}'\n y_axis_name = f'yaxis{plotly_idx}'\n if metric_histories[metric].ndim() > 1:\n fig['layout'][x_axis_name]['title'] = 'Steps'\n fig['layout'][x_axis_name]['showticklabels'] = True\n fig['layout'][x_axis_name]['linecolor'] = \"#BCCCDC\"\n fig['layout'][y_axis_name]['linecolor'] = \"#BCCCDC\"\n else:\n # Put blank data onto the axis to instantiate the domain\n row, col = metric_grid_location[metric][0], metric_grid_location[metric][1]\n fig.add_annotation(text='', showarrow=False, row=row + 1, col=col + 1)\n # Hide the axis stuff\n fig['layout'][x_axis_name]['showgrid'] = False\n fig['layout'][x_axis_name]['zeroline'] = False\n fig['layout'][x_axis_name]['visible'] = False\n fig['layout'][y_axis_name]['showgrid'] = False\n fig['layout'][y_axis_name]['zeroline'] = False\n fig['layout'][y_axis_name]['visible'] = False\n\n # If there is only 1 experiment, we will use alternate colors based on mode\n color_offset = defaultdict(lambda: 0)\n n_colors = n_experiments\n if n_experiments == 1:\n n_colors = 4\n color_offset['eval'] = 1\n color_offset['test'] = 2\n color_offset['infer'] = 3\n colors = get_colors(n_colors=n_colors)\n alpha_colors = get_colors(n_colors=n_colors, alpha=0.3)\n\n # exp_id : {mode: {ds_id: {type: True}}}\n add_label = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: True))))\n # {row: {col: (x, y)}}\n ax_text = defaultdict(lambda: defaultdict(lambda: (0.0, 0.9))) # Where to put the text on a given axis\n # Set up ds_id markers. The empty ds_id will have no extra marker. After that there are 4 configurations of 3-arm\n # marker, followed by 'x', '+', '*', and pound. 
After that it will just repeat the symbol set.\n ds_id_markers = [None, 37, 38, 39, 40, 34, 33, 35, 36] # https://plotly.com/python/marker-style/\n ds_id_markers = {k: v for k, v in zip(ds_ids, cycle(ds_id_markers))}\n # Plotly doesn't support z-order, so delay insertion until all the plots are figured out:\n # https://github.com/plotly/plotly.py/issues/2345\n z_order = defaultdict(list) # {order: [(plotly element, row, col), ...]}\n\n # Figure out the legend ordering\n legend_order = []\n for exp_idx, experiment in enumerate(experiments):\n for metric, group in metric_histories.items():\n for mode in group.modes(exp_idx):\n for ds_id in group.ds_ids(exp_idx, mode):\n ds_title = f\"{ds_id} \" if ds_id else ''\n title = f\"{experiment.name} ({ds_title}{mode})\" if n_experiments > 1 else f\"{ds_title}{mode}\"\n legend_order.append(title)\n legend_order.sort()\n legend_order = {legend: order for order, legend in enumerate(legend_order)}\n\n # Actually do the plotting\n for exp_idx, experiment in enumerate(experiments):\n for metric, group in metric_histories.items():\n row, col = metric_grid_location[metric][0], metric_grid_location[metric][1]\n if group.ndim() == 1:\n # Single value\n for mode in group.modes(exp_idx):\n for ds_id in group.ds_ids(exp_idx, mode):\n ds_title = f\"{ds_id} \" if ds_id else ''\n prefix = f\"{experiment.name} ({ds_title}{mode})\" if n_experiments > 1 else f\"{ds_title}{mode}\"\n plotly_idx = row * n_cols + col + 1 if row * n_cols + col + 1 > 1 else ''\n fig.add_annotation(text=f\"{prefix}: {group.get_val(exp_idx, mode, ds_id)}\",\n font={'color': colors[exp_idx + color_offset[mode]]},\n showarrow=False,\n xref=f'x{plotly_idx} domain',\n xanchor='left',\n x=ax_text[row][col][0],\n yref=f'y{plotly_idx} domain',\n yanchor='top',\n y=ax_text[row][col][1],\n exclude_empty_subplots=False)\n ax_text[row][col] = (ax_text[row][col][0], ax_text[row][col][1] - 0.1)\n if ax_text[row][col][1] < 0:\n ax_text[row][col] = (ax_text[row][col][0] + 0.5, 0.9)\n elif group.ndim() == 2:\n for mode, dsv in group[exp_idx].items():\n color = colors[exp_idx + color_offset[mode]]\n for ds_id, data in dsv.items():\n ds_title = f\"{ds_id} \" if ds_id else ''\n title = f\"{experiment.name} ({ds_title}{mode})\" if n_experiments > 1 else f\"{ds_title}{mode}\"\n if data.shape[0] < 2:\n x = data[0][0]\n y = data[0][1]\n y_min = None\n y_max = None\n if isinstance(y, ValWithError):\n y_min = y.y_min\n y_max = y.y_max\n y = y.y\n marker_style = 'circle' if mode == 'train' else 'diamond' if mode == 'eval' \\\n else 'square' if mode == 'test' else 'hexagram'\n limit_data = [(y_max, y_min)] if y_max is not None and y_min is not None else None\n tip_text = \"%{x}: (%{customdata[1]:.3f}, %{y:.3f}, %{customdata[0]:.3f})\" if \\\n limit_data is not None else \"%{x}: %{y:.3f}\"\n error_y = None if limit_data is None else {\n 'type': 'data', 'symmetric': False, 'array': [y_max - y], 'arrayminus': [y - y_min]\n }\n z_order[2].append((go.Scatter(\n x=[x],\n y=[y],\n name=title,\n legendgroup=title,\n customdata=limit_data,\n hovertemplate=tip_text,\n mode='markers',\n marker={\n 'color': color,\n 'size': 12,\n 'symbol': _symbol_mash(marker_style, ds_id_markers[ds_id]),\n 'line': {\n 'width': 1.5, 'color': 'White'\n }\n },\n error_y=error_y,\n showlegend=add_label[exp_idx][mode][ds_id]['patch'],\n legendrank=legend_order[title]),\n row,\n col))\n add_label[exp_idx][mode][ds_id]['patch'] = False\n else:\n # We can draw a line\n y = data[:, 1]\n y_min = None\n y_max = None\n if isinstance(y[0], ValWithError):\n y 
= np.stack([e.as_tuple() for e in y])\n y_min = y[:, 0]\n y_max = y[:, 2]\n y = y[:, 1]\n if smooth_factor != 0:\n y_min = gaussian_filter1d(y_min, sigma=smooth_factor)\n y_max = gaussian_filter1d(y_max, sigma=smooth_factor)\n # TODO - for smoothed lines, plot original data in background but greyed out\n if smooth_factor != 0:\n y = gaussian_filter1d(y, sigma=smooth_factor)\n x = data[:, 0]\n linestyle = 'solid' if mode == 'train' else 'dash' if mode == 'eval' else 'dot' if \\\n mode == 'test' else 'dashdot'\n limit_data = [(mx, mn) for mx, mn in zip(y_max, y_min)] if y_max is not None and y_min is \\\n not None else None\n tip_text = \"%{x}: (%{customdata[1]:.3f}, %{y:.3f}, %{customdata[0]:.3f})\" if \\\n limit_data is not None else \"%{x}: %{y:.3f}\"\n z_order[1].append((go.Scatter(\n x=x,\n y=y,\n name=title,\n legendgroup=title,\n mode=\"lines+markers\" if ds_id_markers[ds_id] else 'lines',\n marker={\n 'color': color,\n 'size': 8,\n 'line': {\n 'width': 2, 'color': 'DarkSlateGrey'\n },\n 'maxdisplayed': 10,\n 'symbol': ds_id_markers[ds_id]\n },\n line={\n 'dash': linestyle, 'color': color\n },\n customdata=limit_data,\n hovertemplate=tip_text,\n showlegend=add_label[exp_idx][mode][ds_id]['line'],\n legendrank=legend_order[title]),\n row,\n col))\n add_label[exp_idx][mode][ds_id]['line'] = False\n if limit_data is not None:\n z_order[0].append((go.Scatter(x=x,\n y=y_max,\n mode='lines',\n line={'width': 0},\n legendgroup=title,\n showlegend=False,\n hoverinfo='skip'),\n row,\n col))\n z_order[0].append((go.Scatter(x=x,\n y=y_min,\n mode='lines',\n line={'width': 0},\n fillcolor=alpha_colors[exp_idx + color_offset[mode]],\n fill='tonexty',\n legendgroup=title,\n showlegend=False,\n hoverinfo='skip'),\n row,\n col))\n else:\n # Some kind of image or matrix. Not implemented yet.\n pass\n for z in sorted(list(z_order.keys())):\n plts = z_order[z]\n for plt, row, col in plts:\n fig.add_trace(plt, row=row + 1, col=col + 1)\n\n # If inside a jupyter notebook then force the height based on number of rows\n if in_notebook():\n fig.update_layout(height=280 * n_rows)\n\n return FigureFE.from_figure(fig)",
"def train_and_plot_prediction_metrics(X_train, y_train, X_test, y_test, pipelines):\n\n scores = pd.DataFrame(columns=[\"Model\", \"MAE\", \"MSE\", \"R2\"])\n\n for modelname, pipeline in pipelines.items():\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, y_pred)\n mse = mean_squared_error(y_test, y_pred)\n r2 = r2_score(y_test, y_pred)\n scores = scores.append(\n {\"Model\": modelname, \"MAE\": mae, \"MSE\": mse, \"R2\": r2}, ignore_index=True\n )\n\n for metric in [\"MAE\", \"MSE\", \"R2\"]:\n ax = sns.barplot(x=\"Model\", y=metric, data=scores)\n ax.set_ylim(bottom=0)\n plt.title(\"Test data: \" + metric)\n plt.show()",
"def __evaluate_other_metrics(dataset, m, y_act, y_pred):\n return evaluate_metric(y_act, y_pred, m, dataset.y_n_classes)",
"def plot_average_metrics(metrics, epoch, handle_dict, train_val):\n avg_elbo, avg_cond_log_like, avg_kl = metrics\n update_trace(np.array([-avg_elbo]), np.array([epoch]).astype(int), win=handle_dict['elbo'], name=train_val)\n update_trace(np.array([-avg_cond_log_like]), np.array([epoch]).astype(int), win=handle_dict['cond_log_like'], name=train_val)\n for level in range(len(avg_kl)):\n update_trace(np.array([avg_kl[level]]), np.array([epoch]).astype(int), win=handle_dict['kl'], name=train_val + ', Level ' + str(level))",
"def plot_metric_values(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.accuracies), 1)\n plt.plot(epochs_range, self.accuracies[threshold:], color='red', marker='o')\n plt.title('Accuracy on test data. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.show()",
"def plot_metrics_est(df, metrics_pre, metrics_post, n_arr, row='scenario',\n col='metric', **kwargs):\n risk_pre = metrics_pre.query(\"metric=='risk'\")['value'].values[0]\n risk_post = metrics_post.query(\"metric=='risk'\")['value'].values[0]\n risk_change = metrics_post.query(\"metric=='risk_change'\")['value'].values[0]\n gap_FPR = metrics_post.query(\"metric=='gap_FPR'\")['value'].values[0]\n gap_FNR = metrics_post.query(\"metric == 'gap_FNR'\")['value'].values[0]\n\n xlim = (min(n_arr), max(n_arr))\n g = sns.FacetGrid(df, row=row, col=col,\n col_order=['risk', 'risk_change', 'gap_FPR', 'gap_FNR'],\n xlim=xlim, **kwargs)\n g.map(sns.pointplot, 'n', 'value', order=n_arr, ci='sd')\n g.set_xticklabels(rotation=45)\n\n g.set_titles(template='')\n\n for ax, m in zip(g.axes[0, :],\n ['risk', 'risk_change', 'gap_FPR', 'gap_FNR']):\n ax.set_title(m)\n for ax, l in zip(g.axes[:, 0], df[row].unique()):\n ax.set_ylabel(l, rotation=90, ha='center', va='center')\n\n for i in range(g.axes.shape[0]):\n g.axes[i, 0].hlines(risk_post, *g.axes[i, 0].get_xlim())\n g.axes[i, 1].hlines(risk_change, *g.axes[i, 1].get_xlim())\n g.axes[i, 2].hlines(gap_FPR, *g.axes[i, 2].get_xlim())\n g.axes[i, 3].hlines(gap_FNR, *g.axes[i, 3].get_xlim())",
"def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)",
"def plot_metric(metric, cases, input_dir = Path('.'), output_dir = Path('.'),\n station_name='종로구', targets=['PM10', 'PM25'], loss='MSE', sample_size=48, output_size=24):\n sns.set_context(\"paper\")\n sns.color_palette(\"tab10\")\n nrows = 2\n ncols = 2\n multipanel_labels = np.array(list(string.ascii_uppercase)[:nrows * ncols]).reshape(nrows, ncols)\n\n # rough figure size\n w_pad, h_pad = 1.08, 1.08\n # inch/1pt (=1.0inch / 72pt) * 10pt/row * 8row (6 row + margins)\n legend_size = 1.0 / 72 * 10.0 * 10\n ax_size = min(7.22 / ncols, (9.45 - legend_size) / nrows)\n # legend_size = 0.6 * fig_size\n fig_size_w = ax_size*ncols\n fig_size_h = ax_size*nrows + legend_size\n legend_frac = legend_size / (fig_size_h + legend_size)\n\n fig, axs = plt.subplots(nrows, ncols,\n figsize=(ax_size*ncols, ax_size*nrows + legend_size),\n dpi=600,\n frameon=False,\n subplot_kw={\n 'clip_on': False,\n 'box_aspect': 1\n })\n fig.tight_layout(w_pad=w_pad, h_pad=h_pad)\n fig.subplots_adjust(left=0.1, bottom=0.1, top=0.9)\n\n # targets is actually useless\n\n df_10 = pd.DataFrame()\n df_25 = pd.DataFrame()\n dfs = {\n 'PM10': df_10,\n 'PM25': df_25,\n }\n\n for target in targets:\n for case in cases[target]['Univariate'] + cases[target]['Multivariate']:\n df_obs, df_sim = load_df(input_dir, case,\n station_name=station_name, target=target)\n\n lags, res, p_val = compute_metric(df_obs, df_sim, metric)\n\n if dfs[target].empty:\n dfs[target].loc[:, 'time'] = lags\n\n dfs[target].loc[:, case] = res\n\n\n for rowi, target in enumerate(targets):\n # Univariate\n cases_uni = cases[target]['Univariate']\n df_uni = dfs[target].loc[:, cases_uni]\n df_uni.loc[:, 'time'] = dfs[target].loc[:, 'time'].to_numpy()\n sns.lineplot(x='time', y='value', hue='variable',\n data=pd.melt(df_uni, ['time']),\n ax = axs[rowi, 0])\n\n # Multivariate\n cases_mul = cases[target]['Multivariate']\n df_mul = dfs[target].loc[:, cases_mul]\n df_mul.loc[:, 'time'] = dfs[target].loc[:, 'time'].to_numpy()\n sns.lineplot(x='time', y='value', hue='variable',\n data=pd.melt(df_mul, ['time']),\n ax = axs[rowi, 1])\n\n ymins = np.zeros((2, 2))\n ymaxs = np.zeros((2, 2))\n for coli in range(2):\n ymins[rowi, coli], ymaxs[rowi, coli] = axs[rowi, coli].get_ylim()\n\n for coli in range(2):\n # legend\n if rowi == 0:\n leg_handles, _leg_labels = axs[rowi, coli].get_legend_handles_labels()\n # new legend label map\n leg_labels = [CASE_DICT[l] for l in _leg_labels]\n axs[rowi, coli].legend(leg_handles, leg_labels,\n bbox_to_anchor=(0.0, 1.02, 1, legend_frac),\n ncol=1,\n mode='expand',\n loc='lower left',\n borderaxespad=0.5,\n fancybox=True,\n fontsize='medium')\n else:\n axs[rowi, coli].get_legend().remove()\n\n # xaxis\n axs[rowi, coli].set_xlabel('time', fontsize='medium')\n axs[rowi, coli].set_xlim(-0.5, 24.5)\n\n # multipanel lagbel\n axs[rowi, coli].annotate(multipanel_labels[rowi, coli],\n (-0.08, 1.05), xycoords='axes fraction',\n fontsize='large', fontweight='bold')\n\n # xticks\n axs[rowi, coli].xaxis.set_major_locator(mpl.ticker.MultipleLocator(4))\n axs[rowi, coli].xaxis.set_minor_locator(mpl.ticker.MultipleLocator(1))\n\n # xgrid\n axs[rowi, coli].xaxis.grid(True, visible=True, which='major')\n\n for tick in axs[rowi, coli].xaxis.get_major_ticks():\n tick.label.set_fontsize('medium')\n for tick in axs[rowi, coli].yaxis.get_major_ticks():\n tick.label.set_fontsize('medium')\n\n # hide x label except last row\n if rowi == 0:\n axs[rowi, coli].xaxis.label.set_visible(False)\n\n # hide y label except first col\n if coli == 1:\n axs[rowi, 
coli].yaxis.label.set_visible(False)\n\n # yaxis\n # set limit of y axis\n axs[rowi, coli].set_ylabel(metric, fontsize='medium')\n\n # ylims\n axs[rowi, coli].set_ylim(np.amin(ymins[rowi, :]), np.amax(ymaxs[rowi, :]))\n\n if metric == 'MAPE':\n # Best MAPE => 1.0\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'MAAPE':\n # Best MAAPE => 0.0, Worst => pi/2\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'MAE':\n # Best MAE => Lower\n pass\n elif metric == 'MSE':\n # Best MSE => Lower\n pass\n elif metric == 'R2':\n # Best R2 => 1.0, Worst - 1.0\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'PCORR':\n # Best PCORR => 1.0\n axs[rowi, coli].set_ylabel(r\"Pearson's $r$\", fontsize='medium')\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'SCORR':\n # Best SCORR => 1.0\n axs[rowi, coli].set_ylabel(r\"Spearman's $\\rho$\", fontsize='x-small')\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'CORR':\n # Best SCORR => 1.0\n axs[rowi, coli].set_ylabel(\"Corr. Coef.\", fontsize='x-small')\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'FB':\n # Best FB => 0.0\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'NMSE':\n # Best NMSE => 0.0\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'MG':\n # Best MG => 1.0, Worst => far from 1.0\n pass\n elif metric == 'VG':\n # Best VG => 1.0, Worst => far from 1.0\n pass\n elif metric == 'FAC2':\n # Best FAC2 => 1.0\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'SMAPE':\n # Best SMAPE => 1.0\n axs[rowi, coli].yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.2))\n axs[rowi, coli].yaxis.set_minor_locator(mpl.ticker.MultipleLocator(0.1))\n elif metric == 'MNFB':\n axs[rowi, coli].set_ylim(-3, 3)\n elif metric == 'MNAFE':\n axs[rowi, coli].set_ylim(0, 3)\n\n # ygrid\n axs[rowi, coli].yaxis.grid(True, visible=True, which='major')\n\n axs[rowi, 0].set_ylabel(f'{metric} - ${TARGET_MAP[target]}$', fontsize='medium')\n fig.subplots_adjust(left=0.13, top=1.02-legend_frac)\n\n output_prefix = f'{station_name}_{metric}'\n png_path = output_dir / (output_prefix + '.png')\n svg_path = output_dir / (output_prefix + '.svg')\n plt.savefig(png_path, dpi=600)\n plt.savefig(svg_path)\n plt.close(fig)"
]
| [
"0.6284926",
"0.60687554",
"0.5854684",
"0.57665604",
"0.57452273",
"0.574382",
"0.5705404",
"0.5692809",
"0.55961776",
"0.5575118",
"0.55700886",
"0.55517894",
"0.55211616",
"0.55112994",
"0.5492204",
"0.5428145",
"0.5419277",
"0.53920174",
"0.5386919",
"0.5379678",
"0.5370937",
"0.52954483",
"0.52921915",
"0.5288537",
"0.52802604",
"0.5276882",
"0.52386916",
"0.5231244",
"0.52127194",
"0.5205817"
]
| 0.75381464 | 0 |
r""" $\alpha$-geodesic between two probability distributions | def alpha_geodesic(
a: torch.Tensor,
b: torch.Tensor,
alpha: float,
lmd: float
) -> torch.Tensor:
a_ = a + 1e-12
b_ = b + 1e-12
if alpha == 1:
return torch.exp((1 - lmd) * torch.log(a_) + lmd * torch.log(b_))
elif alpha >= 1e+9:
return torch.min(a_, b_)
elif alpha <= -1e+9:
return torch.max(a_, b_)
else:
p = (1 - alpha) / 2
lhs = a_ ** p
rhs = b_ ** p
g = ((1 - lmd) * lhs + lmd * rhs) ** (1/p)
if alpha > 0 and (g == 0).sum() > 0:
return torch.min(a_, b_)
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Dist(p1,p2):\n x1, y1 = p1\n x2, y2 = p2\n return (((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)))**0.5",
"def prob_larger_continuous(distr1, distr2):\n\n return distr1.expect(distr2.cdf)",
"def Geometric(name, p):\n return rv(name, GeometricDistribution, p)",
"def hyperboloidDist(point1, point2):\n return np.arccosh(-minkowskiDot(point1, point2))",
"def geodesic(dists, params):\n\n if len(params) == 1:\n\n g = np.exp(-1*(dists**2) / (2*params[0]**2))\n \n else:\n g = params[0] * np.exp(-1*(dists**2) / (2*params[1]**2))\n\n g = g/g.sum()\n\n return g",
"def euclid_dist(p1, p2):\n \n return float(np.linalg.norm(np.array(p1)-np.array(p2)))",
"def js_divergence(dist1, dist2):\n mean_dist = (dist1 + dist2) / 2.0\n js = (\n scipy.stats.entropy(dist1, mean_dist) + scipy.stats.entropy(dist2, mean_dist)\n ) / 2.0\n return js",
"def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)",
"def dist(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)",
"def extended_euclidean(self):\n self.a = gmpy2.invert(self.e1, self.e2)\n self.b = (float(self.gcd(self.e1, self.e2)-(self.a*self.e1)))/float(self.e2)",
"def fdist(param1, param2):\n return(prng.gamma(param1, param2))",
"def poincareDist(p1, p2):\n return np.arccosh(1 + 2 * ((np.linalg.norm(p1 - p2) ** 2)\n / (1 - (np.linalg.norm(p1) ** 2))\n * (1 - (np.linalg.norm(p2) ** 2))))",
"def g(inicio,pos):\n return (pos.costo-inicio.costo)*distancia_nodos(inicio,pos)",
"def geodesic_difference(self, x1, x0):\n return x1 - x0 # Default to Euclidean behavior.",
"def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2",
"def runge(s1: float, s2: float, L: float, m: float):\n return (s2 - s1) / (L**m - 1)",
"def ge (x,y):\n\n return le(y,x)",
"def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)",
"def product_on_basis(self, g1, g2):\n return self.monomial(g1 * g2)",
"def geometric_product(b1, b2):\n if MV.is_orthogonal:\n return MV.product_orthogonal_blades(b1, b2)\n else:\n result = MV.base_mul_table[(b1, b2)]\n return result",
"def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)",
"def difference(self, other):\n return self._geomgen(capi.geom_diff, other)",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))",
"def generate_distribution(p1=[1.0,0.1,10000],p2=[-1.0,0.6,30000]):\n mu1,sig1,num1 = p1\n mu2,sig2,num2 = p2\n g1 = scipy.stats.norm(mu1,sig1).rvs(num1)\n g2 = scipy.stats.norm(mu2,sig2).rvs(num2)\n\n samples = np.concatenate([g1,g2])\n return samples",
"def productGaussian(mu1, C1, mu2, C2):\n Cn = C1 + mat(.0001*identity(2))\n K = Cn*linalg.inv(Cn+C2)\n mu = mu1 + K*(mu2-mu1)\n C = Cn - K*Cn\n #denom = linalg.inv(C1+C2)\n #mu = denom*(C1*mu2+C2*mu1)\n #C = C1*denom*C2\n return mu,C",
"def get_pgeom(aor, e):\n return 1. / (aor * (1 - e*e)) * (aor > 1.0)",
"def gamma(x1, x2):\r\n gamma1 = math.exp(a / (1 + a * x1/(b * x2)) ** 2.0) \r\n gamma2 = math.exp(b / (1 + b * x2/(a * x1)) ** 2.0)\t\t\r\n return gamma1, gamma2",
"def euclidDist(pair1,pair2):\n return ((pair1[0]-pair2[0])**2+(pair1[1]-pair2[1])**2)**0.5"
]
| [
"0.6699356",
"0.6350744",
"0.6237337",
"0.62175363",
"0.60602874",
"0.6047542",
"0.6032256",
"0.5975014",
"0.5959113",
"0.59015775",
"0.59015435",
"0.5897139",
"0.588646",
"0.58855504",
"0.58800197",
"0.5860326",
"0.5824693",
"0.58132815",
"0.58132315",
"0.5791702",
"0.5790207",
"0.57816094",
"0.5765631",
"0.57646364",
"0.57414913",
"0.57297355",
"0.5716906",
"0.57158345",
"0.57146114",
"0.57114375"
]
| 0.6636303 | 1 |
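For readability, the closed form implemented by the `alpha_geodesic` document in the row above can be restated as follows. This is a sketch inferred directly from that code, with notation introduced here: $\gamma$ names the geodesic, $\lambda$ stands for the code's `lmd`, and $p = (1-\alpha)/2$ exactly as in the snippet.

\gamma^{(\alpha)}_{\lambda}(a, b) =
\begin{cases}
\exp\!\big((1-\lambda)\log a + \lambda \log b\big), & \alpha = 1,\\
\big((1-\lambda)\, a^{p} + \lambda\, b^{p}\big)^{1/p}, \qquad p = \tfrac{1-\alpha}{2}, & \text{otherwise,}
\end{cases}

with the limiting cases $\alpha \to +\infty \Rightarrow \min(a,b)$ and $\alpha \to -\infty \Rightarrow \max(a,b)$; the `1e+9` threshold guards and the fallback to `torch.min` when $g$ underflows to zero are numerical approximations of these limits.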
Generate and Upload the Discharge Summary | def generate_discharge_summary_task(consultation_ext_id: str):
logger.info(f"Generating Discharge Summary for {consultation_ext_id}")
try:
consultation = PatientConsultation.objects.get(external_id=consultation_ext_id)
except PatientConsultation.DoesNotExist as e:
raise CeleryTaskException(
f"Consultation {consultation_ext_id} does not exist"
) from e
summary_file = generate_and_upload_discharge_summary(consultation)
if not summary_file:
raise CeleryTaskException("Unable to generate discharge summary")
return summary_file.id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats",
"def generate_summary(cls, imeis, tracking_id):\n try:\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))\n complete_path = os.path.join(upload_path, 'dvs_input_file.txt')\n task_url = cls.dvs_api_v1 + '/drs_bulk'\n with open(complete_path, 'w') as file:\n for item in imeis:\n file.write('%s\\n' % item)\n response = requests.post(url=task_url, files={'file': open(complete_path, 'r')})\n # cls.remove_file(complete_path)\n return response.json().get('task_id')\n except Exception as e:\n app.logger.exception(e)\n return None",
"def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats",
"def gen_sample_report():\n sample_report().save()",
"def transmission_summarise():\n\n study_description = self.processDicomField(dcm_info, \"StudyDescription\")\n study_date = self.processDicomField(dcm_info, \"StudyDate\")\n series_description = self.processDicomField(dcm_info, \"SeriesDescription\")\n patient_id = self.processDicomField(dcm_info, \"PatientID\")\n patient_name = self.processDicomField(dcm_info, \"PatientName\")\n protocol_name = self.processDicomField(dcm_info, \"ProtocolName\")\n d_fileInfo = filesInSeries_determine()\n self.dp.qprint('Summary report:')\n self.dp.qprint('PatientID: %s' % patient_id, level = -1)\n self.dp.qprint('PatientName: %s' % patient_name, level = -1)\n self.dp.qprint('StudyDate: %s' % study_date, level = -1)\n self.dp.qprint('StudyDescription: %s' % study_description, level = -1)\n self.dp.qprint('SeriesDescription: %s' % series_description, level = -1)\n self.dp.qprint('ProtocolName: %s' % protocol_name, level = -1)\n if d_fileInfo['status']:\n self.dp.qprint('Number of files in Series: %d' % d_fileInfo['fileCount'], level = -1)\n self.dp.qprint('Directory size (raw): %d' % d_fileInfo['dirSizeRaw'], level = -1)\n self.dp.qprint('Directory size (human): %s' % d_fileInfo['str_dirSize'], level = -1)",
"def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats",
"def add_daily_summary(self):\n auth_date = self.report_date.strftime(\"%b %-d, %Y\")\n now = datetime.now().strftime(\"%x %X\")\n report_title = ' '.join([\n f'Report for {self.origin_value} participant consents authored on: {auth_date} 12:00AM-11:59PM UTC',\n f'(generated on {now} Central)'\n ])\n\n report_notes = [\n ['Notes:'],\n [f'Validation details on this sheet for {self.origin_value} participants only'],\n ['Checkbox validation currently only performed on GROR consents'],\n ['Total Errors can exceed Consents with Errors if any consents had multiple validation errors']\n ]\n\n self._add_text_rows(text_rows=[[report_title]], format_spec=self.format_specs.get('bold_text'))\n # Add any explanatory text / details about the report that have been included in the layout\n self._add_text_rows(text_rows=report_notes, format_spec=self.format_specs.get('legend_text'),\n row_pos=self.row_pos + 1)\n\n if not self._has_needs_correcting(self.consent_df):\n self._add_text_rows(text_rows=[['No consent validation errors detected']],\n format_spec=self.format_specs.get('italic_text'), row_pos=self.row_pos+1)\n\n # Daily summary counts for all the recently authored consents that were processed (regardless of errors)\n self._add_text_rows([['Total Consent Validation Counts']],\n format_spec=self.format_specs.get('bold_text'), row_pos=self.row_pos+1)\n self._add_consent_issue_count_header_section(hpo='All Entities')\n self._add_consent_issue_counts(self.consent_df, show_all_counts=True)",
"def _generate_snyk_report(self):\n details = self.SNYK_REPORT['details']\n stats = self.SNYK_REPORT['stats']\n for eco in SUPPORTED_ECOSYSTEMS:\n eco_details = details[eco]\n eco_stats = stats[eco]\n # Calculate the number of vulnerabilities pointing to pvt pkgs.\n eco_stats['pvt_pkg_vulnerability_count'] = \\\n len(eco_details['pvt_pkgs'])\n\n # Calculate the stats for vulnerabilities deleted.\n if len(eco_details['delete']) > 0:\n success_del = 0\n total_del = 0\n for del_vuln in eco_details['delete']:\n total_del += 1\n if eco_details['delete'][del_vuln]['status'] == \"success\":\n success_del += 1\n # Deletion accuracy calculation.\n eco_stats['successfully_deleted'] = success_del\n eco_stats['to_be_deleted'] = total_del\n eco_stats['deletion_accuracy'] = str(round((\n (success_del * 100) / total_del), 2)) + \"%\"\n\n else:\n # When there is no data available for an eco, this default data is populated.\n eco_stats['successfully_deleted'] = 0\n eco_stats['deletion_accuracy'] = \"NA\"\n\n # Calculate the stats for vulnerabilities ingested.\n if len(eco_details['ingest']) > 0:\n success_ing = 0\n total_ing = 0\n pkgs = []\n ver_count = 0\n hash_count = 0\n for ing_vuln in eco_details['ingest']:\n total_ing += 1\n pkgs.append(eco_details['ingest'][ing_vuln]['name'])\n ver_count += eco_details['ingest'][ing_vuln]['affected_version_count']\n \"\"\"\n if eco == \"golang\":\n hash_count += eco_details['ingest'][ing_vuln]['affected_commit_hash_count']\n \"\"\"\n if eco_details['ingest'][ing_vuln]['status'] == \"success\":\n success_ing += 1\n if eco_details['ingest'][ing_vuln]['premium']:\n eco_stats['premium_count'] += 1\n # Ingestion accuracy calculation.\n eco_stats['successfully_ingested'] = success_ing\n eco_stats['to_be_ingested'] = total_ing\n eco_stats['ingestion_accuracy'] = str(round((\n (success_ing * 100) / total_ing), 2)) + \"%\"\n # Total affected pkgs and versions count.\n eco_stats['packages_affected'] = len(list(set(pkgs)))\n eco_stats['versions_affected'] = ver_count\n # The details of commit hash count is needed only in case of golang.\n if eco == \"golang\":\n eco_stats['commit_hash_affected'] = hash_count\n else:\n # When there is no data available for an eco, this default data is populated.\n eco_stats['successfully_ingested'] = 0\n eco_stats['ingestion_accuracy'] = \"NA\"",
"def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)",
"def _gen_report(self):\n print \"------------------------------------------\"\n print \"fio report\"\n print \"------------------------------------------\"\n print \"name\", \" \".join(f for f in FIELDS)\n # print fields\n for name in sorted(self.reports):\n report = self.reports[name]\n #print report\n print name, \" \".join(str(report.get(f)) for f in FIELDS)\n\n print \"*******************************************\"\n # print clats\n index = 0\n for name in sorted(self.reports):\n report = self.reports[name]\n if index == 0:\n print \"clat_percent\", \" \".join(\n str(c[0]) for c in report[\"clats\"])\n print name, \" \".join(str(c[1]) for c in report[\"clats\"])\n index += 1",
"def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats",
"def _generate_report(self):\n raise NotImplementedError",
"def summary_print(self):\r\n self.ensure_one()\r\n self.sent = True\r\n #return self.env['ir.actions.report'].report_action(self, 'proandsys_purchase_14.summary_landed_report')\r\n return self.env.ref('proandsys_purchase_14.summary_landedcost').report_action(self)",
"def inscription_summary(request, pk):\n candidat = get_object_or_404(Candidate, pk=pk)\n buff = io.BytesIO()\n pdf = InscriptionSummaryPDF(buff)\n pdf.produce(candidat)\n filename = slugify('{0}_{1}'.format(candidat.last_name, candidat.first_name)) + '.pdf'\n buff.seek(0)\n return FileResponse(buff, as_attachment=True, filename=filename)",
"def summarize(self):\n self.smalltalk += \"\\n Data IDs in this bundle: \\n\"\n self._files = {}\n inv_dict = {}\n # sort IDs to make sure pdfs are printed in same oder as they were\n # taken\n for k, v in self.stage_summaries.items():\n for qc_id in flatten_list(v):\n inv_dict[qc_id] = k\n sorted_ids = list(flatten_list(self.stage_summaries.values()))\n sorted_ids.sort(key=int)\n # for stage, value in self.stage_summaries.items():\n for qc_run_id in sorted_ids:\n # stage = inv_dict[qc_run_id]\n # if stage[0:7] == 'failed_':\n # stage = stage[7:]\n # try:\n # s = self.comments[qc_run_id]\n # except KeyError:\n # s = ''\n # self.comments[qc_run_id] = 'Classified as poor result.\\n' + s\n ds = Dataset(qc_run_id, self.db_name)\n device_name = ds.device_name\n f_folder = os.path.join(self.db_folder, \"tuning_results\", device_name)\n # for qc_run_id in flatten_list(value):\n self.smalltalk += str(qc_run_id) + \", \"\n\n # filename = stage + '_fit_ds'\n # filename += str(qc_run_id) + '.png'\n filename = os.path.join(f_folder, str(ds.ds.guid) + \".png\")\n\n self._files[str(qc_run_id)] = filename",
"def _export(self, report_type):\n model = self.env['report_trial_balance_contabilidad_cfdi']\n report = model.create(self._prepare_report_trial_balance())\n report.compute_data_for_report()\n return report.print_report(report_type)",
"def _generate_report(self):\n total_duration = 0.0\n total_nb_tests = 0\n total_nb_success = 0\n nb_modules = 0\n payload = []\n\n res_table = prettytable.PrettyTable(\n padding_width=2,\n field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])\n res_table.align['Module'] = \"l\"\n res_table.align['Duration'] = \"r\"\n res_table.align['Success'] = \"r\"\n\n # for each scenario we draw a row for the table\n for item in self.summary:\n if item['task_status'] is True:\n nb_modules += 1\n total_duration += item['overall_duration']\n total_nb_tests += item['nb_tests']\n total_nb_success += item['nb_success']\n try:\n success_avg = 100 * item['nb_success'] / item['nb_tests']\n except ZeroDivisionError:\n success_avg = 0\n success_str = f\"{success_avg:0.2f}%\"\n duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(item['overall_duration']))\n res_table.add_row([item['test_name'], duration_str,\n item['nb_tests'], success_str])\n payload.append({'module': item['test_name'],\n 'details': {'duration': item['overall_duration'],\n 'nb tests': item['nb_tests'],\n 'success rate': success_str,\n 'success': item['success'],\n 'failures': item['failures']}})\n\n total_duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(total_duration))\n try:\n self.result = 100 * total_nb_success / total_nb_tests\n except ZeroDivisionError:\n self.result = 100\n success_rate = f\"{self.result:0.2f}\"\n success_rate_str = str(success_rate) + '%'\n res_table.add_row([\"\", \"\", \"\", \"\"])\n res_table.add_row([\"TOTAL:\", total_duration_str, total_nb_tests,\n success_rate_str])\n\n LOGGER.info(\"Rally Summary Report:\\n\\n%s\\n\", res_table.get_string())\n LOGGER.info(\"Rally '%s' success_rate is %s%% in %s/%s modules\",\n self.case_name, success_rate, nb_modules,\n len(self.summary))\n self.details['summary'] = {'duration': total_duration,\n 'nb tests': total_nb_tests,\n 'nb success': success_rate}\n self.details[\"modules\"] = payload",
"def main():\n s = content.DataFiles()\n \n date_list = generate.get_list_dates(2016, 2016, 500)\n prod_list = list(s.get_collist_by_name(os.path.join(content.data_fldr,'food','garden_produce.csv'), 'name')[0])\n \n tbl_cust = generate.TableGenerator(8, ['STRING','PEOPLE', 'PEOPLE', 'PLACE'], ['Customer ID', 'First Name', 'Surname', 'Country'])\n tbl_cust.save_table('customers.csv')\n cust_list = list(s.get_collist_by_name('customers.csv', 'Customer ID')[0])\n \n tbl_sales = generate.TableGenerator(25, [date_list, cust_list, prod_list, 'CURRENCY'], ['Date of sale', 'Customer ID', 'Product', 'Amount'])\n tbl_sales.save_table('sales.csv')",
"def legacy_reporter(self):\n logging.info('Creating database-friendly summary report')\n header = '{}\\n'.format(','.join(self.legacy_headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SequencingDate\n data += GenObject.returnattr(sample.run, 'Date')\n # Analyst\n data += GenObject.returnattr(sample.run, 'InvestigatorName')\n # Legacy ConFindr clean/contaminated call\n data += 'ND,'\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a 
ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # NumClustersPF\n data += GenObject.returnattr(sample.run, 'NumberofClustersPF')\n # Percentage of reads mapping to PhiX control\n data += GenObject.returnattr(sample.run, 'phix_aligned')\n # Error rate calculated from PhiX control\n data += GenObject.returnattr(sample.run, 'error_rate')\n # LengthForwardRead\n data += GenObject.returnattr(sample.run, 'forwardlength',\n number=True)\n # LengthReverseRead\n data += GenObject.returnattr(sample.run, 'reverselength',\n number=True)\n # Real time strain\n data += GenObject.returnattr(sample.run, 'Description')\n # Flowcell\n data += 
GenObject.returnattr(sample.run, 'flowcell')\n # MachineName\n data += GenObject.returnattr(sample.run, 'instrument')\n # PipelineVersion\n data += self.commit + ','\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # cgMLST\n try:\n if type(sample.cgmlst.sequencetype) is list:\n if sample.cgmlst.sequencetype:\n cgmlst_seq_type = ';'.join(sorted(sample.cgmlst.sequencetype)).rstrip(';') + ','\n else:\n cgmlst_seq_type = 'ND,'\n else:\n cgmlst_seq_type = GenObject.returnattr(sample.cgmlst, 'sequencetype')\n # cgmlst_seq_type = cgmlst_seq_type if cgmlst_seq_type != 'ND,' else 'new,'\n data += cgmlst_seq_type\n except AttributeError:\n data += 'ND,'\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'legacy_combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)",
"def makeReport(illqc_summary_dir, decontam_summary_dir, output_fp, illqc_prefix, decontam_prefix):\n illqc_header = ['input', 'both kept', 'rev only', 'dropped', 'fwd only']\n decontam_header = ['true', 'false']\n\n with open(output_fp, 'wb') as f_out:\n writer = csv.writer(f_out, delimiter='\\t')\n writer.writerow(['Sample'] + [s.replace(\" \", \"_\") for s in illqc_header] + ['human', 'non_human'])\n \n for file in glob.glob(os.path.join(illqc_summary_dir, illqc_prefix + '*')):\n sample = file.rsplit(illqc_prefix)[1]\n \n ill = getValues(build_summary_fn(illqc_summary_dir, illqc_prefix, sample), illqc_header)\n de = getValues(build_summary_fn(decontam_summary_dir, decontam_prefix, sample), decontam_header)\n \n ill.extend(de)\n ill.insert(0, sample)\n writer.writerow(ill)",
"def describe(self):\n self.separator()\n print('File Name: ' + self.file_name)\n print('File create date: {}'.format(self.file_header['Creation Date']))\n print('Batch Count: ' + str(self.file_control_record.get('Batch Count')))\n print('Total Debit Amount: ' +\n str(self.file_control_record.get('Total Debit Amount')))\n print(\"Total Credit Amount: \" +\n str(self.file_control_record.get(\"Total Credit Amount\")))\n self.separator()",
"def metadata_reporter(self):\n logging.info('Creating summary report')\n header = '{}\\n'.format(','.join(self.headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data 
+= (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # PipelineVersion\n data += self.commit + ','\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)",
"def generate_report(self) -> Report:\n # equity_curve = self._generate_equity_curve()\n # summary_stats = self._generate_summary_stats(equity_curve)\n # return Report(equity_curve, summary_stats)\n pass",
"def post(self):\n filekey = self.request.get(\"filekey\")\n blob_key = self.request.get(\"blobkey\")\n\n if self.request.get(\"daily_speed_sum\"):\n logging.info(\"Starting daily speed sum...\")\n pipeline = DailySpeedSumPipeline(filekey, blob_key)\n pipeline.start()\n self.redirect(pipeline.base_path + \"/status?root=\" + pipeline.pipeline_id)\n else:\n\t logging.info(\"Unrecognized operation.\")",
"def create_vuln_report():",
"def combsummary(self, date=yesterdayobj()):\n date = convert_date(date)\n columns = [\n \"基金名称\",\n \"基金代码\",\n \"当日净值\",\n \"单位成本\",\n \"持有份额\",\n \"基金现值\",\n \"基金总申购\",\n \"历史最大占用\",\n \"基金持有成本\",\n \"基金分红与赎回\",\n \"换手率\",\n \"基金收益总额\",\n \"投资收益率\",\n ]\n summarydf = pd.DataFrame([], columns=columns)\n for fund in self.fundtradeobj:\n summarydf = summarydf.append(\n fund.dailyreport(date), ignore_index=True, sort=True\n )\n tname = \"总计\"\n tcode = \"total\"\n tunitvalue = float(\"NaN\")\n tunitcost = float(\"NaN\")\n tholdshare = float(\"NaN\")\n tcurrentvalue = summarydf[\"基金现值\"].sum()\n tpurchase = summarydf[\"基金总申购\"].sum()\n tbtnk = bottleneck(self.totcftable[self.totcftable[\"date\"] <= date])\n tcost = summarydf[\"基金持有成本\"].sum()\n toutput = summarydf[\"基金分红与赎回\"].sum()\n tturnover = turnoverrate(self.totcftable[self.totcftable[\"date\"] <= date], date)\n # 计算的是总系统作为整体和外界的换手率,而非系统各成分之间的换手率\n tearn = summarydf[\"基金收益总额\"].sum()\n trate = round(tearn / tbtnk * 100, 4)\n trow = pd.DataFrame(\n [\n [\n tname,\n tcode,\n tunitvalue,\n tunitcost,\n tholdshare,\n tcurrentvalue,\n tpurchase,\n tbtnk,\n tcost,\n toutput,\n tturnover,\n tearn,\n trate,\n ]\n ],\n columns=columns,\n )\n summarydf = summarydf.append(trow, ignore_index=True, sort=True)\n\n return summarydf[columns].sort_values(by=\"基金现值\", ascending=False)",
"def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)",
"def upload_statistics(self):\n logger.info('Importing statistics...')\n call_command('import_qc', self.accession, self.rootpath, '--pipeline', self.version)\n logger.info('Stats successfully imported.')",
"def generate_reporting_objects(test_name, start_time, end_time, campaign, label_dict, label_dict_full, sample_interval, test_interval, test_type, metric_types, one_step_var, country):\n # e.g. labels = {'Static banner':'20101227_JA061_US','Fading banner':'20101228_JAFader_US'}\n use_labels_var = True\n \n \"\"\" Build reporting objects \"\"\"\n ir_cmpgn = DR.IntervalReporting(use_labels=False,font_size=20,plot_type='line',query_type='campaign',file_path=projSet.__web_home__ + 'campaigns/static/images/')\n \n \"\"\" \n DETERMINE DONOR DOLLAR BREAKDOWN \n ================================\n \"\"\"\n try:\n logging.info('')\n logging.info('Determining Donations Distribution:')\n logging.info('===================================\\n')\n \n DR.DonorBracketReporting(query_type=FDH._QTYPE_LP_, file_path=projSet.__web_home__ + 'tests/static/images/').run(start_time, end_time, campaign)\n except:\n pass\n \n \"\"\" \n DETERMINE CATEGORY DISTRIBUTION \n ===============================\n \"\"\"\n if(0):\n DR.CategoryReporting(file_path=projSet.__web_home__ + 'tests/static/images/').run(start_time, end_time, campaign)\n \n \n \"\"\" \n DETERMINE LANGUAGE BREAKDOWN \n ============================\n \"\"\"\n html_language = ''\n if(1):\n logging.info('')\n logging.info('Determining Languages Distribution:')\n logging.info('===================================\\n')\n \n columns, data = DL.CiviCRMLoader().get_donor_by_language(campaign, start_time, end_time)\n html_language = DR.DataReporting()._write_html_table(data, columns)\n \n \"\"\" \n DETERMINE PAYMENT METHODS \n =========================\n \"\"\"\n logging.info('')\n logging.info('Determining Payment Methods:')\n logging.info('============================\\n')\n \n ccl = DL.CiviCRMLoader()\n \n pm_data_counts, pm_data_conversions = ccl.get_payment_methods(campaign, start_time, end_time, country=country)\n\n html_table_pm_counts = DR.IntervalReporting().write_html_table_from_rowlists(pm_data_counts, ['Payment Method', 'Portion of Donations (%)'], 'Landing Page')\n html_table_pm_conversions = DR.IntervalReporting().write_html_table_from_rowlists(pm_data_conversions, ['Payment Method', 'Visits', 'Conversions', 'Conversion Rate (%)', 'Amount', 'Amount 25'], 'Landing Page')\n \n \n \"\"\" \n BUILD REPORTING OBJECTS \n =======================\n \"\"\"\n \n if test_type == FDH._TESTTYPE_BANNER_:\n ir = DR.IntervalReporting(use_labels=use_labels_var,font_size=20,plot_type='step',query_type=FDH._QTYPE_BANNER_,file_path=projSet.__web_home__ + 'tests/static/images/')\n link_item = '<a href=\"http://meta.wikimedia.org/w/index.php?title=Special:NoticeTemplate/view&template=%s\">%s</a>'\n measured_metric = ['don_per_imp', 'amt_norm_per_imp', 'click_rate'] \n \n elif test_type == FDH._TESTTYPE_LP_:\n ir = DR.IntervalReporting(use_labels=use_labels_var,font_size=20,plot_type='step',query_type=FDH._QTYPE_LP_, file_path=projSet.__web_home__ + 'tests/static/images/')\n link_item = '<a href=\"http://meta.wikimedia.org/w/index.php?title=Special:NoticeTemplate/view&template=%s\">%s</a>'\n measured_metric = ['don_per_view', 'amt_norm_per_view']\n \n elif test_type == FDH._TESTTYPE_BANNER_LP_:\n ir = DR.IntervalReporting(use_labels=use_labels_var,font_size=20,plot_type='step',query_type=FDH._QTYPE_BANNER_LP_,file_path=projSet.__web_home__ + 'tests/static/images/')\n link_item = '<a href=\"http://meta.wikimedia.org/w/index.php?title=Special:NoticeTemplate/view&template=%s\">%s</a>'\n measured_metric = ['don_per_imp', 'amt_norm_per_imp','don_per_view', 'amt_norm_per_view', 
'click_rate']\n \n \n \"\"\" \n GENERATE PLOTS FOR EACH METRIC OF INTEREST \n ==========================================\n \"\"\"\n logging.info('')\n logging.info('Determining Metric Minutely Counts:')\n logging.info('==================================\\n')\n \n for metric in metric_types:\n ir.run(start_time, end_time, sample_interval, metric, campaign, label_dict, one_step=one_step_var, country=country)\n \n \n \"\"\" \n CHECK THE CAMPAIGN VIEWS AND DONATIONS \n ======================================\n \"\"\"\n ir_cmpgn.run(start_time, end_time, sample_interval, 'views', campaign, {}, one_step=one_step_var, country=country)\n ir_cmpgn.run(start_time, end_time, sample_interval, 'donations', campaign, {}, one_step=one_step_var, country=country)\n \n \n \"\"\" \n PERFORM HYPOTHESIS TESTING \n ==========================\n \"\"\"\n \n logging.info('')\n logging.info('Executing Confidence Queries:')\n logging.info('============================\\n')\n \n column_colours = dict()\n confidence = list()\n \n cr = DR.ConfidenceReporting(use_labels=use_labels_var,font_size=20,plot_type='line',hyp_test='t_test', query_type=test_type, file_path=projSet.__web_home__ + 'tests/static/images/')\n \n for metric in measured_metric:\n \n ret = cr.run(test_name, campaign, metric, label_dict, start_time, end_time, sample_interval, one_step=one_step_var, country=country)\n \n confidence.append(ret[0])\n column_colours[metric] = ret[1]\n \n \n \"\"\" \n GENERATE A REPORT SUMMARY TABLE\n ===============================\n \"\"\"\n \n logging.info('')\n logging.info('Generating Summary Report:')\n logging.info('=========================\\n')\n \n \"\"\"\n \n if one_step_var == True:\n summary_start_time = DL.CiviCRMLoader().get_earliest_donation(campaign)\n else:\n summary_start_time = DL.LandingPageTableLoader().get_earliest_campaign_view(campaign)\n \n summary_end_time = DL.CiviCRMLoader().get_latest_donation(campaign)\n \"\"\"\n \n srl = DL.SummaryReportingLoader(query_type=test_type)\n srl.run_query(start_time, end_time, campaign, one_step=one_step_var,country=country)\n \n columns = srl.get_column_names()\n summary_results = srl.get_results()\n \n \"\"\" \n REMOVED - links to pipeline artifacts, this was broken and should be implemented properly later\n \"\"\"\n \n \"\"\" Get Winners, Losers, and percent increase \"\"\"\n \n winner = list()\n loser = list()\n percent_increase = list()\n \n labels = list()\n for item_long_name in label_dict:\n labels.append(label_dict[item_long_name])\n\n for metric in measured_metric:\n ret = srl.compare_artifacts(label_dict.keys(), metric, labels=labels)\n \n winner.append(ret[0]) \n loser.append(ret[1])\n percent_increase.append(ret[2])\n \n \"\"\" Compose table for showing artifact \"\"\"\n html_table = DR.DataReporting()._write_html_table(summary_results, columns, coloured_columns=column_colours, use_standard_metric_names=True) \n \n metric_legend_table = DR.DataReporting().get_standard_metrics_legend()\n conf_legend_table = DR.ConfidenceReporting(query_type='bannerlp', hyp_test='TTest').get_confidence_legend_table()\n \n html_table = '<h4><u>Metrics Legend:</u></h4><div class=\"spacer\"></div>' + metric_legend_table + \\\n '<div class=\"spacer\"></div><h4><u>Confidence Legend for Hypothesis Testing:</u></h4><div class=\"spacer\"></div>' + conf_legend_table + '<div class=\"spacer\"></div><div class=\"spacer\"></div>' + html_table\n \n \"\"\" Generate totals for the test summary \"\"\"\n srl = DL.SummaryReportingLoader(query_type=FDH._QTYPE_TOTAL_)\n 
srl.run_query(start_time, end_time, campaign, one_step=one_step_var, country=country)\n html_table = html_table + '<br><br>' + DR.DataReporting()._write_html_table(srl.get_results(), srl.get_column_names(), use_standard_metric_names=True)\n \n\n return [measured_metric, winner, loser, percent_increase, confidence, html_table_pm_counts, html_table_pm_conversions, html_language, html_table]",
"def __report(self):\n dataframe = pd.read_csv(os.path.join(self.report_path, \"cyclomatic-complexity.csv\"),\n names=[\"NLOC\", \"CCN\", \"Token\", \"Param\", \"Length\", \"Location\",\n \"Path\", \"Function\", \"Args\", \"Row\", \"Col\"],\n sep=',')\n dataframe.drop(['Path', 'Function', 'Row', 'Col'], axis=1, inplace=True)\n dataframe.sort_values('CCN', ascending=False, inplace=True)\n dataframe[\"Location\"] = dataframe[\"Location\"].str.replace('\\\\', '/')\n self.report_html(os.path.join(self.report_path,\n \"cyclomatic-complexity-report.html\"), dataframe,\n \"Cyclomatic Complexity report\")"
]
| [
"0.58248866",
"0.5804602",
"0.57893074",
"0.57541466",
"0.5715792",
"0.5710718",
"0.5644889",
"0.562997",
"0.5606139",
"0.5578409",
"0.5574924",
"0.55447257",
"0.55363977",
"0.550161",
"0.547973",
"0.5438724",
"0.5418366",
"0.53975743",
"0.5387723",
"0.5375758",
"0.5373265",
"0.53425306",
"0.53370565",
"0.5296877",
"0.5268852",
"0.5245206",
"0.5216148",
"0.52031606",
"0.52025175",
"0.518205"
]
| 0.65132904 | 0 |
iterate over all available hbtasks | def available_hbtasks():
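    # walk the tasks module and yield every concrete HbTask subclass as (name, class)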
for tname in dir(tasks):
t = getattr(tasks,tname)
if inspect.isclass(t) and issubclass(t,tasks.HbTask) and t is not tasks.HbTask:
yield tname, t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def list_tasks():",
"def get_all_tasks(self):\n return [\n self.create_virtual_environment,\n self.doc,\n self.install,\n self.lint,\n self.make_distribution,\n self.reset,\n self.setup,\n self.test,\n ]",
"def all_envs():\n return all_tasks.keys()",
"def tasks():",
"def available_shells(self):",
"def list_tasks(ctx):\n ctx.run(\"invoke --list\")",
"def discover_tasks(app):\n\n task_arguments.add_argument(\n \"preload-defaults-from-site\",\n type=str,\n required=False,\n default=\"\",\n choices=preload_defaults_from_site_choices,\n help=\"Select site within environment to load defaults from, argument format is <environment_name>/<site_name>\",\n )\n\n for tasks_base_dir in app.config[\"JINJAMATOR_TASKS_BASE_DIRECTORIES\"]:\n for file_ext in [\"py\", \"j2\"]:\n for tasklet_dir in glob.glob(\n os.path.join(tasks_base_dir, \"**\", f\"*.{file_ext}\"), recursive=True\n ):\n task_dir = os.path.dirname(tasklet_dir)\n append = True\n for dir_chunk in task_dir.replace(tasks_base_dir, \"\").split(\n os.path.sep\n ): # filter out hidden directories\n if dir_chunk.startswith(\".\") or dir_chunk in [\"__pycache__\"]:\n append = False\n break\n\n dir_name = task_dir.replace(tasks_base_dir, \"\")[1:]\n if append and dir_name not in available_tasks_by_path:\n\n task_id = xxhash.xxh64(task_dir).hexdigest()\n\n task_info = {\n \"id\": task_id,\n \"path\": dir_name,\n \"base_dir\": tasks_base_dir,\n \"description\": get_section_from_task_doc(task_dir)\n or \"no description\",\n }\n available_tasks_by_path[dir_name] = task_info\n try:\n task = JinjamatorTask()\n log.debug(app.config[\"JINJAMATOR_FULL_CONFIGURATION\"])\n task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n\n task.load(\n os.path.join(task_info[\"base_dir\"], task_info[\"path\"])\n )\n with app.app_context():\n data = json.loads(\n jsonify(\n task.get_jsonform_schema()[\"schema\"]\n ).data.decode(\"utf-8\")\n )\n task_models[task_info[\"path\"]] = api.schema_model(task_id, data)\n del task\n\n log.info(f\"registered model for task {task_dir}\")\n\n dynamic_role_name = f\"task_{dir_name}\"\n new_role = JinjamatorRole(name=dynamic_role_name)\n\n with app.app_context():\n db.session.add(new_role)\n try:\n db.session.commit()\n except Exception:\n pass\n\n @ns.route(f\"/{task_info['path']}\", endpoint=task_info[\"path\"])\n class APIJinjamatorTask(Resource):\n @api.doc(\n f\"get_task_{task_info['path'].replace(os.path.sep,'_')}_schema\"\n )\n @api.expect(task_arguments)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def get(self):\n \"\"\"\n Returns the json-schema or the whole alpacajs configuration data for the task\n \"\"\"\n\n args = task_arguments.parse_args(request)\n schema_type = args.get(\"schema-type\", \"full\")\n try:\n preload_data = json.loads(\n args.get(\"preload-data\", \"{}\")\n )\n except TypeError:\n preload_data = {}\n preload_data = remove_redacted(preload_data)[1]\n environment_site = args.get(\n \"preload-defaults-from-site\"\n )\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n inner_task = JinjamatorTask()\n\n inner_task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n inner_task.configuration.merge_dict(preload_data)\n\n inner_task.load(relative_task_path)\n\n if environment_site not in [None, \"None\", \"\"]:\n inner_task._configuration[\n \"jinjamator_site_path\"\n ] = site_path_by_name.get(environment_site)\n inner_task._configuration[\n \"jinjamator_site_name\"\n ] = environment_site\n env_name, site_name = environment_site.split(\"/\")\n roles = [\n role[\"name\"]\n for role in g._user.get(\"roles\", [])\n ]\n if (\n 
f\"environment_{env_name}|site_{site_name}\"\n in roles\n or f\"environments_all\" in roles\n or f\"administrator\" in roles\n ):\n inner_task.configuration.merge_yaml(\n \"{}/defaults.yaml\".format(\n site_path_by_name.get(environment_site)\n )\n )\n else:\n abort(\n 403,\n f\"User neither has no role environment_{env_name}|site_{site_name} nor environments_all nor administrator. Access denied.\",\n )\n\n full_schema = inner_task.get_jsonform_schema()\n\n if schema_type in [\"\", \"full\"]:\n response = jsonify(full_schema)\n elif schema_type in [\"schema\"]:\n response = jsonify(full_schema.get(\"schema\", {}))\n elif schema_type in [\"data\"]:\n response = jsonify(full_schema.get(\"data\", {}))\n elif schema_type in [\"options\"]:\n response = jsonify(full_schema.get(\"options\", {}))\n elif schema_type in [\"view\"]:\n response = jsonify(full_schema.get(\"view\", {}))\n del inner_task\n return response\n\n @api.doc(\n f\"create_task_instance_for_{task_info['path'].replace(os.path.sep,'_')}\"\n )\n @api.expect(task_models[task_info[\"path\"]], validate=False)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def post(self):\n \"\"\"\n Creates an instance of the task and returns the job_id\n \"\"\"\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})\n\n if task_info[\"description\"]:\n post.__doc__ += task_info[\"description\"]\n get.__doc__ += task_info[\"description\"]\n\n except Exception as e:\n import traceback\n\n log.error(\n f\"unable to register {task_dir}: {e} {traceback.format_exc()}\"\n )",
"def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)",
"def default_tasks():\n tasks = {'run': run, 'bash': bash}\n for entry_point in pkg_resources.iter_entry_points('jarbas_task'):\n tasks[entry_point.name] = entry_point.load()\n return tasks",
"def options(request):\n out = {name:task().api for name, task in available_hbtasks()}\n return JsonResponse(out)",
"def test_apps(self):\n ## List the dirs in PATH\n apps = []\n for path in self.paths:\n apps.extend(os.listdir(path))\n \n for app in self.expected_executables:\n assert app in apps",
"def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1",
"def run_all_default_tasks():\n for func in DEFAULT_TASKS_KEY:\n func()",
"def _iter_commands(self):\n return {entry_point.name: entry_point for entry_point in\n pkg_resources.iter_entry_points('chanjo.subcommands')}",
"def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))",
"def _iter_entry_points(group, name=None, project=None):\n for dist in pkg_resources.working_set:\n if project and dist.project_name != project:\n continue\n entries = dist.get_entry_map(group)\n if name is None:\n for ep in entries.values():\n yield ep\n elif name in entries:\n yield entries[name]",
"def test_all_registered():\n for ep in iter_entry_points('fiona.fio_commands'):\n assert ep.name in main_group.commands",
"def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs",
"def run(self):\n logging.debug('List Installed Programs')\n if self.short:\n print(' '.join([ent for ent in pakit.conf.IDB]))\n return\n\n nchars = 12\n fmt = str(nchars).join(['{prog:', '} {repo:',\n '} {hash:', '} {date}'])\n installed = ['Program Repo Hash Date']\n for prog in pakit.conf.IDB:\n entry = pakit.conf.IDB[prog]\n installed.append(fmt.format(prog=prog[0:nchars],\n repo=entry['repo'][0:nchars],\n date=entry['date'],\n hash=entry['hash'][0:nchars]))\n\n msg = 'Installed Programs:'\n msg += PREFIX + PREFIX.join(installed)\n print(msg)\n return msg",
"def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host",
"def list_tasks(q = None):\n to = {\"p\":{}, \"v\":{}}\n for k, v in to.items():\n pin = HeaterController.pin_ids[k]\n state = subprocess.check_output([\"gpio\", 'read', pin]).strip()\n to[k][\"state\"] = \"on\" if state==\"0\" else \"off\"\n to[k][\"on_id\"] = \"\"\n to[k][\"on_time\"] = \"\"\n to[k][\"off_id\"] = \"\"\n to[k][\"off_time\"] = \"\"\n\n tasks = []\n if q is None:\n output = subprocess.check_output([\"atq\"])\n else:\n output = subprocess.check_output([\"atq\", \"-q\", q])\n for t in output.split(\"\\n\"):\n m = HeaterController.task_parse.match(t.strip())\n if m is not None:\n task_id = m.group(1)\n task_time = datetime.strptime(m.group(2), r'%a %b %d %H:%M:%S %Y').strftime(r'%y%m%d%H%M')\n q_name = m.group(3)\n tasks.append((task_id, task_time, q_name))\n tasks = sorted(tasks, key=lambda x: x[2] + x[1])\n while len(tasks):\n task_id, task_time, q_name = tasks.pop(0)\n output = subprocess.check_output([\"at\", \"-c\", task_id])\n # get last line of the output\n lines = output.strip().split(\"\\n\")\n # find value of -o parameter that specifies operation\n m = HeaterController.cmd_parse.match(lines[-1].strip())\n if m is not None:\n cmd = m.group(1)\n if cmd == r'on':\n to[q_name][\"on_id\"] = task_id\n to[q_name][\"on_time\"] = task_time\n elif cmd == r'off':\n to[q_name][\"off_id\"] = task_id\n to[q_name][\"off_time\"] = task_time\n else:\n assert False, \"Unexpected value of -o parameter: {}\".format(cmd)\n\n return {\"tasks\":to}",
"def all_programs(colname, path='.', latest=True):\n from os import listdir\n from os.path import isdir\n for fname in listdir(python_path(path)):\n if isdir(fname):\n logger.info('>>>> Program {}:'.format(fname))\n try:\n exam_program_pools(colname, program_path=fname, latest=latest)\n except IndexError:\n \"\"\"No files, no matter\"\"\"\n pass",
"def get_processes():\n yield from psutil.process_iter()",
"def ini_get_all():\n raise NotImplementedError()",
"def ShowAllIITs(cmd_args=[], cmd_options={}):\n try:\n iit_queue = kern.globals.global_iit_alloc_queue \n except ValueError:\n print \"This debug macro is only available in development or debug kernels\"\n return\n \n print GetIPCImportantTaskSummary.header\n for iit in IterateQueue(iit_queue, 'struct ipc_importance_task *', 'iit_allocation'):\n print GetIPCImportantTaskSummary(iit)\n return",
"def run(self):\n for tool in self.tools:\n tool.run()\n return",
"def get_tasks(loop):\n tasks = asyncio.all_tasks(loop)\n return \"Tasks: \" + \", \".join(\n [f\"{task.get_name()}: {task.get_coro().__name__}\" for task in tasks]\n )",
"def find_all_pythons():\n \n allpys = []\n \n # split PATH according to platform rules\n pathlist = string.split( os.environ['PATH'], os.pathsep )\n\n # search PATH, excluding nonexistant dirs\n for path in filter( os.path.isdir, pathlist ):\n allpys.extend( find_pythons_in_dir( path ) )\n\n # check the win32 registry, as appropriate\n allpys.extend( get_pythons_from_registry() )\n\n # and of course I'm running under a Python, in case\n # no others were found\n allpys.append( os.path.abspath(sys.executable) )\n \n return allpys",
"def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result",
"def list_processor_names():\n return [ep.name for ep in pkg_resources.iter_entry_points(ENTRY_POINT_NAME)]"
]
| [
"0.5763663",
"0.5759618",
"0.56385845",
"0.5613529",
"0.55773354",
"0.54651177",
"0.54613435",
"0.53956527",
"0.5391799",
"0.53323907",
"0.53138953",
"0.5310662",
"0.52955556",
"0.5286145",
"0.5217878",
"0.51891303",
"0.5174316",
"0.5149377",
"0.51352507",
"0.51346594",
"0.51346487",
"0.5114405",
"0.51136637",
"0.5104641",
"0.5098127",
"0.5093159",
"0.50847083",
"0.5075069",
"0.5070035",
"0.50298744"
]
| 0.74593323 | 0 |
check the settings of a HbTask return json with result hash, which you can then use to set up the next e.g. /check/Add/?x=1&y=5 | def check(request,task_name):
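    # resolve the task class from its name, build it from the GET parameters,
    # and register the resulting object so later calls can reference its hash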
    todo = getattr(tasks, task_name, None)
    if todo is None:
        return JsonResponse(
            {'error': 'This {} is not a known task'.format(task_name)})
parameters = todo().settings.get.keys()
try:
kwargs = {par:request.GET[par] for par in parameters}
except KeyError:
return JsonResponse(
{'error':'Missing parameter: please provide {}'.format(parameters)})
action = todo(**kwargs)
if not action.settings.valid:
return JsonResponse(
{'error':'Invalid settings: {}'.format(action.settings.errors)})
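    # settings are valid: compute the result object whose hash identifies this task's output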
action.set_result()
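    # NOTE: 'project' is not defined in this view; it is assumed to be supplied by
    # the enclosing module (e.g. a module-level default project)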
add_to_project(action.result.hash,project)
return JsonResponse(action.description) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(request, resulthash):\n try:\n stored = models.HBTask.objects.get(resulthash=resulthash)\n except:\n stored = None\n thisone = {'Error', 'not found in database'}\n \n # Finished, and reported back\n if stored.status == models.HBTask.OK_STATUS:\n thisone = True\n\n # Submitted, have not heard from since\n elif stored.status == models.HBTask.PENDING_STATUS:\n obj = HbObject(hash=resulthash)\n status,fullstatus = check_stored_status(obj)\n thisone = fullstatus or True \n\n # resulted in error\n elif stored.status == models.HBTask.ERROR_STATUS:\n thisone = {'Error','something'}\n\n # no status: submit now\n else:\n # print 'Now status : ',stored.status\n # print 'Now submit task : ',stored.celery_taskname\n\n # to submit hb task\n todo = getattr(tasks,stored.hb_taskname)\n # celery_result = todo.delay(**json.loads(stored.parameters))\n parameters = json.loads(stored.parameters)\n \n action = todo(**parameters)\n\n if not action.ready_to_go:\n thisone = {'Warning':'Not all dependencies are met',\n 'dependency_status':action.dependency_status()}\n\n # Add me as waiting for a few\n todo = [d.split(':')[1] for d in action.dependencies_todo]\n dep = models.HBTask.objects.filter(resulthash__in=todo)\n for d in dep:\n w,isnew = models.Waiting.objects.get_or_create(todo=stored,dependency=d)\n # print 'Created ? ',w,isnew\n # submit dependency to run\n run(None,resulthash=d.resulthash)\n else:\n action.submit()\n time.sleep(0.5)\n obj = HbObject(hash=resulthash)\n status,fullstatus = check_stored_status(obj)\n thisone = fullstatus or True \n\n return JsonResponse({'result':thisone})\n # return JsonResponse(thisone)",
"def check_task(request, tid):\n try:\n slogger.glob.info(\"check task #{}\".format(tid))\n response = task.check(tid)\n except Exception as e:\n slogger.glob.error(\"cannot check task #{}\".format(tid), exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return JsonResponse(response)",
"def options(request):\n out = {name:task().api for name, task in available_hbtasks()}\n return JsonResponse(out)",
"def get_data_for_task_manager(data: dict) -> dict:\n if check_host(data['hostIp']):\n command = f\"ssh user@{data['hostIp']} -i ../id_rsa 'C:\\Setup\\{data['scriptName']}'\"\n dict_from_device = check_response_from_device(start_process_on_device(command))\n if dict_from_device[\"stringFromDevice\"] == \"correct\": \n dict_from_device[\"resultRequest\"] = True\n return dict_from_device\n return dict(resultRequest=False)",
"def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")",
"def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")",
"def results(request):\n q = {k:v for k,v in request.GET.iteritems()}\n\n # q = request.GET.getlist('h')\n # if q is None:\n # return JsonResponse({'Error':'provide query data with e.g. /?query={}'})\n\n # allow for selection based on result type\n thetype = q.pop('type',None)\n if thetype is not None:\n q['hb_taskname'] = q.get('hbtaskname',False) or tasks_that_make(thetype)\n\n rr = models.HBTask.objects.filter(status__gt=models.HBTask.NO_STATUS,**q)\n\n if rr:\n res = defaultdict(list)\n for r in rr:\n res[r.resulttype].append(r)\n\n res2 = {}\n for k in res.keys():\n res2[k] = [r.description for r in res[k]]\n else:\n res2 = None\n\n return JsonResponse( {'results':res2} )",
"def available(request):\n hashes = request.GET.getlist('h',None)\n available = {}\n for h in hashes:\n \n available.update({h:check_available_object(h)})\n\n return JsonResponse(available)",
"async def parse_task_result(self, **kwargs):\n empty_result = {\n \"status\": None,\n \"c_time\": None,\n \"f_time\": None,\n \"worker\": None,\n \"history\": [],\n \"code\": None,\n \"stdout\": None,\n \"stderr\": None,\n }\n hosts = self.meta[\"hosts\"]\n\n if \"cost_gt\" in kwargs or \"cost_lt\" in kwargs:\n if \"finish\" in kwargs and not kwargs[\"finish\"]:\n raise TaskException(\"query by cost time need finish = True\")\n kwargs[\"finish\"] = True\n\n # create dict to save result\n result = {\n \"count\": 0,\n \"ips\": [],\n \"details\": {},\n }\n\n for ip in hosts:\n\n # result data\n ip_result = self.meta[\"result\"].get(ip, empty_result)\n\n # empty jump\n if not ip_result:\n continue\n\n # finish\n if \"finish\" in kwargs:\n if kwargs[\"finish\"] and not ip_result[\"f_time\"]:\n continue\n if not kwargs[\"finish\"] and ip_result[\"f_time\"]:\n continue\n\n # success\n if \"success\" in kwargs:\n if kwargs[\"success\"] and ip_result[\"status\"] != TASK_STATUS_SUCCESS:\n continue\n if not kwargs[\"success\"] and ip_result[\"status\"] == TASK_STATUS_SUCCESS:\n continue\n\n # failure\n if \"failure\" in kwargs:\n if kwargs[\"failure\"] and ip_result[\"status\"] != TASK_STATUS_FAILURE:\n continue\n if not kwargs[\"failure\"] and ip_result[\"status\"] == TASK_STATUS_FAILURE:\n continue\n\n # time cost\n if \"cost_gt\" in kwargs:\n if ip_result[\"f_time\"] - ip_result[\"c_time\"] < kwargs[\"cost_gt\"]:\n continue\n if \"cost_lt\" in kwargs:\n if ip_result[\"f_time\"] - ip_result[\"c_time\"] > kwargs[\"cost_lt\"]:\n continue\n\n # code\n if \"code\" in kwargs:\n if ip_result[\"code\"] != kwargs[\"code\"]:\n continue\n\n # retry\n # if \"retry\" in kwargs:\n # if \"retry\"\n\n result[\"count\"] += 1\n result[\"ips\"].append(ip)\n result[\"details\"][ip] = ip_result\n\n return result",
"def check_status(request, pk, task_id):\n from celery.result import AsyncResult\n res = AsyncResult(task_id)\n link = ''\n if res.state == 'SUCCESS':\n report = get_object_or_404(Report, pk=pk)\n link = report.report_file.url\n return HttpResponse(\n json.dumps({\n 'state': res.state,\n 'link': link,\n 'email': getattr(\n settings,\n 'REPORT_BUILDER_EMAIL_NOTIFICATION',\n False\n )\n }),\n content_type=\"application/json\")",
"def on_get(self, req, resp, task_id):\n try:\n builddata = req.get_param_as_bool('builddata')\n subtask_errors = req.get_param_as_bool('subtaskerrors')\n try:\n layers = int(req.params.get('layers', '0'))\n except Exception:\n layers = 0\n\n first_task = self.get_task(req, resp, task_id, builddata)\n\n if first_task is None:\n self.info(req.context, \"Task %s does not exist\" % task_id)\n self.return_error(resp,\n falcon.HTTP_404,\n message=\"Task %s does not exist\" % task_id,\n retry=False)\n else:\n # If layers is passed in then it returns a dict of tasks instead of the task dict.\n if layers:\n resp_data, errors = self.handle_layers(\n req, resp, task_id, builddata, subtask_errors, layers,\n first_task)\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n resp_data['subtask_errors'] = errors\n else:\n resp_data = first_task\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n _, errors = self.handle_layers(req, resp, task_id,\n False, subtask_errors,\n 1, first_task)\n resp_data['subtask_errors'] = errors\n\n resp.text = json.dumps(resp_data)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(req.context, \"Unknown error: %s\" % (str(ex)))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)",
"def finished(request, resulthash):\n info = request.GET.get('short_info',None)\n print 'INFO : ',info\n try:\n stored = models.HBTask.objects.get(resulthash=resulthash)\n stored.status = 2\n if info:\n stored.short_info = info\n\n stored.save()\n thisone = 'Ok'\n runs = models.HBTaskRun.objects.filter(task=thisone,done=False)\n\n # make run finished\n for r in runs:\n r.done = True\n r.save()\n except:\n thisone = {'Error':'not found in database'}\n \n # Check if anyone was waiting for thisone to finish:\n waiting_for_me = models.Waiting.objects.filter(dependency=stored)\n\n for w in waiting_for_me: \n # Submit the task waiting if there was only one dependency left\n if w.todo.waiting_set.count() == 1:\n run(None, resulthash=w.todo.resulthash)\n # remove this dependency\n w.delete()\n\n return JsonResponse({resulthash:thisone})",
"def _validate_results(self, task, result):\n assert isinstance(result, dict), \\\n f\"{task} returned a {type(result)} rather than a dict\"\n for k in result:\n assert k in self.provides, \\\n f\"{task} provided unwanted output {k}\"\n for k in self.provides:\n assert k in result, \\\n f\"{task} failed to provide needed output {k}\"",
"async def get_task_result(task_id: TaskId):",
"def health_check(request):\n response = {\"Status\": True}\n return JsonResponse(response, safe=False)",
"def task_start_parsing():\n add_task(url_for(\"task_queue_users\"))\n add_task(url_for(\"task_clean_tmp_files\"))\n return OK_RESPONSE",
"def isTasksExists(request):\n task_status = {}\n task_result = 0\n flag = None\n for task in request.data['tasks']:\n task_obj = Tafv2Task.objects.filter(script=task)\n if task_obj:\n task_status[task] = \"Task Exists.\"\n else:\n task_result += 1\n task_status[task] = \"Task doesn't Exists.\"\n if task_result > 0:\n flag = False\n else:\n flag = True\n\n return {'taskResult': flag, 'taskStatus': task_status}",
"def post(self, request):\n result = None\n print(\"RESULT API: \", request.data)\n task_exec_update = TaskExecutionResult.objects.get(\n id=request.data['context']['taskExecutionID']\n )\n try:\n if request.data['result'].lower() == \"pass\":\n result = apisettings.PASS\n if request.data['result'].lower() == \"fail\":\n result = apisettings.FAIL\n if request.data['result'].lower() == \"abort\":\n result = apisettings.ABORT\n\n task_exec_update.result = result\n task_exec_update.save(update_fields=['result'])\n Log.summary_task_result(context=request.data.get(\"context\"), result=request.data['result'])\n return Response(status=HTTP_200_OK)\n except Exception as e:\n logger = Log.get_logger(__name__)\n logger.exception(e)\n return Response(status=HTTP_400_BAD_REQUEST)",
"def processTask(self):\n #Util.set_color(Util.FOREGROUND_YELLOW | Util.FOREGROUND_INTENSITY)\n #logging.info(\"cmd : %s\", self.ExecutionTask.get_cmd())\n #logging.info(\"param : %s\", self.ExecutionTask.get_param())\n #logging.info(\"ret : %s\", str(self.ExecutionTask.get_ret()))\n #logging.info(\"ipport : %s\", self.ExecutionTask.get_ipport())\n #Util.set_color(Util.FOREGROUND_WHITE)\n\n ##############################################################\n # Process for any commands without received messages.....\n ##############################################################\n if self.ExecutionTask.get_cmd() == 'PASS' or self.ExecutionTask.get_cmd() == 'FAIL':\n logging.debug(\"result is %s\", self.ExecutionTask.get_cmd())\n self.setStatus('STOP')\n self.setTestResult(self.ExecutionTask.get_cmd())\n return\n\n if self.ExecutionTask.get_cmd() == 'r_info':\n rinfo_result = self.ExecutionTask.get_param().split('!')\n\n if len(rinfo_result) > 1:\n msg = rinfo_result[1]\n logging.debug(\"%s\", msg)\n\n self.setStatus('STOP')\n self.setTestResult(rinfo_result[0])\n return\n\n if self.ExecutionTask.get_cmd() == 'ResultCheck':\n time.sleep(5)\n self.process_ResultCheck()\n return\n\n if self.ExecutionTask.get_cmd() == 'CheckThroughput':\n time.sleep(5)\n throughputChk = StreamHandler(self.test_mngr_initr)\n chk_result = throughputChk.processStreamResults(self.ExecutionTask.get_param())\n self.setCheckResult(chk_result)\n #if 'FAIL' in chk_result:\n # self.setStatus('STOP')\n return\n\n if self.ExecutionTask.get_cmd() == 'config_multi_subresults':\n self.process_config_multi_subresults()\n return\n\n ##############################################################\n # Process for any commands with received messages......\n ##############################################################\n status = \"\"\n retDict = self.ExecutionTask.get_ret()\n recvStr = \"\"\n if self.ExecutionTask.recv:\n recvStr = self.ExecutionTask.recv.rstrip('\\r\\n')\n #print \"recv : \" + recvStr\n \n if GlobalConfigFiles.curr_prog_name == \"WMMPS\" and \"sniffer_control_subtask\" in self.ExecutionTask.get_cmd():\n logging.debug('In WMMPS, before parsing the recvStr: %s' % recvStr)\n lines = re.split('\\n', recvStr)\n for line in lines:\n if re.search(\"RESULT\", line, re.I):\n if \"FAIL\" in line:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n logging.debug('set test result to FAIL')\n return\n if \"PASS\" in line:\n self.setTestResult('PASS')\n logging.debug('set test result to Pass')\n return\n return\n \n stitems = recvStr.split(',') \n if len(stitems) < 2:\n #logging.debug(\"Bypassing this cmd..\")\n return\n\n status = stitems[1]\n iDNB = TestScriptSymbolTable.get_value_from_sym_tab(\"iDNB\", TestScriptSymbolTable.test_script_sym_tab)\n iINV = TestScriptSymbolTable.get_value_from_sym_tab(\"iINV\", TestScriptSymbolTable.test_script_sym_tab) \n \n if iINV is None:\n iINV = 0\n \n if 'ERROR' in recvStr or 'INVALID' in recvStr and (iDNB == 0 or iDNB is None) and (iINV == 0 or iINV is None):\n #error case...\n logging.debug(\"Return ERROR or INVALID---> STOP process \")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n elif status != 'COMPLETE' and iDNB == 0 and iINV == 0:\n #incomplete case...(running?)\n logging.debug(\"Command %s not completed\", self.ExecutionTask.get_cmd())\n else:\n displayname = \"\"\n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n if tbd.ctrlipaddr == self.ExecutionTask.get_ipport():\n displayname = tbd.displayname\n break\n \n if \"FAIL\" in recvStr and 
(iINV == 0 or iINV is None):\n if \"SNIFFER\" in displayname or \"sniffer\" in self.ExecutionTask.get_cmd():\n logging.info(\"Test Case Criteria Failure - Command returned FAIL\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n\n elif self.ExecutionTask.get_cmd() == 'device_get_info':\n try:\n if displayname == '':\n self.tmsPacket.setDutDeviceInfo(recvStr)\n else:\n self.tmsPacket.setTestbedInfo(displayname, recvStr)\n\n #for validation\n self.setValidationInfo(displayname, recvStr)\n\n except OSError:\n logging.debug(\"exception -- device_get_info capi call\")\n elif self.ExecutionTask.get_cmd() == 'ca_get_version':\n self.setValidationInfo(displayname, recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sniffer_get_info':\n self.setValidationInfo('sniffer', recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sta_associate':\n time.sleep(10)\n\n if len(stitems) > 2:\n retParam = self.ExecutionTask.get_param().split(',')\n streamFlag = \"\"\n if len(retParam) > 4:\n streamFlag = retParam[3]\n\n if stitems[2] == 'streamID':\n streamHndler = StreamHandler(self.test_mngr_initr)\n logging.debug(\"stream config - streamID : %s\", stitems[3])\n if streamFlag == 'send':\n logging.debug(\"traffic config - send : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'send',\n retParam[15], retParam[17], streamHndler.running_phase, streamHndler.RTPCount)\n streamHndler.add_streamInfo(streamPacket)\n streamHndler.RTPCount = streamHndler.RTPCount + 1\n\n elif streamFlag == 'receive':\n logging.debug(\"traffic config - receive : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'receive',\n -1, -1, streamHndler.running_phase, -1)\n streamHndler.add_streamInfo(streamPacket)\n\n else:\n logging.debug(\"traffic config - else : \")\n\n\n\n if retParam[1] == 'Multicast':\n logging.debug(\"----MULTICAST----\")\n streamHndler.multicast = 1\n\n if self.ExecutionTask.get_cmd() != \"traffic_agent_send\":\n ret_val = \"%s\" %(stitems[3].strip())\n logging.debug(\"traffic config - ret_val : %s\", ret_val)\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfacetype':\n ret_val = (\"%s\" %(stitems[5]))\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfaceid':\n ret_val = stitems[3].split('_')[0]\n setRetVal(getRetKey(retDict), ret_val)\n\n elif self.ExecutionTask.get_cmd() == 'traffic_stop_ping':\n\n keyVal = retParam[1]\n #\"%s;%s\"%(retParam[1], self.ExecutionTask.get_ipport())\n setRetVal(keyVal, stitems[5])\n #print(\"%s = %s\" % (retParam[1], stitems[5]))\n pinginternalchk = TestScriptSymbolTable.get_value_from_sym_tab(\"PingInternalChk\", TestScriptSymbolTable.test_script_sym_tab)\n temp_key = getRetKey(self.ExecutionTask.get_ret())\n \n if \"$\" in temp_key:\n sent_reply = temp_key.split(',')\n #print \"SLIM==> ping result save...\"\n #print sent_reply[0]\n #print sent_reply[1]\n setRetVal(sent_reply[0], stitems[3])\n setRetVal(sent_reply[1], stitems[5]) \n\n setRetVal(\"$pingResp\", stitems[5])\n if pinginternalchk == '0':\n logging.debug(\"Ping Internal Check\")\n \n elif stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if len(retDict) > 
0:\n tempKey = getRetKey(retDict)\n temp_val = tempKey.split(',')\n count = 0\n item_len = len(stitems)\n for i in temp_val:\n if item_len > count + 3:\n setRetVal(i, stitems[3+count])\n count = count + 2\n\n if self.__status == 'STOP':\n logging.debug(\"generate final result if task stops.\")\n #self.generateFinalResult()\n else:\n pass\n #logging.debug(\"Continue---\")\n return",
"def status_check():\n return {\"status\": \"OK\"}",
"def is_success(self):\n succ = self.env._check_success()\n if isinstance(succ, dict):\n assert \"task\" in succ\n return succ\n return { \"task\" : succ }",
"def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)",
"def task_parse_results():\n pass",
"def checkRely(self, task):\n if not isinstance(task, dict):\n return False\n keys = task.get(\"rely\")\n #is empty or crontab, explain upstream is true\n if not keys or task.get(\"task_type\") == \"crontab\":\n return True\n\n keyl = []\n for k, v in keys.items():\n keyl.append(k)\n\n date = task.get(\"task_day\")\n if not date:\n date = self.date\n\n mkeys = [{\"task_key\": k} for k in keyl]\n tlist = {}\n for doc in self.mgdb.task_history.find({\"$or\": mkeys, \"task_day\": date}):\n tlist[doc.get(\"task_key\")] = doc\n\n if not tlist or len(tlist) != len(mkeys):\n #when debug, always return true.\n if self.config.get(\"is_debug\"):\n return True\n else:\n return False\n for c, d in tlist.iteritems():\n if d.get(\"status\") != \"finished\":\n return False\n\n return True",
"def _get_json_result(self, _app):\n\n status = {\"status\": \"ok\", \"message\": \"\"}\n\n result = {}\n\n if _app.status != \"ok\":\n if _app.status.startswith(\"na:\"):\n status_elements = _app.status.split(':')\n if status_elements[1].strip() != \"update\":\n status[\"message\"] = status_elements[1].strip()\n\n return status, {}\n else:\n status[\"status\"] = \"failed\"\n status[\"message\"] = _app.status\n return status, {}\n\n if _app.state == \"create\":\n for sk, s in _app.servers.items():\n if s.host_assignment_inx == -1:\n result[s.host_assignment_variable] = '::' + s.host\n else:\n p = '::' + s.host\n\n if s.host_assignment_variable not in result.keys():\n result[s.host_assignment_variable] = []\n result[s.host_assignment_variable].insert(s.host_assignment_inx, p)\n elif _app.state == \"update\":\n for sk, s in _app.servers.items():\n if s.host_assignment_inx == -1:\n result[s.host_assignment_variable] = \"\"\n else:\n p = \"\"\n\n if s.host_assignment_variable not in result.keys():\n result[s.host_assignment_variable] = []\n result[s.host_assignment_variable].insert(s.host_assignment_inx, p)\n\n return status, result",
"def status_check(task_id):\n logger.info(f\"Checking task status for {task_id}\")\n task = Task.objects.get(kf_id=task_id)\n task.status_check()",
"def test_get_task_success(self):\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = util.MOCK_TASK_1\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def get(self, task_id, session=None):\n try:\n task = session.query(db.StatusTask).filter(db.StatusTask.id == task_id).one()\n except NoResultFound:\n raise NotFoundError('task status with id %d not found' % task_id)\n\n args = tasks_parser.parse_args()\n include_execution = args.get('include_execution')\n\n st_task = task.to_dict()\n if include_execution:\n execution = task.executions.order_by(db.TaskExecution.start.desc()).first()\n st_task['last_execution'] = execution.to_dict() if execution else {}\n return jsonify(st_task)",
"def task_state(task_id):\n tag_task = parse_html_tags.AsyncResult(task_id)\n return jsonify({'task_id': task_id, 'task_state': tag_task.state,\n 'result_url': url_for('task_result', task_id=tag_task.id)}), 202, {}",
"def get_task_status(task_id):\r\n mock_request = Mock()\r\n mock_request.REQUEST = {'task_id': task_id}\r\n response = instructor_task_status(mock_request)\r\n status = json.loads(response.content)\r\n return status"
]
| [
"0.641575",
"0.63129026",
"0.5868379",
"0.5789204",
"0.5733075",
"0.5673604",
"0.56617296",
"0.5656108",
"0.5621732",
"0.5575373",
"0.55724365",
"0.5537721",
"0.54453605",
"0.5443226",
"0.5416293",
"0.5410396",
"0.5378141",
"0.534365",
"0.533824",
"0.53032565",
"0.52877766",
"0.528643",
"0.5285838",
"0.5276592",
"0.5273426",
"0.52569354",
"0.52556545",
"0.5211266",
"0.5204897",
"0.5203908"
]
| 0.6802357 | 0 |
add a task result to a project provide hash and project name | def add_to_project(resulthash,project):
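    # look up the stored task by its result hash and attach it to the named project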
t = models.HBTask.objects.get(resulthash=resulthash)
    p = models.Project.objects.get(name=project)
p.tasks.add(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_task(self, task):\n res = self.conn.cursor().execute(\"\"\"SELECT count(*) as \"order\" FROM tasks WHERE project_id=?\"\"\",\n (task['project_id'],))\n res = res.fetchone()\n order = int(res['order']) + 1\n cursor = self.conn.cursor().execute(\"INSERT INTO tasks VALUES (null, ?, ?, ?, ?, ?,?)\",\n (task['project_id'], order, task['description'], 0, datetime.now(), datetime.now(),))\n self.conn.commit()\n self.conn.cursor().execute(\"UPDATE projects SET last_update=? WHERE id=?\",\n (datetime.now(), task['project_id'],))\n self.conn.commit()\n return self.get_task(cursor.lastrowid)",
"def result(self, task_id, **options):\n pass",
"def v2_runner_on_ok(self, result, **kwargs):\n host = result._host\n task = result._task\n output = result._result\n if result._result.get('changed', False):\n status = 'changed'\n else:\n status = 'ok'\n self.results.append({\"host\": host.name, \"action\":task.action, \"status\":status, \"output\": output})",
"def _AddSingleResult(self, result: BaseResult, stats: BaseBuildStats) -> None:\n if result.actual_result == 'Pass':\n stats.AddPassedBuild(result.tags)\n else:\n stats.AddFailedBuild(result.build_id, result.tags)",
"def add(self, task):\n pass",
"def add_results():\n task_id = request.args.get('tid')\n task = add.get_task(task_id)\n if not task:\n return redirect('/')\n result = task.return_value\n if not result:\n return redirect('/progress?tid=' + task_id)\n task.delete()\n # Redis can also be used to cache results\n return render_template('results.html', value=result)",
"async def get_task_result(task_id: TaskId):",
"def on_task_result(self, task_id, raw_result):\n raise NotImplementedError",
"def putresult(task, config, log, dataset, user, label, cbase, cstore):\n logf = log.format(task)\n if not os.path.exists(logf):\n click.echo(click.style(\"the log file at {} doesn't exist, provide a valid location\".format(logf), fg='red'))\n return\n if not os.path.exists(config):\n click.echo(click.style(\"the config file at {} doesn't exist, provide a valid location\".format(config), fg='red'))\n return\n if not os.path.exists(dataset):\n click.echo(click.style(\"the dataset file at {} doesn't exist, provide a valid location\".format(dataset), fg='red'))\n return\n config_obj = read_config_file(config)\n datasets_set = index_by_label(read_config_file(dataset))\n dataset_key = config_obj['dataset']\n dataset_key = get_dataset_from_key(dataset_key, datasets_set)\n config_obj['dataset'] = dataset_key['label']\n ServerManager.get()\n result = ServerManager.api.put_result(task, to_swagger_experiment(task, config_obj, log, username=user, label=label))\n if result.response_type == 'success':\n eid = result.message\n click.echo(click.style('results stored with experiment: {}'.format(result.message), fg='green'))\n if cbase is None:\n return\n result = store_model(checkpoint_base=cbase, config_sha1=hash_config(read_config_file(config)),\n checkpoint_store=cstore, print_fn=click.echo)\n if result is not None:\n click.echo(click.style('model stored at {}'.format(result), fg='green'))\n update_result = ServerManager.api.update_property(task, eid, prop='checkpoint', value=result)\n if update_result.response_type == 'success':\n click.echo(click.style(update_result.message, fg='green'))\n else:\n click.echo(click.style(update_result.message, fg='red'))\n else:\n click.echo(click.style('failed to store model'.format(result), fg='red'))\n else:\n click.echo(click.style(result.message, fg='red'))",
"def add_to_db(task):\n print(\"\\n\\tAdded HIT[{}] to MOCK ALGO database!\".format(task['hitId']))",
"async def parse_task_result(self, **kwargs):\n empty_result = {\n \"status\": None,\n \"c_time\": None,\n \"f_time\": None,\n \"worker\": None,\n \"history\": [],\n \"code\": None,\n \"stdout\": None,\n \"stderr\": None,\n }\n hosts = self.meta[\"hosts\"]\n\n if \"cost_gt\" in kwargs or \"cost_lt\" in kwargs:\n if \"finish\" in kwargs and not kwargs[\"finish\"]:\n raise TaskException(\"query by cost time need finish = True\")\n kwargs[\"finish\"] = True\n\n # create dict to save result\n result = {\n \"count\": 0,\n \"ips\": [],\n \"details\": {},\n }\n\n for ip in hosts:\n\n # result data\n ip_result = self.meta[\"result\"].get(ip, empty_result)\n\n # empty jump\n if not ip_result:\n continue\n\n # finish\n if \"finish\" in kwargs:\n if kwargs[\"finish\"] and not ip_result[\"f_time\"]:\n continue\n if not kwargs[\"finish\"] and ip_result[\"f_time\"]:\n continue\n\n # success\n if \"success\" in kwargs:\n if kwargs[\"success\"] and ip_result[\"status\"] != TASK_STATUS_SUCCESS:\n continue\n if not kwargs[\"success\"] and ip_result[\"status\"] == TASK_STATUS_SUCCESS:\n continue\n\n # failure\n if \"failure\" in kwargs:\n if kwargs[\"failure\"] and ip_result[\"status\"] != TASK_STATUS_FAILURE:\n continue\n if not kwargs[\"failure\"] and ip_result[\"status\"] == TASK_STATUS_FAILURE:\n continue\n\n # time cost\n if \"cost_gt\" in kwargs:\n if ip_result[\"f_time\"] - ip_result[\"c_time\"] < kwargs[\"cost_gt\"]:\n continue\n if \"cost_lt\" in kwargs:\n if ip_result[\"f_time\"] - ip_result[\"c_time\"] > kwargs[\"cost_lt\"]:\n continue\n\n # code\n if \"code\" in kwargs:\n if ip_result[\"code\"] != kwargs[\"code\"]:\n continue\n\n # retry\n # if \"retry\" in kwargs:\n # if \"retry\"\n\n result[\"count\"] += 1\n result[\"ips\"].append(ip)\n result[\"details\"][ip] = ip_result\n\n return result",
"def result(self, result: osbuild.pipeline.BuildResult):",
"def record_task_success(task_name: str):\n\n from common.models import InvenTreeSetting\n\n InvenTreeSetting.set_setting(f'_{task_name}_SUCCESS', datetime.now().isoformat(), None)",
"def _store_result(self, task_id, result, status, traceback=None):\n session = Session()\n try:\n tasks = session.query(Task).filter(Task.task_id == task_id).all()\n if not tasks:\n task = Task(task_id)\n session.add(task)\n else:\n task = tasks[0]\n task.result = result\n task.status = status\n task.traceback = traceback\n if task.status == states.STARTED:\n task.date_began = datetime.now()\n session.commit()\n finally:\n session.close()\n return result",
"def convey_task_result(self, cntx, **kwargs):\n task_id = kwargs.get('task_id')\n state = kwargs.get('state')\n result = kwargs.get('result')\n\n db_api.start_tx()\n\n try:\n # TODO(rakhmerov): validate state transition\n task = db_api.task_get(task_id)\n workbook = self._get_workbook(task['workbook_name'])\n\n wf_trace_msg = \"Task '%s' [%s -> %s\" % \\\n (task['name'], task['state'], state)\n\n wf_trace_msg += ']' if state == states.ERROR \\\n else \", result = %s]\" % result\n WORKFLOW_TRACE.info(wf_trace_msg)\n\n task_output = data_flow.get_task_output(task, result)\n\n # Update task state.\n task, outbound_context = self._update_task(workbook, task, state,\n task_output)\n\n execution = db_api.execution_get(task['execution_id'])\n\n self._create_next_tasks(task, workbook)\n\n # Determine what tasks need to be started.\n tasks = db_api.tasks_get(workbook_name=task['workbook_name'],\n execution_id=task['execution_id'])\n\n new_exec_state = self._determine_execution_state(execution, tasks)\n\n if execution['state'] != new_exec_state:\n wf_trace_msg = \\\n \"Execution '%s' [%s -> %s]\" % \\\n (execution['id'], execution['state'], new_exec_state)\n WORKFLOW_TRACE.info(wf_trace_msg)\n\n execution = \\\n db_api.execution_update(execution['id'], {\n \"state\": new_exec_state\n })\n\n LOG.info(\"Changed execution state: %s\" % execution)\n\n tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)\n\n self._add_variables_to_data_flow_context(outbound_context,\n execution)\n\n data_flow.prepare_tasks(tasks_to_start, outbound_context)\n\n db_api.commit_tx()\n except Exception as e:\n msg = \"Failed to create necessary DB objects: %s\" % e\n LOG.exception(msg)\n raise exc.EngineException(msg)\n finally:\n db_api.end_tx()\n\n if states.is_stopped_or_finished(execution[\"state\"]):\n return task\n\n for task in delayed_tasks:\n self._schedule_run(workbook, task, outbound_context)\n\n if tasks_to_start:\n self._run_tasks(tasks_to_start)\n\n return task",
"def task_parse_results():\n pass",
"def add(self, task):\n self._count += 1\n path = os.path.join(self._root, \"%d_%s\" % (self._count, task.guid))\n j.sal.fs.writeFile(path, self._serialize_task(task))",
"def add(self, name, project):\n self.projects[name] = project",
"def push_result(self, result):\n try:\n new_item = json.dumps(result)\n self.db.zadd(\"soq_results\", new_item, time())\n except Exception as e:\n print(\"An error occurred while saving the result:\", e)",
"def add(self, task):\n raise NotImplementedError()",
"def add_success(self, task: Task) -> None: # noqa: DAR101\n super().add_success(task)\n self._add_summary(task, _TaskExitCode.PASS)",
"def add_to_results(self, result_id: str, result):\n\n self.results[result_id] = result",
"def add_artifacts_from_result(args, result):\n for art in result.get_artifacts():\n add_artifact(args, art)",
"def result_comment(self, project, group, arch, results, comment):\n comment.append('## ' + arch + '\\n')\n if not results['cycle'].success:\n comment.append('### new [cycle(s)](/project/repository_state/{}/standard)\\n'.format(group))\n comment.append(results['cycle'].comment + '\\n')\n if not results['install'].success:\n comment.append('### [install check](/package/view_file/{}:Staging/dashboard/installcheck?expand=1)\\n'.format(project))\n comment.append(results['install'].comment + '\\n')",
"def Add(self, user, args):\n\n # Example tasks:\n # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'12:12:12', 'force':'0', 'time-limit':'0'}\n # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'Wednesday 12:12:12', 'force':'0', 'time-limit':'0'}\n # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'2012-12-12 12:12:12', 'force':'1', 'time-limit':'0'}\n\n # If argument is a string\n if type(args) == type(str()):\n task = urlparse.parse_qs(args)\n # If argument is a valid dict\n elif type(args) == type(dict()):\n task = args\n else:\n msg = 'Add task: Invalid type of argument for add task: `{0}` !'.format(type(args))\n log.error(msg)\n return '*ERROR* ' + msg\n\n # if not self.conn:\n # print('Cannot add task! Central Engine connection not available !')\n # return False\n # elif self.conn.get_user_variable(user, 'status') == False:\n # print('Cannot add task! Invalid username `{0}` !'.format(user))\n # return False\n\n descrip = task.get('description')\n proj_file = task.get('project-file')\n proj_dt = task.get('date-time')\n proj_force = task.get('force')\n time_limit = task.get('time-limit')\n\n if not os.path.isfile(proj_file):\n msg = 'Add task: Invalid file path `{0}` !'.format(proj_file)\n log.error(msg)\n return '*ERROR* ' + msg\n\n dt, proj_type = _fix_date(proj_dt)\n if not dt: return False\n\n # Duplicate dates?\n if proj_dt in [v['date-time'] for v in self.tasks.values()]:\n msg = 'Add task: Duplicate date-time: `{0}` !'.format(proj_dt)\n log.error(msg)\n return '*ERROR* ' + msg\n\n # If force is not valid, reset it. By default, force is enabled.\n if proj_force != '0':\n proj_force = '1'\n\n try:\n time_limit = int(time_limit)\n except:\n log.error('Add task: Invalid Time-limit number: `{0}` ! Will default to ZERO.'.format(time_limit))\n time_limit = 0\n if time_limit < 0:\n time_limit = 0\n\n # This can only be executed by 1 thread at a time,\n # so there will never be 2 threads that create tasks at the same time\n with self.acc_lock:\n\n created_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n\n task_fixed = {\n 'user' : user,\n 'description' : descrip,\n 'project-file': proj_file,\n 'date-time' : proj_dt,\n 'force' : proj_force,\n 'time-limit' : time_limit,\n 'proj-type' : proj_type\n }\n\n self.tasks[created_time] = task_fixed\n\n log.debug('Created {proj-type} task for user {user} :: File `{project-file}`, activation date '\n '`{date-time}`, force `{force}`, time limit `{time-limit}`.\\n'.format(**task_fixed))\n\n self._save()\n\n return created_time",
"def set_success(\n self, \n task_id: str, \n finished_at: datetime = get_current_time(), \n result: ExecResult = ExecResult()\n ) -> Optional[bool]:\n with self.backend.lock:\n # Get task handle and remove it from the internal index. The result\n # is None if the task does not exist.\n task = pop_task(tasks=self.tasks, task_id=task_id)\n if task is None:\n return None\n # Get the handle for the head workflow of the specified branch and\n # the index for the module matching the identifier in the task.\n workflow, module_index = self.get_task_module(task)\n if workflow is None or module_index == -1:\n return None\n # Notify the backend that the task is finished\n self.backend.task_finished(task_id)\n module = workflow.modules[module_index]\n if not module.is_running:\n # The result is false if the state of the module did not change\n return False\n # print(\"UPDATED ARGUMENTS: {}\".format(result.updated_arguments))\n module.set_success(\n finished_at=finished_at,\n outputs=result.outputs,\n provenance=result.provenance,\n updated_arguments=result.updated_arguments\n )\n context = compute_context(workflow.modules[0:module_index])\n context = result.provenance.get_database_state(context)\n import sys\n sys.stderr.write(\"Module {} finished at {} / Context: {} / Reads: [{}] / Writes: [{}]\".format(\n module.external_form, \n finished_at,\n context,\n \",\".join(result.provenance.read) if result.provenance.read is not None else \"\",\n \",\".join(result.provenance.write) if result.provenance.write is not None else \"\",\n ))\n\n\n for next_module in workflow.modules[module_index+1:]:\n if not next_module.is_pending:\n # This case can only happen if we allow parallel execution\n # of modules in the future. At this point it should not\n # occur.\n raise RuntimeError('invalid workflow state')\n elif not next_module.provenance.requires_exec(context):\n # print(\"Module {} does not need re-execution, skipping\".format(next_module))\n context = next_module.provenance.get_database_state(context)\n next_module.set_success(\n finished_at=finished_at,\n outputs=next_module.outputs,\n provenance=next_module.provenance,\n )\n else:\n # print(\"Scheduling {} for execution\".format(next_module))\n command = next_module.command\n package_id = command.package_id\n command_id = command.command_id\n external_form = command.to_external_form(\n command=self.packages[package_id].get(command_id),\n datasets=dict( \n (name, cast(DatasetDescriptor, context[name]))\n for name in context \n if context[name].is_dataset \n )\n )\n # If the backend is going to run the task immediately we\n # need to adjust the module state\n state = self.backend.next_task_state()\n if state == mstate.MODULE_RUNNING:\n next_module.set_running(\n external_form=external_form,\n started_at=get_current_time()\n )\n else:\n next_module.update_property(\n external_form=external_form\n )\n self.execute_module(\n project_id=task.project_id,\n branch_id=workflow.branch_id,\n module=next_module,\n artifacts=context\n )\n break\n return True",
"def task1():\n logger.info(\"In API3 task1 function\")\n return \"task1 success!\"",
"def task_completed(self, worker_result):\n self.status = 'completed'\n self.modification_time = current_millis()\n self.result = {'content': worker_result.result,\n 'version': worker_result.version}\n return self",
"def generate_mock_result(project='TEST', repository=None, status='SUCCESS', success=True, run_id=1,\n timestamp=None):\n if not timestamp: # If no time provided, use right now.\n timestamp = str(int(time.time() * 1000))\n if not repository:\n repository = '{}-repo'.format(project.lower())\n result = dict(project=project, repository=repository, status=status, success=success, run_id=run_id,\n timestamp=timestamp, id='{}{}'.format(repository, run_id))\n return result",
"def _add_summary(self, task: Task, exit_code: _TaskExitCode) -> None: # noqa: DAR101\n task_summary = _TaskSummary(name=task.name, exit_code=exit_code)\n self._task_summaries[task_summary.name] = task_summary"
]
| [
"0.6374565",
"0.61560464",
"0.6085082",
"0.6062353",
"0.60185844",
"0.6016819",
"0.6004296",
"0.60006815",
"0.59521013",
"0.59515864",
"0.58847183",
"0.5825966",
"0.5824629",
"0.58160424",
"0.57332444",
"0.57234216",
"0.56767815",
"0.5675966",
"0.5663726",
"0.5663456",
"0.5654346",
"0.56479394",
"0.5646303",
"0.5623512",
"0.561755",
"0.5613099",
"0.55744874",
"0.5552369",
"0.5516046",
"0.5507928"
]
| 0.70965487 | 0 |
Check if hashes are already available E.g. /available/?h=1&h=2&h=1adf22a49521bb4da13686d2560953a6 | def available(request):
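    # map each hash passed as ?h=... to whether the corresponding object is available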
hashes = request.GET.getlist('h',None)
available = {}
for h in hashes:
available.update({h:check_available_object(h)})
return JsonResponse(available) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_route_used(_, hash_arg):\n for hash_object in Hash.objects.all():\n if hash_object.hash == hash_arg:\n return HttpResponse(\n json.dumps({\"Used\": True}), mimetype=\"application/json\")\n\n Hash(hash=hash_arg).save()\n return HttpResponse(\n json.dumps({\"Used\": False}), mimetype=\"application/json\")",
"def has_hash(self, h):\n rsp = h.hashlist(self.path)\n if re.search(\"\\n[0-9a-f]+\\smd5\\s%s\" % self.path, rsp):\n rval = True\n else:\n rval = False\n return rval",
"def check_hash(self):\n m = rtorrent9.rpc.Multicall(self)\n self.multicall_add(m, \"d.check_hash\")\n\n return m.call()[-1]",
"def CheckHashes(self, hashes, unused_external=True):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha1\"):\n digest = hsh.sha1\n hash_urn = self.PATH.Add(str(digest))\n logging.info(\"Checking URN %s\", str(hash_urn))\n hash_map[hash_urn] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def _is_hash_valid(self):\n downloaded_hash = sha1(self._downloaded_bytes).digest()\n return downloaded_hash == self.hash",
"def is_hash_locally_cached(self, ipfs_hash: str, ipfs_refs_local=None) -> bool:\n output = run([\"ipfs\", \"files\", \"stat\", \"--with-local\", \"--size\", f\"/ipfs/{ipfs_hash}\"])\n if \"(100.00%)\" in output:\n log(\"already fully cached\", \"green\")\n log(output)\n return True\n else:\n log(\"not fully cached\", \"red\")\n log(output)\n return False",
"def _check_grib(self, url):\n head = requests.head(url)\n check_exists = head.ok\n if check_exists:\n check_content = int(head.raw.info()['Content-Length']) > 1_000_000\n return check_exists and check_content\n else:\n return False",
"def is_page_available(host, path=\"/\"):\n try:\n conn = httplib.HTTPConnection(host)\n conn.request(\"HEAD\", path)\n if re.match(\"^[23]\\d\\d$\", str(conn.getresponse().status)):\n return True\n except StandardError:\n return None",
"def checkHash(song):\n\tsql = \"Select path, filename, hash from songs where hash = '\" + song.hash + \"';\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tnotexists = True\n\tfor (path, filename, hash) in c:\n\t\tif hash == song.hash:\n\t\t\tnotexists = False\n\t\telse:\n\t\t\tnotexists = True\n\treturn notexists",
"def vt_hash_check(fhash, vt_api):\n if not is_hash(fhash):\n return None\n\n url = 'https://www.virustotal.com/vtapi/v2/file/report'\n parameters = {'resource': fhash, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None",
"def test_basic_failover_bad_hashlib_hash_get(self) -> None:\n assert _attempt_get_hash_function(\"nonexist\", self.no_algorithms) is None",
"def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)",
"def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']",
"def _verify_hashes(hashes):\n\n for item in hashes:\n try:\n hashlib.new(item)\n VALID_HASH.append(item)\n except Exception:\n pass",
"def check_item_in(self, url):\n item_hash = tools.url_hash(url)\n if item_hash not in self.__items:\n self.__item_lock.acquire()\n self.__items.add(item_hash)\n self.__item_lock.release()\n return False\n else:\n return True",
"def is_available(self, product_url):\n\t\tpass",
"def check_url(url):\n return 'products.json' in url",
"def available(self, request):\n username = request.query_params['username']\n resp_data = {}\n if User.objects.filter(username=username).exists():\n resp_data['available'] = False\n else:\n resp_data['available'] = True\n return Response(resp_data, status=status.HTTP_200_OK)",
"def is_hash_checking_queued(self):\n m = rtorrent9.rpc.Multicall(self)\n self.multicall_add(m, \"d.hashing\")\n self.multicall_add(m, \"d.is_hash_checking\")\n results = m.call()\n\n setattr(self, \"hashing\", results[0])\n setattr(self, \"hash_checking\", results[1])\n\n return self._is_hash_checking_queued()",
"def CheckHashes(self, hashes):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha256\"):\n # The canonical name of the file is where we store the file hash.\n digest = hsh.sha256\n hash_map[aff4.ROOT_URN.Add(\"files/hash/generic/sha256\").Add(\n str(digest))] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def torrent_availability(seeds, leeches):\n\n return seeds * 2 + leeches",
"def is_available():",
"def check_availability(self):\n pass",
"def _fetch_sha1(stale_check):\n retrycount = 5\n while retrycount != 0:\n try:\n contents = urlopen(\"http://\" + stale_check).read().decode(\"utf-8\")\n return json.loads(contents)[\"sha\"]\n except URLError:\n retrycount -= 1\n\n return None",
"def check_availability(url_str, datetime_fetched=None):\n wayback_url = \"http://archive.org/wayback/available\"\n params = {\n 'url': url_str.split('?')[0],\n }\n if datetime_fetched is not None:\n params['timestamp'] = datetime_fetched.strftime(\"%Y%m%d%H%M%S\")\n\n response = requests.get(wayback_url, params=params, timeout=30)\n r_json = response.json()\n\n # let's be nice and convert the returned timestamp to a datetime obj\n # wayback timestamps are in the form YYYYMMDDhhmmss\n if \"archived_snapshots\" in r_json and \\\n \"closest\" in r_json['archived_snapshots'] and \\\n \"timestamp\" in r_json['archived_snapshots']['closest']:\n wb_timestamp = r_json['archived_snapshots']['closest']['timestamp']\n r_json['archived_snapshots']['closest']['datetime'] = datetime.strptime(wb_timestamp, \"%Y%m%d%H%M%S\")\n\n return r_json",
"def req_CHECKURL(self, url):\n # TODO: what about those MULTI and list to be returned?\n # should we return all filenames or keys within archive?\n # might be way too many?\n # only if just archive portion of url is given or the one pointing\n # to specific file?\n lgr.debug(\"Current directory: %s, url: %s\" % (os.getcwd(), url))\n akey, afile, attrs = self._parse_url(url)\n size = attrs.get('size', None)\n\n # But reply that present only if archive is present\n # TODO: this would throw exception if not present, so this statement is kinda bogus\n akey_fpath = self.get_contentlocation(akey) #, relative_to_top=True))\n if akey_fpath:\n akey_path = opj(self.path, akey_fpath)\n\n # if for testing we want to force getting the archive extracted\n # _ = self.cache.assure_extracted(self._get_key_path(akey)) # TEMP\n efile = self.cache[akey_path].get_extracted_filename(afile)\n\n if size is None and exists(efile):\n size = os.stat(efile).st_size\n\n if size is None:\n size = 'UNKNOWN'\n\n # FIXME: providing filename causes annex to not even talk to ask\n # upon drop :-/\n self.send(\"CHECKURL-CONTENTS\", size) #, basename(afile))\n\n # so it was a good successful one -- record\n self._last_url = url\n else:\n # TODO: theoretically we should first check if key is available from\n # any remote to know if file is available\n self.send(\"CHECKURL-FAILURE\")",
"def check_urls(quartus_versions):\n success = True\n for quartus in quartus_versions.keys():\n parts = quartus_versions[quartus]\n parts_str = [str(k) for k in parts.keys()]\n #print(\"Checking Quartus %s, available parts (%s)\\n\" % (quartus, \",\".join(parts_str)))\n for part in parts:\n result = test_url(quartus, part, parts[part])\n if not result:\n print(\"\\nMissing %s/%s url=%s\" % (quartus, part, parts[part]))\n success = False\n return success",
"def verify_hash(self, where=\"\", parameters={}):\n failed_products = []\n products = self.search(where=where, parameters=parameters)\n for product in products:\n if product.core.active and 'archive_path' in product.core:\n if 'hash' not in product.core:\n raise Error(\"no hash available for product '%s' (%s)\" %\n (product.core.product_name, product.core.uuid))\n if self._calculate_hash(product) != product.core.hash:\n failed_products.append(product.core.uuid)\n return failed_products",
"def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False"
]
| [
"0.6649847",
"0.6125281",
"0.5972277",
"0.5907636",
"0.5861764",
"0.5686857",
"0.56727827",
"0.5666598",
"0.5653059",
"0.54939723",
"0.5464405",
"0.54545414",
"0.5419799",
"0.54197407",
"0.5365426",
"0.53599435",
"0.53240955",
"0.53148705",
"0.5297111",
"0.5288192",
"0.52737",
"0.5265323",
"0.5257184",
"0.52460545",
"0.524146",
"0.5237984",
"0.52339363",
"0.5220583",
"0.51981366",
"0.5180976"
]
| 0.7324028 | 0 |
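A side note on the hash-availability record above: the available view relies on Django's request object and an external check_available_object helper. Below is a minimal, framework-free sketch of the same per-hash lookup; the KNOWN_HASHES set is a hypothetical stand-in for whatever store the real helper queries and is not part of the original code.

# Hypothetical stand-in for the object store behind check_available_object.
KNOWN_HASHES = {"1adf22a49521bb4da13686d2560953a6"}

def check_available_object(h):
    # The real view would consult a database or cache here.
    return h in KNOWN_HASHES

def available(hashes):
    # One boolean per requested hash, mirroring /available/?h=...&h=...
    return {h: check_available_object(h) for h in hashes}

print(available(["1", "2", "1adf22a49521bb4da13686d2560953a6"]))
# -> {'1': False, '2': False, '1adf22a49521bb4da13686d2560953a6': True}

In the original view the resulting dict is simply serialized by Django's JsonResponse.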
Find most probable region in HarvardOxford Atlas of a vox coord. | def locate_peaks(vox_coords):
sub_names = harvard_oxford_sub_names
ctx_names = harvard_oxford_ctx_names
at_dir = op.join(os.environ["FSLDIR"], "data", "atlases")
ctx_data = nib.load(op.join(at_dir, "HarvardOxford",
"HarvardOxford-cort-prob-2mm.nii.gz")).get_data()
sub_data = nib.load(op.join(at_dir, "HarvardOxford",
"HarvardOxford-sub-prob-2mm.nii.gz")).get_data()
loc_list = []
for coord in vox_coords:
coord = tuple(coord)
ctx_index = np.argmax(ctx_data[coord])
ctx_prob = ctx_data[coord][ctx_index]
sub_index = np.argmax(sub_data[coord])
sub_prob = sub_data[coord][sub_index]
if not max(sub_prob, ctx_prob):
loc_list.append(("Unknown", 0))
continue
if not ctx_prob and sub_index in [0, 11]:
loc_list.append((sub_names[sub_index], sub_prob))
continue
if sub_prob > ctx_prob and sub_index not in [0, 1, 11, 12]:
loc_list.append((sub_names[sub_index], sub_prob))
continue
loc_list.append((ctx_names[ctx_index], ctx_prob))
return pd.DataFrame(loc_list, columns=["MaxProb Region", "Prob"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_region(x, y, width, height):\n xs = [0, width / 3, 2 * width / 3, width]\n ys = [0, height / 3, 2 * height / 3, height]\n for i in range(3):\n for j in range(3):\n if (x >= xs[j] and x < xs[j + 1]) and (y >= ys[i] and y < ys[i + 1]):\n return i * 3 + j",
"def minimum_spanning_arborescence(sol):",
"def search_diversification(self):\n self.stage = 2\n # Find index of least explored region,\n # express in binary\n b = bin(np.argmin(self.LTM))[2:]\n # Pad binary number\n if len(b) < self.x0.shape[0]:\n pad = self.x0.shape[0] - len(b)\n b = ''.join(['0' for i in range(pad)]) + b\n # Store digits in column vector\n d = len(b)\n b = np.array(list(b), dtype='int').reshape((d,1))\n # Generate random positive vector\n base = self.r_uni(low=0, high=2, size=(d,1))\n # Transform vector into appropriate region\n return base * (b * 2 - 1)",
"def create_local_voxmap(sampler, point, xd=10, yd=10, zd=10, voxel_size=1):\n \n # minimum and maximum north coordinates\n north_min = point[0] - xd\n north_max = point[0] + xd\n \n # minimum and maximum east coordinates\n east_min = point[1] - yd\n east_max = point[1] + yd\n \n # miniumum and maximum altitude\n alt_min = point[2] - zd\n alt_max = point[2] + zd\n \n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min))) // voxel_size\n east_size = int(np.ceil((east_max - east_min))) // voxel_size\n alt_size = int(np.ceil((alt_max - alt_min))) // voxel_size\n\n # Create an empty grid\n voxmap = np.zeros((north_size, east_size, alt_size), dtype=np.bool)\n \n #maximum distance between point and outer voxels\n d_voxmap = np.sqrt((xd**2+yd**2) + (zd/2)**2)\n \n #maximum distance between obstacle center and outer borders\n d_obstacle = np.max(np.array([ \n LA.norm(np.array(p.coords[0]) - \n np.array(p.coords[2])) / 2 \n for p in polygons]))\n \n #maximum combined distances between voxmap center and obstacle centers\n d_max = d_voxmap + d_obstacle\n\n #all obstacles in vincinity\n idxs = list(sampler._tree.query_radius(point[:2], r=d_max))[0]\n \n #loop over closeby obstacles\n for i in idxs:\n \n #current obstacle\n p = polygons[i]\n \n #get the obstacle bounds (north_min, north_max, east_min, east_max)\n bounds = [\n np.min([vals[0] for vals in p.coords]),\n np.max([vals[0] for vals in p.coords]),\n np.min([vals[1] for vals in p.coords]),\n np.max([vals[1] for vals in p.coords]),\n 0.,\n p.height\n ]\n \n #discretize obstacle bounds according to voxel size\n obstacle = [\n int(bounds[0] - north_min) // voxel_size,\n int(bounds[1] - north_min) // voxel_size,\n int(bounds[2] - east_min) // voxel_size,\n int(bounds[3] - east_min) // voxel_size,\n int(bounds[4] - alt_min) // voxel_size,\n int(bounds[5] - alt_min) // voxel_size\n ]\n \n #correct for out-of-bound values\n if obstacle[0]<0:\n obstacle[0]=0\n if obstacle[1]>voxmap.shape[0]-1:\n obstacle[1]=voxmap.shape[0]-1\n if obstacle[2]<0:\n obstacle[2]=0\n if obstacle[3]>voxmap.shape[1]-1:\n obstacle[3]=voxmap.shape[1]-1\n if obstacle[4]<0:\n obstacle[4]=0\n if obstacle[5]>voxmap.shape[2]-1:\n obstacle[5]=voxmap.shape[2]-1\n \n #add collision information to the voxmap\n voxmap[obstacle[0]:obstacle[1]+1,\n obstacle[2]:obstacle[3]+1,\n obstacle[4]:obstacle[5]+1] = True\n \n #collect collision information for the ground floor\n floor = int(0-alt_min)//voxel_size\n\n #if voxmap collides with ground floor: add collision information\n if floor>=0:\n voxmap[:,:,:floor]=True\n \n #return the voxmap\n return voxmap",
"def challenge2(self):\n # Let's try an octree-type approach\n # For each grid cube we should be able to find whether a nanobot:\n # 1) is not in range (is outside grid cube and not in range of nearest face)\n # 2) is in range of whole cube (all 8 corners are in range)\n # 3) is in range of part of the cube (i.e. not 1 or 2)\n # Root node: figure out extent of whole space\n mins = []\n maxs = []\n for axis in range(3):\n mins.append(min(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n maxs.append(max(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n\n for count in range(len(self.nanobots), 0, -1):\n results = self.search_coord_with_max_nanobots(mins, maxs, [], self.nanobots, count)\n if results and results[0].count >= count:\n break\n\n print(f\"Found {len(results)} octree search results with {results[0].count} nanobots in range.\")\n\n # Find result coord closest to origin\n closest_dist = np.iinfo(np.int32).max\n best_coord = None\n for result in results:\n for corner in itertools.product(*zip(result.mins, result.maxs)):\n d = manhattan_dist(corner, (0, 0, 0))\n if d < closest_dist:\n closest_dist = d\n best_coord = corner\n\n print(f\"Best coord: {best_coord} (dist={manhattan_dist(best_coord, (0, 0, 0))})\")",
"def gfind(x,y,xr=None,yr=None):\n\n global BTRACK, GSTRUC, NPIX\n \n # Assume bad until proven otherwise \n flag,rms,noise,par,pind = None,None,None,None,None\n results = {'x':x,'y':y,'pind':pind,'rms':rms,'noise':noise,'par':par,'visited':None,'npix':None} # initial bad values\n\n if x is None or y is None:\n results['visited'] = 0\n return 0,results\n \n # Setting the ranges\n if xr is not None:\n x0 = xr[0] \n x1 = xr[1] \n else: \n x0 = 0 \n x1 = 1000\n if yr is not None:\n y0 = yr[0] \n y1 = yr[1] \n else: \n y0 = 0\n y1 = 1000\n \n if (x < x0) or (x > x1) or (y < y0) or (y > y1): \n flag = 0\n results['visited'] = 0\n return flag,results\n \n # No GSTRUC yet, first position\n try:\n dum = len(GSTRUC)\n except:\n return 0,results\n \n # Looking for the position \n t0 = time.time() \n # Check GSTRUC\n pind, = np.where((GSTRUC['x']==x) & (GSTRUC['y']==y))\n # Check if it was visited before but no good spectrum/solution\n if len(pind)==0:\n bind, = np.where((BTRACK['x']==x) & (BTRACK['y']==y))\n # Found it\n if len(bind)>0:\n return 1,{'x':x,'y':y,'pind':None,'rms':np.inf,'noise':None,'par':None,'visited':1,'npix':None}\n \n # Found something, getting the values \n if len(pind) > 0:\n tstr = GSTRUC['data'][pind[0]]\n rms = tstr['rms']\n noise = tstr['noise']\n par = tstr['par']\n npix = tstr['npix']\n flag = 1 \n \n # Nothing found \n else:\n pind,rms,noise,par,npix = None,None,None,None,None\n flag = 0 \n \n results = {'x':x,'y':y,'pind':pind,'rms':rms,'noise':noise,'par':par,'visited':flag,'npix':npix}\n return flag,results",
"def find_max_score_location(grid, shape):",
"def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]",
"def search_by_coordinates():\n print('CRS used is EPSG:3857 \\n for reference check https://epsg.io/3857 ')\n x = float(input('Enter x coordinate\\n'))\n y = float(input('Enter y coordinate\\n'))\n point_in_bound(os.path.abspath(\"..\")+\"\\Shape\\prealpinebavaria.shp\", x, y, 'Alpenvorland')\n point_in_bound(os.path.abspath(\"..\")+\"\\Shape\\oberrheinmaintiefland.shp\", x, y, 'Oberrheinisches Tiefland')\n point_in_bound(os.path.abspath(\"..\")+\"\\Shape\\Tiefland.shp\", x, y, 'Niederrheinisches Tiefland')",
"def find_goal(self):\n\n self.separate_frontier()\n goal = self.centroid(self.regions)\n\t#rospy.loginfo(goal)\n self.centroidValue = goal\n self.paint_cells(self.centroidValue,self.frontier)\n\tif goal is not None and goal!=0:\n \tfinal = self.in_bounds(goal)\n \trospy.loginfo(\"Region Found\")\n \treturn final",
"def best_coords(self):\n lat, lon = None, None\n for term in self.terms:\n # print(term)\n # print(term['weight'])\n geo = term.get(\"geo\")\n if geo:\n osm = geo['osm']\n gm = geo['gm']\n geo_data = None\n if osm:\n geo_data = osm\n elif gm:\n geo_data = gm\n if geo_data:\n g = geo_data[0]\n lat, lon = g['latitude'], g['longitude']\n break\n return lat, lon, self.region",
"def start_region(self, x, y):\n if x>0 and x<100 and y>0 and y<100:\n return 1\n elif x>700 and x<800 and y>0 and y<100:\n return 2\n elif x>0 and x<100 and y>400 and y<500:\n return 3\n elif x>700 and x<800 and y>400 and y<500:\n return 4\n return 0",
"def get_box_grid(x, y):\n for grid in GRIDS:\n if x >= grid[0][0] and y >= grid[0][1] and \\\n x <= grid[1][0] and y <= grid[1][1]:\n return grid\n return None",
"def test_d2_get_neighborhood_small(self):\n config.NR_COLS = 3\n config.NR_ROWS = 3\n gamefield = [\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 1],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 3)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 2)\n self.assertEqual(nh, 4)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 2, 0)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 2, 2)\n self.assertEqual(nh, 3)\n # center\n nh = logic.get_neighborhood(gamefield, 1, 1)\n self.assertEqual(nh, 4)",
"def getCityLimitsBoundingBox(city, expandBy=0.0):\n url = \"https://nominatim.openstreetmap.org/search?city={}&format=json&addressdetails=1&limit=1\".format(city)\n r = requests.get(url=url)\n bbox_coords = r.json()[0]['boundingbox']\n top = [float(bbox_coords[3]), float(bbox_coords[1])]\n right = top\n bot = [float(bbox_coords[2]), float(bbox_coords[0])]\n left = bot\n\n # enlarge bounding box in order to include the entire neighborhood of the respectve pilot \n if expandBy > 0.0:\n diff_y = top[1]-bot[1]\n top[1] = top[1]+(diff_y*expandBy)\n bot[1] = bot[1]-(diff_y*expandBy)\n diff_x = left[0]-right[0]\n right[0] = right[0]+(diff_x*expandBy)\n left[0] = left[0]-(diff_x*expandBy)\n return top, right, bot, left",
"def _find_largest_candidate(self, reduced):\n nbr_counts = np.count_nonzero(reduced == 0, axis=0) # = [1, 1, 4, 2,...] where each value is the number of neighbours for the variant at that index.\n count_max = nbr_counts.max()\n if count_max == 0: # Indicates there are no available variants close enough\n return None, [] # to the remaining unassigned. Usually raises an error.\n max_inds = np.nonzero(nbr_counts == count_max)[0] # Array containing the indices of all variants with the max number of neighbours.\n if len(max_inds) == 1: # A single largest cluster\n best_center = max_inds[0]\n best_clstr = np.nonzero(reduced[:,best_center] == 0)[0]\n else: # A tie for largest cluster. Broken by smallest sum of full scores\n # This was tested with the below more accurate and true scoring function. Unfortunately it became hideously slow (clustered_inds and centre_inds were given as args):\n # clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n # covered_inds = list(clustered_inds | set(clstr_inds))\n # centre_inds.append(max_ind)\n # score = np.sum(np.min(self.orig_dists[np.ix_(covered_inds,centre_inds)], axis=1))\n # centre_inds.pop()\n best_center, best_clstr, best_score = None, [], np.inf\n for max_ind in max_inds:\n clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n score = np.sum(self.orig_dists[clstr_inds,max_ind])\n if score < best_score:\n best_center, best_clstr, best_score = max_ind, clstr_inds, score\n return best_center, best_clstr",
"def get_random_position_near_path(\n game: TowerDefenceSolver,\n cov_xx: int,\n cov_yy: int,\n purchased_towers: Purchases,\n max_number_of_tries: Optional[int] = None,\n) -> Optional[Tuple[int, int]]:\n position = tuple(\n np.round(\n np.random.multivariate_normal(game.path[np.random.choice(len(game.path))], cov=[[cov_xx, 0], [0, cov_yy]])\n ).astype(int)\n )\n\n number_of_tries = 0\n while not validate_pos(game, position, purchased_towers):\n position = tuple(\n np.round(\n np.random.multivariate_normal(\n game.path[np.random.choice(len(game.path))], cov=[[cov_xx, 0], [0, cov_yy]]\n )\n ).astype(int)\n )\n number_of_tries += 1\n if max_number_of_tries and number_of_tries > max_number_of_tries:\n return None\n\n return position",
"def getTopPopulationRegion(self):\n\t\tdata = {}\n\t\tfor iProvince in range(con.iNumRegions):\n\t\t\tdata[iProvince] = 0\n\t\tfor iLoopPlayer in range(con.iBarbarian + 1):\n\t\t\tapCityList = PyPlayer(iLoopPlayer).getCityList()\n\t\t\tfor pCity in apCityList:\n\t\t\t\tdata[pCity.GetCy().plot().getRegionID()] += pCity.getPopulation()\n\t\tkey = -1\n\t\tfor key, value in sorted(data.iteritems(), key=lambda (k,v): (v,k)):\n\t\t\tpass\n\t\treturn key",
"def find_sector(self, x, y):\n # Initialize the sector guess\n m = x.size\n x_pos_guess = (np.ones(m) * self.x_n / 2).astype(int)\n y_pos_guess = (np.ones(m) * self.y_n / 2).astype(int)\n\n # Define a function that checks whether a set of points violates a linear\n # boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),\n # where the latter is *COUNTER CLOCKWISE* from the former. Returns\n # 1 if the point is outside the boundary and 0 otherwise.\n def violation_check(\n x_check, y_check, x_bound_1, y_bound_1, x_bound_2, y_bound_2\n ):\n return (\n (y_bound_2 - y_bound_1) * x_check - (x_bound_2 - x_bound_1) * y_check\n > x_bound_1 * y_bound_2 - y_bound_1 * x_bound_2\n ) + 0\n\n # Identify the correct sector for each point to be evaluated\n these = np.ones(m, dtype=bool)\n max_loops = self.x_n + self.y_n\n loops = 0\n while np.any(these) and loops < max_loops:\n # Get coordinates for the four vertices: (xA,yA),...,(xD,yD)\n x_temp = x[these]\n y_temp = y[these]\n xA = self.x_values[x_pos_guess[these], y_pos_guess[these]]\n xB = self.x_values[x_pos_guess[these] + 1, y_pos_guess[these]]\n xC = self.x_values[x_pos_guess[these], y_pos_guess[these] + 1]\n xD = self.x_values[x_pos_guess[these] + 1, y_pos_guess[these] + 1]\n yA = self.y_values[x_pos_guess[these], y_pos_guess[these]]\n yB = self.y_values[x_pos_guess[these] + 1, y_pos_guess[these]]\n yC = self.y_values[x_pos_guess[these], y_pos_guess[these] + 1]\n yD = self.y_values[x_pos_guess[these] + 1, y_pos_guess[these] + 1]\n\n # Check the \"bounding box\" for the sector: is this guess plausible?\n move_down = (y_temp < np.minimum(yA, yB)) + 0\n move_right = (x_temp > np.maximum(xB, xD)) + 0\n move_up = (y_temp > np.maximum(yC, yD)) + 0\n move_left = (x_temp < np.minimum(xA, xC)) + 0\n\n # Check which boundaries are violated (and thus where to look next)\n c = (move_down + move_right + move_up + move_left) == 0\n move_down[c] = violation_check(\n x_temp[c], y_temp[c], xA[c], yA[c], xB[c], yB[c]\n )\n move_right[c] = violation_check(\n x_temp[c], y_temp[c], xB[c], yB[c], xD[c], yD[c]\n )\n move_up[c] = violation_check(\n x_temp[c], y_temp[c], xD[c], yD[c], xC[c], yC[c]\n )\n move_left[c] = violation_check(\n x_temp[c], y_temp[c], xC[c], yC[c], xA[c], yA[c]\n )\n\n # Update the sector guess based on the violations\n x_pos_next = x_pos_guess[these] - move_left + move_right\n x_pos_next[x_pos_next < 0] = 0\n x_pos_next[x_pos_next > (self.x_n - 2)] = self.x_n - 2\n y_pos_next = y_pos_guess[these] - move_down + move_up\n y_pos_next[y_pos_next < 0] = 0\n y_pos_next[y_pos_next > (self.y_n - 2)] = self.y_n - 2\n\n # Check which sectors have not changed, and mark them as complete\n no_move = np.array(\n np.logical_and(\n x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next\n )\n )\n x_pos_guess[these] = x_pos_next\n y_pos_guess[these] = y_pos_next\n temp = these.nonzero()\n these[temp[0][no_move]] = False\n\n # Move to the next iteration of the search\n loops += 1\n\n # Return the output\n x_pos = x_pos_guess\n y_pos = y_pos_guess\n return x_pos, y_pos",
"def get_root():\n root = VGOCache('https://www.vegguide.org/region/0')\n return check_has_regions(root.results['regions']['primary'])",
"def detect_enemies_by_region(self, obs):\n position = self.get_current_location(obs)\n enemy_y, enemy_x = (\n obs.observation.feature_screen.player_relative == _PLAYER_ENEMY).nonzero()\n enemies = list(zip(enemy_x, enemy_y))\n\n # top-left of map is (0,0) and bottom-right is (map_width-1, map_height-1)\n def detect_enemies(region_direction):\n for enemy in enemies:\n if region_direction == \"NORTH_WEST\":\n if enemy[0] < position[0] and enemy[1] < position[1]:\n return 1\n elif region_direction == \"NORTH_EAST\":\n if enemy[0] >= position[0] and enemy[1] < position[1]:\n return 1\n elif region_direction == \"SOUTH_WEST\":\n if enemy[0] < position[0] and enemy[1] >= position[1]:\n return 1\n elif region_direction == \"SOUTH_EAST\":\n if enemy[0] >= position[0] and enemy[1] >= position[1]:\n return 1\n return 0\n\n presences = {}\n regions = [\"NORTH_WEST\", \"NORTH_EAST\", \"SOUTH_WEST\", \"SOUTH_EAST\"]\n for region in regions:\n presences[region] = detect_enemies(region)\n\n return presences[\"NORTH_WEST\"], presences[\"NORTH_EAST\"], presences[\"SOUTH_WEST\"], presences[\"SOUTH_EAST\"]",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def get_index(coord_ax, probs_map_shape_ax, grid_ax):\n # print (coord_ax, probs_map_shape_ax, grid_ax)\n _min = grid_ax//2\n _max = grid_ax//2\n\n ax_min = coord_ax - _min\n while ax_min < 0:\n _min -= 1\n ax_min += 1\n\n ax_max = coord_ax + _max\n while ax_max > probs_map_shape_ax:\n _max -= 1\n ax_max -= 1\n\n return _min, _max",
"def _find_solution(self, population, num_of_best_chromosomes):\n data = self._Individuals()\n for x in population:\n curr_fit = self._fitness(x)\n data.append_object(self._Individual(curr_fit, x))\n return data.sort_objects()[:num_of_best_chromosomes]",
"def TopologicalCoordinate(x, n):\n\titinerary = Itinerary(x,n)\n\tgamma, gammaBin = Splus2gamma(itinerary)\n\treturn gamma",
"def get_score(location, grid, shape):",
"def topography(x,y):\n \n z = -x/10\n \n N = len(x)\n for i in range(N):\n # Step\n if 10 < x[i] < 12:\n z[i] += 0.4 - 0.05*y[i]\n \n # Constriction\n if 27 < x[i] < 29 and y[i] > 3:\n z[i] += 2\n \n # Pole\n if (x[i] - 34)**2 + (y[i] - 2)**2 < 0.4**2:\n z[i] += 2\n \n return z",
"def get_region(self, point: Point) -> int:\n return self.get_erosion_level(point) % 3",
"def get_map_square(x, y):\n result = MAP_SQUARE_ROCK\n if ((x >=0 and x< width) and (y>= 0 and y< height)): # LT i think done TODO: Replace False with a condition that checks if the values x and y are valid. Valid index values start at 0. x must be less than width and y must be less than height. Negative numbers are not valid.\n row= dungeon_map[y]\n result= row[x] # LT... done? see bitmap hw in comments below TODO: Replace None with an expression that uses x and y to get the right value from dungeon_map. \n return result",
"def winner(self):\n for c in 'xo':\n for comb in [(0,3,6), (1,4,7), (2,5,8), (0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6)]:\n if all(self.spots[p] == c for p in comb):\n return c\n return None"
]
| [
"0.58675516",
"0.57267046",
"0.5638504",
"0.55983895",
"0.5562923",
"0.5545581",
"0.54555863",
"0.5422686",
"0.5355386",
"0.5342899",
"0.5318674",
"0.53076345",
"0.5263731",
"0.5236593",
"0.520713",
"0.51990044",
"0.51692146",
"0.51691866",
"0.5168615",
"0.51660985",
"0.51551026",
"0.5086571",
"0.50744283",
"0.5064431",
"0.50594085",
"0.5050946",
"0.50404876",
"0.5024722",
"0.5010915",
"0.5010832"
]
| 0.61049676 | 0 |
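A note on the atlas record above: locate_peaks reduces, per voxel, to an argmax over the cortical and subcortical probability volumes, keeping whichever label is more probable. The sketch below is a NumPy-only illustration of that decision rule; the toy probability vectors stand in for the Harvard-Oxford maps, and the original's special-casing of particular subcortical label indices ([0, 1, 11, 12]) is omitted for brevity.

import numpy as np

# Toy per-voxel probability vectors standing in for the atlas volumes (values in percent).
ctx_prob = np.array([0, 35, 60])
sub_prob = np.array([10, 0, 55])
ctx_names = ["Ctx region A", "Ctx region B", "Ctx region C"]
sub_names = ["Sub region A", "Sub region B", "Sub region C"]

ctx_idx, sub_idx = int(np.argmax(ctx_prob)), int(np.argmax(sub_prob))
ctx_p, sub_p = ctx_prob[ctx_idx], sub_prob[sub_idx]

if max(ctx_p, sub_p) == 0:
    label, prob = "Unknown", 0      # voxel lies outside both atlases
elif sub_p > ctx_p:
    label, prob = sub_names[sub_idx], sub_p
else:
    label, prob = ctx_names[ctx_idx], ctx_p

print(label, prob)  # -> Ctx region C 60

In the original, this choice is made for every coordinate and the results are collected into a pandas DataFrame.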
update location of an incident | def update_location_only(self, location, incident_id):
self.cursor.execute("""UPDATE incidents SET location='%s' WHERE incident_id='%s'"""%(location,incident_id))
self.commiting() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_location(self, id, location):\n sql = f\"UPDATE incidences SET location = \\'{location}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def upsert_location(self, location):",
"def update_location(request_form, location_id):\n values = {'latitude': request_form.get('latitude'), 'longitude': request_form.get('longitude'),\n 'city': request_form.get('city'), 'country': request_form.get('country')}\n db_session.query(Locations).filter_by(id=location_id).update(values)\n db_session.commit()\n return 'Updated location #%s: %s, %s.' \\\n % (location_id, values['city'].title(), values['country'].title()), 'success'",
"async def setIncident_locationName(\n self, eventID: str, incidentNumber: int, name: str, author: str\n ) -> None:",
"def update_location(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.update'\n return self.call(self.options)",
"def update_location(self, loc, dt): #pylint: disable=invalid-name\n self.observer.date = dt\n self.sat.compute(self.observer)\n loc.az = float(self.sat.az)\n loc.al = float(self.sat.alt)",
"def update_location(dest, location):\n open(dest, 'w').close()\n with open(dest, \"a\") as dest_file:\n dest_file.write(location)",
"def update(self, metric, loc):\n\n self._total_loc += loc\n for region in self._regions:\n region.update(metric, loc)",
"def location(self, location):\n sql = \"\"\"UPDATE barcodes.sample\n SET sample_location = %s\n WHERE sample_id = %s\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [location, self.id])",
"def update_loc(self, loc):\n\n self._total_loc += loc\n for region in self._regions:\n region.update_loc(loc)",
"def set_location(self, location):\n self.location = location",
"def process_update(message):\n resident = Resident.objects.get(phone_number=message.sender)\n resident.location = message.location.location\n resident.save()\n\n # TODO - wording\n message.respond('Thank you. Your location has been updated.')\n \n return TropoOkResponse()",
"def city_update(self):\n self.city = self.city_finder(self.location.__str__())",
"def put(self, problem_id):\n args = self.request.arguments\n x = args.pop('latitude')\n y = args.pop('longitude')\n args['location'] = create_location(x, y)\n self.sess.query(Problem).filter_by(id=int(problem_id)). \\\n update(args)\n\n self.sess.commit()\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"UPDATED\"\n )\n self.sess.add(activity)\n self.sess.commit()",
"def change_current_location(location, id):\n query = \"\"\"UPDATE parcels SET current_location = %s WHERE id = %s\"\"\"\n tuple =(location , id)\n db.insert(query, tuple)",
"def update_location(lat, lon):\n try:\n url = HOST + '/passport/user/travel'\n r = requests.post(url, headers=headers, data=json.dumps({\"lat\": lat, \"lon\": lon}))\n return r.json()\n except requests.exceptions.RequestException as e:\n print(\"Something went wrong. Could not update your location:\", e)",
"def _incident_event(self, incident):\n data = {\n \"type\": incident.get(\"type\"),\n \"description\": incident.get(\"friendly_description\"),\n \"timestamp\": incident.get(\"timestamp\"),\n \"location\": incident.get(\"location\"),\n }\n if incident.get(\"coordinates\"):\n data.update(\n {\n ATTR_LATITUDE: incident.get(\"coordinates\")[0],\n ATTR_LONGITUDE: incident.get(\"coordinates\")[1],\n }\n )\n self._hass.bus.fire(EVENT_INCIDENT, data)",
"async def setIncident_locationDescription(\n self, eventID: str, incidentNumber: int, description: str, author: str\n ) -> None:",
"def _update_location(map: MutableMapping[str, Any], loc: Sequence) -> None:\n if 'location' in map:\n map['location'] = [n + m for n, m in zip(map['location'], loc)]\n if 'source' in map:\n if isinstance(map['source'], Sequence):\n for item in map['source']:\n _update_location(item, loc)\n else:\n _update_location(map['source'], loc)",
"def updateOmLocation(self):\n if self.om != None:\n self.om.current_loc = self.destinations[self.current_loc]",
"def update_robot_location(self, x, y):\n location_id = self._team_name + \"-\" + self._robot_name\n request_name = \"set_robot_location\"\n request_type = self._request_types[request_name]\n\n arguments = dict()\n for key in request_type['schema_keys']:\n arguments[key] = None\n arguments[\"@id\"] = location_id\n arguments[\"@type\"] = request_type[\"schema_name\"]\n arguments[\"episode\"] = self._episode_name\n arguments[\"team\"] = self._team_name\n arguments[\"timestamp\"] = self._get_current_timestamp()\n arguments[\"x\"] = x\n arguments[\"y\"] = y\n arguments[\"z\"] = 0.0\n resp = self.make_request(request_name, url_id=location_id, arguments=arguments)",
"def update_location(username, password, game_id, lat, lng):\n\n payload = {'game_id': game_id, 'lat': lat, 'lng': lng}\n url = \"{}{}/game/{}\".format(hostname, rest_prefix, game_id)\n r = requests.put(url, auth=(username, password), data=payload)\n response = r.json()\n\n print response\n return response",
"def location(self, location):\n self._location = location",
"def update_location(self, lat, lon):\n endpoint = '/passport/user/travel'\n params = {\n \"lat\": lat,\n \"lon\": lon\n }\n return self.post_request(endpoint, params)",
"def set_location(self, location, now):\n def work():\n member = db.get(self.key())\n member.location = location\n member.location_time = now\n member.put()\n db.run_in_transaction(work)",
"def update_location(self, input_json):\n try:\n if AppConstants.LOCATION.LOCATION_ID not in input_json \\\n or (input_json[AppConstants.LOCATION.LOCATION_ID] is None\n or input_json[AppConstants.LOCATION.LOCATION_ID] == \"\"):\n print(AppConstants.LOCATION.LOCATION_ID + AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)\n raise BPLocationException(AppConstants.LOCATION.LOCATION_ID +\n AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)\n\n location_data = list(self.mongo_db_object.find_json(\n {AppConstants.LOCATION.LOCATION_ID: input_json[AppConstants.LOCATION.LOCATION_ID]},\n AppConfigurations.MONGO_DATABASE, AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))\n\n if location_data:\n try:\n response = self.mongo_db_object.update_one(\n {AppConstants.LOCATION.LOCATION_ID: input_json[AppConstants.LOCATION.LOCATION_ID]},\n input_json, AppConfigurations.MONGO_DATABASE,\n AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME)\n print(\"Successfully updated location\")\n except Exception as e:\n print(e, 'exception in updating location')\n return AppConstants.result_success_template(\"successfully updated the location data\")\n else:\n print(\"No Location found with the specified ID\")\n raise BPLocationException(\"No Location found with the specified ID\")\n except Exception as e:\n raise BPLocationException(e)",
"def set_location(self, location_set):",
"def location(self, location):\n\n self._location = location",
"def location(self, location):\n\n self._location = location",
"def location(self, location):\n\n self._location = location"
]
| [
"0.76122206",
"0.69032365",
"0.6729832",
"0.6556108",
"0.6544285",
"0.64480174",
"0.61404055",
"0.6120101",
"0.61116576",
"0.60934275",
"0.6064723",
"0.6015835",
"0.5995509",
"0.5994672",
"0.59677976",
"0.5956472",
"0.5948218",
"0.5909087",
"0.5898638",
"0.5888847",
"0.58670104",
"0.58585835",
"0.58140147",
"0.5791256",
"0.57864887",
"0.5740496",
"0.5734626",
"0.5720609",
"0.5720609",
"0.5720609"
]
| 0.7888442 | 0 |
update comment of an incident | def update_comment_only(self, comment, incident_id):
self.cursor.execute("""UPDATE incidents SET comment='%s' WHERE incident_id='%s'"""%(comment ,incident_id))
self.commiting() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_comment(self, id, comment):\n sql = f\"UPDATE incidences SET comment = \\'{comment}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def edit_comment():\n # Implement me!\n\n logger.info(\"vars: %r\" % request.vars)\n logger.info(\"vars_comment_text: %r\" % request.vars.comment_text)\n logger.info(\"vars id: %r\" % request.vars.comment_id)\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n\n #comment.comment_text = request.vars.comment_text\n #comment.edited_on = datetime.datetime.utcnow()\n db(db.Comments.id == request.vars.comment_id).update(comment_text=request.vars.comment_text, edited_on=datetime.datetime.utcnow())\n db.commit()\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n return \"ok\"",
"def __add_comment(self, issue_id, comment):\n import httplib2\n http = httplib2.Http() \n response, content = http.request(\n uri=self.__issue_url % int(issue_id),\n method='PUT',\n body=comment,\n headers={\n 'X-Redmine-API-Key': self.__api_key,\n 'Content-type': 'application/json'\n }\n )\n print(response)\n print(content)",
"def put(self):\n client_data = self.data\n comment_id = client_data['comment_id']\n\n try:\n comment = self.event_comment_table.get_item(CommentID=comment_id)\n except:\n self.write_json_with_status(400,{\n 'result' : 'fail',\n 'reason' : 'invalid comment id'\n })\n\n if self.current_userid != comment[\"CreatorID\"]:\n self.write_json_with_status(403,{\n 'result' : 'fail',\n 'reason' : 'Anthantication failed'\n })\n\n comment['Coentent'] = client_data['data']\n comment['Timestamp'] = str(time.time())\n comment.partial_save();\n\n self.write_json({\n 'comment_id' : comment_id,\n 'Timestamp' : comment['Timestamp']\n })",
"def _apply_comment(self, iid, comment):\n data = {\"body\" : comment._body}\n resp = self._post(\n self._base + \"/issues/{}/comments\".format(iid),\n data=self._format_data(data))",
"def comment(self, body, incident_id):\n payload = {\"comment\":{\"body\":body, \"is_private\":\"false\"}}\n response = self.session.post(\n \"{0}/incidents/{1}/comments.json\".format(self.uri, incident_id),\n json=payload\n )\n return response.status_code",
"def comment(self, uuid, comment):\n # TODO: add overwrite (false by default) and append options\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n UPDATE experiments\n SET comment = ?\n WHERE uuid = ?\n \"\"\", [comment, uuid])\n cur.close()\n self.conn.commit()",
"def comment_cable(cid, comment):\n\n SQL.execute('''\n SELECT \n cid,\n ticket,\n comment\n FROM \n cables\n WHERE\n cables.cid = ?\n LIMIT 1\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall():\n vlog(2, 'add comment to cable c%s: %s' % (cid, comment))\n\n SQL.execute('''\n UPDATE\n cables \n SET\n comment = ?\n WHERE\n cid = ?\n ;''', (\n comment,\n cid\n ));\n\n if row['ticket'] and not DISABLE_TICKETS:\n EV.add_resolver_comment(row['ticket'], 'Bad Cable Comment:\\n%s' % comment)\n vlog(3, 'Updated Extraview Ticket %s for c%s with comment: %s' % (row['ticket'], cid, comment))",
"def edit(self, comment):\n try:\n self.comment = comment\n self.save()\n except Exception as e:\n raise Exception(\"Failed to save, rolling back transaction.\" \\\n \"Details: %s\" % e)",
"async def edit_comment(*, comment: models.Comment = Depends(resolve_user_owned_comment), edited_comment: EditComment,\n db: Session = Depends(get_db)):\n return crud.update_comment(db, comment_id=comment.id, **edited_comment.dict(exclude_unset=True))",
"def edit_comment(self, id, body, **args):\n args.update(id=id, body=body)\n return self.fetch(\"/comment\", post_args=args)",
"def comment(self, comment):\r\n\r\n core.FW_conf['connection'].comment(comment)",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def update_comment(request, course_id, comment_id):\r\n comment = cc.Comment.find(comment_id)\r\n if 'body' not in request.POST or not request.POST['body'].strip():\r\n return JsonError(_(\"Body can't be empty\"))\r\n comment.body = request.POST[\"body\"]\r\n comment.save()\r\n if request.is_ajax():\r\n return ajax_content_response(request, SlashSeparatedCourseKey.from_deprecated_string(course_id), comment.to_dict())\r\n else:\r\n return JsonResponse(utils.safe_content(comment.to_dict()))",
"def _put(self, data, comment_id, obj):\n comment = obj\n comment_id = int(comment_id)\n\n # Ensure that user and customer have not been changed (they can only be written once)\n if data['user_id'] != comment['user_id']:\n flask_restful.abort(400, message=f\"Bad Request - cannot change user ID in \"\n f\"comment '{comment_id}'\")\n if data['ticket_id'] != comment['ticket_id']:\n flask_restful.abort(400, message=f\"Bad Request - cannot change ticket ID in \"\n f\"comment '{comment_id}'\")\n\n # Remove keys that are not in the new resource\n keys_to_remove = [stored_key for stored_key in comment.keys()\n if stored_key not in data]\n for old_key in keys_to_remove:\n DB_COMMENT_TABLE.update(delete(old_key), doc_ids=[comment_id])\n DB_COMMENT_TABLE.update(data, doc_ids=[comment_id])\n return Comment.get_self_url(comment_id=comment_id)",
"def edit_comment(bid, pid, cid):\n # pylint: disable=unused-argument\n comment = Comment.query.get(cid)\n form = CommentForm(request.form)\n if request.method == 'POST' and current_user.uid == comment.uid:\n if form.validate():\n if comment.text != form.text.data:\n comment.text = form.text.data\n DB.session.commit()\n flash('Comment successfully edited!')\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)",
"def update(self, request, slug, id):\n article = ArticleInst.fetch(slug)\n updated_comment = request.data.get('comment', {})\n comment = self.check_comment(id, article)\n\n similar_comment = Comment.objects.filter(\n article=article,\n body=updated_comment.get('body')\n )\n\n if similar_comment:\n data = {'message': \"You've posted a similar comment before\"}\n status_ = status.HTTP_409_CONFLICT\n else:\n response = {'message': 'Comment Updated'}\n response['data'] = updated_comment\n serializer = self.serializer_class(\n comment,\n data=updated_comment,\n partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n data = serializer.data\n status_ = status.HTTP_200_OK\n\n return Response(data=data, status=status_)",
"def edit_comment(self, comment_id, comment):\n return self.proxy.wp.editComment(self.blog_id, self.username, self.password,\n comment_id, comment)",
"def put_comment(self, object_id, message):\n return self.put_object(object_id, \"comments\", message=message)",
"def put_comment(self, object_id, message):\n return self.put_object(object_id, \"comments\", message=message)",
"def update_comments(self):\n self.nb_comments = self.comments.count()\n self.save()",
"def update(self, id, message, user=None, repo=None):\n request = self.make_request('issues.comments.edit', user=user,\n repo=repo, id=id, body={'body': message})\n return self._patch(request)",
"def create_incident_comment(self, incident_id, data):\n try:\n uri = '/incidents/{}/comments'.format(incident_id)\n resilient_client = self.rest_client()\n heading = \"Raw Proofpoint TRAP Event Payload:\\n\"\n note = {\n 'format': 'text',\n 'content': '{}{}'.format(heading, pprint.pformat(data, indent=4))\n }\n payload = {'text': note}\n comment_response = resilient_client.post(uri=uri, payload=payload)\n return comment_response\n\n except SimpleHTTPException as ex:\n LOG.error(\"Failed to add note for incident %d: %s\", incident_id, ex)",
"def put(self, id):\n return add_comment(request.json, id)",
"def test_projects_id_comments_put(self):\n project = Comment()\n response = self.client.open('/project-tracker/projects/{id}/comments'.format(id=56),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def update_comment(self, comment_form):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.update_resource_template\n collection = JSONClientValidated('commenting',\n collection='Comment',\n runtime=self._runtime)\n if not isinstance(comment_form, ABCCommentForm):\n raise errors.InvalidArgument('argument type is not an CommentForm')\n if not comment_form.is_for_update():\n raise errors.InvalidArgument('the CommentForm is for update only, not create')\n try:\n if self._forms[comment_form.get_id().get_identifier()] == UPDATED:\n raise errors.IllegalState('comment_form already used in an update transaction')\n except KeyError:\n raise errors.Unsupported('comment_form did not originate from this session')\n if not comment_form.is_valid():\n raise errors.InvalidArgument('one or more of the form elements is invalid')\n collection.save(comment_form._my_map)\n\n self._forms[comment_form.get_id().get_identifier()] = UPDATED\n\n # Note: this is out of spec. The OSIDs don't require an object to be returned:\n return objects.Comment(\n osid_object_map=comment_form._my_map,\n runtime=self._runtime,\n proxy=self._proxy)",
"def update_comment(user, wine, comment):\n\n current_comment = Comment.query.filter(Comment.user==user, Comment.wine==wine).first()\n\n #Conditional allows you to \"update\" a comment even if a previous comment doesn't exist\n if current_comment:\n current_comment.comment = comment\n db.session.merge(current_comment)\n else:\n current_comment = Comment(user=user, wine=wine, comment=comment)\n\n db.session.commit()\n\n return current_comment",
"def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e",
"def update_comment_in_doc(doc):\n\n\t# only comments get updates, not likes, assignments etc.\n\tif doc.doctype == \"Comment\" and doc.comment_type != \"Comment\":\n\t\treturn\n\n\tdef get_truncated(content):\n\t\treturn (content[:97] + \"...\") if len(content) > 100 else content\n\n\tif doc.reference_doctype and doc.reference_name and doc.content:\n\t\t_comments = get_comments_from_parent(doc)\n\n\t\tupdated = False\n\t\tfor c in _comments:\n\t\t\tif c.get(\"name\") == doc.name:\n\t\t\t\tc[\"comment\"] = get_truncated(doc.content)\n\t\t\t\tupdated = True\n\n\t\tif not updated:\n\t\t\t_comments.append(\n\t\t\t\t{\n\t\t\t\t\t\"comment\": get_truncated(doc.content),\n\t\t\t\t\t# \"comment_email\" for Comment and \"sender\" for Communication\n\t\t\t\t\t\"by\": getattr(doc, \"comment_email\", None) or getattr(doc, \"sender\", None) or doc.owner,\n\t\t\t\t\t\"name\": doc.name,\n\t\t\t\t}\n\t\t\t)\n\n\t\tupdate_comments_in_parent(doc.reference_doctype, doc.reference_name, _comments)"
]
| [
"0.8003306",
"0.72777617",
"0.7273704",
"0.7262597",
"0.70803076",
"0.7057946",
"0.70297766",
"0.6917318",
"0.6718791",
"0.66493183",
"0.66147476",
"0.6471529",
"0.6453238",
"0.6453238",
"0.6449025",
"0.6447175",
"0.64454687",
"0.6445442",
"0.6428888",
"0.6378571",
"0.6378571",
"0.6314791",
"0.6272524",
"0.6268584",
"0.6268166",
"0.62489206",
"0.6240322",
"0.6222044",
"0.6207157",
"0.6194616"
]
| 0.8372494 | 0 |
update title of an incident | def update_title_only(self, title, incident_id):
self.cursor.execute("""UPDATE incidents SET title='%s' WHERE incident_id='%s'"""%(title, incident_id))
self.commiting() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_title(self, title):\n if type(title) != str:\n raise Exception(\"title is not a string\")\n\n self.__title_var.set(title)",
"def _update_title(self, title, tag, lid):\n return title",
"def _UpdateTitle(self, event, new_title='Updated event title'):\n\n previous_title = event.title.text\n event.title.text = new_title\n print 'Updating title of event from:\\'%s\\' to:\\'%s\\'' % (\n previous_title, event.title.text,)\n return self.cal_client.Update(event)",
"def updateTitle(rubricterm, event):\n rubricterm.updateTitle()",
"def update_title(self,title=None,owner=None):\n self.titles[owner].append(title)",
"def set_title(self, title):\n self.data['title'] = title",
"def title(self, title):\n\n self.container['title'] = title",
"def set_title(self, title):\n\t\tpass",
"def set_title(self, title):\n\n self.title = title\n\n self.add_metadata('DC', 'title', self.title)",
"def update_title(self,title=None,plan_id=None):\n self.titles[plan_id].append(title)",
"def SetTitle(self, title):\n self.title = str(title)",
"def set_title(self, title):\r\n self.title = title",
"def WaveletSetTitle(self, wave_id, wavelet_id, title):\n raise NotImplementedError()",
"def SetTitle(self, title):\n self.__context.builder.WaveletSetTitle(self.GetWaveId(), self.GetId(),\n title)\n self.__data.title = title",
"def setTitle(self, title):\n\n self._title = str(title)",
"async def title_edited(event, gh, *args, **kwargs):\n if \"title\" not in event.data[\"changes\"]:\n return\n await set_status(event, gh)",
"def title(self, new_title):\n\n # Check a type of 'new_title' parametr\n if not isinstance(new_title, basestring):\n raise TypeError('string type expected')\n self._title = new_title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title",
"def title(self, title):\n\n self._title = title"
]
| [
"0.7311984",
"0.7305447",
"0.7303785",
"0.7017659",
"0.69140047",
"0.69086075",
"0.68804616",
"0.68541485",
"0.68230927",
"0.6807289",
"0.6733378",
"0.672871",
"0.67204005",
"0.670341",
"0.66799015",
"0.66678137",
"0.66630304",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956",
"0.66421956"
]
| 0.8301075 | 0 |
delete a specific incident | def delete_specific_incident(self, incident_id):
self.cursor.execute("""DELETE FROM incidents WHERE incident_id ='%s' AND status='draft'
""" %(incident_id))
self.commiting()
return incident_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))",
"def delete(self, _id):",
"def purge_entries_command():\n incident = demisto.args().get('id', get_investigation_id())\n cursor = COLLECTION.find({})\n deleted = 0\n # Iterate, collecting any name/value pairs associated with the incident\n for i in cursor:\n if incident in i:\n object_id = i.get('_id')\n COLLECTION.delete_one({'_id': object_id})\n deleted += 1\n if deleted == 1:\n return f'Incident \"{incident}\" key/value pairs purged - {str(deleted)} document/record deleted', {}, {}\n return f'Incident \"{incident}\" key/value pairs purged - {str(deleted)} documents/records deleted', {}, {}",
"def delete():",
"def delete_entry(self, scenario_id):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_id,))",
"def delete(id_patient: str):\n database = get_connection()\n col = database.patients\n query = {\"patient_data.id\": id_patient}\n col.delete_one(query)",
"def delete_resident(email, chat_id):\n query = \"\"\"\n mutation deleteResident($email: String!){\n deleteResident(residentEmail: $email){\n residentEmail\n }\n }\n \"\"\"\n\n variables = {\n 'email': email\n }\n\n headers = {\n 'Authorization': 'JWT %s' % get_admin_token(chat_id)\n }\n\n response = requests.post(PATH, headers=headers, json={'query':query, 'variables':variables})\n\n return response.json()",
"def delete(openstack_resource):\n openstack_resource.delete()",
"def delete_ticket(event_id, net_id):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"DELETE FROM Ticket WHERE eventID=\"+str(event_id)+\" AND netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()",
"def delete_specimen(specimen_id):\n\n specimen = Specimen.query.get_or_404(specimen_id)\n\n if current_user.id == specimen.user_id:\n\n db.session.delete(specimen)\n db.session.commit()\n\n flash(\"Specimen deleted!\", \"success\")\n return redirect(f\"/user/{current_user.id}\")\n else:\n return (\"\", 403)",
"def delete(ident):\n con = mdb.connect(constants.sql_.IP, constants.sql_.USER, constants.sql_.PASS,\n constants.sql_.DB)\n with con:\n cur = con.cursor()\n sql = 'DELETE FROM '+constants.sql_tables.cron.name+' WHERE id = '+ str(ident)\n cur.execute(sql)\n con.close()",
"def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True",
"def delete_appointment(request, appointment_id):\n appointment_id = appointment_id\n\n if not appointment_id:\n return HttpResponse(\"Please provide an appointment Id\"), 406\n \n try:\n appointment = Appointment.objects.get(id=int(appointment_id))\n except:\n return HttpResponse(\"No appointment with that ID exist\"), 404\n \n appointment.delete()\n return HttpResponse(\"Successfully Deleted\")",
"def delete_key_command():\n incident = demisto.args().get('id', get_investigation_id())\n key = demisto.args().get('key')\n # Search Collection for incident_id and key\n search = incident + '.key'\n cursor = COLLECTION.find_one({search: key})\n if cursor is not None:\n object_id = cursor.get('_id')\n COLLECTION.delete_one({'_id': object_id})\n return f'Incident \"{incident}\" - key/value collection - 1 document deleted', {}, {}\n return f'Key \"{key}\" for incident_id \"{incident}\" does not exist', {}, {}",
"async def delete_one(self, where):\n\n pass",
"def delete(self, obj):",
"def delete(self, xact, path):\n self._log.debug(\"Deleting NSR xact:%s, path:%s\", xact, path)\n self.regh.delete_element(path)\n self._log.debug(\"Deleted NSR xact:%s, path:%s\", xact, path)",
"def delete(self, id):\n raise NotImplementedError",
"def delete(self, cls, id):\n pass",
"def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')",
"def delete(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(\n context, id_)\n\n if db_resource_data['type'] == (eon_const.\n EON_RESOURCE_TYPE_ESX_CLUSTER):\n msg = _(\"Delete operation not supported for type %s\"\n % db_resource_data['type'])\n raise exception.DeleteException(err=msg)\n\n _resource_data = _make_response(\n db_resource_data)\n _resource_data_log = deepcopy(_resource_data)\n _resource_data_log.pop(\"meta_data\", None)\n LOG.info(\"Details for the ID %s is: %s\" % (\n id_, logging.mask_password(_resource_data_log)))\n driver_obj = driver.load_resource_driver(\n db_resource_data['type'])\n driver_obj.validate_delete(db_resource_data)\n driver_obj.delete(context, id_)\n self.db_api.delete_resource(context, id_)\n # delete the data from hlm input model\n try:\n LOG.info(\"[%s] remove resource from input model\" % id_)\n hux_obj = HLMFacadeWrapper(context)\n resource_id = db_resource_data[eon_const.EON_RESOURCE_ID]\n hux_obj.delete_server(resource_id)\n hux_obj.commit_changes(resource_id, \"Delete compute resource\")\n except facade_excep.NotFound:\n # log and do nothing\n LOG.warn(\"[%s] resource not found in hlm input model\" % id_)\n LOG.info(\"[%s]: Deleted resource from eon\" % id_)\n # Notify the message to consumers\n try:\n message = {\"resource_id\": id_,\n \"resource_state\": eon_const.EON_RESOURCE_STATE_REMOVED,\n \"resource_details\": _resource_data,\n }\n message_notifier.notify(context,\n message_notifier.EVENT_PRIORITY_INFO,\n message_notifier.EVENT_TYPE[\n 'removed'],\n message)\n except Exception as ex:\n LOG.exception(\n \"Exception while notifying the message : %s\" % ex)\n except exception.NotFound as e:\n msg = (\"Failed to delete resource %s. Error: %s\") % (\n _resource_data['name'], e.message)\n LOG.exception(msg)\n raise e",
"def delInfo(label: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n del self.__identity_info[label]\r\n else:\r\n raise HDDOPermissionException('Tried to delete non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to delete identity information from a closed HealthDominoDataObject.')",
"def delete(self, xact, path):\n self._log.debug(\"Deleting VNFR xact = %s, %s\", xact, path)\n self.regh.delete_element(path)\n self._log.debug(\"Deleted VNFR xact = %s, %s\", xact, path)",
"def delete(self):\n data = request.get_json()\n\n if data is None:\n raise ClientDataError('Must include request data')\n\n event_id = data.get('id', None)\n bike_id = data.get('bike_id', None)\n\n if event_id is None:\n raise ClientDataError('Must include event id', 400)\n if bike_id is None:\n raise ClientDataError('Must include bike id', 400)\n\n event = MaintenanceEvent.query.get_or_404(event_id)\n bike = Bike.query.get_or_404(event.bike_id)\n if bike.id != bike_id:\n raise ClientDataError('Event does not belong to the given bike', 400)\n if bike.user_id != g.user.id:\n return None, 403\n\n try:\n db.session.delete(event)\n db.session.commit()\n except DataError:\n db.session.rollback()\n return None, 400\n except DataBaseError:\n db.session.rollback()\n return None, 500\n\n return {'id': event_id}, 200",
"def delete_event(event_id):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"DELETE FROM Event WHERE eventID =\"+str(event_id)\n cursor.execute(sql_string)\n connection.commit()",
"def delete(self, problem_id):\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type='REMOVED')\n self.sess.add(activity)\n self.sess.commit()",
"def delete(self):\n return self.request('', pylastica.request.Request.DELETE)",
"def delete(self):\n ...",
"def delete(self, id):\n try:\n deleted_id = self.borrow_repo.remove_one_by_id(id)\n if deleted_id:\n self.write({'id': deleted_id})\n else:\n self.write_not_found(\n 'A request with id {} was not found'.format(id)\n )\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})"
]
| [
"0.7976646",
"0.6722562",
"0.670638",
"0.6674578",
"0.66652",
"0.64647585",
"0.64437497",
"0.63609916",
"0.63390464",
"0.6286161",
"0.6257736",
"0.6240173",
"0.61823016",
"0.61280674",
"0.6107225",
"0.60961556",
"0.6091427",
"0.60879624",
"0.60801107",
"0.60540205",
"0.60530734",
"0.60513186",
"0.60493743",
"0.60467386",
"0.60364425",
"0.6019847",
"0.6009645",
"0.6008042",
"0.60023314",
"0.6000465"
]
| 0.7996932 | 0 |
Return branches that should be used as bases to check for branches that are already contained within them. The first branch in the list is the default branch for the origin remote. | def base_branches() -> list[str]:
branches = []
default = sh("git rev-parse --abbrev-ref origin/HEAD").removeprefix("origin/")
branches.append(default)
releases = sh(
"git branch --all --sort=-committerdate --list *release/* | head -10"
).splitlines()
releases = [b.removeprefix("*").strip() for b in releases]
branches.extend(releases)
return branches | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def branches(self):\n return sorted([\n br[20:] for br in self.repo.refs.keys() if (\n br.startswith('refs/remotes/origin/') and\n br[20:] != 'HEAD'\n )\n ])",
"def branches(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"branches\", _args)\n return _ctx.execute_sync(list[str])",
"def _get_rebasebranch(self):\n logging.info('--- Get Rebasebranch ---')\n local_branch_candidates = {\n branch for branch in self.local_branches\n if branch == self.options.rebasebranch}\n remote_branch_candidates = {\n branch for branch in self.remote_branches\n if self.options.rebasebranch in branch}\n try:\n found_local_branch = local_branch_candidates.pop()\n except KeyError:\n gitwrapper.exit_with_error(\n 'No local branches named %r found.',\n self.options.rebasebranch)\n #\n if local_branch_candidates:\n gitwrapper.exit_with_error(\n 'Too many matching local branches found: %s, %s.',\n found_local_branch,\n ', '.join(local_branch_candidates))\n #\n if not remote_branch_candidates:\n gitwrapper.exit_with_error(\n 'No remote branches named %r found.',\n self.options.rebasebranch)\n #\n if len(remote_branch_candidates) > 2:\n # 1 if remote is not pushed, 2 if its pushed to remote\n gitwrapper.exit_with_error(\n 'Too many matching remote branches found: %s.',\n ', '.join(remote_branch_candidates))\n #\n self.local_branches = {found_local_branch}\n self.remote_branches = remote_branch_candidates\n logging.info('Found local branch %r.', found_local_branch)\n logging.info(\n 'Found remote branches %s.'\n ' and '.join(repr(branch) for branch in self.remote_branches))\n # We only rebase the specified branch\n self.tags = set()",
"def branches(self):\r\n url = self.base_url + 'branches/'\r\n return json.loads(self.bb.load_url(url))",
"def branches(self):\r\n url = '{0}/branches/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json",
"def branches_full(config, args):\n for b in config.repo.branches():\n yield config.repo.branch(b.name)",
"def _get_branches(self):\n logging.info('--- Get Branches ---')\n self.local_branches = set(self.find_branches())\n self.remote_branches = set(self.find_branches(remote=True))\n # Tags are remote branches that start with \"tags/\".\n self.tags = {\n single_branch for single_branch in self.remote_branches\n if PRX_SVNTAGS_PREFIX.match(single_branch)}",
"def find_branches(self, remote=False):\n arguments = ['--no-color']\n if remote:\n arguments.append('-r')\n #\n for branch in self.git.branch(*arguments).splitlines():\n branch = branch.replace('*', '').strip()\n if branch:\n yield branch\n #\n #",
"def list_branches(self) -> List[str]:\n self.__verify_repo_initialized()\n branches = heads.get_branch_names(self._env.branchenv)\n return branches",
"def get_branches(self, *, refs=[\"refs/heads\", \"refs/remotes\"]):\n # type: (Sequence[str]) -> List[Branch]\n stdout = self.git(\n \"for-each-ref\",\n (\n \"--format=\"\n \"%(HEAD)%00\"\n \"%(refname)%00\"\n \"%(upstream)%00\"\n \"%(upstream:remotename)%00\"\n \"%(upstream:track,nobracket)%00\"\n \"%(committerdate:unix)%00\"\n \"%(objectname)%00\"\n \"%(contents:subject)\"\n ),\n *refs\n ) # type: str\n branches = [\n branch\n for branch in (\n self._parse_branch_line(line)\n for line in filter_(stdout.splitlines())\n )\n if branch.name != \"HEAD\"\n ]\n store.update_state(self.repo_path, {\"branches\": branches})\n return branches",
"def fallbackheads(self):\n if self.revs is None:\n # not target to push, all common are relevant\n return self.outgoing.commonheads\n unfi = self.repo.unfiltered()\n # I want cheads = heads(::ancestorsof and ::commonheads)\n # (ancestorsof is revs with secret changeset filtered out)\n #\n # This can be expressed as:\n # cheads = ( (ancestorsof and ::commonheads)\n # + (commonheads and ::ancestorsof))\"\n # )\n #\n # while trying to push we already computed the following:\n # common = (::commonheads)\n # missing = ((commonheads::ancestorsof) - commonheads)\n #\n # We can pick:\n # * ancestorsof part of common (::commonheads)\n common = self.outgoing.common\n rev = self.repo.changelog.index.rev\n cheads = [node for node in self.revs if rev(node) in common]\n # and\n # * commonheads parents on missing\n revset = unfi.set(\n b'%ln and parents(roots(%ln))',\n self.outgoing.commonheads,\n self.outgoing.missing,\n )\n cheads.extend(c.node() for c in revset)\n return cheads",
"def branches(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'branches')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def _listBranches(self):\n assert self.wc.exists('branches')\n branches = self.wc.ls('branches')\n\n # Some early release branches used a different naming scheme\n # that doesn't sort properly with new-style release names. We\n # filter those out here, along with empty lines.\n branches = [b.strip('/') for b in branches\n if MELANGE_RELEASE_RE.match(b.strip('/'))]\n\n return sorted(branches)",
"def missing_branches(self):\n upstream_tags = self.upstream_model.tags_from_semver_point(\n enums.K8S_STARTING_SEMVER\n )\n deb_branches = self.deb_model.base.branches_from_semver_point(\n enums.K8S_STARTING_SEMVER\n )\n return list(set(upstream_tags) - set(deb_branches))",
"def get_branches( self ):\n\n branches = [ self ]\n\n for i in range( len( self.children ) ):\n branches.extend( self.children[i].get_branches() )\n\n return branches",
"def get_branches(local_only=False, directory=None):\n cmd = 'git branch --no-color'\n if not local_only:\n cmd += ' -a'\n out = check_output(cmd, shell=True, cwd=directory)\n branches = []\n for line in out.splitlines():\n if line.count('HEAD -> ') > 0:\n continue\n if line.count('(no branch)') > 0:\n continue\n line = line.strip('*').strip()\n branches.append(line)\n return branches",
"def missing_branches(self):\n upstream_tags = self.upstream_model.tags_from_semver_point(\"1.19.0\")\n deb_branches = self.deb_model.base.branches_from_semver_point(\"1.19.0\")\n return list(set(upstream_tags) - set(deb_branches))",
"def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repositories/{self.owner}/{self.repo}/refs/branches'\n filter_param = {'fields': 'values.name'}\n response = self._get_request(branches_endpoint, filter_param)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n raise BitbucketRequestSenderExc(\n f'Invalid parameter(s) in: owner: {self.owner},'\n f' repo: {self.repo}')\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['name']\n } for branch in branches_page['values']\n ]",
"def missing_branches(self):\n upstream_tags = self.upstream_model.tags_from_semver_point(\"0.8.7\")\n deb_branches = self.deb_model.base.branches_from_semver_point(\"0.8.7\")\n return list(set(upstream_tags) - set(deb_branches))",
"def base_branch_remote(self):\n return self.git.config('--get', 'branch.{}.remote'.format(self.base_branch))",
"def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repos/{self.repo}/branches'\n response = self._get_request(branches_endpoint)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n return None\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['displayId']\n } for branch in branches_page['values']\n ]",
"def get_branches(self):\n branches = []\n for bracket in self.__brackets:\n branches.append(bracket.get_branch())\n return branches",
"def _get_branches_to_merge(branch):\n branches = [(branch, branch.subfolder or '')]\n for dependency in branch.branch_dependency_ids:\n branches.append((dependency.merge_with_branch_id, dependency.merge_subfolder or ''))\n return branches[::-1]",
"def list_all_branches(self) -> dict:\n try:\n branches_response = self.repo.get_branches()\n branches_list = []\n for branch in branches_response:\n branches_list.append(branch.raw_data.get('name'))\n return make_success_response(200, branches_list)\n except GithubException as github_exc:\n return make_error_response(github_exc.status, github_exc.data)",
"def find_branches(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n branches = []\n for branch, branch_id in [(b, ref_dict[b]) for b in repo.branches]:\n obj = repo.repo[branch_id]\n if commit.id == obj.id:\n branches.append((branch, obj))\n return branches",
"def __branch(self):\n\n if len(np.unique(self.__data[1][self.__indexes])) <= 1:\n return []\n\n branches = []\n disc_max = -np.inf\n disc_max_col = None\n\n for col in range(self.__data[0].shape[1]):\n if col in self.__cols_exclude:\n continue\n disc = self.disc(col)\n if disc > disc_max:\n disc_max = disc\n disc_max_col = col\n\n if disc_max_col == None:\n return branches\n \n uniques = np.unique(self.__data[0][self.__indexes, disc_max_col])\n cols_exclude = [col for col in self.__cols_exclude]\n cols_exclude.append(disc_max_col)\n for unique in uniques:\n indexes = (self.__data[0][:, disc_max_col] == unique)\n indexes = np.logical_and(self.__indexes, indexes)\n rule = self.__rule(disc_max_col, unique)\n branches.append(dtree(self.__data, self.__n_groups, self.__max_depth - 1, indexes, cols_exclude, rule, self.__groups))\n \n return branches",
"def branches(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts >= 3 ]",
"def get_branch_names(self):\n return [\n branch.name for branch in self.repo.branches\n ]",
"def branch_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"branch_names\")",
"def dirty_branches(self):\n # If no course index has been set, then no branches have changed\n if self.index is None:\n return []\n\n # If there was no index in the database to start with, then all branches\n # are dirty by definition\n if self.initial_index is None:\n return list(self.index.get('versions', {}).keys())\n\n # Return branches whose ids differ between self.index and self.initial_index\n return [\n branch\n for branch, _id\n in self.index.get('versions', {}).items()\n if self.initial_index.get('versions', {}).get(branch) != _id\n ]"
]
| [
"0.71715426",
"0.71089673",
"0.6710281",
"0.6684703",
"0.66340756",
"0.66328895",
"0.6632771",
"0.6595276",
"0.6540279",
"0.6526692",
"0.65261155",
"0.6521703",
"0.63657045",
"0.6361955",
"0.63289434",
"0.6327129",
"0.62591374",
"0.6242348",
"0.6222655",
"0.6163094",
"0.6149386",
"0.6129939",
"0.6099803",
"0.60080117",
"0.5996749",
"0.59171224",
"0.58108425",
"0.5785797",
"0.5784204",
"0.5763337"
]
| 0.7896697 | 0 |
Create the Refund for the Invoice. | def post(invoice_id):
current_app.logger.info(f'<Refund.post : {invoice_id}')
request_json = request.get_json(silent=True)
try:
valid_format, errors = schema_utils.validate(request_json, 'refund') if request_json else (True, None)
if not valid_format:
return error_to_response(Error.INVALID_REQUEST, invalid_params=schema_utils.serialize(errors))
response = RefundService.create_refund(invoice_id, request_json)
except BusinessException as exception:
return exception.response()
current_app.logger.debug(f'>Refund.post : {invoice_id}')
return jsonify(response), HTTPStatus.ACCEPTED | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(payment, **data):\n if isinstance(payment, resources.Payment):\n payment = payment.id\n\n http_client = HttpClient()\n response, _ = http_client.post(routes.url(routes.REFUND_RESOURCE, payment_id=payment), data)\n return resources.Refund(**response)",
"def initiate_refund(self, order: Order) -> OrderRefund:\n raise NotImplementedError",
"def invoice_create_onaccept(form):\n\n # Get record ID\n form_vars = form.vars\n if \"id\" in form_vars:\n record_id = form_vars.id\n elif hasattr(form, \"record_id\"):\n record_id = form.record_id\n else:\n return\n\n # Look up the billing ID\n table = current.s3db.fin_voucher_invoice\n query = (table.id == record_id)\n invoice = current.db(query).select(table.billing_id,\n limitby = (0, 1),\n ).first()\n\n if invoice:\n # Assign the invoice\n from .helpers import assign_pending_invoices\n assign_pending_invoices(invoice.billing_id,\n invoice_id = record_id,\n )",
"def enqueue_refund(self, status, user, refund_reason=None,\n rejection_reason=None):\n from mkt.prices.models import Refund\n refund, c = Refund.objects.safer_get_or_create(contribution=self,\n user=user)\n refund.status = status\n\n # Determine which timestamps to update.\n timestamps = []\n if status in (mkt.REFUND_PENDING, mkt.REFUND_APPROVED_INSTANT,\n mkt.REFUND_FAILED):\n timestamps.append('requested')\n if status in (mkt.REFUND_APPROVED, mkt.REFUND_APPROVED_INSTANT):\n timestamps.append('approved')\n elif status == mkt.REFUND_DECLINED:\n timestamps.append('declined')\n for ts in timestamps:\n setattr(refund, ts, datetime.datetime.now())\n\n if refund_reason:\n refund.refund_reason = refund_reason\n if rejection_reason:\n refund.rejection_reason = rejection_reason\n refund.save()\n return refund",
"def _prepare_refund(self, invoice, date_invoice=None, date=None, description=None, journal_id=None):\n values = super(AccountInvoice, self)._prepare_refund(invoice, date_invoice, date, description, journal_id)\n if invoice.payment_term_id:\n values['payment_term_id'] = invoice.payment_term_id.id\n elif invoice.partner_id.property_payment_term_id:\n values['payment_term_id'] = invoice.partner_id.property_payment_term_id.id\n return values",
"def create_invoice(invoice: Invoice, callback_url: Optional[HttpUrl] = None):\n # Send the invoice, collect the money, send the notification (the callback)\n return {\"msg\": \"Invoice received\"}",
"def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res",
"def create_landlord_invoice(self):\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n inv_lines_values = {\n # 'origin': 'tenancy.rent.schedule',\n 'name': 'Rent Cost for' + self.tenancy_id.name,\n 'quantity': 1,\n 'price_unit': self.amount or 0.00,\n 'account_id':\n self.tenancy_id.property_id.account_depreciation_expense_id.id or False,\n 'analytic_account_id': self.tenancy_id.id or False,\n }\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {\n 'partner_id': self.tenancy_id.property_owner_id.id or False,\n 'type': 'in_invoice',\n 'invoice_line_ids': [(0, 0, inv_lines_values)],\n 'property_id': self.tenancy_id.property_id.id or False,\n 'invoice_date': self.start_date or False,\n # 'account_id': owner_rec.property_account_payable_id.id,\n # 'schedule_id': self.id,\n 'new_tenancy_id': self.tenancy_id.id,\n 'journal_id': account_jrnl_obj.id or False\n }\n\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }",
"def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id",
"def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res",
"def save(request):\n inv_num = request.POST[\"invoice_number\"]\n initial_data, data = process_request(request)\n tax_data = json.loads(request.POST[\"tax_data\"].replace(\"'\", \"\\\"\"))\n grand_total = request.POST[\"grand_total\"]\n\n Invoice.objects.create(number=inv_num,\n invoice_date=datetime.datetime.strptime(initial_data.get(\"invoice_date\"), \"%d %B, %Y\"),\n reference_number=initial_data.get(\"reference_number\"),\n reference_date=datetime.datetime.strptime(initial_data.get(\"reference_date\"), \"%d %B, %Y\"),\n addressed_to=initial_data.get(\"addressed_to\"),\n party_gst=initial_data.get(\"party_gst\"),\n created_at=datetime.datetime.now(),\n modified_at=datetime.datetime.now(),\n notes=tax_data.get(\"additional_notes\"),\n items=data,\n s_gst=tax_data.get(\"s_gst\"),\n c_gst=tax_data.get(\"c_gst\"),\n other_charges=tax_data.get(\"other_charges\"),\n total=grand_total\n ).save()\n\n return redirect(\"/invoice/print/\" + inv_num)",
"def obj_create(self, bundle, **kwargs):\n logger.info(\"Creating a new acknowledgement...\")\n #Create the object\n bundle.obj = Acknowledgement()\n #hydrate\n bundle = self.full_hydrate(bundle)\n \n #Set the customer\n try:\n logger.info(\"Setting customer...\")\n bundle.obj.customer = Customer.objects.get(pk=bundle.data[\"customer\"][\"id\"])\n bundle.obj.discount = bundle.obj.customer.discount\n except:\n logger.error(\"Customer with ID {0} could not be found.\".format(bundle.data['customer']['id']))\n raise\n \n #Set the employee\n try:\n logger.info(\"Setting employee...\")\n bundle.obj.employee = bundle.request.user\n except User.DoesNotExist:\n logger.error(\"User with ID {0} could not be found\".format(bundle.data['employee']['id']))\n raise\n except KeyError:\n logger.critical(\"Missing employee ID.\")\n raise\n \n #Set Status\n bundle.obj.status = \"ACKNOWLEDGED\"\n \n #Set the project or create a new one\n if \"project\" in bundle.data:\n try:\n project = Project.objects.get(pk=bundle.data['project']['id'])\n except KeyError, Project.DoesNotExist:\n try:\n project = Project()\n project.codename = bundle.data['project']['codename']\n project.save()\n except KeyError:\n project = None\n \n bundle.obj.project = project\n \n #Create items without saving them \n logger.info(\"Creating items...\")\n self.items = [Item.create(acknowledgement=bundle.obj,\n commit=False,\n **product) for product in bundle.data[\"items\"]]\n \n #Calculate the total price\n logger.info(\"Calculating balance of the order...\")\n bundle.obj.calculate_totals(self.items)\n bundle = self.save(bundle)\n \n #Save the items\n logger.info(\"Saving the items to the database...\")\n for item in self.items:\n item.acknowledgement = bundle.obj\n item.save()\n \n log_message = \"Ack {0} created on {1}. Schedule to be delivered on {1}\"\n log_message = log_message.format(bundle.obj.id,\n bundle.obj.time_created.strftime('%B %d, %Y'),\n bundle.obj.delivery_date.strftime('%B %d, %Y'))\n log = Log(message=log_message,\n delivery_date=bundle.obj.delivery_date,\n acknowledgement=bundle.obj)\n log.save()\n #Create and upload the pdfs to the \n #S3 system. The save the pdfs as\n #Attributes of the acknowledgement\n logger.info(\"Creating PDF documents...\")\n bundle.obj.create_and_upload_pdfs()\n \n \n #Add the url of the pdf to the outgoing data\n #only for when an acknowledgement is create\n try:\n ack = bundle.obj.acknowledgement_pdf\n production = bundle.obj.production_pdf\n bundle.data['pdf'] = {'acknowledgement': ack.generate_url(),\n 'production': production.generate_url()}\n except AttributeError: \n logger.warn('Missing acknowledgement or production pdf')\n \n #Conditionally email ack to Decoroom\n if \"decoroom\" in bundle.obj.customer.name.lower():\n try:\n logger.info(\"Emailing Decoroom Co., Ltd. the order details...\")\n bundle.obj.email_decoroom()\n except Exception as e:\n logger.error(\"Unable to mail decoroom.\")\n logger.error(e)\n \n \n \n logger.info(u\"Acknowledgement #{0} created for {1}\".format(bundle.obj.id, \n bundle.obj.customer.name)) \n return bundle",
"def refund(refund, bucket=None):\n if not isinstance(refund, _Refund):\n raise TypeError(\"The Refund must be of type Refund\")\n\n if refund.is_null():\n return _TransactionRecord()\n\n if bucket is None:\n bucket = _login_to_service_account()\n\n # return value from the credit to debit accounts\n debit_account = _Account(uid=refund.debit_account_uid(),\n bucket=bucket)\n credit_account = _Account(uid=refund.credit_account_uid(),\n bucket=bucket)\n\n # remember that a refund debits from the original credit account...\n # (and can only refund completed (DIRECT) transactions)\n debit_note = _DebitNote(refund=refund, account=credit_account,\n bucket=bucket)\n\n # now create the credit note to return the value into the debit account\n try:\n credit_note = _CreditNote(debit_note=debit_note,\n refund=refund,\n account=debit_account,\n bucket=bucket)\n except Exception as e:\n # delete the debit note\n try:\n debit_account._delete_note(debit_note, bucket=bucket)\n except:\n pass\n\n # reset the transaction to its original state\n try:\n _TransactionRecord.load_test_and_set(\n refund.transaction_uid(),\n _TransactionState.REFUNDING,\n _TransactionState.DIRECT,\n bucket=bucket)\n except:\n pass\n\n raise e\n\n try:\n paired_notes = _PairedNote.create(debit_note, credit_note)\n except Exception as e:\n # delete all records...!\n try:\n debit_account._delete_note(debit_note, bucket=bucket)\n except:\n pass\n\n try:\n credit_account._delete_note(credit_note, bucket=bucket)\n except:\n pass\n\n # reset the transaction to the pending state\n try:\n _TransactionRecord.load_test_and_set(\n refund.transaction_uid(),\n _TransactionState.REFUNDING,\n _TransactionState.DIRECT,\n bucket=bucket)\n except:\n pass\n\n raise e\n\n # now record the two entries to the ledger. The below function\n # is guaranteed not to raise an exception\n return Ledger._record_to_ledger(paired_notes, refund=refund,\n bucket=bucket)",
"def action_budget_create(self, cr, uid, ids, context=None):\n payment_term_obj = self.pool.get('account.payment.term')\n for porder in self.browse(cr, uid, ids, context=context):\n period = self.pool.get('account.period').find(cr,uid,porder.date_order, context = context)[0] \n result = []\n confirmation_dict={\n 'reference': porder.name,\n 'period_id': period,\n 'partner_id':porder.partner_id.id,\n 'amount': porder.amount_total,\n 'note':'',\n 'date':porder.date_order,\n 'type':'purchase'}\n\n for line in porder.order_line:\n confirmation_ids=[]\n account_id = self._choose_account_from_po_line(cr, uid, line, context=context)\n notes = _(\"Purchase Approval: %s \\nDescription: %s.\\nDate: %s \\nProducts: %s \") % (porder.name , porder.notes , porder.date_order , line.name )\n\n result= payment_term_obj.compute(cr, \n uid, porder.payment_term_id.id, line.price_subtotal,porder.date_order or False, context=context)\n for r in result:\n confirmation_dict.update(\n {'date':r[0],\n 'amount':r[1],\n 'note':notes,\n 'name':'/',\n 'general_account_id': account_id,\n 'account_analytic_id': line.account_analytic_id.id or False,\n })\n confirmation_id = self.pool.get('account.budget.confirmation').create(cr, uid, confirmation_dict)\n confirmation_ids.append(confirmation_id)\n line.write({'confirmation_ids':[(6, 0, confirmation_ids)] ,'state': 'waiting_budget'})\n self.write(cr, uid, ids, {'state': 'waiting_budget'})\n return True",
"def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice",
"def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n income_account = product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 3\n invoiced_records = self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result",
"def refund_payment(self, **kwargs):",
"def refund_payment(self, **kwargs):",
"def create_order_invoice(sender, instance, created, using, **kwargs):\n\n # Create invoice if it doesn't already exist\n if (\n created\n and not Invoice.objects.filter(\n order__order_number=instance.order_number\n ).exists()\n ):\n invoice = Invoice(order=instance)\n # Saving it in reverse to avoid having this signal called again\n invoice.save()\n\n for slug, cls in discount_rules.get_all_discount_rules():\n if cls.can_user_have_access(instance.user, invoice):\n cls.apply_discount(instance.user, invoice)",
"def post(self):\n user = get_authenticated_user()\n if not user.stripe_id:\n raise NotFound()\n\n data = request.get_json()\n created_field = create_billing_invoice_field(user, data[\"title\"], data[\"value\"])\n return created_field",
"def __refund_entry(self, entry):\n\n buyin = entry.contest_pool.prize_structure.buyin\n bm = BuyinManager(entry.user)\n transaction = None\n\n # Create a cash or ticket deposit as a refund,\n # based on what the user used to get into the contest\n if bm.entry_did_use_ticket(entry):\n tm = TicketManager(entry.user)\n tm.deposit(buyin)\n transaction = tm.transaction\n refund = self.__create_refund(transaction, entry)\n else:\n ct = CashTransaction(entry.user)\n ct.deposit(buyin)\n transaction = ct.transaction\n refund = self.__create_refund(transaction, entry)\n\n # Create refund transaction from escrow\n escrow_ct = CashTransaction(self.get_escrow_user())\n escrow_ct.withdraw(buyin, trans=transaction)\n return refund",
"def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })",
"def mark_refunded(self):\n order = self.clone()\n order.status = Order.STATUS_REFUNDED\n order.save()\n return order",
"def RefundOrder(capture_id, refund_amount=0, currency_code=\"EUR\"):\n\tsale = Sale.find(capture_id)\n\n\trefund = sale.refund({\n\t\"amount\": {\n\t\t\"total\": refund_amount,\n\t\t\"currency\": currency_code\n\t}\n\t})\n\n\tif refund.success():\n\t\tprint(\"Refund[%s] Success\" % (refund.id))\n\t\treturn True # Return True if the Refund was successfull\n\telse:\n\t\tprint(refund.error)\n\t\treturn False # Return False if the Refund failed",
"def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }",
"def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals",
"def create_invoice(cls, payment_request: Tuple[Dict[str, Any]], authorization: Tuple[Dict[str, Any]]) -> Dict:\n # pylint: disable=too-many-locals, too-many-statements\n business_info = payment_request.get('businessInfo')\n filing_info = payment_request.get('filingInfo')\n account_info = payment_request.get('accountInfo', None)\n corp_type = business_info.get('corpType', None)\n business_identifier = business_info.get('businessIdentifier')\n\n payment_account = cls._find_payment_account(authorization)\n payment_method = _get_payment_method(payment_request, payment_account)\n current_app.logger.info(f'Creating Payment Request : '\n f'{payment_method}, {corp_type}, {business_identifier}, '\n f'{payment_account.auth_account_id}')\n\n bcol_account = cls._get_bcol_account(account_info, payment_account)\n\n # Calculate the fees\n fees = _calculate_fees(corp_type, filing_info)\n\n # Create payment system instance from factory\n pay_service: PaymentSystemService = PaymentSystemFactory.create(\n payment_method=payment_method,\n corp_type=corp_type,\n fees=sum(fee.total for fee in fees),\n account_info=account_info,\n payment_account=payment_account\n )\n current_app.logger.info(f'Created Pay System Instance : {pay_service}')\n\n pay_system_invoice: Dict[str, any] = None\n invoice: Invoice = None\n\n try:\n invoice = Invoice()\n invoice.bcol_account = bcol_account\n invoice.payment_account_id = payment_account.id\n invoice.cfs_account_id = payment_account.cfs_account_id\n invoice.invoice_status_code = pay_service.get_default_invoice_status()\n invoice.service_fees = sum(fee.service_fees for fee in fees) if fees else 0\n invoice.total = sum(fee.total for fee in fees) if fees else 0\n invoice.paid = 0\n invoice.refund = 0\n invoice.routing_slip = get_str_by_path(account_info, 'routingSlip')\n invoice.filing_id = filing_info.get('filingIdentifier', None)\n invoice.dat_number = get_str_by_path(account_info, 'datNumber')\n invoice.folio_number = filing_info.get('folioNumber', None)\n invoice.business_identifier = business_identifier\n invoice.payment_method_code = pay_service.get_payment_method_code()\n invoice.corp_type_code = corp_type\n details = payment_request.get('details')\n if not details or details == 'null':\n details = []\n invoice.details = details\n invoice = invoice.flush()\n\n line_items = []\n for fee in fees:\n line_items.append(PaymentLineItem.create(invoice.id, fee))\n\n current_app.logger.info(f'Handing off to payment system to create invoice for {invoice.id}')\n invoice_reference = pay_service.create_invoice(payment_account, line_items, invoice,\n corp_type_code=invoice.corp_type_code)\n\n invoice.commit()\n\n pay_service.complete_post_invoice(invoice, invoice_reference)\n\n invoice = Invoice.find_by_id(invoice.id, skip_auth_check=True)\n\n except Exception as e: # NOQA pylint: disable=broad-except\n current_app.logger.error('Rolling back as error occured!')\n current_app.logger.error(e)\n if invoice:\n invoice.rollback()\n if pay_system_invoice:\n pay_service.cancel_invoice(\n payment_account,\n pay_system_invoice.get('invoice_number'),\n )\n raise\n\n current_app.logger.debug('>Finished creating payment request')\n\n return invoice.asdict(include_dynamic_fields=True)",
"def create_invoice(sender, invoice, issuer_details, **kwargs):\n if not invoice.items:\n return\n\n price = sum([item.price for item in invoice.items.all()])\n\n if not price:\n return\n\n paypal_invoice = models.Invoice(\n customer=invoice.customer,\n year=invoice.year,\n month=invoice.month,\n invoice_date=invoice.invoice_date,\n end_date=invoice.due_date,\n tax_percent=invoice.tax_percent,\n issuer_details=issuer_details,\n )\n\n paypal_invoice.payment_details = {\n 'name': invoice.customer.name,\n 'address': invoice.customer.address,\n 'country': invoice.customer.country,\n 'country_name': invoice.customer.get_country_display(),\n 'email': invoice.customer.email,\n 'postal': invoice.customer.postal,\n 'phone_number': invoice.customer.phone_number,\n 'bank_name': invoice.customer.bank_name,\n 'bank_account': invoice.customer.bank_account,\n }\n\n paypal_invoice.save()\n\n for item in invoice.items.all():\n models.InvoiceItem.objects.create(\n invoice=paypal_invoice,\n price=item.price,\n tax=item.tax,\n quantity=item.quantity,\n unit_price=item.unit_price,\n unit_of_measure=helpers.convert_unit_of_measure(item.unit),\n name=item.name,\n start=item.start,\n end=item.end,\n )",
"def _create_payments(self, invoice):\n self.ensure_one()\n if self.schedule_id and self.schedule_id.occurences > 0:\n # TODO: make more intelligent price cut\n amount = invoice.amount_total\n amount_per_occurence = amount / self.schedule_id.occurences\n for day in self.schedule_id.day_ids:\n payment = self.env['account.payment'].new({\n 'payment_type': 'inbound',\n 'partner_type': 'customer',\n 'partner_id': self.member_id.partner_id.id,\n 'amount': amount_per_occurence,\n 'payment_date': day.day,\n 'journal_id': self.journal_id.id,\n })\n payment._onchange_journal()\n payment_values = dict(payment._cache)\n payment = self.env['account.payment'].create(payment_values)\n payment.invoice_ids = [(4, invoice.id, False)]",
"def create(request):\n if request.method == \"POST\":\n form = InitialInvoice(data=request.POST)\n if form.is_valid():\n data = form.cleaned_data\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": ItemForm(),\n \"stage\": \"2\",\n \"initial_data\": data\n })\n\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": InitialInvoice(),\n \"stage\": \"1\"\n })"
]
| [
"0.6858112",
"0.6258096",
"0.62494576",
"0.60835224",
"0.59996796",
"0.59798473",
"0.594909",
"0.5946934",
"0.5879488",
"0.58272815",
"0.57642823",
"0.57502127",
"0.56931144",
"0.56429595",
"0.5636706",
"0.5613712",
"0.5600246",
"0.5600246",
"0.55394715",
"0.552758",
"0.5492911",
"0.5481512",
"0.54520065",
"0.54285353",
"0.5426146",
"0.54167974",
"0.5413539",
"0.54041874",
"0.5396902",
"0.5350841"
]
| 0.70421207 | 0 |
Flag=1 for subproblem of ALR, Flag=2 for subproblem of LR, Flag=3 for subproblem-mean of ALR, Flag=4 for subproblem-mean of LR | def g_solving_subproblem_of_ALR(self,vehicle_id):
global_LB = -10000
global_UB = 10000
iteration_for_RSP = 20
optimal_solution_for_RSP = None
self.multiplier_v = 0.5
# solve the expected shortest path problem
self.g_dynamic_programming_algorithm(vehicle_id, 3)
# obtain the variance
y_ =self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance
for k in range(iteration_for_RSP):
# print(k)
LB = 0
# step 2: solve decomposed dual problems
# Part I: subproblem of x
self.g_dynamic_programming_algorithm(vehicle_id, 1)
LB += self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching
# Part II: subproblem of y
obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_
if obj_of_y_ > 0:
y = 0
LB += 0
else:
y = y_
LB += obj_of_y_
# generate an upper bound
variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance
Label_cost_for_lagrangian_mean = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching_mean
UB = Label_cost_for_lagrangian_mean + self.reliability * (variance) ** 0.5
# print("UB:{}".format(UB))
# print("LB:{}".format(LB))
# UB and LB update
if LB > global_LB:
global_LB = LB
if UB < global_UB:
global_UB = UB
optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector[0]
# step 3: update multipliers
if variance- y != 0:
self.multiplier_v+= (global_UB - LB) / (variance-y)
# if self.multiplier_v<0:
# self.multiplier_v=1
# print(self.multiplier_v)
# step 4: termination condition test
if global_UB != 0:
gap = abs((global_UB - global_LB) / global_UB)
# print(gap)
if gap < 0.02:
print("iteration{}".format(k + 1))
print(self.multiplier_v)
print(global_LB, global_UB)
return optimal_solution_for_RSP, global_LB
else:
if global_UB - global_LB == 0:
print("iteration{}".format(k + 1))
print(self.multiplier_v)
print(global_LB, global_UB)
return optimal_solution_for_RSP, global_LB
if k == iteration_for_RSP - 1:
print("iteration{}".format(k + 1))
print(self.multiplier_v)
print(global_LB, global_UB)
return optimal_solution_for_RSP, global_LB | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mean_fit(flag='L'):\n if flag == 'L':\n return np.mean(np.vstack(l_coeff_queue), axis =0) if len(l_coeff_queue)>1 else l_coeff_queue[-1]\n else:\n return np.mean(np.vstack(r_coeff_queue), axis =0) if len(r_coeff_queue)>1 else r_coeff_queue[-1]",
"def g_solving_subproblem_of_LR(self,vehicle_id):\r\n global_LB=-10000\r\n global_UB=10000\r\n iteration_for_RSP=20\r\n optimal_solution_for_RSP=None\r\n optimal_value_y=0\r\n self.multiplier_v=0.5\r\n\r\n #solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 4)\r\n #obtain the variance\r\n y_=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB=0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 2)\r\n LB+=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian_mean\r\n UB=Label_cost_for_lagrangian_mean+self.reliability*(variance)**0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector\r\n optimal_value_y = y\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n\r\n\r\n # step 3: update multipliers\r\n if variance-y!= 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB-global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, optimal_value_y,global_LB,global_UB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB",
"def makeSubapMap():\n a=numpy.zeros((sum(nsub),),numpy.int32)\n subFlag=subapFlag.copy()\n for i in range(NNGSCAM+NLGSOCAM+NBOBCAT):#ngs 1-3, truth, lgs, lofs, hofs\n tmp=subFlag[nsub[:i].sum():nsub[:i+1].sum()]\n tmp.shape=nsuby[i],nsubx[i]\n if i==NNGSCAM+NLGSOCAM:#lofs\n tmp[:]=sfNoObs*(i+1)\n elif i==1+NNGSCAM+NLGSOCAM:#hofs\n tmp[:]=sf14NoObs*(i+1)\n elif i==NNGSCAM:#lgs\n for j in range(4):\n jj=6-j\n tmp[j*2]=individualSubapFlag[jj]*(i+1)\n if j!=3:\n tmp[j*2+1]=individualSubapFlag[j]*(i+1)\n #jj=7-j\n #if jj<7:\n # tmp[j*2-1]=individualSubapFlag[jj]*(i+1)\n #tmp[j*2]=individualSubapFlag[j]*(i+1)\n else:\n tmp[:]=individualSubapFlag*(i+1)\n return subFlag",
"def _get_flagging(flagging_file, flag_ant_file, num_integ, n_chan, baseline_names, bad_chan_pct_count):\n\n # Inner: 1-6\n # Mid: 7-30\n # Outer: 31 - 36\n base_idx_map = _build_baseline_index(baseline_names)\n\n # Finding out which antenna has been flagged completely.\n all_ant1, all_ant2, all_flag = [], [], []\n per_integ_flag = []\n baseline_count, baseline_flag = np.zeros((len(baseline_names))), np.zeros((len(baseline_names)))\n integ_ant1, integ_ant2, integ_flag = [], [], []\n integ_num_inner, integ_flag_inner, integ_num_outer, integ_flag_outer = 0, 0, 0, 0\n integ_baseline_count, integ_baseline_flag = np.zeros((len(baseline_names))), np.zeros((len(baseline_names)))\n num_integ_flagged = 0\n print ('Processing ', flagging_file)\n with open(flagging_file, 'r') as f:\n for line in f:\n if \"#\" not in line: # grep -v \"#\"\n if line.startswith('Flagged a total of'):\n flag_total_line = line\n continue\n if \"Flagged\" in line: # grep -v \"Flagged\"\n continue\n tokens = line.split()\n if len(tokens) < 7: # Skip by-channel summaries at the end of the file\n continue\n ant1 = int(tokens[3])\n ant2 = int(tokens[4])\n flag = float(tokens[6])\n if (ant1 < ant2) and (flag == 100): \n # extract non-correlated antenna pairs with 100 percent flagging\n integ_ant1.append(ant1)\n integ_ant2.append(ant2)\n integ_flag.append(flag)\n if ant1 < ant2:\n # Record flagging for each baseline\n base_name = '{}-{}'.format(ant1+1, ant2+1)\n base_idx = base_idx_map[base_name]\n integ_baseline_count[base_idx] += 1\n integ_baseline_flag[base_idx] += flag\n bad_chan_pct_count[int(flag)] += 1\n elif \"# Integration Number:\" in line:\n tokens = line.split()\n integ_num = int(tokens[3])\n flag = float(tokens[5])\n per_integ_flag.append(flag)\n if flag == 100:\n num_integ_flagged += 1\n # totally flagged so don't count individual flagging\n else:\n all_ant1.extend(integ_ant1)\n all_ant2.extend(integ_ant2)\n all_flag.extend(integ_flag)\n baseline_count += integ_baseline_count\n baseline_flag += integ_baseline_flag\n # Reset the integration details ready for the enxt integration (if any)\n integ_ant1, integ_ant2, integ_flag = [], [], []\n integ_baseline_count, integ_baseline_flag = np.zeros((len(baseline_names))), np.zeros((len(baseline_names)))\n\n\n exp_count = (num_integ - num_integ_flagged) * 35 # Number of unflagged integrations times number of non-autocorrelation baselines\n\n # Analyse the flagging data\n ant1, ant2, flag = np.asarray(all_ant1), np.asarray(all_ant2), np.asarray(all_flag)\n\n ant_names = []\n for x in range(0,36):\n count1 = np.count_nonzero(ant1 == x)\n count2 = np.count_nonzero(ant2 == x)\n total_count = count1 + count2\n if total_count == exp_count:\n ant_num = x+1\n ant_name = 'ak{:02d}'.format(ant_num)\n ant_names.append(ant_name)\n\n total_flagged_ant = len(ant_names)\n\n with open(flag_ant_file,'a') as ffile:\n ffile.write(flagging_file[-24:-18])\n if total_flagged_ant > 0:\n ffile.write('\\n')\n for item in ant_names:\n ffile.write(item)\n ffile.write('\\n')\n else:\n ffile.write('\\n none \\n')\n ffile.write('\\n')\n \n flag_pct_integ = 0 if num_integ == 0 else 100* num_integ_flagged / num_integ\n baseline_flag_pct = baseline_flag / baseline_count\n\n # Getting data flagged percentage from the last line of the summary\n str_line = flag_total_line\n if isinstance(str_line, bytes):\n str_line = str_line.decode('utf-8')\n tokens = str_line.split()\n total_flagged_pct = float(tokens[-2]) #data+autocorrelation\n total_uv = float(tokens[7])\n autocorr_flagged_pct = (36 * num_integ * n_chan 
/ total_uv)*100.0\n data_flagged_pct = round(total_flagged_pct - autocorr_flagged_pct, 3)\n\n return data_flagged_pct, total_flagged_ant, flag_ant_file, ant_names, flag_pct_integ, baseline_flag_pct, per_integ_flag",
"def control_opt(self):\n\n\n if self.run_opt['refine']:\n self.run_opt['relaunch']=1\n \n #check value for 'madweight_main'\n for i in range(3,9)+[-1,-3]:\n if self.run_opt[num_to_tag[i]]==1:\n self.run_opt['madweight_main']=1\n break\n\n if self.run_opt['relaunch']==1:\n self.run_opt['control']=1",
"def general_gantest(proba, nbr_qubits):\n for m in [4096, 2048]:\n for l in [1, 2, 3]:\n print(\"Easy mode results for m={} and l={}:\".format(m, l))\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=0, easy=True)\n print(\"\\n\")\n print(\"Distribution learning results for m={} and l={}:\".format(m, l))\n for d in [256, 512]:\n print(\"For \", d, \": \")\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=d, easy=False)\n print(\"Singleton learning results for m={} and l={}:\".format(m, l))\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=0, easy=False)",
"def get_Flagging(flagging_file, n_Rec, nChan, exp_count):\n\n line = subprocess.check_output(['grep','Flagged', flagging_file]) # grab the summary line\n str_line = line.decode('utf-8')\n TOKS = str_line.split()\n total_flagged_pct = float(TOKS[-2]) #data+autocorrelation\n total_uv = float(TOKS[7])\n\n # Getting data flagged percentage\n \n autocorr_flagged_pct = (36 * n_Rec * n_Chan / total_uv)*100.0\n data_flagged_pct = round(total_flagged_pct - autocorr_flagged_pct, 3)\n\n # Finding out which antenna has been flagged completely.\n ANT1, ANT2, FLAG = [], [], [] \n with open(flagging_file, 'r') as f:\n for line in f:\n if \"#\" not in line: # grep -v \"#\"\n if \"Flagged\" not in line: # grep -v \"Flagged\"\n if len(line.split())>2: # avoid new channel-wise summaries at end of flagSummary file\n TOKS=line.split()\n ant1 = int(TOKS[3])\n ant2 = int(TOKS[4])\n flag = float(TOKS[6])\n if (ant1 < ant2) and (flag == 100): # extract non-correlated antenna pairs with 100 percent flagging\n ANT1.append(ant1)\n ANT2.append(ant2)\n FLAG.append(flag)\n\n ant1, ant2, flag = np.asarray(ANT1), np.asarray(ANT2), np.asarray(FLAG)\n \n ANT_NAME = []\n for x in range(0,36):\n count1 = np.count_nonzero(ant1 == x)\n count2 = np.count_nonzero(ant2 == x)\n total_count = count1 + count2\n if total_count == exp_count:\n ant_num = x+1\n ant_name = 'ak'+ str(ant_num)\n ANT_NAME.append(ant_name)\n\n total_flagged_ant = len(ANT_NAME)\n \n flag_ant_file = 'flagged_antenna.txt'\n ffile = open(fig_dir + '/'+ flag_ant_file,'a')\n \n if total_flagged_ant > 1:\n ffile.write(flagging_file[-24:-18])\n ffile.write('\\n')\n for item in ANT_NAME:\n ffile.write(item)\n ffile.write('\\n')\n else:\n ffile.write(flagging_file[-24:-18])\n ffile.write('\\n none \\n')\n\n ffile.close()\n \n return data_flagged_pct, total_flagged_ant, flag_ant_file",
"def ModelEstimation(data, S, A):\n\n counts_sas = np.zeros((S,A,S))\n counts_sa = np.zeros((S,A))\n R_est = np.zeros((S,A))\n P_est = np.zeros((S,A,S))\n for traj in data:\n for sample in traj:\n (s,a,r,s_next) = sample\n counts_sa[s,a] += 1\n counts_sas[s,a,s_next] += 1\n R_est[s,a] += r\n\n for s in range(S):\n for a in range(A):\n if counts_sa[s,a] == 0:\n # if this state-action doesn't exist in data\n # Use default values:\n R_est[s,a] = 0.5\n P_est[s,a,:] = 1/S\n else:\n R_est[s,a] /= counts_sa[s,a]\n P_est[s, a, :] = counts_sas[s,a,:] / counts_sa[s,a]\n if np.any(np.abs(P_est.sum(axis=2) - 1) > 1e-5):\n raise RuntimeError('Probabilty matrix not normalized!!')\n return P_est, R_est",
"def check(self, data_input, debug_flag):\n self.results = [ [], [], [], False, [] ]\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n\n #Step1: b vs a\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"b_a_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n\n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"b_a_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: c vs b\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"c_b_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[1].append(_result)\n\n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"c_b_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[1].append(_result)\n\n \n #Step3: c vs a\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"c_a_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[2].append(_result)\n\n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"c_a_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[2].append(_result)\n\n\n #Step4: Check if this a-b-c is valid or not and which pattern can be chosen\n self.results[3], self.results[4] = self.check_type(data_input, debug_flag)\n\n\n #Step5: return the results\n return self.results",
"def get_untrained_ann(bool_var, data_desc, ml_models, train_feature, train_result,\n test_feature, option):\n # just return case we dont want any ann\n if not bool_var: \n return\n \n # if we want just an optimal configuration, append to it\n if option != 'optimization':\n [num_layers, momentum, learn_rate] = get_optimal_param(data_desc, 'ANN')\n ann_name = 'ANN' + get_suffix_ml_model()\n ann = MLPRegressor(hidden_layer_sizes = num_layers, solver = 'lbfgs', momentum =\n momentum, learning_rate_init = learn_rate)\n multi_ann = MultiOutputRegressor(ann)\n ml_models.append([ann_name, multi_ann, list(train_feature), \n list(test_feature)]) # it should be copies of the lists, not the lists\n # itselves, because we'll add to this lists \n \n # try all configurations\n else: \n # list in which we'll try the configurations\n [num_layers, NOT_momentum, NOT_learn_rate] = \\\n get_optimal_param(data_desc, 'ANN')\n #num_layers_lst = [12, 24, 36, 100]\n #momentum = 0.9\n #learn_rate = 0.001\n momentum_lst = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n learn_rate_lst = [0.001, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n\n # iterate through configurations\n for (momentum, learn_rate) in itertools.product(momentum_lst,\n learn_rate_lst):\n\n # get ann name - consider parameters\n ann_name = 'ANN' + get_suffix_ml_model()\n ann_name += get_suffix_configuration((num_layers, momentum, learn_rate))\n \n # get model and append to the list\n ann = MLPRegressor(hidden_layer_sizes = num_layers, solver = 'lbfgs',\n momentum = momentum, learning_rate_init = learn_rate, max_iter =\n MAX_ITER)\n multi_ann = MultiOutputRegressor(ann)\n ml_models.append([ann_name, multi_ann, list(train_feature), \n list(test_feature)])",
"def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, ('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )",
"def main():\n\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from textwrap import dedent\n parser = ArgumentParser(description=dedent(main.__doc__),\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n args = parser.parse_args()\n\n import itertools\n from numpy import zeros, matrix, linalg, array\n\n # Create a file ('oa' for orientational average)\n fh_pol = open('oa_raman.data', 'w')\n fh_hpol = open('oa_hyperraman.data', 'w')\n fh_2hpol = open('oa_secondhyperraman.data', 'w')\n\n # +++++ Polarizability +++++\n\n # For the polarizability, we are concerned with the average:\n # <alpha_ij^2> = sum_{ab,cd}[ <T_ia*T_jb*T_ic*T_jd> alpha_ab*alpha_cd ]\n #\n # For Raman scattering measured in a perpendicular orientation, we need\n # the averages <alpha_ii^2> and <alpha_ij^2>. For averaging of the 4th\n # rank tensor on the right side of the equation, only two circumstances\n # give nonzero averages:\n # 1. a = b = c = d\n # 2. a = b, c = d\n # These are stored in the lists below.\n #laaaa = ['a', 'a', 'a', 'a']\n #laabb = ['a', 'a', 'b', 'b']\n laaaa = [1, 1, 1, 1]\n laabb = [1, 1, 2, 2]\n\n saaaa = set()\n saabb = set()\n\n genaaaa = itertools.permutations(laaaa,4)\n genaabb = itertools.permutations(laabb,4)\n\n txt = 'Polarizability Averaging Indices'\n print(len(txt)*'+', file=fh_pol) \n print(txt, file=fh_pol)\n print(len(txt)*'+', file=fh_pol)\n\n # Size of the basis set and number of linearly independent terms\n r4nn, r4qn = fullpermutations(4)\n print('', file=fh_pol)\n txt = 'For a tensor of rank 4'\n print('*'*2*len(txt), file=fh_pol)\n print(txt, file=fh_pol)\n print('*'*2*len(txt), file=fh_pol)\n txt = 'Size of basis set = ' + str(r4nn)\n print(txt, file=fh_pol)\n txt = 'Number of linearly independent terms = ' + str(r4qn)\n print(txt, file=fh_pol)\n print('', file=fh_pol)\n\n # Terms with aa,aa\n txt = 'Indices for aa,aa terms'\n print(len(txt)*'=', file=fh_pol)\n print(txt, file=fh_pol)\n print(len(txt)*'=', file=fh_pol)\n for i in genaaaa:\n if i not in saaaa:\n print(i, file=fh_pol)\n saaaa.add(i)\n\n print('', file=fh_pol)\n # Terms with aa,bb (all permutations)\n txt = 'Indices for aa,bb terms'\n print(len(txt)*'=', file=fh_pol)\n print(txt, file=fh_pol)\n print(len(txt)*'=', file=fh_pol)\n for i in genaabb:\n if i not in saabb:\n print(i, file=fh_pol)\n saabb.add(i)\n\n print('', file=fh_pol)\n print('~'*30, file=fh_pol)\n print('Number of aa,aa terms', len(saaaa), file=fh_pol)\n print('Number of aa,bb terms', len(saabb), file=fh_pol)\n print('~'*30, file=fh_pol)\n print('', file=fh_pol)\n\n # Terms for Mathematica\n print('%'*30, file=fh_pol)\n print('Mathematica style output', file=fh_pol)\n print('%'*30, file=fh_pol) \n print('', file=fh_pol)\n\n # Basis vectors in the experimental reference frame\n r4exp, r4mol = vectors_exp_mol(4)\n print('Experimental reference frame basis vectors', file=fh_pol)\n for item in r4exp:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r4nn,4,False)\n print('S matrix', file=fh_pol)\n print(smat, file=fh_pol)\n print('', file=fh_pol)\n \n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_pol)\n for item in r4mol:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_pol)\n print(vexp, file=fh_pol)\n 
print('', file=fh_pol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_pol)\n print(vmol, file=fh_pol)\n print('', file=fh_pol)\n\n # Index equivalence for molecular reference frame data\n data, avg_alphaii, avg_alphaij = pol_mathematica(saaaa, saabb) \n\n print('Index equivalence for molecular reference frame vectors', file=fh_pol)\n for item in data:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n print('Polarizability Average Terms', file=fh_pol)\n print('<alpha_ii^2> term', file=fh_pol)\n for item in avg_alphaii:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n print('<alpha_ij^2> term', file=fh_pol)\n for item in avg_alphaij:\n print(item, file=fh_pol)\n\n # +++++ First Hyperpolarizability +++++\n\n # For the first hyperpolarizability, we are concerned with the average:\n # <beta_ijk^2> \n # = sum_{abc,def}[ <T_ia*T_jb*T_kc*T_id*T_je*T_kf> beta_abc*beta_def ]\n #\n # For hyper-Raman scattering measured in a perpendicular orientation, we need\n # the averages <beta_iii^2> and <beta_ijj^2>. For averaging of the 6th\n # rank tensor on the right side of the equation, three circumstances\n # give nonzero averages:\n # 1. a = b = c = d = e = f\n # 2. a = b = c = d, e = f\n # 3. a = b, c = d, e = f\n # These are stored in the lists below.\n #laaaaaa = ['a', 'a', 'a', 'a', 'a', 'a']\n #laaaabb = ['a', 'a', 'a', 'a', 'b', 'b']\n #laabbcc = ['a', 'a', 'b', 'b', 'c', 'c']\n laaaaaa = [1, 1, 1, 1, 1, 1]\n laaaabb = [1, 1, 1, 1, 2, 2]\n laabbcc = [1, 1, 2, 2, 3, 3]\n\n saaaaaa = set()\n saaaabb = set()\n saabbcc = set()\n\n genaaaaaa = itertools.permutations(laaaaaa,6)\n genaaaabb = itertools.permutations(laaaabb,6)\n genaabbcc = itertools.permutations(laabbcc,6)\n\n txt = 'First hyperpolarizability Averaging Indices'\n print(len(txt)*'+', file=fh_hpol) \n print(txt, file=fh_hpol)\n print(len(txt)*'+', file=fh_hpol)\n\n # Size of the basis set and number of linearly independent terms\n r6nn, r6qn = fullpermutations(6)\n print('', file=fh_hpol)\n txt = 'For a tensor of rank 6'\n print('*'*2*len(txt), file=fh_hpol)\n print(txt, file=fh_hpol)\n print('*'*2*len(txt), file=fh_hpol)\n txt = 'Size of basis set = ' + str(r6nn)\n print(txt, file=fh_hpol)\n txt = 'Number of linearly independent terms = ' + str(r6qn)\n print(txt, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Terms with aaa,aaa\n txt = 'Indices for aaa,aaa terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n for i in genaaaaaa:\n if i not in saaaaaa:\n print(i, file=fh_hpol)\n saaaaaa.add(i)\n\n print('', file=fh_hpol)\n # Terms with aaa,abb (all permutations)\n txt = 'Indices for aaa,abb terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n for i in genaaaabb:\n if i not in saaaabb:\n print(i, file=fh_hpol)\n saaaabb.add(i)\n\n print('', file=fh_hpol)\n # Terms with aab,bcc (all permutations)\n # Here, we need to be careful that we don't overcount terms. It\n # is very easy to come up with an overcomplete basis.\n txt = 'Indices for aab,bcc terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n\n # This will generate all combinations of the aab,bcc terms. However,\n # it requires more prior knowledge than I'd like. 
\n #count1 = 0\n #count2 = 0\n #count3 = 0\n #count4 = 0\n #count5 = 0\n #for i in genaabbcc:\n # if i not in saabbcc:\n # if i[1] == 1:\n # count1 +=1\n # if count1 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[2] == 1:\n # count2 +=1\n # if count2 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[3] == 1:\n # count3 +=1\n # if count3 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[4] == 1:\n # count4 +=1\n # if count4 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[5] == 1:\n # count5 +=1\n # if count5 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # Generate all combinations of aab,bcc terms. We remove the redundant\n # elements next.\n for i in genaabbcc:\n if i not in saabbcc:\n saabbcc.add(i)\n\n # Basis functions of Kronecker delta products\n f61m = \"krond(a,b)*krond(c,d)*krond(e,f)\"\n f62m = \"krond(a,b)*krond(c,e)*krond(d,f)\"\n f63m = \"krond(a,b)*krond(c,f)*krond(d,e)\"\n f64m = \"krond(a,c)*krond(b,d)*krond(e,f)\"\n f65m = \"krond(a,c)*krond(b,e)*krond(d,f)\"\n f66m = \"krond(a,c)*krond(b,f)*krond(d,e)\"\n f67m = \"krond(a,d)*krond(b,c)*krond(e,f)\"\n f68m = \"krond(a,d)*krond(b,e)*krond(c,f)\"\n f69m = \"krond(a,d)*krond(b,f)*krond(c,e)\"\n f610m = \"krond(a,e)*krond(b,c)*krond(d,f)\"\n f611m = \"krond(a,e)*krond(b,d)*krond(c,f)\"\n f612m = \"krond(a,e)*krond(b,f)*krond(c,d)\"\n f613m = \"krond(a,f)*krond(b,c)*krond(d,e)\"\n f614m = \"krond(a,f)*krond(b,d)*krond(c,e)\"\n f615m = \"krond(a,f)*krond(b,e)*krond(c,d)\"\n\n lmol = [ f61m, f62m, f63m, f64m, f65m, \n f66m, f67m, f68m, f69m, f610m,\n f611m, f612m, f613m, f614m, f615m ]\n\n # Temporary set for checking uniqueness\n stmp = set()\n # This set stores the elements of saabbcc that are redundant when \n # we insert values of the indices.\n sintersect = set()\n # Loop through the elements of saabbcc\n for item in saabbcc:\n # Assign values to the indices\n a = item[0]\n b = item[1]\n c = item[2]\n d = item[3]\n e = item[4]\n f = item[5]\n # Temporary list for storing vectors with values\n tmp = []\n for vec in lmol:\n # Evaluate the value of the Kronecker delta products\n v = eval_krond(vec, a, b, c, d, e, f, 0, 0)\n tmp.append(v)\n # We need immutable objects to add in a set\n tmp = tuple(tmp)\n # Add to sintersect if the item is in stmp\n if tmp in stmp:\n sintersect.add(item)\n # Add to stmp if it isn't present\n else:\n stmp.add(tmp)\n # This function removes elements of saabbcc that intersect with\n # elements of sintersect. 
The result is a set containing only the \n # unique elements.\n saabbcc.difference_update(sintersect)\n\n # Print elements of saabbcc.\n for i in saabbcc:\n print(i, file=fh_hpol)\n\n print('', file=fh_hpol)\n print('~'*30, file=fh_hpol)\n print('Number of aaa,aaa terms', len(saaaaaa), file=fh_hpol)\n print('Number of aaa,abb terms', len(saaaabb), file=fh_hpol)\n print('Number of aab,bcc terms', len(saabbcc), file=fh_hpol)\n print('~'*30, file=fh_hpol)\n print('', file=fh_hpol)\n\n print('%'*30, file=fh_hpol)\n print('Mathematica style output', file=fh_hpol)\n print('%'*30, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Basis vectors in the experimental reference frame\n r6exp, r6mol = vectors_exp_mol(6)\n print('Experimental reference frame basis vectors', file=fh_hpol)\n for item in r6exp:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r6nn,6,False)\n print('S matrix', file=fh_hpol)\n print(smat, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_hpol)\n for item in r6mol:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_hpol)\n print(vexp, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_hpol)\n print(vmol, file=fh_hpol)\n print('', file=fh_hpol)\n\n data, avg_betaiii, avg_betaijj = hpol_mathematica(saaaaaa, saaaabb, saabbcc)\n\n print('Set up molecular reference frame vectors', file=fh_hpol)\n for item in data:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n print('Hyperpolarizability Average Terms', file=fh_hpol)\n print('<beta_iii^2> term', file=fh_hpol)\n for item in avg_betaiii:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n print('<beta_ijj^2> term', file=fh_hpol)\n for item in avg_betaijj:\n print(item, file=fh_hpol)\n\n # +++++ Second Hyperpolarizability +++++\n\n # For the second hyperpolarizability, we are concerned with the average:\n # <gamma_ijkl^2> \n # = sum_{abcd,efgh}[ <T_ia*T_jb*T_kc*T_ld*T_ie*T_jf*T_kg*T_lh> gamma_abcd*gamma_efgh ]\n #\n # For second hyper-Raman scattering measured in a perpendicular orientation, we need\n # the averages <gamma_iiii^2> and <gamma_ijjj^2>. For averaging of the 8th\n # rank tensor on the right side of the equation, four circumstances\n # give nonzero averages:\n # 1. a = b = c = d = e = f = g = h\n # 2. a = b = c = d = e = f, g = h\n # 3. a = b = c = d, e = f = g = h\n # 4. a = b = c = d, e = f, g = h\n # These are stored in the lists below.\n #laaaaaaaa = ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']\n #laaaaaabb = ['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b']\n #laaaabbbb = ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b']\n #laaaabbcc = ['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c']\n laaaaaaaa = [1, 1, 1, 1, 1, 1, 1, 1]\n laaaaaabb = [1, 1, 1, 1, 1, 1, 2, 2]\n laaaabbbb = [1, 1, 1, 1, 2, 2, 2, 2]\n laaaabbcc = [1, 1, 1, 1, 2, 2, 3, 3]\n # This type of average is actually equivalent to the fourth term,\n # because the indices can only be x, y, or z. 
\n #laabbccdd = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']\n\n saaaaaaaa = set()\n saaaaaabb = set()\n saaaabbbb = set()\n saaaabbcc = set()\n #saabbccdd = set()\n\n genaaaaaaaa = itertools.permutations(laaaaaaaa,8)\n genaaaaaabb = itertools.permutations(laaaaaabb,8)\n genaaaabbbb = itertools.permutations(laaaabbbb,8)\n genaaaabbcc = itertools.permutations(laaaabbcc,8)\n #genaabbccdd = itertools.permutations(laabbccdd,8)\n\n txt = 'Second hyperpolarizability Averaging Indices'\n print(len(txt)*'+', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'+', file=fh_2hpol)\n\n # Size of the basis set and number of linearly independent terms\n r8nn, r8qn = fullpermutations(8)\n print('', file=fh_2hpol)\n txt = 'For a tensor of rank 8'\n print('*'*2*len(txt), file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print('*'*2*len(txt), file=fh_2hpol)\n txt = 'Size of basis set = ' + str(r8nn)\n print(txt, file=fh_2hpol)\n txt = 'Number of linearly independent terms = ' + str(r8qn)\n print(txt, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Terms with aaaa,aaaa\n txt = 'Indices for aaaa,aaaa terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaaaaaa:\n if i not in saaaaaaaa:\n print(i, file=fh_2hpol)\n saaaaaaaa.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,aabb (all permutations)\n txt = 'Indices for aaaa,aabb terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaaaabb:\n if i not in saaaaaabb:\n print(i, file=fh_2hpol)\n saaaaaabb.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,bbbb (all permutations)\n txt = 'Indices for aaaa,bbbb terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaabbbb:\n if i not in saaaabbbb:\n print(i, file=fh_2hpol)\n saaaabbbb.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,bbcc (all permutations)\n txt = 'Indices for aaaa,bbcc terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n # Temporarily, we store all permutations. There are 420 permutations,\n # but only 210 of them are unique.\n for i in genaaaabbcc:\n if i not in saaaabbcc:\n #print(i, file=fh_2hpol)\n saaaabbcc.add(i)\n\n # Set up the Kronecker delta products as strings. 
\n f81m = 'krond(a,b)*krond(c,d)*krond(e,f)*krond(g,h)'\n f82m = 'krond(a,b)*krond(c,d)*krond(e,g)*krond(f,h)'\n f83m = 'krond(a,b)*krond(c,d)*krond(e,h)*krond(f,g)'\n f84m = 'krond(a,b)*krond(c,e)*krond(d,f)*krond(g,h)'\n f85m = 'krond(a,b)*krond(c,e)*krond(d,g)*krond(f,h)'\n f86m = 'krond(a,b)*krond(c,e)*krond(d,h)*krond(f,g)'\n f87m = 'krond(a,b)*krond(c,f)*krond(d,e)*krond(g,h)'\n f88m = 'krond(a,b)*krond(c,f)*krond(d,g)*krond(e,h)'\n f89m = 'krond(a,b)*krond(c,f)*krond(d,h)*krond(e,g)'\n f810m = 'krond(a,b)*krond(c,g)*krond(d,e)*krond(f,h)'\n f811m = 'krond(a,b)*krond(c,g)*krond(d,f)*krond(e,h)'\n f812m = 'krond(a,b)*krond(c,g)*krond(d,h)*krond(e,f)'\n f813m = 'krond(a,b)*krond(c,h)*krond(d,e)*krond(f,g)'\n f814m = 'krond(a,b)*krond(c,h)*krond(d,f)*krond(e,g)'\n f815m = 'krond(a,b)*krond(c,h)*krond(d,g)*krond(e,f)'\n f816m = 'krond(a,c)*krond(b,d)*krond(e,f)*krond(g,h)'\n f817m = 'krond(a,c)*krond(b,d)*krond(e,g)*krond(f,h)'\n f818m = 'krond(a,c)*krond(b,d)*krond(e,h)*krond(f,g)'\n f819m = 'krond(a,c)*krond(b,e)*krond(d,f)*krond(g,h)'\n f820m = 'krond(a,c)*krond(b,e)*krond(d,g)*krond(f,h)'\n f821m = 'krond(a,c)*krond(b,e)*krond(d,h)*krond(f,g)'\n f822m = 'krond(a,c)*krond(b,f)*krond(d,e)*krond(g,h)'\n f823m = 'krond(a,c)*krond(b,f)*krond(d,g)*krond(e,h)'\n f824m = 'krond(a,c)*krond(b,f)*krond(d,h)*krond(e,g)'\n f825m = 'krond(a,c)*krond(b,g)*krond(d,e)*krond(f,h)'\n f826m = 'krond(a,c)*krond(b,g)*krond(d,f)*krond(e,h)'\n f827m = 'krond(a,c)*krond(b,g)*krond(d,h)*krond(e,f)'\n f828m = 'krond(a,c)*krond(b,h)*krond(d,e)*krond(f,g)'\n f829m = 'krond(a,c)*krond(b,h)*krond(d,f)*krond(e,g)'\n f830m = 'krond(a,c)*krond(b,h)*krond(d,g)*krond(e,f)'\n f831m = 'krond(a,d)*krond(b,c)*krond(e,f)*krond(g,h)'\n f832m = 'krond(a,d)*krond(b,c)*krond(e,g)*krond(f,h)'\n f833m = 'krond(a,d)*krond(b,c)*krond(e,h)*krond(f,g)'\n f834m = 'krond(a,d)*krond(b,e)*krond(c,f)*krond(g,h)'\n f835m = 'krond(a,d)*krond(b,e)*krond(c,g)*krond(f,h)'\n f836m = 'krond(a,d)*krond(b,e)*krond(c,h)*krond(f,g)'\n f837m = 'krond(a,d)*krond(b,f)*krond(c,e)*krond(g,h)'\n f838m = 'krond(a,d)*krond(b,f)*krond(c,g)*krond(e,h)'\n f839m = 'krond(a,d)*krond(b,f)*krond(c,h)*krond(e,g)'\n f840m = 'krond(a,d)*krond(b,g)*krond(c,e)*krond(f,h)'\n f841m = 'krond(a,d)*krond(b,g)*krond(c,f)*krond(e,h)'\n f842m = 'krond(a,d)*krond(b,g)*krond(c,h)*krond(e,f)'\n f843m = 'krond(a,d)*krond(b,h)*krond(c,e)*krond(f,g)'\n f844m = 'krond(a,d)*krond(b,h)*krond(c,f)*krond(e,g)'\n f845m = 'krond(a,d)*krond(b,h)*krond(c,g)*krond(e,f)'\n f846m = 'krond(a,e)*krond(b,c)*krond(d,f)*krond(g,h)'\n f847m = 'krond(a,e)*krond(b,c)*krond(d,g)*krond(f,h)'\n f848m = 'krond(a,e)*krond(b,c)*krond(d,h)*krond(f,g)'\n f849m = 'krond(a,e)*krond(b,d)*krond(c,f)*krond(g,h)'\n f850m = 'krond(a,e)*krond(b,d)*krond(c,g)*krond(f,h)'\n f851m = 'krond(a,e)*krond(b,d)*krond(c,h)*krond(f,g)'\n f852m = 'krond(a,e)*krond(b,f)*krond(c,d)*krond(g,h)'\n f853m = 'krond(a,e)*krond(b,f)*krond(c,g)*krond(d,h)'\n f854m = 'krond(a,e)*krond(b,f)*krond(c,h)*krond(d,g)'\n f855m = 'krond(a,e)*krond(b,g)*krond(c,d)*krond(f,h)'\n f856m = 'krond(a,e)*krond(b,g)*krond(c,f)*krond(d,h)'\n f857m = 'krond(a,e)*krond(b,g)*krond(c,h)*krond(d,f)'\n f858m = 'krond(a,e)*krond(b,h)*krond(c,d)*krond(f,g)'\n f859m = 'krond(a,e)*krond(b,h)*krond(c,f)*krond(d,g)'\n f860m = 'krond(a,e)*krond(b,h)*krond(c,g)*krond(d,f)'\n f861m = 'krond(a,f)*krond(b,c)*krond(d,e)*krond(g,h)'\n f862m = 'krond(a,f)*krond(b,c)*krond(d,g)*krond(e,h)'\n f863m = 'krond(a,f)*krond(b,c)*krond(d,h)*krond(e,g)'\n f864m = 
'krond(a,f)*krond(b,d)*krond(c,e)*krond(g,h)'\n f865m = 'krond(a,f)*krond(b,d)*krond(c,g)*krond(e,h)'\n f866m = 'krond(a,f)*krond(b,d)*krond(c,h)*krond(e,g)'\n f867m = 'krond(a,f)*krond(b,e)*krond(c,d)*krond(g,h)'\n f868m = 'krond(a,f)*krond(b,e)*krond(c,g)*krond(d,h)'\n f869m = 'krond(a,f)*krond(b,e)*krond(c,h)*krond(d,g)'\n f870m = 'krond(a,f)*krond(b,g)*krond(c,d)*krond(e,h)'\n f871m = 'krond(a,f)*krond(b,g)*krond(c,e)*krond(d,h)'\n f872m = 'krond(a,f)*krond(b,g)*krond(c,h)*krond(d,e)'\n f873m = 'krond(a,f)*krond(b,h)*krond(c,d)*krond(e,g)'\n f874m = 'krond(a,f)*krond(b,h)*krond(c,e)*krond(d,g)'\n f875m = 'krond(a,f)*krond(b,h)*krond(c,g)*krond(d,e)'\n f876m = 'krond(a,g)*krond(b,c)*krond(d,e)*krond(f,h)'\n f877m = 'krond(a,g)*krond(b,c)*krond(d,f)*krond(e,h)'\n f878m = 'krond(a,g)*krond(b,c)*krond(d,h)*krond(e,f)'\n f879m = 'krond(a,g)*krond(b,d)*krond(c,e)*krond(f,h)'\n f880m = 'krond(a,g)*krond(b,d)*krond(c,f)*krond(e,h)'\n f881m = 'krond(a,g)*krond(b,d)*krond(c,h)*krond(e,f)'\n f882m = 'krond(a,g)*krond(b,e)*krond(c,d)*krond(f,h)'\n f883m = 'krond(a,g)*krond(b,e)*krond(c,f)*krond(d,h)'\n f884m = 'krond(a,g)*krond(b,e)*krond(c,h)*krond(d,f)'\n f885m = 'krond(a,g)*krond(b,f)*krond(c,d)*krond(e,h)'\n f886m = 'krond(a,g)*krond(b,f)*krond(c,e)*krond(d,h)'\n f887m = 'krond(a,g)*krond(b,f)*krond(c,h)*krond(d,e)'\n f888m = 'krond(a,g)*krond(b,h)*krond(c,d)*krond(e,f)'\n f889m = 'krond(a,g)*krond(b,h)*krond(c,e)*krond(d,f)'\n f890m = 'krond(a,g)*krond(b,h)*krond(c,f)*krond(d,e)'\n f891m = 'krond(a,h)*krond(b,c)*krond(d,e)*krond(f,g)'\n f892m = 'krond(a,h)*krond(b,c)*krond(d,f)*krond(e,g)'\n f893m = 'krond(a,h)*krond(b,c)*krond(d,g)*krond(e,f)'\n f894m = 'krond(a,h)*krond(b,d)*krond(c,e)*krond(f,g)'\n f895m = 'krond(a,h)*krond(b,d)*krond(c,f)*krond(e,g)'\n f896m = 'krond(a,h)*krond(b,d)*krond(c,g)*krond(e,f)'\n f897m = 'krond(a,h)*krond(b,e)*krond(c,d)*krond(f,g)'\n f898m = 'krond(a,h)*krond(b,e)*krond(c,f)*krond(d,g)'\n f899m = 'krond(a,h)*krond(b,e)*krond(c,g)*krond(d,f)'\n f8100m = 'krond(a,h)*krond(b,f)*krond(c,d)*krond(e,g)'\n f8101m = 'krond(a,h)*krond(b,f)*krond(c,e)*krond(d,g)'\n f8102m = 'krond(a,h)*krond(b,f)*krond(c,g)*krond(d,e)'\n f8103m = 'krond(a,h)*krond(b,g)*krond(c,d)*krond(e,f)'\n f8104m = 'krond(a,h)*krond(b,g)*krond(c,e)*krond(d,f)'\n f8105m = 'krond(a,h)*krond(b,g)*krond(c,f)*krond(d,e)'\n\n # Molecular vector of basis functions\n lmol = [ f81m, f82m, f83m, f84m, f85m,\n f86m, f87m, f88m, f89m, f810m,\n f811m, f812m, f813m, f814m, f815m,\n f816m, f817m, f818m, f819m, f820m,\n f821m, f822m, f823m, f824m, f825m,\n f826m, f827m, f828m, f829m, f830m,\n f831m, f832m, f833m, f834m, f835m,\n f836m, f837m, f838m, f839m, f840m,\n f841m, f842m, f843m, f844m, f845m,\n f846m, f847m, f848m, f849m, f850m,\n f851m, f852m, f853m, f854m, f855m,\n f856m, f857m, f858m, f859m, f860m,\n f861m, f862m, f863m, f864m, f865m,\n f866m, f867m, f868m, f869m, f870m,\n f871m, f872m, f873m, f874m, f875m,\n f876m, f877m, f878m, f879m, f880m,\n f881m, f882m, f883m, f884m, f885m,\n f886m, f887m, f888m, f889m, f890m,\n f891m, f892m, f893m, f894m, f895m,\n f896m, f897m, f898m, f899m, f8100m,\n f8101m, f8102m, f8103m, f8104m, f8105m ]\n\n # Temporary set for checking uniqueness\n stmp = set()\n # This set stores the elements of saaaabbcc that are redundant when \n # we insert values of the indices.\n sintersect = set()\n # Loop through the elements of saaaabbcc\n for item in saaaabbcc:\n # Assign values to the indices\n a = item[0]\n b = item[1]\n c = item[2]\n d = item[3]\n e = item[4]\n f = item[5]\n g = 
item[6]\n h = item[7]\n # Temporary list for storing vectors with values\n tmp = []\n for vec in lmol:\n # Evaluate the value of the Kronecker delta products\n v = eval_krond(vec, a, b, c, d, e, f, g, h)\n tmp.append(v)\n # We need immutable objects to add in a set\n tmp = tuple(tmp)\n # Add to sintersect if the item is in stmp\n if tmp in stmp:\n sintersect.add(item)\n # Add to stmp if it isn't present\n else:\n stmp.add(tmp)\n # This function removes elements of saaaabbcc that intersect with\n # elements of sintersect. The result is a set containing only the \n # unique elements.\n saaaabbcc.difference_update(sintersect)\n\n # Print elements of saaaabbcc.\n for i in saaaabbcc:\n print(i, file=fh_2hpol)\n\n print('', file=fh_2hpol)\n print('~'*30, file=fh_2hpol)\n print('Number of aaaa,aaaa terms', len(saaaaaaaa), file=fh_2hpol)\n print('Number of aaaa,aabb terms', len(saaaaaabb), file=fh_2hpol)\n print('Number of aaaa,bbbb terms', len(saaaabbbb), file=fh_2hpol)\n print('Number of aaaa,bbcc terms', len(saaaabbcc), file=fh_2hpol)\n print('~'*30, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('%'*30, file=fh_2hpol)\n print('Mathematica style output', file=fh_2hpol)\n print('%'*30, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Basis vectors in the experimental reference frame\n r8exp, r8mol = vectors_exp_mol(8)\n print('Experimental reference frame basis vectors', file=fh_2hpol)\n for item in r8exp:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r8nn,8,False)\n print('S matrix', file=fh_2hpol)\n print(smat, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_2hpol)\n for item in r8mol:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_2hpol)\n print(vexp, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_2hpol)\n print(vmol, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n data, avg_gammaiiii, avg_gammaijjj = secondhpol_mathematica(saaaaaaaa, saaaaaabb, saaaabbbb, saaaabbcc)\n\n print('Set up molecular reference frame vectors', file=fh_2hpol)\n for item in data:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('Second Hyperpolarizability Average Terms', file=fh_2hpol)\n print('<gamma_iiii^2> term', file=fh_2hpol)\n for item in avg_gammaiiii:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n print('<gamma_ijjj^2> term', file=fh_2hpol)\n for item in avg_gammaijjj:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Print out the irreducible bases\n red_expbasis, red_molbasis = reduced_basis_2hpol()\n\n print('Irreducible experimental reference frame basis vectors', file=fh_2hpol)\n for item in red_expbasis:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('Irreducible molecular reference frame basis vectors', file=fh_2hpol)\n for item in red_molbasis:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Generate the S matrix and total vectors in the irreducible bases\n smat, vexp, vmol = generate_smat_and_vecs(r8qn,8,True)\n \n # Irreducible S matrix\n print('Irreducible S matrix', file=fh_2hpol)\n print(smat, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Irreducible experimental vector containing basis vectors\n print('Irreducible experimental 
total vector', file=fh_2hpol)\n print(vexp, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Irreducible molecular vector containing basis vectors\n print('Irreducible molecular total vector', file=fh_2hpol)\n print(vmol, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Close the files\n fh_pol.close()\n fh_hpol.close()\n fh_2hpol.close()",
"def add_is_approximation(self):\n \n if (self.mf_steps == 0 or self.alpha ==0) and (not self.mixture)\\\n and (self.gibbs_steps ==0):\n print(\"Importance distribution is uniform\")\n weight_term = T.log(self.num_samples) + self.num_vars*T.log(0.5)\n \n elif (self.mf_steps > 0 and self.alpha > 0) and (not self.mixture)\\\n and (self.gibbs_steps ==0):\n print(\"Importance distribution is not uniform\")\n weight_term = T.log(self.num_samples)+\\\n self.get_importance_evals(T.transpose(self.x_tilda), \n T.transpose(self.sampler_theta))\n \n if self.resample:\n weight_term = T.reshape(weight_term, \n [self.batch_size, self.num_samples])\n \n elif self.mixture:\n \n if self.resample:\n n_iters = self.num_samples*self.batch_size \n else:\n n_iters = self.num_samples \n \n weight_term, _ =\\\n theano.scan(lambda j: \n self.get_mixture_evals(T.transpose(self.x_tilda[j,:])),\n sequences = [T.arange(n_iters)])\n \n if self.resample:\n weight_term = T.reshape(weight_term, \n [self.batch_size, self.num_samples]) \n \n weight_term = T.log(self.num_samples) + weight_term \n \n elif self.gibbs_steps > 0:\n print(\"Importance distribution is gibbs sampler\") \n weight_term = T.log(self.num_samples)+\\\n self.get_importance_evals(T.transpose(self.x_tilda), \n T.transpose(self.sampler_theta))\n \n if self.resample:\n weight_term = T.reshape(weight_term, \n [self.batch_size, self.num_samples]) \n \n \n if self.resample and self.num_hidden ==0:\n \n approx_Z = -self.compute_energy(self.x_tilda, \n self.num_samples*self.batch_size)\n \n elif (not self.resample) and self.num_hidden ==0:\n \n approx_Z = -self.compute_energy(self.x_tilda, \n self.num_samples)\n \n elif self.resample and self.num_hidden > 0:\n \n approx_Z = -self.compute_free_energy(self.x_tilda)\n \n elif (not self.resample) and self.num_hidden >0:\n \n approx_Z = -self.compute_free_energy(self.x_tilda)\n \n if self.resample:\n \n approx_Z = T.reshape(approx_Z, \n [self.batch_size, self.num_samples])\n \n approx_Z = approx_Z - weight_term\n \n return approx_Z",
"def update_gol(arr):\n nxt = np.zeros(arr.shape)\n rows,cols = nxt.shape\n for i in range(rows):\n for j in range(cols):\n nn = sum_vonneuman_nn(arr,i,j)\n if arr[i][j]==1:\n if nn==2 or nn==3:\n nxt[i][j]=1\n else:\n if nn==3:\n nxt[i][j]=1\n return nxt",
"def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")",
"def solution(data):\n\t\tif data:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0",
"def learn(self, a:int, r:float):\n pass",
"def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], 
self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results",
"def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], 
self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results",
"def check(self, data_input, debug_flag):\n self.results = [ [], [], [], [], [], [], [], [], [], False, [] ]\n _temp = 0\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2]\n _wave4_t = data_input[0][3]\n _wave5_t = data_input[0][4] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n _wave4_p = data_input[1][3]\n _wave5_p = data_input[1][4]\n\n #Step1: 2 vs 1\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"2_1_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n \n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"2_1_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: 3 vs 1\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"3_1_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[1].append(_result)\n \n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"3_1_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[1].append(_result)\n \n\n #Step3: 3 vs 2\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"3_2_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[2].append(_result)\n \n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"3_2_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[2].append(_result) \n\n\n #Step4: 4 vs 2\n #Step4.1: time_analaysis\n _result = {} \n _result[str(\"4_2_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave2_t)\n self.results[3].append(_result)\n \n #Step4.2: price_analaysis\n _result = {} \n _result[str(\"4_2_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave2_p)\n self.results[3].append(_result) \n\n #Step5: 4 vs 3\n #Step5.1: time_analaysis\n _result = {} \n _result[str(\"4_3_t\")] = EW_fibonacci.check_fibratio(_wave4_t, _wave3_t)\n self.results[4].append(_result)\n \n #Step5.2: price_analaysis \n _result = {} \n _result[str(\"4_3_p\")] = EW_fibonacci.check_fibratio(_wave4_p, _wave3_p)\n self.results[4].append(_result)\n\n\n #Step6: 5 vs 1\n #Step6.1: time_analaysis\n _result = {} \n _result[str(\"5_1_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave1_t)\n self.results[5].append(_result)\n \n #Step6.2: price_analaysis\n _result = {} \n _result[str(\"5_1_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave1_p)\n self.results[5].append(_result) \n\n #Step7: 5 vs 3\n #Step7.1: time_analaysis\n _result = {} \n _result[str(\"5_3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave3_t)\n self.results[6].append(_result)\n \n #Step7.2: price_analaysis\n _result = {} \n _result[str(\"5_3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave3_p)\n self.results[6].append(_result)\n \n\n #Step8: 5 vs 0-3\n #Step8.1: time_analaysis\n _result = {} \n _result[str(\"5_0-3_t\")] = EW_fibonacci.check_fibratio(_wave5_t, (_wave1_t + _wave2_t +_wave3_t))\n self.results[7].append(_result) \n \n #Step8.2: price_analaysis\n _result = {} \n _result[str(\"5_0-3_p\")] = EW_fibonacci.check_fibratio(_wave5_p, (_wave1_p - _wave2_p +_wave3_p))\n self.results[7].append(_result)\n\n \n #Step9: 5 vs 4\n #Step9.1: time_analaysis\n _result = {} \n _result[str(\"5_4_t\")] = EW_fibonacci.check_fibratio(_wave5_t, _wave4_t)\n self.results[8].append(_result)\n \n #Step9.2: price_analaysis \n _result = {} \n _result[str(\"5_4_p\")] = EW_fibonacci.check_fibratio(_wave5_p, _wave4_p)\n self.results[8].append(_result)\n \n\n #Step10: Check if this impulse is valid or not\n self.results[9], 
self.results[10] = self.check_type(data_input, debug_flag)\n\n\n #Step11: return the results\n return self.results",
"def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")",
"def _process_atom(self, atom, body):\n if isinstance(atom, Or):\n # Annotated disjunction\n atoms = atom.to_list()\n else:\n atoms = [atom]\n\n atoms_out = []\n extra_clauses = []\n\n has_lfi_fact = False\n prior_probability = 0.0 # Sum of prior weights in AD.\n fixed_probability = 0.0 # Sum of fixed (i.e. non-learnable) weights in AD.\n\n num_random_weights = 0\n for atom in atoms:\n if atom.probability and atom.probability.functor == \"t\":\n try:\n start_value = float(atom.probability.args[0])\n prior_probability += float(start_value)\n except InstantiationError:\n # Can't be converted to float => take random\n num_random_weights += 1\n except ArithmeticError:\n num_random_weights += 1\n elif atom.probability and atom.probability.is_constant():\n fixed_probability += float(atom.probability)\n\n random_weights = [random.random() for _ in range(0, num_random_weights + 1)]\n norm_factor = (1.0 - prior_probability - fixed_probability) / sum(\n random_weights\n )\n random_weights = [r * norm_factor for r in random_weights]\n\n # First argument is probability available for learnable weights in the AD.\n self.add_ad(1.0 - fixed_probability, []) # TODO : this adds extra ad\n\n # Replace anonymous variables with non-anonymous variables.\n class ReplaceAnon(object):\n def __init__(self):\n self.cnt = 0\n\n def __getitem__(self, key):\n if key == \"_\":\n self.cnt += 1\n return Var(\"anon_%s\" % self.cnt)\n else:\n return Var(key)\n\n prob_args = []\n if isinstance(atom.probability, Term):\n for arg in atom.probability.args:\n if not isinstance(arg, Constant) and arg != Var(\"_\"):\n prob_args.append(arg)\n\n newcount = \"_\".join([str(self.count + count) for count in range(len(atoms))])\n\n factargs = []\n # probargs = ()\n for atom in atoms:\n q = list(atom.apply(ReplaceAnon()).args)\n for var in q:\n if var not in factargs:\n factargs.append(var)\n\n lfi_rule = Term(\n \"lfi_rule\", Constant(newcount), Term(\"t\", *prob_args), *factargs\n )\n\n if body is not None:\n extra_clauses.append(Clause(lfi_rule, body))\n\n for atom in atoms:\n if atom.probability and atom.probability.functor == \"t\":\n # t(_, X)::p(X, Y) :- body.\n #\n # Translate to\n # lfi_prob(1, t(X))::lfi_fact(1, t(X), X, Y).\n # lfi_rule(1, t(X), X, Y): - body.\n # p(X, Y): - lfi_body(1, t(X), X, Y).\n # lfi_body(1, t(X), X, Y): - lfi_par(1, t(X), X, Y), lfi_fact(1, t(X), X, Y).\n # lfi_par(1, t(X), X, Y): - lfi_rule(1, t(X), X, Y).\n #\n #\n # For annotated disjunction: t(_)::p1(X); t(_)::p2(X): - body.\n # lfi_prob(0, t)::lfi_fact(0, t, X); lfi_prob(1, t)::lfi_fact(1, t, X): - lfi_rule(0_1, t, X).\n # lfi_rule(0_1, t, X): - body.\n # p1(X): - lfi_body(0, t, X).\n # lfi_body(0, t, X): - lfi_par(0, t, X), lfi_fact(0, t, X).\n # lfi_par(0, t, X): - lfi_rule(0_1, t, X).\n # p2(X): - lfi_body(1, t, X).\n # lfi_body(1, t, X): - lfi_par(1, t, X), lfi_fact(1, t, X).\n # lfi_par(1, t, X): - lfi_rule(0_1, t, X).\n # ...\n has_lfi_fact = True\n # Learnable probability\n try:\n start_value = float(atom.probability.args[0])\n except InstantiationError:\n start_value = None\n except ArithmeticError:\n start_value = None\n\n atom1 = atom.apply(ReplaceAnon())\n\n # 1) Introduce a new LFI terms\n factargs = atom1.args\n lfi_fact = Term(\n \"lfi_fact\", Constant(self.count), Term(\"t\", *prob_args), *factargs\n )\n lfi_body = Term(\n \"lfi_body\", Constant(self.count), Term(\"t\", *prob_args), *factargs\n )\n lfi_par = Term(\n \"lfi_par\", Constant(self.count), Term(\"t\", *prob_args), *factargs\n )\n lfi_prob = Term(\"lfi_prob\", 
Constant(self.count), Term(\"t\", *prob_args))\n\n # 2) Replacement atom\n replacement = lfi_fact.with_probability(lfi_prob)\n\n # 3) Create redirection clause\n extra_clauses.append(Clause(atom1.with_probability(), lfi_body))\n extra_clauses.append(Clause(lfi_body, lfi_par & lfi_fact))\n\n if body is None:\n extra_clauses.append(Clause(lfi_par, Term(\"true\")))\n else:\n extra_clauses.append(Clause(lfi_par, lfi_rule))\n\n self.append_ad(len(self._weights))\n\n # 4) Set initial weight\n if start_value is None:\n # Assign a random weight initially\n start_value = random_weights.pop(-1)\n self._add_weight(start_value)\n\n # 5) Add name\n self.names.append(atom)\n self.bodies.append(lfi_body)\n self.parents.append(lfi_par)\n atoms_out.append(replacement)\n else:\n atoms_out.append(atom)\n\n self.verify_ad()\n\n if has_lfi_fact:\n if len(atoms) == 1 and body is None:\n # Non AD\n return [atoms_out[0]] + extra_clauses\n else:\n # AD\n if body is None:\n return [\n AnnotatedDisjunction(atoms_out, Term(\"true\"))\n ] + extra_clauses\n else:\n return [AnnotatedDisjunction(atoms_out, lfi_rule)] + extra_clauses\n else:\n if len(atoms) == 1:\n if body is None:\n return [atoms_out[0]]\n else:\n return [Clause(atoms_out[0], body)]\n else:\n if body is None:\n body = Term(\"true\")\n return [AnnotatedDisjunction(atoms_out, body)]",
"def _himf(LATENTDIM, REG, EXPERIMENTNUM, gamma,\n nmfflag=None, lr=0.001, esflag=True):\n fn_hi = '../H3N2_HIdata/H3N2_integrated_/H3N2_HI_data_minority.csv'\n virusindex = readdata.readvirusindex(fn_hi)\n serumindex = readdata.readserumindex(fn_hi)\n ratings = np.load('ratings_minority.npy')\n\n\n \"\"\"\n Cache date check and get simtx from cache\n \"\"\"\n seq_date = os.stat(\"./realdata_minority.fa\").st_mtime\n simtx_date = os.stat(\"./simtx_minority.npy\").st_mtime\n if simtx_date <= seq_date:\n fsim = open(\"./realdata_minority.fa\")\n print(\"making simtx_minority.npy..\")\n simtx = simseq.simseq_parallel(virusindex, fsim)\n np.save(\"simtx_minority.npy\", simtx)\n else:\n simtx = np.load(\"simtx_minority.npy\")\n print(\"simtx_minority ready!\")\n\n\n # create train, validation and test sets.\n n = int(ratings.shape[0] * 0.8)\n train = ratings[:n]\n test = ratings[n:]\n v = int(train.shape[0] * 0.9)\n # split train to 1(validate) : 9(training)\n val = train[v:]\n train = train[:v]\n from rsvd import RSVD\n dims = (len(virusindex), len(serumindex))\n\n \"\"\"\n get the average score\n MF\n \"\"\"\n\n model = RSVD.train(LATENTDIM, train, dims, simtx,\n probeArray=val, esflag=esflag, maxEpochs=1000,\n learnRate=lr,\n regularization=REG,\n nmfflag=nmfflag,\n randomNoise=0.1,\n gamma=gamma)\n\n sqerr = 0.0\n\n reslist = []\n for strainID, serumID, rating in test:\n err = rating - model(strainID, serumID)\n reslist.append([rating, model(strainID, serumID)])\n sqerr += err * err\n sqerr /= test.shape[0]\n\n modelpath = \"./experiment{0}/model-ldim-{1}-reg-{2}\".format(\n EXPERIMENTNUM, LATENTDIM, REG)\n rmsepath = \"./experiment{0}/rmse-ldim-{1}-reg-{2}\".format(\n EXPERIMENTNUM, LATENTDIM, REG)\n if nmfflag:\n modelpath = modelpath + \"-nmf\"\n rmsepath = rmsepath + \"-nmf\"\n modelpath = modelpath + \"-gamma-{0}\".format(gamma)\n rmsepath = rmsepath + \"-gamma-{0}\".format(gamma)\n modelpath = modelpath + \"/\"\n\n if not os.path.exists(os.path.dirname(modelpath)):\n try:\n os.makedirs(os.path.dirname(modelpath))\n model.save(modelpath)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n f = open(rmsepath, 'a+')\n print \"Test RMSE: {0}\\n\".format(np.sqrt(sqerr))\n f.write(\"Test RMSE: {0}\\n\".format(np.sqrt(sqerr)))\n f.close()\n\n np.save(modelpath + 'true_vs_prediction.npy',\n np.array(reslist))\n\n return reslist",
"def constellaqc(denovo_groups, annotated_groups):\n known_feat = np.unique(annotated_groups.loc[:, 'group'])\n pred_group = np.unique(denovo_groups.loc[:, 'group'])\n\n scores = []\n\n for anno in known_feat:\n # anno_bool_index = annotated_groups.loc[:, 'group'] == anno\n anno_group_calls = denovo_groups.loc[annotated_groups.loc[:, 'group'] == anno, 'group'].values\n # print(anno, 'count: ', np.sum(anno_bool_index))\n score_row = []\n for denovo in pred_group:\n score_row.append(np.sum(anno_group_calls == denovo))\n scores.append(score_row)\n\n scores = pd.DataFrame(scores, index=known_feat, columns=pred_group)\n\n if params.debug is not None:\n print('Known Feature-Predicted Group Scoring Matrix:\\n')\n print(scores)\n\n anno_sum = []\n anno_no = []\n anno_error = []\n ni = []\n\n for anno in known_feat:\n anno_sum.append(np.sum(scores.loc[anno, :].values))\n anno_no.append(np.sum(scores.loc[anno, :].values != 0))\n anno_error.append(np.sum(scores.loc[anno, :].values != 0) - 1)\n ni.append(1)\n pred_sum = []\n pred_no = []\n pred_error = []\n nj = []\n\n for denovo in pred_group:\n pred_sum.append(np.sum(scores.loc[:, denovo].values))\n pred_no.append(np.sum(scores.loc[:, denovo].values != 0))\n pred_error.append(np.sum(scores.loc[:, denovo].values != 0) - 1)\n nj.append(1)\n\n anno_valid = np.array(anno_sum) - ni - np.array(anno_error)\n # pred_valid = np.array(pred_sum) - nj - np.array(pred_error)\n\n v_sum = np.sum(anno_valid)\n s_sum = np.sum(anno_error)\n c_sum = np.sum(pred_error)\n total = v_sum + s_sum + c_sum\n\n print('\\n\\nValid Call Rate: ', round(100 * (v_sum / total), 2), '%')\n print('Splitting Call Rate: ', round(100 * (s_sum / total), 2), '%')\n print('Clumping Call Rate: ', round(100 * (c_sum / total), 2), '%')",
"def do(num_range,i, sub):\n\t\n\t#Converting the vector in all_states_explored to a matrix. \n\tmatrix = pMatrix.make_matrix(all_states_explored[i])\n\t\n\t#Tree for currrent iteration.\n\ttree = pMatrix.create_tree(matrix, num_range, sub) \n\t\n\t#Adding the tree to all_trees.\n\tall_trees.append(tree)\n\t\n\t#Calculating the number of states in current iteration.\n\tnum_states = tree.get_num_states()\n\t\t\n\t#Adding total number of states to all_total_states. \n\tall_total_states.append(num_states) \n\t\n\t#Finding results for each iteration.\n\tr = pMatrix.main(matrix, num_range, sub)\n\t\t\n\t#Adding results for current iteration to all_results. \n\tall_results.append(r)\n\t\n\t#Finding any new, previously unseen state and adding it to all_states_explored list.\n\tfind_new_states(tree)\n\t\n\t#Adding super_states\n\tupdate_super_states(tree)",
"def aksprob(alam):\r\n if type(alam) == N.ndarray:\r\n frozen = -1 *N.ones(alam.shape,N.float64)\r\n alam = alam.astype(N.float64)\r\n arrayflag = 1\r\n else:\r\n frozen = N.array(-1.)\r\n alam = N.array(alam,N.float64)\r\n arrayflag = 1\r\n mask = N.zeros(alam.shape)\r\n fac = 2.0 *N.ones(alam.shape,N.float_)\r\n sum = N.zeros(alam.shape,N.float_)\r\n termbf = N.zeros(alam.shape,N.float_)\r\n a2 = N.array(-2.0*alam*alam,N.float64)\r\n totalelements = N.multiply.reduce(N.array(mask.shape))\r\n for j in range(1,201):\r\n if asum(mask) == totalelements:\r\n break\r\n exponents = (a2*j*j)\r\n overflowmask = N.less(exponents,-746)\r\n frozen = N.where(overflowmask,0,frozen)\r\n mask = mask+overflowmask\r\n term = fac*N.exp(exponents)\r\n sum = sum + term\r\n newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +\r\n N.less(abs(term),1.0e-8*sum), 1, 0)\r\n frozen = N.where(newmask*N.equal(mask,0), sum, frozen)\r\n mask = N.clip(mask+newmask,0,1)\r\n fac = -fac\r\n termbf = abs(term)\r\n if arrayflag:\r\n return N.where(N.equal(frozen,-1), 1.0, frozen) # 1.0 if doesn't converge\r\n else:\r\n return N.where(N.equal(frozen,-1), 1.0, frozen)[0] # 1.0 if doesn't converge\r",
"def all_param_AN(ds, myloss='mean_squared_error'):\n wr = ds[0]\n wl = ds[1]\n V = ds[2]\n omega = ds[3]\n input = np.zeros((len(wl),2))\n input[:,0] = wr\n input[:,1] = wl\n output = np.zeros((len(wl),2))\n output[:,0] = V\n output[:,1] = omega\n input_layer = keras.layers.Input((2,),name=\"input\") #wr et wl\n hidden_layer = keras.layers.Dense(2, activation='linear', kernel_initializer='uniform',\n input_shape=(2,), use_bias=False, name=\"output\") #V et omega\n output_layer = hidden_layer(input_layer)\n ann = keras.models.Model(inputs=input_layer, outputs=output_layer)\n opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n ann.compile(loss=myloss, optimizer=opt)\n ann_in, ann_out = input, output\n history = ann.fit(ann_in, ann_out, epochs=40, batch_size=64, verbose=0,\n shuffle=True, validation_split=0.1)#, callbacks=callbacks)\n\n \"\"\"plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\"\"\"\n\n weights = hidden_layer.get_weights()[0]\n Rr_est = weights[0][0]*2\n Rl_est = weights[1][0]*2\n L_est1 = 1/(weights[0][1]/Rr_est)\n L_est2 = -1/(weights[1][1]/Rr_est)\n return Rr_est, Rl_est, (L_est2+L_est1)/2 #moyenne des deux longueurs obtenues",
"def mask_evaluation(annotation_mask, result_mask, idx):\n\n true_positive = np.sum(np.logical_and(annotation_mask == 255, result_mask == 255)) \n false_positive = np.sum(np.logical_and(result_mask == 255, annotation_mask != result_mask))\n false_negative = np.sum(np.logical_and(annotation_mask == 255, annotation_mask != result_mask))\n\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n f1_measure = 2 * ((precision * recall) / (precision + recall))\n\n return recall, precision, f1_measure",
"def FluorescenceAnalysis(self, folder, round_num, save_mask = True):\r\n RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zmax')\r\n # RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zfocus')\r\n \r\n if not os.path.exists(os.path.join(folder, 'MLimages_{}'.format(round_num))):\r\n # If the folder is not there, create the folder\r\n os.mkdir(os.path.join(folder, 'MLimages_{}'.format(round_num))) \r\n \r\n for EachRound in RoundNumberList:\r\n \r\n cells_counted_in_round = 0\r\n \r\n if EachRound == round_num:\r\n \r\n # Start numbering cells at each round\r\n self.cell_counted_inRound = 0 \r\n \r\n for EachCoord in CoordinatesList:\r\n \r\n # =============================================================================\r\n # For tag fluorescence:\r\n # ============================================================================= \r\n print(EachCoord)\r\n #-------------- readin image---------------\r\n for Eachfilename in enumerate(fileNameList):\r\n if EachCoord in Eachfilename[1] and EachRound in Eachfilename[1]:\r\n if '0Zmax' in Eachfilename[1]:\r\n ImgNameInfor = Eachfilename[1][0:len(Eachfilename[1])-14] # get rid of '_PMT_0Zmax.tif' in the name.\r\n elif '0Zfocus' in Eachfilename[1]:\r\n ImgNameInfor = Eachfilename[1][0:len(Eachfilename[1])-16] # get rid of '_PMT_0Zfocus.tif' in the name.\r\n _imagefilename = os.path.join(folder, Eachfilename[1])\r\n #------------------------------------------\r\n \r\n # =========================================================================\r\n # USING MASKRCNN...\r\n # =========================================================================\r\n # Imagepath = self.Detector._fixPathName(_imagefilename)\r\n Rawimage = imread(_imagefilename)\r\n \r\n# if ClearImgBef == True:\r\n# # Clear out junk parts to make it esaier for ML detection.\r\n# RawimageCleared = self.preProcessMLimg(Rawimage, smallest_size=300, lowest_region_intensity=0.16)\r\n# else:\r\n# RawimageCleared = Rawimage.copy()\r\n \r\n image = ProcessImage.convert_for_MaskRCNN(Rawimage)\r\n \r\n # Run the detection on input image.\r\n results = self.Detector.detect([image])\r\n \r\n MLresults = results[0]\r\n \r\n if save_mask == True:\r\n fig, ax = plt.subplots()\r\n # Set class_names = [None,None,None,None] to mute class name display.\r\n visualize.display_instances(image, MLresults['rois'], MLresults['masks'], MLresults['class_ids'],\r\n class_names = [None,None,None,None], ax=ax,\r\n centre_coors = MLresults['Centre_coor'], Centre_coor_radius = 2, \r\n WhiteSpace = (0, 0))#MLresults['class_ids'],MLresults['scores'], \r\n # ax.imshow(fig)\r\n fig.tight_layout()\r\n # Save the detection image\r\n fig_name = os.path.join(folder, 'MLimages_{}\\{}.tif'.format(round_num, ImgNameInfor))\r\n plt.savefig(fname = fig_name, dpi=200, pad_inches=0.0, bbox_inches='tight')\r\n \r\n # segmentationImg = Image.fromarray(fig) #generate an image object\r\n # segmentationImg.save(os.path.join(folder, 'MLimages_{}\\{}.tif'.format(round_num, ImgNameInfor)))#save as tif\r\n \r\n if self.cell_counted_inRound == 0:\r\n cell_Data, self.cell_counted_inRound, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)\r\n else: \r\n Cell_Data_new, self.cell_counted_inRound, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)\r\n if 
len(Cell_Data_new) > 0:\r\n cell_Data = cell_Data.append(Cell_Data_new)\r\n \r\n # Count in total how many flat and round cells are identified.\r\n cells_counted_in_round += total_cells_counted_in_coord\r\n \r\n print(\"Number of round/flat cells in this round: {}\".format(cells_counted_in_round))\r\n \r\n # Save to excel\r\n cell_Data.to_excel(os.path.join(os.path.join(folder, round_num + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_CellsProperties.xlsx')))\r\n \r\n return cell_Data",
"def experiment4(L=10):\n def apw_fit(sigma, mu, Ne):\n code = sample_code(L, sigma)\n def apw_phat(site):\n ep = score(code, site)\n return 1/(1+exp(ep-mu))**(Ne-1)\n chain = mh(lambda s:apw_phat(s), proposal=mutate_site, x0=random_site(L),\n capture_state = lambda s:apw_occ(code, mu, s))[25000:]\n return mean(chain)\n def linear_fit(sigma, mu, Ne):\n pssm = sample_matrix(L, sigma)\n def linear_phat(site):\n ep = score_seq(pssm, site)\n return 1/(1+exp(ep-mu))**(Ne-1)\n chain = mh(lambda s:linear_phat(s), proposal=mutate_site, x0=random_site(L),\n capture_state = lambda s:linear_occ(pssm, mu, s))[25000:]\n return mean(chain)\n def apw_occ(code, mu, site):\n ep = score(code, site)\n return 1/(1+exp(ep-mu))\n def linear_occ(pssm, mu, site):\n ep = score_seq(pssm, site)\n return 1/(1+exp(ep-mu))\n sigmas = np.linspace(0,5,5)\n mus = np.linspace(-10,10,5)\n Nes = np.linspace(0,5,5)\n apws = [apw_fit(sigma, mu, Ne) for sigma in tqdm(sigmas) for mu in mus for Ne in Nes]\n linears = [linear_fit(sigma, mu, Ne) for sigma in tqdm(sigmas) for mu in mus for Ne in Nes]"
]
| [
"0.58328474",
"0.54240626",
"0.50724727",
"0.5055976",
"0.5030519",
"0.49894556",
"0.49818513",
"0.49783278",
"0.49663126",
"0.4953816",
"0.49137238",
"0.49109685",
"0.4887026",
"0.484889",
"0.48471913",
"0.48341736",
"0.48184672",
"0.47867945",
"0.47867945",
"0.47867945",
"0.47859293",
"0.47843707",
"0.4782417",
"0.4771282",
"0.47579488",
"0.475247",
"0.47479984",
"0.47397983",
"0.47373176",
"0.47314015"
]
| 0.55380154 | 1 |
Returns True if another observation have been associated with the scan It also replaces that observation if the new one is closer in time to the respective wifi scan | def scan_observed(scan_timestamp, ap_scan, location, previous_observations, user_id):
location_timestamp = location[0]
latitude = location[1]
longitude = location[2]
distance_to_closest_scan = abs(location_timestamp - scan_timestamp)
for observation in previous_observations:
observed_wifi_scan_time = observation[3]
observation_location_scan_time = observation[4]
if scan_timestamp != observed_wifi_scan_time:
continue
if abs(observation_location_scan_time - location_timestamp) >= TIME_INTERVAL:
continue
if distance_to_closest_scan < abs(observed_wifi_scan_time - observation_location_scan_time):
scan_signal_strength = ap_scan[2]
loc_index = previous_observations.index(observation)
previous_observations[loc_index] = [latitude, longitude, scan_signal_strength, scan_timestamp,
location_timestamp, user_id]
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isDuplicate(self, a, b):\n\n isDuplicate = (\n abs(a['distance_in_km'] - b['distance_in_km']) \n < RunDataProcessor.KM_SIMILARITY_THRESHOLD and \n abs((a['start_timestamp'].tz_convert(None) - b['start_timestamp'].tz_convert(None)).total_seconds()) \n < RunDataProcessor.SECONDS_SIMILARITY_THRESHOLD)\n if isDuplicate:\n logging.info(\"A: {} : {} : {}\\nB: {} : {} : {}\\n\".format(\n a['source'],\n a['start_timestamp'],\n a['distance_in_km'],\n b['source'],\n b['start_timestamp'],\n b['distance_in_km']))\n return isDuplicate",
"def is_scanning_update(data):\n\n global is_scanning\n global scanning_started\n\n if is_scanning != data.data:\n if data.data:\n scanning_started = now()\n if not data.data:\n dump()\n\n is_scanning = data.data",
"def _update_info(self):\n with self.lock:\n _LOGGER.info('Checking wireless clients')\n\n self.last_results = []\n\n active_clients = self.get_ddwrt_data()\n\n if not active_clients:\n return False\n\n self.last_results.extend(active_clients)\n\n return True",
"def discrepancy_resolved(self):\n # If there's a discrepancy and distance change matches the existing data, we're good.\n if self.distance_change == self.existing_data:\n return True\n # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good\n elif self.recommend_updates:\n return True\n else:\n return False",
"def is_driving(self, first: Waypoint, second: Waypoint) -> bool:\n dist = self.calc_distance(first, second)\n time_delta = (second.timestamp - first.timestamp).seconds\n if dist > GPS_DISTANCE_ACCURATE_METERS and time_delta < STOP_TIME_SECONDS:\n return True\n elif GPS_DISTANCE_ACCURATE_METERS < dist < CONNECTION_LOST_DISTANCE_THRESHOLD_METERS and \\\n time_delta < CONNECTION_LOST_TIMEOUT_SECONDS:\n return True\n else:\n return False",
"def is_duplicate(self, other):\n if self.att != other.att or self.pol != other.pol:\n return False\n similarity = F.cosine_similarity(self.emb.unsqueeze(0),\n other.emb.unsqueeze(0))\n return similarity >= self.threshold",
"def _do_connectivity(self, tstep):\n return ((tstep > 0) and (tstep % self.overset_update_interval) == 0)",
"def test_wifi_connection_while_single_scan(self):\n self.attenuators[ATTENUATOR].set_atten(0)\n data = wutils.start_wifi_single_scan(self.dut,\n self.default_scan_setting)\n idx = data[\"Index\"]\n scan_rt = data[\"ScanElapsedRealtime\"]\n self.log.info(\"Wifi single shot scan started with index: {}\".format(\n idx))\n asserts.assert_true(self.connect_to_reference_network(), NETWORK_ERROR)\n time.sleep(10) #wait for connection to be active\n asserts.assert_true(\n wutils.validate_connection(self.dut, self.ping_addr),\n \"Error, No internet connection for current network\")\n #generating event wait time from scan setting plus leeway\n scan_time, scan_channels = wutils.get_scan_time_and_channels(\n self.wifi_chs, self.default_scan_setting, self.stime_channel)\n wait_time = int(scan_time / 1000) + self.leeway\n validity = False\n try:\n event_name = \"{}{}onResults\".format(EVENT_TAG, idx)\n self.log.debug(\"Waiting for event: {} for time {}\".format(\n event_name, wait_time))\n event = self.dut.ed.pop_event(event_name, wait_time)\n self.log.debug(\"Event received: {}\".format(event))\n results = event[\"data\"][\"Results\"]\n bssids, validity = self.proces_and_valid_batch_scan_result(\n results, scan_rt, event[\"data\"][KEY_RET],\n self.default_scan_setting)\n self.log.info(\"Scan number Buckets: {}\\nTotal BSSID: {}\".format(\n len(results), bssids))\n asserts.assert_true(\n len(results) == 1 and bssids >= 1, EMPTY_RESULT)\n except queue.Empty as error:\n raise AssertionError(\n \"Event did not triggered for single scan {}\".format(error))",
"def is_connected(self):\n try:\n if self.coordinator.data[self._system_id][\"devices\"][self._item_id].get(\n \"connected\"\n ):\n connected_ap = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"apId\")\n if connected_ap:\n connected_ap = self.coordinator.data[self._system_id][\n \"access_points\"\n ][connected_ap][\"accessPointSettings\"][\"accessPointOtherSettings\"][\n \"roomData\"\n ][\n \"name\"\n ]\n self._attrs[\"connected_ap\"] = connected_ap\n else:\n self._attrs[\"connected_ap\"] = \"NA\"\n\n self._attrs[\"ip_address\"] = self.coordinator.data[self._system_id][\n \"devices\"\n ][self._item_id].get(\"ipAddress\", \"NA\")\n\n self._mac = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"macAddress\")\n\n self._attrs[\"mac\"] = self._mac if self._mac else \"NA\"\n\n self._is_connected = True\n else:\n self._is_connected = False\n except TypeError:\n pass\n except KeyError:\n pass\n # self.hass.async_create_task(\n # self.hass.config_entries.async_reload(self.coordinator.entry.entry_id)\n # )\n\n return self._is_connected",
"def is_map_updated(self):\r\n self.old_obs_len =0\r\n if len(self.obs_ls[0])!= self.old_obs_len:\r\n self.old_obs_len =len(self.obs_ls[0])\r\n return True\r\n return False",
"def update_dv(self):\n is_changed = False\n for name in self.distance_vector:\n smallest = float('Inf')\n smallest_neighbor = None\n for neighbor_name in self.neighbors:\n if self.neighbors[neighbor_name].is_killed:\n weight = float('Inf')\n else:\n weight = self.neighbors[neighbor_name].weight\n if name in self.neighbors[neighbor_name].distance_vector:\n candidate = self.neighbors[neighbor_name].distance_vector[name]\n candidate += weight\n if smallest > candidate:\n smallest = candidate\n smallest_neighbor = neighbor_name\n if self.distance_vector[name].cost != smallest and name != self.name_str:\n self.distance_vector[name].cost = smallest\n self.distance_vector[name].link = smallest_neighbor\n is_changed = True\n return is_changed",
"def check_time(self):\n while True:\n for name in self.neighbors:\n if not self.neighbors[name].is_killed:\n if not self.neighbors[name].update_ready and time.time() - self.neighbors[name].send_timer > self.timeout:\n self.neighbors[name].update_ready = True\n if time.time() - self.neighbors[name].kill_timer > 3 * self.timeout:\n self.neighbors[name].is_killed = True",
"def _pair_stale(self, pair):\r\n (_conn, return_time) = pair\r\n now = time.time()\r\n return return_time + ConnectionPool.STALE_DURATION < now",
"def is_interesting(self):\n current_time = self.time_in_air\n if self.last_point_name == 'home':\n new_name = 'node 1'\n else:\n new_name = 'node {0}'.format(int(self.last_point_name.split()[1]) + 1)\n new_sense = self.lidars[0].radius if isinf(self.lidars[0].get_sense()) else self.lidars[0].get_sense()\n\n if abs(new_sense - self.last_check) >= 9 * self.lidars[0].radius / 10:\n self.slam.add_point(name=new_name, data={'angle': self.yaw,\n 'duration': self.get_duration(current_time=current_time),\n 'time': current_time})\n self.slam.add_edge(from_node=self.last_point_name, to_node=new_name, distance=self.distance)\n self.distance = 0\n self.last_point_name = new_name",
"def validate_time_acc_gmap(self, source_lng, source_lat, destination_lng, destination_lat, arrival_time):\n to_notify = False\n new_to_notify_ts = None\n\n fetch_distance_success, distance_time_estimate = (\n GoogleDistanceService\n .get_distance_matrix_details(\n source_lng, source_lat,\n destination_lng, destination_lat,\n self.G_MAP_API_KEY, arrival_time.timestamp()\n )\n )\n\n if not fetch_distance_success:\n # postpone to check again, since gmap api is down\n new_to_notify_ts = arrival_time + timedelta(minutes=10) + timedelta(hours=5.5)\n RideReminderCronJob.logger.info(\"GMap failed adding more 10min \" + str(new_to_notify_ts))\n return to_notify, new_to_notify_ts\n\n total_time_deviation_seconds = distance_time_estimate + Constant.MAX_RIDE_ESTIMATE\n new_to_notify_ts = arrival_time - timedelta(seconds=total_time_deviation_seconds)\n\n time_diff = new_to_notify_ts - self.current_utc_ts\n\n if new_to_notify_ts < self.current_utc_ts or time_diff.total_seconds() < 1200:\n to_notify = True\n\n RideReminderCronJob.logger.info(\"New notif ts \" + str(new_to_notify_ts))\n\n return to_notify, new_to_notify_ts",
"def update(self):\n now = farc.Framework._event_loop.time()\n # Collect and prune expired neighbors\n expired_ngbrs = []\n for ngbr_addr, ngbr_data in self._ngbrs.items():\n frame = ngbr_data[\"BCN_FRAME\"]\n rx_time = frame.rx_meta[0]\n if now > rx_time + self._EXPIRATION_PRD:\n expired_ngbrs.append(ngbr_addr)\n for ngbr_addr in expired_ngbrs:\n del self._ngbrs[ngbr_addr]",
"def test_wifi_connection_and_pno_while_batch_scan(self):\n self.attenuators[ATTENUATOR].set_atten(0)\n data = wutils.start_wifi_background_scan(\n self.dut, self.default_batch_scan_setting)\n idx = data[\"Index\"]\n scan_rt = data[\"ScanElapsedRealtime\"]\n self.log.info(\n \"Wifi background scan started with index: {} rt {}\".format(\n idx, scan_rt))\n #generating event wait time from scan setting plus leeway\n scan_time, scan_channels = wutils.get_scan_time_and_channels(\n self.wifi_chs, self.default_batch_scan_setting, self.stime_channel)\n #default number buckets\n number_bucket = 10\n time_cache = self.default_batch_scan_setting[\n 'periodInMs'] * number_bucket #default cache\n #add 2 seconds extra time for switch between the channel for connection scan\n #multiply cache time by two to account for scheduler changing period\n wait_time = (time_cache * 2 + scan_time) / 1000 + self.leeway + 2\n result_flag = 0\n try:\n for snumber in range(1, 7):\n event_name = \"{}{}onResults\".format(EVENT_TAG, idx)\n self.log.info(\"Waiting for event: {}\".format(event_name))\n event = self.dut.ed.pop_event(event_name, wait_time)\n self.log.debug(\"Event onResults: {}\".format(event))\n results = event[\"data\"][\"Results\"]\n bssids, validity = self.proces_and_valid_batch_scan_result(\n results, scan_rt, event[\"data\"][KEY_RET],\n self.default_batch_scan_setting)\n self.log.info(\n \"Scan number: {}\\n Buckets: {}\\n BSSID: {}\".format(\n snumber, len(results), bssids))\n asserts.assert_true(bssids >= 1,\n \"Not able to fetch scan result\")\n if snumber == 1:\n self.log.info(\n \"Try to connect AP while waiting for event: {}\".format(\n event_name))\n asserts.assert_true(self.connect_to_reference_network(),\n NETWORK_ERROR)\n time.sleep(10) #wait for connection to be active\n asserts.assert_true(\n wutils.validate_connection(self.dut, self.ping_addr),\n \"Error, No internet connection for current network\")\n elif snumber == 3:\n self.log.info(\"Kicking PNO for reference network\")\n self.attenuators[ATTENUATOR].set_atten(90)\n elif snumber == 4:\n self.log.info(\"Bring back device for PNO connection\")\n current_network = self.dut.droid.wifiGetConnectionInfo()\n self.log.info(\"Current network: {}\".format(\n current_network))\n asserts.assert_true('network_id' in current_network,\n NETWORK_ID_ERROR)\n asserts.assert_true(\n current_network['network_id'] == -1,\n \"Device is still connected to network {}\".format(\n current_network[wutils.WifiEnums.SSID_KEY]))\n self.attenuators[ATTENUATOR].set_atten(0)\n time.sleep(\n 10\n ) #wait for connection to take place before waiting for scan result\n elif snumber == 6:\n self.log.info(\n \"Check connection through PNO for reference network\")\n current_network = self.dut.droid.wifiGetConnectionInfo()\n self.log.info(\"Current network: {}\".format(\n current_network))\n asserts.assert_true('network_id' in current_network,\n NETWORK_ID_ERROR)\n asserts.assert_true(current_network['network_id'] >= 0,\n NETWORK_ERROR)\n time.sleep(10) #wait for connection to be active\n asserts.assert_true(\n wutils.validate_connection(self.dut, self.ping_addr),\n \"Error, No internet connection for current network\")\n wutils.wifi_forget_network(self.dut,\n self.reference_networks[0][\"2g\"][\"SSID\"])\n except queue.Empty as error:\n raise AssertionError(\n \"Event did not triggered for batch scan {}\".format(error))\n finally:\n self.dut.droid.wifiScannerStopBackgroundScan(idx)\n self.dut.ed.clear_all_events()",
"def ping(self):\n self.last_seen_at = time.time()\n last_online = self.online\n self.online = True\n return last_online != self.online",
"def check_observation(self):\n last_obs_time = self.get_obs_time()\n last_obs_time_tz = last_obs_time.astimezone(\n ZoneInfo(self._config['db']['DisplayTimezone']))\n time_diff = datetime.now(tz=last_obs_time.tzinfo) - last_obs_time\n\n if int(time_diff.total_seconds() / 60) > \\\n int(self._config['observation']['Timeout']):\n if self._state['email_sent'] == 'False':\n if send_email(self._config['email'],\n 'env-logger: observation inactivity warning',\n 'No observations have been received in the env-logger '\n 'backend after {} (timeout {} minutes). Please check for '\n 'possible problems.'.format(last_obs_time_tz.isoformat(),\n self._config['observation']['Timeout'])):\n self._state['email_sent'] = 'True'\n else:\n self._state['email_sent'] = 'False'\n elif self._state['email_sent'] == 'True':\n send_email(self._config['email'],\n 'env-logger: observation received',\n 'An observation has been received at '\n f'{last_obs_time_tz.isoformat()}.')\n self._state['email_sent'] = 'False'",
"def data_association(imu_file, wifi_file):\n dr = dead_reckoning.deadReckoning(imu_file)\n distance = dr.displacment_from_axis(dr.ax)\n gyro = dr.used_data(dr.wz)\n imu_times = dr.used_data(dr.time)\n\n wifi_data = np.genfromtxt(wifi_file, delimiter=',')\n wifi_times = wifi_data[:,0]\n\n # get rid of data from after one of the two proccess ended\n if wifi_times[-1] > imu_times[-1]:\n out_of_time_index = wifi_times <= imu_times[-1]\n wifi_times = wifi_times[out_of_time_index]\n wifi_data = wifi_data[out_of_time_index, :]\n else:\n out_of_time_index = imu_times <= wifi_times[-1]\n imu_times = imu_times[out_of_time_index]\n distance = distance[out_of_time_index]\n gyro = gyro[out_of_time_index]\n\n # We imu data before the first wifi measurment\n # The other option is to have the first wifi measurment assosated with a 0 distance and 0 gyro\n # reading. I don't really know what option is better\n while wifi_times[0] < imu_times[0]:\n # delte the first wifi time stamp\n wifi_times = np.delete(wifi_times, 0)\n # delte the first row of wifi data\n wifi_data = np.delete(wifi_data, 0, 0)\n \n if wifi_times.size == 0:\n raise(\"Can not assoate WiFi times with IMU times\")\n\n # convert dBm to mW\n wifi_data_dbm = wifi_data[:,1:]\n wifi_data_mw = np.power(10, wifi_data_dbm/10.0)\n\n # create measurment data vector, it needs to be the same size as the wifi times\n measurment_data = np.empty([wifi_times.size, 2])\n\n # fill the first entries of measurment data with values from before the first wifi scan\n measurment_data[0,0] = np.sum(distance[imu_times <= wifi_times[0]])\n measurment_data[0,1] = np.mean(gyro[imu_times <= wifi_times[0]])\n measurment_data[0,1] = np.deg2rad(measurment_data[0,1])\n\n # now fill in the rest of the matrix\n for i in range(1, wifi_times.size):\n time_slices = (imu_times > wifi_times[i-1]) & (imu_times <= wifi_times[i])\n measurment_data[i,0] = np.sum(distance[time_slices])\n measurment_data[i,1] = np.mean(gyro[time_slices])\n\n return measurment_data, wifi_data_mw",
"def update_iemaccess(txn, entry):\n if entry[\"access_network\"] is None:\n return False\n ob = Observation(\n entry[\"access_station\"], entry[\"access_network\"], entry[\"cli_valid\"]\n )\n ob.load(txn)\n current = ob.data\n data = entry[\"data\"]\n logmsg = []\n if data.get(\"temperature_maximum\") is not None:\n climax = int(data[\"temperature_maximum\"])\n if climax != current[\"max_tmpf\"]:\n logmsg.append(f\"MaxT O:{current['max_tmpf']} N:{climax}\")\n current[\"max_tmpf\"] = climax\n\n if data.get(\"temperature_minimum\") is not None:\n climin = int(data[\"temperature_minimum\"])\n if climin != current[\"min_tmpf\"]:\n logmsg.append(f\"MinT O:{current['min_tmpf']} N:{climin}\")\n current[\"min_tmpf\"] = climin\n\n if data.get(\"precip_month\") is not None:\n val = data[\"precip_month\"]\n if val != current[\"pmonth\"]:\n logmsg.append(f\"PMonth O:{current['pmonth']} N:{val}\")\n current[\"pmonth\"] = val\n\n if data.get(\"precip_today\") is not None:\n val = data[\"precip_today\"]\n if val != current[\"pday\"]:\n logmsg.append(f\"PDay O:{current['pday']} N:{val}\")\n current[\"pday\"] = val\n\n for dkey, ikey in {\"snow_today\": \"snow\", \"snowdepth\": \"snowd\"}.items():\n if data.get(dkey) is not None:\n val = data[dkey]\n if current[ikey] is None or val != current[ikey]:\n logmsg.append(f\"{ikey} O:{current[ikey]} N:{val}\")\n current[ikey] = val\n\n if not logmsg:\n return True\n res = ob.save(txn, skip_current=True)\n LOG.warning(\n \"%s (%s) %s ob.save: %s\",\n entry[\"access_station\"],\n entry[\"cli_valid\"].strftime(\"%y%m%d\"),\n \",\".join(logmsg),\n res,\n )\n return res",
"def _update_info(self):\n\n if not self.success_init:\n return False\n\n _LOGGER.info(\"Scanning\")\n data = self.get_bt_smarthub_data()\n if not data:\n _LOGGER.warning(\"Error scanning devices\")\n return False\n\n clients = [client for client in data.values()]\n self.last_results = clients\n return True",
"def is_consolidating(symbol):\n \n try:\n df = web.DataReader(symbol, source, start, end)\n df['52wk High'] = df.High.rolling(250).max()\n if df['52wk High'][-1] == df['52wk High'][-20]:\n return True\n else:\n return False\n except:\n print('Ticker {} nedokazem nacitat.'.format(symbol))\n return False",
"def _match(self, single_traj, instance):\n locs = single_traj.groupby([constants.LATITUDE, constants.LONGITUDE, constants.TEMP]).size().reset_index(name=constants.COUNT)\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst = inst.groupby([constants.LATITUDE, constants.LONGITUDE,constants.TEMP]).size().reset_index(name=constants.COUNT + \"inst\")\n locs_inst = pd.merge(locs, inst, left_on=[constants.LATITUDE, constants.LONGITUDE, constants.TEMP],\n right_on=[constants.LATITUDE, constants.LONGITUDE,constants.TEMP])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n condition = locs_inst[constants.COUNT] >= locs_inst[constants.COUNT + \"inst\"]\n if len(locs_inst[condition].index) != len(inst.index):\n return 0\n else:\n return 1",
"def is_in_hotspot(self):\r\n in_hotspot = False\r\n hotspots = parser.parse_hotspot_bed()\r\n \r\n if hotspots.get(self.chrom): \r\n chrom_hotspots = hotspots[self.chrom]\r\n \r\n for interval in chrom_hotspots: \r\n if interval[0] <= self.pos <= interval[1]:\r\n in_hotspot = True\r\n break\r\n \r\n return in_hotspot",
"def _match(self, single_traj, instance):\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n locs_inst = pd.merge(single_traj[:2], inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) == len(inst.index):\n return 1\n else:\n return 0",
"def _update_info(self):\n data = self._get_data()\n if not data:\n return False\n\n active_clients = [client for client in data if client.state]\n self.last_results = active_clients\n\n _LOGGER.debug(\n \"%s Active clients: %s\",\n len(active_clients),\n \",\".join(f\"{client.mac} {client.name}\" for client in active_clients),\n )\n return True",
"def device_rep1_2_rep2(df_rep1, drop=False):\n df = df_rep1.copy().reset_index(drop=True)\n df = df.sort_values(TIME)\n df.loc[:,'ones'] = 1\n \n rows_changed = 0\n syn_acts = []\n if drop:\n to_delete_idx = []\n for dev in df['device'].unique():\n df_dev = df[df['device'] == dev]\n first_row = df_dev.iloc[0].copy()\n last_row = df_dev.iloc[len(df_dev)-1].copy()\n if not first_row['val']:\n to_delete_idx.append(first_row.name)\n if last_row['val']:\n to_delete_idx.append(last_row.name)\n df = df.drop(to_delete_idx)\n rows_changed = -len(to_delete_idx)\n else:\n # add values to things that are false\n first_timestamp = df['time'].iloc[0]\n last_timestamp = df['time'].iloc[len(df)-1]\n eps = pd.Timedelta('1ns')\n for dev in df['device'].unique():\n df_dev = df[df['device'] == dev]\n first_row = df_dev.iloc[0].copy()\n last_row = df_dev.iloc[len(df_dev)-1].copy()\n if not first_row['val']:\n first_row['val'] = True\n first_row['time'] = first_timestamp + eps\n syn_acts.append(first_row)\n df = df.append(first_row, ignore_index=True)\n if last_row['val']:\n last_row['val'] = False\n last_row['time'] = last_timestamp - eps\n syn_acts.append(last_row)\n df = df.append(last_row, ignore_index=True)\n eps += pd.Timedelta('1ns')\n rows_changed = len(syn_acts)\n \n df = df.reset_index(drop=True).sort_values(TIME)\n \n # seperate the 0to1 and 1to0 device changes\n df.loc[:,VAL] = df[VAL].astype(bool)\n df_start = df[df[VAL]] \n df_end = df[~df[VAL]] \n df_end = df_end.rename(columns={TIME: END_TIME})\n df_start = df_start.rename(columns={TIME: START_TIME})\n\n # ordered in time to index them and make a correspondence\n df_end.loc[:,'pairs'] = df_end.groupby([DEVICE])['ones'].apply(lambda x: x.cumsum())\n df_start.loc[:,'pairs'] = df_start.groupby([DEVICE])['ones'].apply(lambda x: x.cumsum()) \n \n \n df = pd.merge(df_start, df_end, on=['pairs', DEVICE])\n df = df.sort_values(START_TIME)\n \n # sanity checks \n diff = int((len(df_rep1)+rows_changed)/2) - len(df)\n assert diff == 0, 'input {} - {} == {} result. Somewhere two following events of the \\\n # same device had the same starting point and end point'.format(int(len(df_rep1)/2), len(df), diff)\n \n if drop:\n return df[[START_TIME, END_TIME, DEVICE]]\n else:\n return df[[START_TIME, END_TIME, DEVICE]], syn_acts",
"def _match(self, single_traj, instance):\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst.rename(columns={constants.FREQUENCY: constants.FREQUENCY + \"inst\"}, inplace=True)\n locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n condition1 = locs_inst[constants.FREQUENCY + \"inst\"] >= locs_inst[constants.FREQUENCY] - (\n locs_inst[constants.FREQUENCY] * self.tolerance)\n condition2 = locs_inst[constants.FREQUENCY + \"inst\"] <= locs_inst[constants.FREQUENCY] + (\n locs_inst[constants.FREQUENCY] * self.tolerance)\n if len(locs_inst[condition1 & condition2].index) != len(inst.index):\n return 0\n else:\n return 1",
"def filter_for_activity(self, window, ssd_thres, minimum_wb):\n data_wb = self.data.copy()\n applyOffsetRemove(data_wb)\n applyFilter(data_wb)\n window = window \n ssd_threshold = ssd_thres\n minimum = minimum_wb\n ranges_ww = runWalkingBoutDetection(\n data_wb,\n ssd_threshold,\n window,\n minimum,\n )\n try:\n segment = ranges_ww[0]\n lower = self.data.loc[segment[0],0]\n upper = self.data.loc[segment[1],0]\n self.data = self.data[(self.data[0]>lower) & (self.data[0]<=upper)]\n except:\n print(\"No movement detected\")"
]
| [
"0.59715676",
"0.5802782",
"0.57483566",
"0.56059116",
"0.5456994",
"0.5426937",
"0.5389302",
"0.5318968",
"0.53116345",
"0.52669287",
"0.5255155",
"0.5237892",
"0.523052",
"0.5199206",
"0.51953757",
"0.5144182",
"0.5136891",
"0.51364833",
"0.51262516",
"0.5119525",
"0.51193655",
"0.5114605",
"0.50713694",
"0.50346243",
"0.5031861",
"0.5024776",
"0.5014386",
"0.5005344",
"0.50053436",
"0.4999114"
]
| 0.62825596 | 0 |