query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4–10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Returns conservative prior bounds (pmin, pmax) given sampling times for each observatory. | def prior_bounds_from_data(npl, ts, rvs):
    nobs=len(ts)

    dts=[np.diff(t) for t in ts]
    min_dt=reduce(min, [np.min(dt) for dt in dts])

    tobss=[t[-1]-t[0] for t in ts]
    max_obst=reduce(max, tobss)

    min_dv=reduce(min, [np.min(np.abs(np.diff(rv))) for rv in rvs])
    maxspread=reduce(max, [np.max(rv)-np.min(rv) for rv in rvs])

    pmin=params.Parameters(nobs=nobs,npl=npl)
    pmax=params.Parameters(nobs=nobs,npl=npl)

    Vmin=[]
    Vmax=[]
    taumin=[]
    taumax=[]
    sigma0min=[]
    sigma0max=[]
    sigmamin=[]
    sigmamax=[]
    for t,rv in zip(ts, rvs):
        spread=np.max(rv) - np.min(rv)
        Vmin.append(np.min(rv) - spread)
        Vmax.append(np.max(rv) + spread)

        mindt=np.min(np.diff(t))
        mindv=np.min(np.abs(np.diff(rv)))
        T=t[-1] - t[0]

        taumin.append(mindt/2.0)
        taumax.append(T*2.0)
        sigma0min.append(mindv/2.0)
        sigma0max.append(2.0*np.std(rv))
        sigmamin.append(mindv/2.0)
        sigmamax.append(2.0*np.std(rv))

    pmin.V = np.array(Vmin)
    pmax.V = np.array(Vmax)
    pmin.tau = np.array(taumin)
    pmax.tau = np.array(taumax)
    pmin.sigma0 = np.array(sigma0min)
    pmax.sigma0 = np.array(sigma0max)
    pmin.sigma = np.array(sigmamin)
    pmax.sigma = np.array(sigmamax)

    if npl >= 1:
        pmin.n = 2.0*np.pi/(max_obst)
        pmax.n = 2.0*np.pi/(min_dt)

        pmin.chi = 0.0
        pmax.chi = 1.0

        pmin.e = 0.0
        pmax.e = 1.0

        pmin.omega = 0.0
        pmax.omega = 2.0*np.pi

        pmin.K = min_dv/2.0
        pmax.K = 2.0*maxspread

    return pmin, pmax | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up",
"def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def _get_observation_lower_bound(self):\n lower_bound = -self._get_observation_upper_bound()\n lower_bound[-7] = 0.0\n lower_bound[-2:] = [self.min_speed, self.min_side_speed]\n return lower_bound",
"def _init_optimizer_bounds(self):\n bounds = []\n for filt in self.filters:\n if filt.optimize_fc:\n bounds.append((np.log10(filt.min_fc), np.log10(filt.max_fc)))\n if filt.optimize_q:\n bounds.append((filt.min_q, filt.max_q))\n if filt.optimize_gain:\n bounds.append((filt.min_gain, filt.max_gain))\n return bounds",
"def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params",
"def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params",
"def _autobounds(self):\n bounds = {}\n\n def check(prop, compare, extreme, val):\n opp = min if compare is max else max\n bounds.setdefault(prop, val)\n bounds[prop] = opp(compare(bounds[prop], val), extreme)\n\n def bound_check(lat_lon):\n lat, lon = lat_lon\n check('max_lat', max, 90, lat)\n check('min_lat', min, -90, lat)\n check('max_lon', max, 180, lon)\n check('min_lon', min, -180, lon)\n\n lat_lons = [lat_lon for feature in self._features.values() for\n lat_lon in feature.lat_lons]\n if not lat_lons:\n lat_lons.append(self._default_lat_lon)\n for lat_lon in lat_lons:\n bound_check(lat_lon)\n\n return bounds",
"def initBoundedParams(bounds, sn=[]):\n hypinit = {\n 'cov': np.zeros(len(bounds)),\n 'lik': np.atleast_1d(np.log(sn)),\n 'mean': np.array([])\n }\n # Sample from a uniform distribution\n for idx, pair in enumerate(bounds):\n # Randomize only if bounds are specified\n if isinstance(pair, collections.Iterable):\n hypinit['cov'][idx] = np.random.uniform(pair[0], pair[1])\n # If no bounds, then keep default value always\n else:\n hypinit['cov'][idx] = pair\n return hypinit",
"def get_bounds():\n return [0.00], [1.00]",
"def _get_max_wt_all( # pylint:disable=too-many-arguments\n rectangle_lows: np.array,\n rectangle_ups: np.array,\n means: np.array,\n sampled: np.array,\n pooling_method: str = \"fro\",\n use_coef_var: bool = True,\n) -> int:\n max_uncertainty = -np.inf\n maxid = 0\n\n pooling_method = pooling_method.lower()\n\n for i in range(0, len(sampled)): # pylint:disable=consider-using-enumerate\n # Among the points x ∈ Pt ∪ Ut, the one with the largest wt(x)\n # is chosen as the next sample xt to be evaluated.\n # Intuitively, this rule biases the sampling towards exploring,\n # and thus improving the model for, the points most likely to be Pareto-optimal.\n if not sampled[i] == 1:\n # weight is the length of the diagonal of the uncertainty region\n if use_coef_var:\n uncer = np.divide(rectangle_ups[i, :] - rectangle_lows[i, :], means[i, :])\n else:\n uncer = rectangle_ups[i, :] - rectangle_lows[i, :]\n uncertainty = _pool(uncer, pooling_method)\n if uncertainty > max_uncertainty:\n max_uncertainty = uncertainty\n maxid = i\n\n return maxid",
"def get_params_bounds(self) -> np.array:\n pass",
"def p_prior(self):\n sampler = self.__sampler\n nwalkers = self.nwalkers\n pRanges = self.pRanges\n if sampler == \"EnsembleSampler\":\n p = [posRange(pRanges) for i in range(nwalkers)]\n elif sampler == \"PTSampler\":\n ntemps = self.ntemps\n p = np.zeros((ntemps, nwalkers, self.ndim))\n for loop_t in range(ntemps):\n for loop_w in range(nwalkers):\n p[loop_t, loop_w, :] = posRange(pRanges)\n return p",
"def compute_bounds(self):\n # Note: linear_constraints object has been been populated at this stage\n L_zero_var = []\n \n for constraint in self._linear_constraints.L_linear_constraints:\n lhs_string = constraint[0]\n rhs_string = constraint[1]\n if float(rhs_string)==0:\n #print \"rhs=0: forcing the variables to zero\"\n L_vars = re.split(r'[+-]',lhs_string)\n \n for var in L_vars:\n modform_var = var.strip()\n \n # forcing all the variables in this constraint to be zero\n self._linear_constraints.modform_space.D_PuLP_variables[modform_var] = pulp.LpVariable(modform_var, lowBound=0, upBound=0)\n #print \"var forced to zero: \", modform_var\n L_zero_var.append(modform_var)\n else: #if float(rhs)==0\n continue\n \n if len(L_zero_var)>0:\n print \"\\n####### Variables forced to zero (rhs = 0) ##########\"\n print \"variables forced to zero: \", set(L_zero_var)\n \n feasible_lc = flc.FeasibleLinearConstraints(self._linear_constraints)\n \n feasible_lc.get_feasible_linear_constraints()\n \n feasible_linear_constraints = feasible_lc.feasible_linear_constraints\n \n lp_solver = lps.LinearProgrammingSolver(feasible_linear_constraints)\n \n D_lower_bounds = {}; D_upper_bounds = {}\n \n for v in [self._linear_constraints.modform_space.D_PuLP_variables[k] for k in sorted(self._linear_constraints.modform_space.D_PuLP_variables.keys(), key=gbfunc.natural_keys)]:\n \n if str(v) in L_zero_var:\n D_lower_bounds[str(v)] = '0'\n D_upper_bounds[str(v)] = '0'\n continue\n #end if str(v) in L_zero_var\n \n objective_function_PuLP = v\n \n list_values_minimize = lp_solver.linear_programming_solver(objective_function_PuLP, pulp.LpMinimize)\n D_lower_bounds[str(v)] = \"%.3f\"%round(pulp.value(v),3)\n \n list_values_maximize = lp_solver.linear_programming_solver(objective_function_PuLP, pulp.LpMaximize)\n D_upper_bounds[str(v)] = \"%.3f\"%round(pulp.value(v),3)\n\n #end for v in ..\n\n return((D_lower_bounds, D_upper_bounds))",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def generate_initial_sample(pmin, pmax, ntemps, nwalkers):\n\n npl = pmin.npl\n nobs = pmin.nobs\n\n assert npl == pmax.npl, 'Number of planets must agree in prior bounds'\n assert nobs == pmax.nobs, 'Number of observations must agree in prior bounds'\n\n N = pmin.shape[-1]\n\n samps=params.Parameters(arr=np.zeros((ntemps, nwalkers, N)), nobs=nobs, npl=npl)\n\n V=samps.V\n tau=samps.tau\n sigma=samps.sigma\n sigma0=samps.sigma0\n for i in range(nobs):\n V[:,:,i] = nr.uniform(low=pmin.V[i], high=pmax.V[i], size=(ntemps, nwalkers))\n tau[:,:,i] = draw_logarithmic(low=pmin.tau[i], high=pmax.tau[i], size=(ntemps,nwalkers))\n sigma[:,:,i] = draw_logarithmic(low=pmin.sigma[i], high=pmax.sigma[i], size=(ntemps,nwalkers))\n sigma0[:,:,i] = draw_logarithmic(low=pmin.sigma[i], high=pmax.sigma[i], size=(ntemps, nwalkers))\n samps.V=np.squeeze(V)\n samps.tau = np.squeeze(tau)\n samps.sigma = np.squeeze(sigma)\n samps.sigma0 = np.squeeze(sigma0)\n\n if npl >= 1:\n samps.K = np.squeeze(draw_logarithmic(low=pmin.K[0], high=pmax.K[0], size=(ntemps, nwalkers, npl)))\n\n # Make sure that periods are increasing\n samps.n = np.squeeze(np.sort(draw_logarithmic(low=pmin.n, high=pmax.n, size=(ntemps,nwalkers,npl)))[:,:,::-1])\n\n samps.e = np.squeeze(nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers,npl)))\n samps.chi = np.squeeze(nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers,npl)))\n samps.omega = np.squeeze(nr.uniform(low=0.0, high=2.0*np.pi, size=(ntemps, nwalkers,npl)))\n\n return samps",
"def setup(self, proportion=False, maxmin=False):\n bounds = {}\n epsilons = {}\n\n # Maximize for super and minimize for Subinterval\n if maxmin:\n prob = pulp.LpProblem('SuperInterval LP', pulp.LpMaximize)\n else:\n prob = pulp.LpProblem('Max Subinterval LP', pulp.LpMinimize)\n\n # NOTE: Our LP requires each event to occur within a finite interval.\n # If the input LP does not have finite interval specified for all events, we want to set the setMakespan to MAX_FLOAT (infinity) so the LP works\n #\n # We do not want to run minimal network first because we are going to modify the contingent edges in LP, while some constraints in minimal network are obtained through contingent edges\n #\n # There might be better way to deal with this problem.\n # ##\n for (i, j) in self.stnu.edges():\n weight = self.stnu.get_edge_weight(i, j)\n if weight == float('inf'):\n self.stnu.update_edge_weight(i, j, MAX_FLOAT)\n\n # Store Original STN edges and objective variables for easy access. Not part of LP yet\n\n for i in self.stnu.nodes():\n bounds[(i, '+')] = pulp.LpVariable('t_%i_hi'%i, lowBound=0,\n upBound=self.stnu.get_edge_weight(0, i))\n\n lowbound = 0 if self.stnu.get_edge_weight(i, 0) == float('inf') else\\\n -self.stnu.get_edge_weight(i, 0)\n\n bounds[(i,'-')] = pulp.LpVariable('t_%i_lo'%i, lowBound=lowbound, upBound=None)\n\n self.add_constraint(bounds[(i, '-')] <= bounds[(i, '+')], prob)\n\n if i == 0:\n self.add_constraint(bounds[(i, '-')] == 0, prob)\n self.add_constraint(bounds[(i, '+')] == 0, prob)\n\n if i not in self.contingent_timepoints:\n self.add_constraint(bounds[(i, '-')] == bounds[(i, '+')], prob)\n\n if proportion:\n return (bounds, epsilons, prob)\n\n for (i, j) in self.constraints:\n if (i, j) in self.contingent_constraints:\n\n epsilons[(j, '+')] = pulp.LpVariable('eps_%i_hi' % j, lowBound=0, upBound=None)\n\n epsilons[(j, '-')] = pulp.LpVariable('eps_%i_lo' % j, lowBound=0, upBound=None)\n\n self.add_constraint(bounds[(j, '+')]-bounds[(i, '+')] ==\n self.stnu.get_edge_weight(i, j) - epsilons[(j,'+')], prob)\n self.add_constraint(bounds[(j, '-')]-bounds[(i, '-')] ==\n -self.stnu.get_edge_weight(j, i) + epsilons[(j, '-')], prob)\n\n else:\n # NOTE: We need to handle the infinite weight edges. Otherwise the LP would be infeasible\n upbound = MAX_FLOAT if self.stnu.get_edge_weight(i, j) == float('inf') \\\n else self.stnu.get_edge_weight(i, j)\n\n lowbound = MAX_FLOAT if self.stnu.get_edge_weight(j, i) == float('inf') \\\n else self.stnu.get_edge_weight(j, i)\n\n self.add_constraint(bounds[(j, '+')]-bounds[(i, '-')] <= upbound, prob)\n self.add_constraint(bounds[(i, '+')]-bounds[(j, '-')] <= lowbound, prob)\n\n return (bounds, epsilons, prob)",
"def getBounds(self, nStates, nParams):\n raise NotImplementedError(\n \"bounds have not been implemented for this Experiment\")",
"def _initialize_bounds(problem, bounds, get_bound, set_bound):\n for constraint in problem.constraints:\n root_expr = constraint.root_expr\n expr_bounds = Interval(constraint.lower_bound, constraint.upper_bound)\n if root_expr not in bounds:\n set_bound(root_expr, expr_bounds)\n else:\n existing_bounds = get_bound(root_expr)\n new_bounds = existing_bounds.intersect(expr_bounds)\n set_bound(root_expr, new_bounds)",
"def condition_bounds(self) -> Tuple[float, float]:\n raise NotImplementedError",
"def apply_bound(x, var_min, var_max):\n x.position = np.maximum(x.position, var_min)\n x.position = np.minimum(x.position, var_max)",
"def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))",
"def _determine_overset_interval(self):\n flag = np.empty((1,), dtype=np.int)\n gflag = np.empty((1,), dtype=np.int)\n\n flag[0] = min(ss.overset_update_interval for ss in self.solvers)\n self.comm.Allreduce(flag, gflag, MPI.MIN)\n self.overset_update_interval = gflag[0]\n self.printer.echo(\"Overset update interval = \", self.overset_update_interval)",
"def get_bounds(self, t_index):\n mean = self.get_mean(t_index)\n std = self.get_std()\n return mean - std, mean + std",
"def force_bounds(self):\n return self._min_force, self._max_force",
"def get_continuum_in_range(w,s,low_low, low_high, high_low, high_high,\n pmin=12,pmax=88, only_correct_negative_values = False,\n fit_degree=2, plot = True, verbose = True, warnings=True) :\n s_low = s[np.where((w <= low_low))] \n s_high = s[np.where((w >= high_high))] \n \n w_fit = w[np.where((w > low_low) & (w < high_high))]\n w_fit_low = w[np.where((w > low_low) & (w < low_high))]\n w_fit_high = w[np.where((w > high_low) & (w < high_high))]\n\n y_fit = s[np.where((w > low_low) & (w < high_high))]\n y_fit_low = s[np.where((w > low_low) & (w < low_high))]\n y_fit_high = s[np.where((w > high_low) & (w < high_high))]\n\n # Remove outliers\n median_y_fit_low = np.nanmedian(y_fit_low)\n for i in range(len(y_fit_low)):\n if np.nanpercentile(y_fit_low,2) > y_fit_low[i] or y_fit_low[i] > np.nanpercentile(y_fit_low,98): y_fit_low[i] =median_y_fit_low\n\n median_y_fit_high = np.nanmedian(y_fit_high)\n for i in range(len(y_fit_high)):\n if np.nanpercentile(y_fit_high,2) > y_fit_high[i] or y_fit_high[i] > np.nanpercentile(y_fit_high,98): y_fit_high[i] =median_y_fit_high\n \n w_fit_cont = np.concatenate((w_fit_low,w_fit_high))\n y_fit_cont = np.concatenate((y_fit_low,y_fit_high))\n \n try:\n fit = np.polyfit(w_fit_cont,y_fit_cont, fit_degree)\n yfit = np.poly1d(fit)\n y_fitted = yfit(w_fit)\n \n y_fitted_low = yfit(w_fit_low)\n median_low = np.nanmedian(y_fit_low-y_fitted_low)\n rms=[]\n for i in range(len(y_fit_low)):\n rms.append(y_fit_low[i]-y_fitted_low[i]-median_low)\n \n # rms=y_fit-y_fitted\n lowlimit=np.nanpercentile(rms,pmin)\n highlimit=np.nanpercentile(rms,pmax)\n \n corrected_s_ =copy.deepcopy(y_fit)\n for i in range(len(w_fit)):\n if w_fit[i] >= low_high and w_fit[i] <= high_low: # ONLY CORRECT in [low_high,high_low] \n if only_correct_negative_values:\n if y_fit[i] <= 0 : \n corrected_s_[i] = y_fitted[i]\n else:\n if y_fit[i]-y_fitted[i] <= lowlimit or y_fit[i]-y_fitted[i] >= highlimit: corrected_s_[i] = y_fitted[i]\n \n \n corrected_s = np.concatenate((s_low,corrected_s_))\n corrected_s = np.concatenate((corrected_s,s_high))\n \n \n if plot:\n ptitle = \"CorrectionBase in range \"+np.str(np.round(low_low,2))+\" - [ \"+np.str(np.round(low_high,2))+\" - \"+np.str(np.round(high_low,2))+\" ] - \"+np.str(np.round(high_high,2))\n plot_plot(w_fit,[y_fit,y_fitted,y_fitted-highlimit,y_fitted-lowlimit,corrected_s_], color=[\"r\",\"b\", \"black\",\"black\",\"green\"], alpha=[0.3,0.7,0.2,0.2,0.5],xmin=low_low-40, xmax=high_high+40,vlines=[low_low,low_high,high_low,high_high],ptitle=ptitle, ylabel=\"Normalized flux\") \n #plot_plot(w,[s,corrected_s],xmin=low_low-40, xmax=high_high+40,vlines=[low_low,low_high,high_low,high_high])\n except Exception:\n if warnings: print(\" Fitting the continuum failed! Nothing done.\")\n corrected_s = s\n\n return corrected_s",
"def Bounds_to_short_filter(chargeBounds,dischargeBounds):\n \n global time_treshold \n \n ## first Filter filters all the windows which are below a certain time treshold called treshold\n # done by a delete function which deletes along axis n\n bound_diff_start=np.diff(chargeBounds,axis=1)\n bound_diff_end=np.diff(dischargeBounds,axis=1)\n chargeBounds=np.delete(chargeBounds,np.where(bound_diff_start<time_treshold),axis=0)\n dischargeBounds=np.delete(dischargeBounds,np.where(bound_diff_end<time_treshold),axis=0)\n \n ## second filter stitches windows together if the interval between them is small enough\n #small number of boundaries no vectorizaton needed\n for i in range(chargeBounds.shape[0]):\n try:\n while chargeBounds[i+1,0]-chargeBounds[i,1]<time_treshold:\n chargeBounds[i,1]=chargeBounds[i+1,1]\n chargeBounds=np.delete(chargeBounds,i+1,axis=0)\n except:\n pass \n for i in range(dischargeBounds.shape[0]):\n try:\n while dischargeBounds[i+1,0]-dischargeBounds[i,1]<time_treshold:\n dischargeBounds[i,1]=dischargeBounds[i+1,1]\n dischargeBounds=np.delete(dischargeBounds,i+1,axis=0)\n except:\n pass\n \n return(chargeBounds,dischargeBounds)",
"def get_constraints(self, X_v, U_v, X_last_p, U_last_p):\n\n constraints = [\n # Boundary conditions:\n X_v[0:2, 0] == self.x_init[0:2],\n X_v[2:4, 0] == self.x_init[2:4],\n X_v[4, 0] == self.x_init[4],\n X_v[5, 0] == self.x_init[5],\n\n X_v[:, -1] == self.x_final,\n\n # State constraints:\n cvx.abs(X_v[4, :]) <= self.t_max,\n cvx.abs(X_v[5, :]) <= self.w_max,\n X_v[1, :] >= 0,\n\n # Control constraints:\n cvx.abs(U_v[0, :]) <= self.max_gimbal,\n U_v[1, :] >= self.T_min,\n U_v[1, :] <= self.T_max,\n ]\n return constraints"
]
| [
"0.61832595",
"0.57588667",
"0.5651322",
"0.564041",
"0.5634838",
"0.54706204",
"0.5409203",
"0.53503823",
"0.5342432",
"0.53363365",
"0.5333335",
"0.5330018",
"0.52807766",
"0.52719593",
"0.5266766",
"0.52606773",
"0.5253961",
"0.5230743",
"0.52064013",
"0.5176964",
"0.5153499",
"0.5130189",
"0.5104656",
"0.50730485",
"0.50556046",
"0.50474024",
"0.50414443",
"0.5041368",
"0.50392765",
"0.5030272"
]
| 0.63172805 | 0 |
Draw random numbers of shape ``size`` distributed flat in logarithm between ``low`` and ``high``. | def draw_logarithmic(low, high, size=1):
    if np.any(low <= 0.0) or np.any(high <= 0.0):
        raise ValueError('draw_logarithmic expects positive arguments')

    llow = np.log(low)
    lhigh = np.log(high)

    return np.exp(nr.uniform(low=llow, high=lhigh, size=size)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw(self, *size):\n max_value = self.alias.size(0)\n\n kk = self.alias.new(*size).random_(0, max_value).long().view(-1)\n prob = self.prob[kk]\n alias = self.alias[kk]\n # b is whether a random number is greater than q\n b = torch.bernoulli(prob).long()\n oq = kk.mul(b)\n oj = alias.mul(1 - b)\n\n return (oq + oj).view(size)",
"def uniform(low, high, size, dtype=np.float32):\n rng = np.random.default_rng(0)\n out = (high - low) * rng.random(size, dtype=dtype) + low\n return out",
"def random_floats(low, high=None, size=None):\n if high is None:\n high = low\n low = 0\n return low + (np.random.random(size) * (high - low))",
"def loguniform_dist(low, high, base=10):\n return ExponentiateDistribution(sp_uniform(low, high - low), base=base)",
"def generate_uniform_random_number(low=0.0, high=1.0, size=1):\n\n uniform_array = np.random.uniform(low, high, size)\n\n return uniform_array",
"def unifLogOneSample(self, low=0.01, hi=100., size=1):\n\n logSample = np.random.uniform(low=np.log(low), \\\n high=np.log(hi), \\\n size=size)\n\n thisSample = np.exp(logSample)\n return thisSample",
"def uniform_loggrid(xmin, xmax, npoints=100):\r\n return 10.0**np.linspace(np.log10(xmin), np.log10(xmax), npoints)",
"def gen_lognormal_data(mu, sigma, size, seed=None):\n np.random.seed(seed)\n\n data = np.random.lognormal(mean=mu, sigma=sigma, size=size)\n data = np.rint(data)\n\n return data",
"def LogSp(start,stop,num=50,**kwargs):\n assert 'base' not in kwargs, \"The base is irrelevant.\"\n return np.logspace(log10(start),log10(stop),num=num,base=10)",
"def uniform(\n self, low: float = 0, high: float = 1, size: Optional[Iterable[int]] = None\n ):\n _seed = self._seed() if callable(self._seed) else self._seed\n return _uniform(\n low=low,\n high=high,\n size=size,\n seed=_seed,\n device=self._device,\n handle=self._handle,\n )",
"def rvs(self, *args, **kwds):\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = np.logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n if np.all(scale == 0):\n return loc * np.ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n self._random_state = check_random_state(rndm)\n\n if isinstance(size, tuple):\n if len(size) > 0:\n raise ValueError(size)\n else:\n pass\n elif not isinstance(size, int):\n raise ValueError(size)\n\n low = np.log(args[0] - 0.4999)\n high = np.log(args[1] + 0.4999)\n size = self._random_state.randint(args[2], args[3] + 1)\n self._size = size\n vals = np.rint(\n np.exp(self._random_state.uniform(low=low, high=high, size=size))\n ).astype(int)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n vals = tuple([int(val) for val in vals])\n\n return vals",
"def sample_radii(size=1):\n interp_func = InterpolatedUnivariateSpline(m_grid, np.log(r_grid), k=1)\n return np.exp(interp_func(np.random.uniform(0, 1, size=size))) * u.kpc",
"def log_number_binary_trees(size):\n # This is equal to log of C_size, where C_n is the nth Catalan number.\n assert isinstance(size, int)\n assert size >= 0\n log = 0.0\n for k in range(2, size + 1):\n log += math.log(size + k) - math.log(k)\n return log",
"def size_rand_sample(size):\n\n assert size > 0\n @sinks\n def _dagpype_internal_fn_act(target):\n i = 0\n sample = None\n try:\n while True:\n e = (yield)\n sample = [e] * size if i == 0 else [e if random.randint(0, i) == 0 else ee for ee in sample]\n i += 1\n except GeneratorExit:\n if sample is not None:\n target.send(sample)\n target.close()\n\n return _dagpype_internal_fn_act",
"def uniform(self, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n return self.gen(uniform, size, low, high, ndim=ndim, dtype=dtype)",
"def log_uniform_sample(sample_range):\n log_min = np.log10(sample_range[0])\n log_max = np.log10(sample_range[1])\n u = np.random.rand()*(log_max-log_min) + log_min\n return np.power(10.0,u)",
"def Noise(self, eps, size):\n return eps * (np.random.uniform(size=size) * 2 - 1)",
"def logspace(start, stop, num=50, include_endpoint=True, base=10, dtype=None, constant=False):\n return Tensor(np.logspace(start, stop, num, include_endpoint, base, dtype), constant=constant)",
"def random_integers(self, size=None, low=0, high=1, ndim=None,\r\n dtype='int64'):\r\n return self.gen(random_integers, size, low, high, ndim=ndim,\r\n dtype=dtype)",
"def scale_noise(size: int) -> torch.Tensor:\n x = torch.FloatTensor(np.random.normal(loc=0.0, scale=1.0, size=size))\n\n return x.sign().mul(x.abs().sqrt())",
"def log_uniform_sample(sample_size,\n lambda_parameters):\n log_lower, log_upper = lambda_parameters\n ens_size = log_lower.shape[0]\n lambdas_dim = log_lower.shape[1]\n\n log_lower_ = tf.expand_dims(log_lower, 1) # (ens_size, 1, lambdas_dim)\n log_upper_ = tf.expand_dims(log_upper, 1) # (ens_size, 1, lambdas_dim)\n\n u = tf.random.uniform(shape=(ens_size, sample_size, lambdas_dim))\n return tf.exp((log_upper_-log_lower_) * u + log_lower_)",
"def _sample_loguniform_real_point(\n self, dimension: Real, below_points: numpy.ndarray, above_points: numpy.ndarray\n ) -> numpy.ndarray:\n return self._sample_real_point(\n dimension, below_points, above_points, is_log=True\n )",
"def mutate(self, size):\n rand = random.random()\n if rand <= 0.5:\n print u\"changing colour\"\n idx = random.randrange(0, 4)\n value = random.randrange(0, 256)\n colour = list(self.colour)\n colour[idx] = value\n self.colour = tuple(colour)\n else:\n print u\"changing point\"\n idx = random.randrange(0, len(self.points))\n point = generate_point(size[0], size[1])\n self.points[idx] = point",
"def plot_perc_scaling(q, sizes=np.logspace(1,2,50,dtype=int)):\n res = []\n for size in sizes:\n perc = Percolation(size, q)\n if test_perc(perc):\n num_filled = perc.num_wet() - size\n res.append((size, size**2, num_filled))\n\n sizes, cells, filled = zip(*res)\n\n options = dict(linestyle='dashed', color='gray', alpha=0.7)\n\n fig, ax = plt.subplots()\n ax.plot(sizes, cells, label='d=2', **options)\n ax.plot(sizes, filled, 'k.', label='filled')\n ax.plot(sizes, sizes, label='d=1', **options)\n\n decorate( xlabel = 'Array Size',\n ylabel = 'Cell Count',\n xscale = 'log', xlim = [9, 110],\n yscale = 'log', ylim = [9, 20000],\n loc = 'upper left')\n plt.show()\n\n for ys in [cells, filled, sizes]:\n params = linregress(np.log(sizes), np.log(ys))\n print('Slope of lines:\\n', params[0])",
"def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)",
"def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)",
"def _generate_random_vector(size):\n return np.random.uniform(-0.1, 0.1, size)",
"def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result",
"def log10(tensor):\n return log(tensor, base=10)",
"def rand_sample(self, high, size=None, replace=True):\n\n a = np.arange(high)\n sample = np.random.choice(a, size=size, replace=replace)\n return sample"
]
| [
"0.62706465",
"0.6168546",
"0.61472845",
"0.6110595",
"0.6093453",
"0.5831724",
"0.5729024",
"0.56191534",
"0.5603818",
"0.5594131",
"0.5565909",
"0.55658317",
"0.55059993",
"0.5449366",
"0.54214203",
"0.53895634",
"0.5368529",
"0.5361834",
"0.53607714",
"0.5322366",
"0.5300247",
"0.52775896",
"0.52543724",
"0.5252881",
"0.52343005",
"0.52343005",
"0.5234215",
"0.5226798",
"0.51492935",
"0.51340044"
]
| 0.78491837 | 0 |
Generates an initial sample of parameters drawn uniformly from the prior. | def generate_initial_sample(pmin, pmax, ntemps, nwalkers):
    npl = pmin.npl
    nobs = pmin.nobs

    assert npl == pmax.npl, 'Number of planets must agree in prior bounds'
    assert nobs == pmax.nobs, 'Number of observations must agree in prior bounds'

    N = pmin.shape[-1]

    samps=params.Parameters(arr=np.zeros((ntemps, nwalkers, N)), nobs=nobs, npl=npl)

    V=samps.V
    tau=samps.tau
    sigma=samps.sigma
    sigma0=samps.sigma0
    for i in range(nobs):
        V[:,:,i] = nr.uniform(low=pmin.V[i], high=pmax.V[i], size=(ntemps, nwalkers))
        tau[:,:,i] = draw_logarithmic(low=pmin.tau[i], high=pmax.tau[i], size=(ntemps,nwalkers))
        sigma[:,:,i] = draw_logarithmic(low=pmin.sigma[i], high=pmax.sigma[i], size=(ntemps,nwalkers))
        sigma0[:,:,i] = draw_logarithmic(low=pmin.sigma[i], high=pmax.sigma[i], size=(ntemps, nwalkers))
    samps.V=np.squeeze(V)
    samps.tau = np.squeeze(tau)
    samps.sigma = np.squeeze(sigma)
    samps.sigma0 = np.squeeze(sigma0)

    if npl >= 1:
        samps.K = np.squeeze(draw_logarithmic(low=pmin.K[0], high=pmax.K[0], size=(ntemps, nwalkers, npl)))

        # Make sure that periods are increasing
        samps.n = np.squeeze(np.sort(draw_logarithmic(low=pmin.n, high=pmax.n, size=(ntemps,nwalkers,npl)))[:,:,::-1])

        samps.e = np.squeeze(nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers,npl)))
        samps.chi = np.squeeze(nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers,npl)))
        samps.omega = np.squeeze(nr.uniform(low=0.0, high=2.0*np.pi, size=(ntemps, nwalkers,npl)))

    return samps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_prior(size):\n return torch.randn(size)",
"def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)",
"def WeightInitializer():\n return np.random.uniform(-1, 1)",
"def randomize(self):\r\n # first take care of all parameters (from N(0,1))\r\n x = self._get_params_transformed()\r\n x = np.random.randn(x.size)\r\n self._set_params_transformed(x)\r\n # now draw from prior where possible\r\n x = self._get_params()\r\n if self.priors is not None:\r\n [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]\r\n self._set_params(x)\r\n self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)\r",
"def priors_sample(self):\n \n theta_star = np.zeros(self.n_params)\n\n for i in xrange(self.n_params): \n np.random.seed() \n theta_star[i] = self.param_obj.prior()[i].rvs(size=1)[0]\n\n return theta_star",
"def initial_sampling(self, params):\n i = params\n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n while rho > self.eps0: \n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n data_list = [np.int(i)]\n\n for i_param in xrange(self.n_params): \n data_list.append(theta_star[i_param])\n data_list.append(1./np.float(self.N))\n data_list.append(rho)\n\treturn np.array(data_list)",
"def generate_parameters(self):\n self.parameters = np.zeros(self.D)\n for l in range(self.D):\n if self.p_l[l] >= np.random.uniform(0,1):\n self.parameters[l] = 1",
"def random_init(self, shape):\n return np.random.randn(shape[0],shape[1])*0.01",
"def sample_from_prior(self):\n raise NotImplementedError",
"def _construct_sample_from_prior(self):\n z_sym = T.matrix()\n x_sym = T.matrix()\n irs = self.ir_steps\n oputs = [self.obs_transform(self.s0)]\n oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])\n _, hi_zmuv = self._construct_zmuv_samples(x_sym, 1)\n sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \\\n givens={ self.z: z_sym, \\\n self.x_in: T.zeros_like(x_sym), \\\n self.x_out: T.zeros_like(x_sym), \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.scan_updates)\n def prior_sampler(samp_count):\n x_samps = to_fX( np.zeros((samp_count, self.obs_dim)) )\n old_switch = self.train_switch.get_value(borrow=False)\n # set model to generation mode\n self.set_train_switch(switch_val=0.0)\n z_samps = to_fX( npr.randn(samp_count, self.z_dim) )\n model_samps = sample_func(z_samps, x_samps)\n # set model back to either training or generation mode\n self.set_train_switch(switch_val=old_switch)\n return model_samps\n return prior_sampler",
"def _sample_schechter(x0, alpha, x_min, size=100, max_iter=1000):\n out = []\n n = 0\n num_iter = 0\n while (n<size) & (num_iter<max_iter):\n x = np.random.gamma(scale=x0, shape=alpha+2, size=size)\n x = x[x>x_min]\n u = np.random.uniform(size=x.size)\n x = x[u<x_min/x]\n out.append(x)\n n+=x.size\n num_iter += 1\n\n if num_iter >= max_iter:\n msg = (\"The maximum number of iterations reached.\",\n \"Random variates may not be representitive.\",\n \"Try increasing `max_iter`.\")\n print(msg)\n\n return np.concatenate(out)[:size]",
"def initial_x():\n\n # RANDOMLY GENERATES the INITIAL VALUES of the independent variables:\n temp = [uniform(1, cfg.n) for i in range(cfg.n)]\n\n return np.array(temp, dtype=np.float_)",
"def random_weight_init(_p: Perceptron):\n\n _p.weights = [rd.choice([1-rd.random(), -1+rd.random()]) for _ in range(_p.input_size)]",
"def sample_from_prior(self, *args, **kwargs):\n pass",
"def sample_parameters_prior(self, n_samples, random_seed=None):\n\n if random_seed is not None:\n np.random.seed(random_seed)\n samples = []\n samples.append(self.var_noise.sample_from_prior(n_samples))\n samples.append(self.mean.sample_from_prior(n_samples))\n samples.append(self.kernel.sample_parameters(n_samples))\n\n return np.concatenate(samples, 1)",
"def init_bias_params(self, rng):\n return self.bias_prior.sample(rng)",
"def sample_uniform(instance, params):\n subpop = np.random.randint(params['N'])\n return sample_from_subpop(instance, params, subpop)",
"def sample_GP_prior(x_test, mean_func, cov_func, kernel_params, \n\t\t\t\t\tseed=42, n_samples=5):\n m = mean_func(x_test)\n k = cov_func(x_test, x_test, *kernel_params)\n prng = np.random.RandomState(int(seed))\n sample = prng.multivariate_normal(m, k, n_samples)\n return sample",
"def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha",
"def sample_from_prior(self, n_samples):\n pass",
"def prior_sample(self):\n pass",
"def prior_sample(self, bn):\n x = np.zeros(3)\n\n # first joint prob\n random_choice = np.random.choice(bn[0], 1, bn[0].all(), bn[0])\n x[0] = random_choice[0]\n\n # Second Joint Prob\n if x[0] == 0.1:\n random_choice = np.random.choice(bn[1][0], 1, bn[1][0].all(), bn[1][0])\n x[1] = random_choice\n elif x[0] == 0.9:\n random_choice = np.random.choice(bn[1][1], 1, bn[1][1].all(), bn[1][1])\n x[1] = random_choice\n\n # Third Joint Prob\n if random_choice[0] == 0.8 or random_choice == 0.1:\n random_choice = np.random.choice(bn[2][0], 1, bn[2][0].all(), bn[2][0])\n x[2] = random_choice\n else:\n random_choice = np.random.choice(bn[2][1], 1, bn[2][1].all(), bn[2][1])\n x[2] = random_choice\n return x",
"def get_posterior_sample(self):\n total_tries = self.prior_success + self.prior_failure\n prob_success = self.prior_success / total_tries\n # np.random.binomial采样出来的是二项分布的均值, 即正面朝上的次数,所以要除以N\n boot_sample = np.random.binomial(total_tries, prob_success) / total_tries\n return boot_sample",
"def xavier_init(dims, uniform=True):\n n_inputs,n_outputs = dims\n if uniform:\n # 6 was used in the paper.\n init_range = np.sqrt(6.0 / (n_inputs + n_outputs))\n return tf.random_uniform(shape=dims,minval=-init_range, maxval=init_range)\n else:\n # 3 gives us approximately the same limits as above since this repicks\n # values greater than 2 standard deviations from the mean.\n stddev = np.sqrt(3.0 / (n_inputs + n_outputs))\n return tf.truncated_normal(shape=dims,stddev=stddev)",
"def _sample_likelihood_params(self):\r\n self._sample_omega()\r\n self._sample_beta()\r\n self._sample_r()",
"def build_initial(domain):\n return random_candidate_float(domain)",
"def generate_samples(self):\n self.analytic_probability()",
"def custom_init(init_params, seed=0):\n import numpy as np\n new_params = []\n rng = jax.random.PRNGKey(seed)\n i = 0\n number_layers = len([0 for l1 in init_params if len(l1) != 0])\n for l1 in init_params:\n if (len(l1)) == 0: new_params.append(()); continue\n new_l1 = []\n for l2 in l1:\n if len(l2.shape) == 1:\n # Zero init biases\n new_l1.append(jnp.zeros_like(l2))\n else:\n n = max(l2.shape)\n first = int(i == 0)\n last = int(i == number_layers - 1)\n mid = int((i != 0) * (i != number_layers - 1))\n mid *= i\n\n std = 1.0 / np.sqrt(n)\n std *= 2.2 * first + 0.58 * mid + n * last\n\n if std == 0:\n raise NotImplementedError(\"Wrong dimensions for MLP\")\n\n new_l1.append(jax.random.normal(rng, l2.shape) * std)\n rng += 1\n i += 1\n\n new_params.append(new_l1)\n\n return new_params",
"def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)",
"def sample_from_prior(self, n_samples):\n\n p0 = self.min + self.rng.rand(n_samples) * (self.max - self.min)\n return p0[:, np.newaxis]"
]
| [
"0.73352677",
"0.7266806",
"0.7135405",
"0.7086701",
"0.7080881",
"0.7004341",
"0.6950765",
"0.68322116",
"0.6757486",
"0.67500263",
"0.6737803",
"0.67059195",
"0.6700799",
"0.66987205",
"0.6639269",
"0.6631884",
"0.66122985",
"0.65678334",
"0.65501994",
"0.6534233",
"0.6531156",
"0.64952",
"0.64892715",
"0.644014",
"0.6416567",
"0.6403717",
"0.6390294",
"0.6388079",
"0.6383492",
"0.63552827"
]
| 0.74383086 | 0 |
Returns the average of the quantiles of the data residuals over the posterior samples in psamples. The quantiles over multiple observatories are flattened into one array. | def posterior_data_mean_quantiles(ts, rvs, psamples):
    Nobs = len(ts)
    Nsamples = psamples.shape[0]
    Npl = (psamples.shape[-1] - 4*Nobs)/5

    psamples=params.Parameters(arr=psamples, npl=Npl, nobs=Nobs)

    ll=LogLikelihood(ts, rvs)

    qs=np.zeros(sum([len(t) for t in ts]))
    for psample in psamples:
        one_qs=[]
        for t, rv, V, sigma0, tau, sigma in zip(ts, rvs, psample.V, psample.sigma0, psample.tau, psample.sigma):
            one_qs.append(correlated_gaussian_quantiles(ll.residuals(t, rv, psample),
                                                        V*np.ones_like(t),
                                                        generate_covariance(t, sigma0, sigma, tau)))
        qs += np.array(one_qs).flatten()/Nsamples

    return qs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reconstructions_mean(self):\n self.assert_sampled()\n return [[j.mean().numpy() for j in i] for i in self._reconstructions]",
"def results_psavg_sims():\n posterior_means = [[1.18040327516, 7.55106444832, 3.27420103073, 3.51998795534, 0.67212630002],\n [0.619197296326, 6.49420626987, 2.22495505139, 2.27682390376, 0.678172183554],\n [0.856628471666, 5.94732402905, 3.97580346111, 3.85788708662, 0.690090617623],\n [0.774906025167, 7.34275742443, 2.69729821931, 2.97994334746, 0.663015258594]]\n\n\n sgr1900_results.results_psavg_sims(posterior_means, [5,6,8,12], \"sgr1806\")\n\n return",
"def mpi_mean(data):\n s_local = data.sum(0)\n m = np.empty_like(s_local)\n mpi.COMM.Allreduce(s_local, m)\n num_data = mpi.COMM.allreduce(data.shape[0])\n m /= float(num_data)\n return m",
"def mean(posteriors):\n\tcontainer = [[0]*100]*len(posteriors)\n\tfor index, posterior in enumerate(posteriors):\n\t\tfor probability in posterior:\n\t\t\ttopic = probability[0]\n\t\t\tprob = probability[1]\n\t\t\tcontainer[index][topic] = prob\n\ta = np.array(container)\n\treturn a.mean(axis=0)",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def representations_mean(self):\n self.assert_sampled()\n return [z.mean().numpy() for z in self.representations]",
"def _calc_res_normal_quantile(self):\n\n res = self._model.fit()\n plotting_position = lmstats.calc_plotting_position(res.resid, a=0.375)\n dist = stats.norm()\n normal_quantile = dist.ppf(plotting_position)\n\n quantile_series = pd.Series(index=res.resid.index, data=normal_quantile, name='Normal quantile of residual')\n\n return quantile_series",
"def samples_median(samples):\n return [np.median(s) for s in samples.T]",
"def samples_median(samples):\n return [np.median(s) for s in samples.T]",
"def get_normalized_regret(evaluator_list):\n values = collections.defaultdict(list)\n for e in evaluator_list:\n values[e.task_name].append(e.get_normalized_regret())\n return _map(np.mean, values), _map(np.std, values)",
"def bootstrap(data, iterations=10000):\n\n boot_mean = []\n\n for n in range(0, iterations):\n\n boot = resample(data, replace=True, n_samples=None,\n random_state=None)\n\n boot_mean.append(np.mean(boot))\n\n final_mean = np.mean(boot_mean)\n\n final_std = np.std(boot_mean, dtype=np.float64)\n\n return final_mean, final_std",
"def _compute_samples(self, samples):\n return samples",
"def trimean(data):\n p_25, p_50, p_75 = percentile(data, [25, 50, 75], axis=0)\n\n return (p_25 + 2 * p_50 + p_75) / 4",
"def get_meanrates(self):\n return np.asarray([ n.meanrate for n in self.alln.values() ])",
"def test_get_per_sample_average_diversities(self):\r\n # test that it extracts the correct max depth if depth==None\r\n exp_depth = 910\r\n exp_rare_mat = array([2.73645965, 2.20813124, 2.88191683,\r\n 2.78969155, 3.10064886, 3.08441138])\r\n exp_sids = ['Sam1', 'Sam2', 'Sam3', 'Sam4', 'Sam5', 'Sam6']\r\n exp = {'Sam1': 2.736459655,\r\n 'Sam2': 2.2081312350000002,\r\n 'Sam3': 2.8819168300000002,\r\n 'Sam4': 2.7896915474999999,\r\n 'Sam5': 3.1006488600000002,\r\n 'Sam6': 3.0844113799999997}\r\n obs = get_per_sample_average_diversities(self.rarefaction_data, None)\r\n # check that values are the same\r\n for k, v in exp.iteritems():\r\n assert_almost_equal(obs[k], v)\r\n # check that keys are the same\r\n self.assertEqual(obs.keys(), exp.keys())\r\n # test when depth is specified\r\n depth = 850\r\n exp = {'Sam1': 3.32916466,\r\n 'Sam2': nan,\r\n 'Sam3': nan,\r\n 'Sam4': 2.2746077633333335,\r\n 'Sam5': 3.0135700166666664,\r\n 'Sam6': 2.1973854533333337}\r\n obs = get_per_sample_average_diversities(self.rarefaction_data, depth)\r\n # check that values are the same\r\n for k, v in exp.iteritems():\r\n assert_almost_equal(obs[k], v)\r\n # check that keys are the same\r\n self.assertItemsEqual(obs.keys(), exp.keys())",
"def pw_rmsd(mols):\n m = len(mols)\n k = 0\n pw = []\n for mol1 in mols:\n k += 1\n if k > m:\n break\n for i in range(k, m):\n mol2 = mols[i]\n pw.append(rmsd.rmsd(mol1, mol2))\n ave_rmsd = np.mean(pw)\n return ave_rmsd",
"def calculate_truncated_energies(self) -> np.ndarray:\n truncated_energies = []\n for kernel_eigenvector in self.kernel_eigenvectors_:\n truncated_energy = (\n np.linalg.norm(\n kernel_eigenvector.T @ self.jointly_smooth_functions, axis=0\n )\n ** 2\n )\n truncated_energies.append(truncated_energy)\n return np.array(truncated_energies)",
"def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()",
"def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()",
"def spectral_data(spectra):\n weights = np.concatenate([ s.ivar for s in spectra ])\n flux = np.concatenate([ s.flux for s in spectra ])\n wflux = weights * flux\n return (weights, flux, wflux)",
"def get_qual_stats(qual_bins, score_min):\r\n\r\n ave_bins = []\r\n std_dev_bins = []\r\n total_bases_bins = []\r\n\r\n found_first_poor_qual_pos = False\r\n\r\n suggested_trunc_pos = None\r\n\r\n for base_position in qual_bins:\r\n\r\n total_bases_bins.append(len(base_position))\r\n\r\n std_dev_bins.append(std(base_position))\r\n\r\n ave_bins.append(average(base_position))\r\n\r\n if not found_first_poor_qual_pos:\r\n if average(base_position) < score_min:\r\n suggested_trunc_pos = qual_bins.index(base_position)\r\n found_first_poor_qual_pos = True\r\n\r\n return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos",
"def approximate(inp,w_len):\n\t\t\n\t\top = []\n\t\t\n\t\tfor i in range(0,len(inp),w_len):\n\t\t\n\t\t\top.append(np.mean(inp[i:i+w_len]))\n\t\t\t\n\t\treturn np.array(op)",
"def _process_quantiles(x, dim):\r\n x = np.asarray(x, dtype=float)\r\n\r\n if x.ndim == 0:\r\n x = x[np.newaxis]\r\n elif x.ndim == 1:\r\n if dim == 1:\r\n x = x[:, np.newaxis]\r\n else:\r\n x = x[np.newaxis, :]\r\n\r\n return x",
"def normalize_estimates(est_np, mix_np):\n mix_max = np.max(np.abs(mix_np))\n return np.stack([est * mix_max / np.max(np.abs(est)) for est in est_np])",
"def _quantiles(self):\n\n trials = []\n for trial, state in self._trial_state.items():\n if state.last_score is not None and not trial.is_finished():\n trials.append(trial)\n trials.sort(key=lambda t: self._trial_state[t].last_score)\n\n if len(trials) <= 1:\n return [], []\n else:\n num_trials_in_quantile = int(\n math.ceil(len(trials) * self._quantile_fraction))\n if num_trials_in_quantile > len(trials) / 2:\n num_trials_in_quantile = int(math.floor(len(trials) / 2))\n return (trials[:num_trials_in_quantile],\n trials[-num_trials_in_quantile:])",
"def _collect_params(self) -> np.ndarray:\n res = np.array([0.]*(self.dimensions))\n res[0] = self.model.rbf.variance\n res[1:-1] = self.model.rbf.lengthscale\n res[-1] = self.model.Gaussian_noise.variance\n return res",
"def summarize(dataset):\n summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]\n\n return summaries",
"def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res",
"def w_estimates(self):\n return np.copy(self._w_values)",
"def w_estimates(self):\n return np.copy(self._w_values)"
]
| [
"0.5438853",
"0.53853667",
"0.513857",
"0.51143295",
"0.5108789",
"0.50843376",
"0.50440335",
"0.49840745",
"0.49840745",
"0.49546733",
"0.49069557",
"0.48922843",
"0.48532405",
"0.48440713",
"0.4824196",
"0.4818035",
"0.4816973",
"0.4809339",
"0.47977382",
"0.4781622",
"0.47699693",
"0.4761144",
"0.47599125",
"0.47588083",
"0.47487745",
"0.47471532",
"0.47426695",
"0.4739796",
"0.4730298",
"0.4730298"
]
| 0.62415504 | 0 |
Make synthetic precision matrix and empirical covariance matrix. | def make_synthetic_matrix(n_features, n_samples, sparsity=.98, random_state=0):
    prng = check_random_state(random_state)
    prec = make_sparse_spd_matrix(n_features, alpha=sparsity,
                                  smallest_coef=.4, largest_coef=.7,
                                  random_state=prng)
    cov = linalg.inv(prec)
    d = np.sqrt(np.diag(cov))
    cov /= d
    cov /= d[:, np.newaxis]
    prec *= d
    prec *= d[:, np.newaxis]

    X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
    X -= X.mean(axis=0)
    X /= X.std(axis=0)

    # Estimate the covariance
    emp_cov = np.dot(X.T, X) / n_samples

    return emp_cov, prec | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx",
"def _build_precomputed_data(self):\n if self.num_sampled == 0:\n self._K_chol = numpy.array([])\n self._K_inv_y = numpy.array([])\n else:\n covariance_matrix = python_utils.build_covariance_matrix(\n self._covariance,\n self._points_sampled,\n noise_variance=self._points_sampled_noise_variance,\n )\n\n C = self._build_integrated_term_maxtrix(self._covariance, self._points_sampled)\n self._K_Inv = numpy.linalg.inv(covariance_matrix)\n self._K_C = numpy.empty((covariance_matrix.shape[0],covariance_matrix.shape[0]))\n self._K_C = numpy.multiply(C, self._K_Inv)\n self._K_chol = scipy.linalg.cho_factor(covariance_matrix, lower=True, overwrite_a=True)\n self._K_inv_y = scipy.linalg.cho_solve(self._K_chol, self._points_sampled_value)\n self._marginal_mean_mat = self._build_marginal_matrix_mean()\n self._marginal_mean_mat_gradient = self._build_marginal_matrix_mean_gradient()",
"def getCovarianceNoiseMatrix(self):\n return np.dot ( self.getB().T, self.getB() )",
"def build_covariance_matrix (numpy_cloud, reduce_by_center_of_mass=True ):\r\n\r\n # build a sum over all points\r\n sum_xyz = np.sum (numpy_cloud, axis=0 )\r\n\r\n # and normalize it to get center of mass\r\n mass_center = sum_xyz / numpy_cloud.shape[0]\r\n\r\n # reduce point cloud by center of mass\r\n if (reduce_by_center_of_mass ):\r\n numpy_cloud_reduced = np.subtract (numpy_cloud[:, 0:3], mass_center )\r\n else:\r\n numpy_cloud_reduced = numpy_cloud.copy ()\r\n\r\n # build ATA matrix\r\n a_transposed_a = np.zeros ((3, 3 ))\r\n\r\n for point in numpy_cloud_reduced:\r\n a_transposed_a[0, 0] = a_transposed_a[0, 0] + np.float_power(point[0], 2 )\r\n a_transposed_a[0, 1] = a_transposed_a[0, 1] + point[0] * point[1]\r\n a_transposed_a[0, 2] = a_transposed_a[0, 2] + point[0] * point[2]\r\n\r\n a_transposed_a[1, 0] = a_transposed_a[1, 0] + point[0] * point[1]\r\n a_transposed_a[1, 1] = a_transposed_a[1, 1] + np.float_power(point[1], 2 )\r\n a_transposed_a[1, 2] = a_transposed_a[1, 2] + point[1] * point[2]\r\n\r\n a_transposed_a[2, 0] = a_transposed_a[2, 0] + point[0] * point[2]\r\n a_transposed_a[2, 1] = a_transposed_a[2, 1] + point[2] * point[1]\r\n a_transposed_a[2, 2] = a_transposed_a[2, 2] + np.float_power(point[2], 2 )\r\n\r\n return a_transposed_a, mass_center",
"def _construct_mvn(self,\n loc_shape,\n diag_shape,\n update_shape,\n dtype=np.float32):\n if loc_shape is not None:\n loc = np.random.normal(size=loc_shape).astype(dtype)\n else:\n loc = None\n cov_diag_factor = np.random.uniform(\n low=1., high=2., size=diag_shape).astype(dtype)\n cov_perturb_factor = np.random.normal(\n size=update_shape).astype(dtype)\n return mvdplrc.MultivariateNormalDiagPlusLowRankCovariance(\n loc=loc,\n cov_diag_factor=cov_diag_factor,\n cov_perturb_factor=cov_perturb_factor)",
"def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix",
"def _getCovMat(self, cov_expr):\n # store the expression\n self.expr = cov_expr\n # create a PETSC matrix for cov_mat\n cov_mat = PETSc.Mat().create()\n cov_mat.setType('aij')\n cov_mat.setSizes(self.domain.getNodes(), self.domain.getNodes())\n cov_mat.setUp()\n\n # scalar valued function is evaluated in this variable\n cov_ij = np.empty((1), dtype=float)\n # the points to evalute the expression\n xycor = np.empty((4), dtype=float)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n # Loop through global nodes and build the matrix for i < j because of\n # symmetric nature.\n for node_i in range(0, self.domain.getNodes()):\n # global node node_i\n for node_j in range(node_i, self.domain.getNodes()):\n # global node node_j\n temp_cov_ij = 0\n for elem_i in self.node_to_elem[node_i]:\n # elem_i : element attached to node_i\n # x1 : x co-ordinate of the centroid of element elem_i\n x1 = self.c_centroid_array[elem_i].x()\n # y1 : x co-ordinate of the centroid of element elem_i\n y1 = self.c_centroid_array[elem_i].y()\n for elem_j in self.node_to_elem[node_j]:\n # elem_j : element attached to node_j\n # x2 : x co-ordinate for the centroid of element elem_j\n x2 = self.c_centroid_array[elem_j].x()\n # y2 : y co-ordinate for the centroid of element elem_j\n y2 = self.c_centroid_array[elem_j].y()\n xycor[0] = x1\n xycor[1] = x2\n xycor[2] = y1\n xycor[3] = y2\n # evaluate the expression\n cov_expr.eval(cov_ij, xycor)\n if cov_ij[0] > 0:\n temp_cov_ij += (1.0 / 3) * (1.0 / 3) * \\\n cov_ij[0] * \\\n self.c_volume_array[elem_i] * \\\n self.c_volume_array[elem_j]\n\n cov_mat.setValue(node_i, node_j, temp_cov_ij)\n cov_mat.setValue(node_j, node_i, temp_cov_ij)\n cov_mat.assemblyBegin()\n cov_mat.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n\n return cov_mat",
"def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)",
"def __init__(self, covs, sigma):\n assert isinstance(sigma, int)\n assert covs.ndim == 3\n assert sigma > 0.0\n self.covs = covs\n self.sigma = sigma\n self.num_covs = self.covs.shape[0]\n self.shape = self.covs.shape[1:]\n\n self.covs_mat = self.covs.reshape(self.num_covs, np.prod(self.shape)).T\n self.weight_mat = np.empty(self.shape)\n self.weight_mat.fill(1.0/sigma**2)\n self.alpha = np.zeros(self.num_covs)",
"def make_covariance_matrix(points, kernel):\n\n dim = len(points)\n p1 = np.reshape(points, (dim, 1, -1))\n p2 = np.reshape(points, (dim, -1, 1))\n\n return kernel(p1, p2)",
"def __init__(self, N0, N1):\n #self.w = np.zeros(N);\n self.p0 = N0/(N0+N1) \n self.p1 = N1/(N0+N1)\n self.mu0 = np.zeros(N0+N1)\n self.mu1 = np.zeros(N0+N1)\n self.covariance = 0",
"def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)",
"def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data",
"def compute_covariance_matrix(Xs, sigma_2):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma",
"def crescent_data(num_data=200, seed=default_seed):\r\n np.random.seed(seed=seed)\r\n sqrt2 = np.sqrt(2)\r\n # Rotation matrix\r\n R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])\r\n # Scaling matrices\r\n scales = []\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append([[1, 0], [0, 3]])\r\n scales.append([[1, 0], [0, 3]])\r\n means = []\r\n means.append(np.array([4, 4]))\r\n means.append(np.array([0, 4]))\r\n means.append(np.array([-4, -4]))\r\n means.append(np.array([0, -4]))\r\n\r\n Xparts = []\r\n num_data_part = []\r\n num_data_total = 0\r\n for i in range(0, 4):\r\n num_data_part.append(round(((i + 1) * num_data) / 4.))\r\n num_data_part[i] -= num_data_total\r\n part = np.random.normal(size=(num_data_part[i], 2))\r\n part = np.dot(np.dot(part, scales[i]), R) + means[i]\r\n Xparts.append(part)\r\n num_data_total += num_data_part[i]\r\n X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))\r\n\r\n Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))\r\n return {'X':X, 'Y':Y, 'info': \"Two separate classes of data formed approximately in the shape of two crescents.\"}",
"def create_covariance_matrix(cls, coordinates):\n number_of_conformations = coordinates.shape[0]\n number_of_atoms = coordinates.shape[1]\n coordinates_per_conformation = number_of_atoms * 3\n covariance_matrix = numpy.zeros((coordinates_per_conformation, coordinates_per_conformation))\n coordinates = coordinates.reshape((number_of_conformations, coordinates_per_conformation))\n # Mean structure\n mean = coordinates.mean(0)\n # Changed for efficiency\n for coords in coordinates:\n deviations = coords - mean\n covariance_matrix += numpy.outer(deviations, deviations)\n return covariance_matrix / number_of_conformations",
"def build_supporting_covariance_matrices(self,\n t: tf.Tensor,\n system_std_dev: tf.Tensor,\n t_mean: tf.Tensor,\n t_std_dev: tf.Tensor) -> None:\n # Mean function\n self.computed_mean_function =\\\n self.mean_function.compute_mean_function(t)\n self.computed_derivative_mean_function =\\\n self.mean_function.compute_derivative_mean_function(t)\n # Covariance matrices\n self.c_phi_matrices = self._build_c_phi_matrices(t)\n self.c_phi_matrices_noiseless =\\\n self._build_c_phi_matrices_noiseless(t)\n self.diff_c_phi_matrices = self._build_diff_c_phi_matrices(t)\n self.c_phi_diff_matrices = self._build_c_phi_diff_matrices(t)\n self.diff_c_phi_diff_matrices = self._build_diff_c_phi_diff_matrices(t)\n # Total Covariance Matrix utilities\n self._compute_unrolled_total_c_phi()\n self._compute_unrolled_total_diff_c_phi()\n self._compute_unrolled_total_c_phi_diff()\n self._compute_unrolled_total_diff_c_phi_diff()\n self._compute_omega_matrix(t, t_mean, t_std_dev)\n self._compute_s_matrix(system_std_dev)\n self._compute_t_matrix()\n self._compute_b_matrix()\n return",
"def calcCovarianceMatrix(data):\n # Create covariance matrix and array to store the mean values for x_mean, y_mean, z_mean\n C = np.zeros((data.shape[1], data.shape[1]))\n mean_xyz = []\n # Calculate all mean values\n for i in range(0, data.shape[1]):\n mean_xyz.append(data[:,i].mean())\n mean_xyz = np.array(mean_xyz)\n # Check whether dimensions agree \n if data[:,0].size != data[:,1].size or data[:,0].size != data[:,2].size:\n print \"X, Y and Z must be of same dimensions.\"\n else:\n # For each row in covariance matrix C\n for i in range(0, C.shape[0]):\n # For each column in covariance matrix C\n for j in range(0, C.shape[1]):\n C[i,j] = 0\n # For each point in the dataset, access x, y, z-values\n for point in data:\n # For each point, access x,y and z in all combinations (xx, xy, xz, yx, yy, yz etc)\n C[i][j] = C[i][j] + (point[i]-mean_xyz[i])*(point[j]-mean_xyz[j])\n # Divide by the total number of points \n C = (1.0/data.shape[0]) * C\n return C",
"def emulator(pca, gp_model, params):\n # Weights prediction\n pred_weights = gp_predict(gp_model, params)\n\n # Inverse PCA (pred_weights * basis + mean)\n reconstructed = pca.inverse_transform(pred_weights)\n return reconstructed",
"def make_covariance_mixte_matrix(points1, points2, kernel):\n\n dim = len(points1)\n p1 = np.reshape(points1, (dim, -1, 1))\n p2 = np.reshape(points2, (dim, 1, -1))\n \n return kernel(p1, p2)",
"def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)",
"def build_covariance(self):\n raise RuntimeError(\"Internal cosmosis error in SingleValueGaussianLikelihood\")",
"def build_covariance(self):\n raise RuntimeError(\"Your Gaussian covariance code needs to \"\n \"over-ride the build_covariance method so it knows how to \"\n \"load the data covariance (or set constant_covariance=False and \"\n \"over-ride the extract_covariance method)\")\n\n #using info in self.options,\n #like filenames etc,\n #build covariance",
"def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu",
"def eval_cov_pre(self):\n \n precisions = self.precision_\n\n fit_score = []\n\n for i in range(precisions.shape[0]):\n precision = precisions[i]\n if self.loss == 'LL':\n fit_score.append(neg_logl(self.emp_cov[i], precision))\n else:\n fit_score.append(dtrace(self.emp_cov[i], precision))\n\n return self.emp_inv_score, self.C, fit_score, precisions",
"def compute_covariance_matrix1d(Xs):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.abs(t1 - t2)\n K1 = np.reshape(K1, (m, m))\n coeff = 1.0\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma",
"def precompute(self):\n self.cov_inv = np.linalg.inv(self.cov)\n self.root_2pi_d_det = math.sqrt((2.0*math.pi)**self.raw_data.shape[1] *\n np.linalg.det(self.cov))",
"def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p",
"def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)",
"def build_toy_dataset(N):\n pi = np.array([0.4, 0.6])\n mus = [[1, 1], [-1, -1]]\n stds = [[0.1, 0.1], [0.1, 0.1]]\n x = np.zeros((N, 2), dtype=np.float32)\n\n for n in range(N):\n k = np.argmax(np.random.multinomial(1, pi))\n x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))\n\n return x"
]
| [
"0.60516495",
"0.5877823",
"0.57856286",
"0.57798356",
"0.5714626",
"0.5634681",
"0.5634241",
"0.5608569",
"0.55604804",
"0.55500406",
"0.5538883",
"0.55216837",
"0.5494331",
"0.54647994",
"0.5409464",
"0.5397474",
"0.53955364",
"0.5363118",
"0.53556013",
"0.5354827",
"0.5335376",
"0.532863",
"0.53043926",
"0.5273425",
"0.5273283",
"0.52632153",
"0.52609956",
"0.52479315",
"0.52446604",
"0.5217567"
]
| 0.6293105 | 0 |
Plot a matrix in a table. | def plot_table(mat, width=.15, ratio=4):
vmax = np.abs(mat).max()
vals = np.around(mat, 2)
fig = plt.figure()
ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],
loc='center', cellColours=plt.cm.RdBu_r(
Normalize(-vmax, vmax)(mat)))
table.scale(1, ratio)
return fig | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_matrix(self, matrix: np.ndarray):\n sns.heatmap(matrix, annot=True)\n plt.show()",
"def showMatrix(self, frame, matrix, label=''): \n M = self.matrix2Table(matrix)\n mtable = self.showTable(frame, M, label)\n return mtable",
"def plot_matrix(loc_list):\n x_list = [x[0] for x in loc_list]\n y_list = [y[1] for y in loc_list]\n\n # print(x_list, y_list)\n # plt.figure()\n\n plt.plot(x_list, y_list)",
"def show_matrix(matrix,kind=\"temperature\"):\n if kind==\"temperature\":\n cmap = \"bwr\"\n plt.title(\"Temperature\")\n elif kind==\"habitat\":\n cmap = \"Greens\"\n plt.title(\"Habitat\")\n else:\n cmap = \"Blues\"\n plt.imshow(matrix,\n interpolation='None',\n cmap=cmap,\n vmin=0,\n vmax=1,\n aspect=\"equal\",)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n\n plt.xticks([])\n plt.yticks([])\n plt.colorbar(orientation=\"horizontal\", fraction=0.045)",
"def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]",
"def matrix_table(matrix):\r\n result =\"<table id='matrix_result'>\"\r\n for line in range(len(matrix)):\r\n result += \"<tr>\"\r\n for column in range(len(matrix)):\r\n result += \"<td>\" + str(matrix[line][column]) + \"</td>\"\r\n result += \"</tr>\"\r\n result += \"</table>\"\r\n return result",
"def scatterplot_matrix():\r\n\r\n # load data\r\n iris_dataset = load_iris()\r\n data = iris_dataset\r\n setosa = data['data'][data['target'] == 0]\r\n versicolor = data['data'][data['target'] == 1]\r\n virginica = data['data'][data['target'] == 2]\r\n\r\n # set picture frame\r\n num = 4\r\n fig, axes = plt.subplots(nrows=num, ncols=num, figsize=(18, 18))\r\n fig.subplots_adjust(hspace=0.5, wspace=0.25)\r\n\r\n # set scatter plot\r\n for i in range(0, num):\r\n for j in range(0, num):\r\n if i == j:\r\n continue\r\n axes[j, i].plot(setosa[:, j], setosa[:, i], color='navy', marker='o', linestyle='none')\r\n axes[j, i].plot(versicolor[:, j], versicolor[:, i], color='purple', marker='*', linestyle='none')\r\n axes[j, i].plot(virginica[:, j], virginica[:, i], color='pink', marker='s', linestyle='none')\r\n\r\n # set histgram on the diagram\r\n for i in range(0, num):\r\n axes[i, i].hist(setosa[:, i], color='navy')\r\n axes[i, i].hist(versicolor[:, i], color='purple')\r\n axes[i, i].hist(virginica[:, i], color='pink')\r\n\r\n axes[0, 0].set_title('Sepal length')\r\n axes[1, 1].set_title('Sepal width')\r\n axes[2, 2].set_title('Petal length')\r\n axes[3, 3].set_title('Petal width')\r\n\r\n plt.legend(('Setosa', 'Virginica', 'Versicolor')) # add legend\r\n\r\n # add Main title\r\n fig.suptitle('Iris Plots, measurements in cm', size=20)\r\n plt.show()",
"def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )",
"def plot_eos_table(ax, mat, table_name, spec='t', vmin=None, vmax=None,\n nx=300, ny=350, xmax=None, ymax=None, xmin=None, ymin=None):\n\n table_name = table_name.format(s=spec)\n tab = mat.get_table(table_name)\n\n Rmin, Rmax = tab['Rmin'], tab['Rmax']\n Tmin, Tmax = tab['Tmin'], tab['Tmax']\n if xmin is not None:\n Rmin = xmin\n if ymin is not None:\n Tmin = ymin\n\n Xarr = np.logspace(np.log10(Rmin), np.log10(Rmax)-0.1, nx)\n Yarr = np.logspace(np.log10(Tmin), np.log10(Tmax)-0.1, ny)\n\n X, Y = np.meshgrid(Xarr, Yarr, indexing='ij')\n\n F = tab(X,Y)\n\n if vmax is None:\n vmax = np.percentile(F, 99.5) \n if vmin is None:\n vmin = np.percentile(F[F>0], 0.5)\n\n cs = ax.pcolormesh(X, Y*K2eV, F, cmap=plt.cm.jet, norm = LogNorm(),\n vmin=vmin, vmax=vmax)\n if vmin is not None:\n levels = np.arange(int(np.log10(vmin)), int(np.log10(F.max())))\n else:\n levels = np.arange(np.log10(F[F>0].min()), int(np.log10(F.max())))\n logF = np.log10(np.where(F>0, F, F[F>0].min()))\n cl = ax.contour(X, Y/11640, logF, levels, colors='k')\n plt.clabel(cl, fontsize=10, inline=False, fmt='%1.0d', use_clabeltext=True)\n plt.title('Table {0}: {1}'.format(tab['Material_ID'], table_name.replace('_', '\\_')))\n cb = plt.colorbar(cs)\n if F.min()<0:\n min_label = ' (min {0:.0e} GPa)'.format(F.min())\n else:\n min_label = ''\n cb.set_label('{0} [{1}] {2}'.format(tab.label.replace('_', '\\_'),\n tab.units, min_label))\n\n cl = ax.contourf(X, Y*K2eV, F>0, [0,0.5], colors='white', hatches=['//'])\n\n ax.set_xscale('symlog', linthreshx=3e-5)\n ax.set_yscale('symlog', linthreshy=0.1)\n if xmax is None:\n ax.set_xlim(0, Xarr.max())\n else:\n ax.set_xlim(0, xmax)\n if ymax is None:\n ax.set_ylim(0, Yarr.max()*K2eV)\n else:\n ax.set_ylim(0, ymax)\n\n ax.set_xlabel(r'$\\rho$ [g.cm$^{-3}$]')\n ax.set_ylabel(r'$T$ [eV]')\n return ax",
"def plot_axT(T_e, M, mat_n):\n for i in range(M):\n axT.plot(T_e,np.abs(mat_n[i,:]), c = 'b')",
"def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()",
"def plot_matrix(matrix, yaxis=None, xaxis=None, **kwargs):\n\n # Make new matplotlib figure.\n fig = pyplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n fig.subplots_adjust(top=0.85)\n cax = ax.matshow(matrix, interpolation=kwargs.get('interpolation', 'bilinear'))\n cb = fig.colorbar(cax)\n cb.set_label(kwargs.get('cblabel', ''))\n\n # Set figure and axis titles\n fig.suptitle(kwargs.get('title', ''))\n ax.set_title(kwargs.get('subtitle', ''), fontsize=8)\n ax.set_ylabel(kwargs.get('ylabel', ''), fontsize=10)\n ax.set_xlabel(kwargs.get('xlabel', ''), fontsize=10)\n\n # Set the ticks and tick labels. Reverse y axis to align x/y origin\n yaxis_locs = range(0, len(yaxis), int(len(yaxis) / 10))\n ax.yaxis.set_ticks_position('left')\n ax.yaxis.set_major_locator(mticker.FixedLocator(yaxis_locs))\n ax.yaxis.set_major_formatter(mticker.FixedFormatter(['%1.2f' % yaxis[x] for x in yaxis_locs]))\n ax.invert_yaxis()\n\n xaxis_locs = range(0, len(xaxis), int(len(xaxis) / 10))\n ax.xaxis.set_ticks_position('bottom')\n ax.xaxis.set_major_locator(mticker.FixedLocator(xaxis_locs))\n ax.xaxis.set_major_formatter(mticker.FixedFormatter(['%1.2f' % xaxis[x] for x in xaxis_locs]))\n ax.grid(None)\n\n return fig",
"def plot_mat(self, parameter='s', fig=None, ylim=1.1, label=None):\n if parameter not in ['s', 'y']:\n raise Exception('Invalid parameter.')\n matrix = getattr(self, parameter)\n if fig is None:\n fig = plt.figure(figsize=(15.0, 10.0))\n for i in range(2):\n for j in range(2):\n subplotnum = 2*i+j+1 # add_subplot needs the +1 as indexing starts with 1\n ax = fig.add_subplot(2,2,subplotnum)\n ax.plot(self.f/1e9, matrix[:,i,j].real, label=('Re '+label if label else None))\n ax.plot(self.f/1e9, matrix[:,i,j].imag, label=('Im '+label if label else None))\n ax.set_xlabel('f [GHz]')\n ax.set_ylabel(parameter.upper()+r'$_{%d%d}$'%(i+1,j+1))\n ax.set_ylim([-ylim,ylim]) \n ax.set_xlim([min(self.f/1e9), max(self.f/1e9)])\n fig.tight_layout() \n\n return fig",
"def plotMerged(self, matrix, expcol, expdata=None,\n title='', showtable=True, ax=None, name=None,\n stats=True):\n if expdata==None:\n expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])\n merged = self.mergeMatrix(matrix, expdata)\n x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)\n from Correlation import CorrelationAnalyser\n C = CorrelationAnalyser()\n muts = ['mutation: '+i for i in muts]\n labels = zip(names, muts)\n ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,\n ax=ax,plotname=name,stats=stats,err=4)\n x=[round(float(i),2) for i in x]\n y=[round(float(i),2) for i in y] \n if showtable == True:\n table = self.showTable(frame, merged)\n mh.table = table\n \n return ax,mh,x,y",
"def plot_matrix_method(pulse, trap, ToP):\n n0, d = trap.matrix_method(pulse)\n for k in range(len(d)):\n ave_list = []\n timestep = np.arange(0, trap.N+1, 1)\n for i in range(len(d[k])):\n sum2 = 0\n for j in range(len(d[k][i])):\n sum2 += (j) * d[k][i][j]\n ave_list.append(sum2)\n if ToP == 'a':\n plt.plot(timestep * pulse.t * 1e3, ave_list, label = pulse.t)\n if ToP == 'b':\n plt.plot(timestep * pulse.t * 1e3, ave_list, color = 'black', label = 'Matrix')\n if ToP == 'c':\n plt.plot(timestep * pulse.t * 1e3, ave_list, color = 'b')\n # plt.legend()\n # plt.xlabel('time (ms)')\n # plt.ylabel('n')\n #plt.xlim(0, 10) ",
"def mostrar_tablero(mtx, n):\n # Cabecera de Columnas\n fila = \"/ |\"\n for i in range(n):\n fila = fila + \" \" + chr(65+i)\n print fila\n print \"-\"*(2*n+3)\n # Cabecera de Filas\n for i in range(n):\n fila = str(i+1)\n if i < 9 : fila += \" |\"\n else:\n fila+=\"|\"\n for e in range(n):\n fila = fila+\" \"+mtx[i][e]\n print fila\n fila = \"\"\n # Nueva linea\n print \"\"",
"def show_confusion_matrix(matrix: List[List], labels: List[str]):\n fig, ax = plt.subplots()\n fig.set_figheight(15)\n fig.set_figwidth(15)\n\n min_val, max_val = 0, len(labels)\n\n for i in range(max_val):\n for j in range(max_val):\n c = matrix[i][j]\n ax.text(i, j, str(int(c)), va='center', ha='center')\n\n ax.matshow(matrix, cmap=plt.cm.Blues)\n\n # Set number of ticks for x-axis\n ax.set_xticks(np.arange(max_val))\n # Set ticks labels for x-axis\n ax.set_xticklabels(labels, rotation='vertical', fontsize=16)\n\n # Set number of ticks for x-axis\n ax.set_yticks(np.arange(max_val))\n # Set ticks labels for x-axis\n ax.set_yticklabels(labels, rotation='horizontal', fontsize=16)\n \n #ax.set_xlim(min_val, max_val)\n ax.set_ylim(max_val - 0.5, min_val - 0.5)\n plt.show()",
"def heat_plot(matrix, filename, xTicks, yTicks, xLabel='X', yLabel='Y'):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tcax = ax.matshow(matrix, vmin=0, vmax=1)\n\tfig.colorbar(cax)\n\tticks = np.arange(0, matrix.shape[0], 1)\n\tax.set_xticks(ticks)\n\tax.set_yticks(ticks)\n\tax.set_xticklabels(xTicks)\n\tax.set_yticklabels(yTicks)\n\tax.set_xlabel(xLabel)\n\tax.set_ylabel(yLabel)\n\tplt.savefig(filename)\n\tplt.close()",
"def plot_hitlet_matrix(self, hitlets, _hitlet_points=None):\n if not _hitlet_points:\n _hitlet_points = self.hitlets_to_hv_points(hitlets, )\n\n hitlet_matrix = self._plot_base_matrix(_hitlet_points).opts(title='Hitlet Matrix',\n xlabel='Time [ns]',\n ylabel='PMT channel',\n ylim=(1995, 2125),\n color='area',\n clabel='Area [pe]',\n cmap='viridis',\n colorbar=True\n )\n return hitlet_matrix",
"def plot_matrix(A, O, word_dict, normalize=True):\n Osize = O.shape\n Onew = O\n\n if normalize:\n Onew = np.zeros(Osize)\n Anew = np.zeros(A.shape)\n for row in range(Osize[0]):\n Onew[row, :] = O[row, :]/max(O[row, :])\n Anew[row, :] = A[row, :]/max(A[row, :])\n\n plt.imshow(Onew, aspect='auto', cmap='magma', interpolation='nearest')\n plt.colorbar(orientation='horizontal', aspect=100)\n plt.clim(vmin=0, vmax=1)\n plt.tight_layout()\n plt.show()\n\n fig, ax1 = plt.subplots(1, 1)\n ax1.imshow(Onew[:, :100], aspect='auto', cmap='magma', interpolation='nearest', vmin=0.0, vmax=1.0)\n ax1.set_xticks(range(100))\n ax1.set_xticklabels(word_dict[:100], rotation=90)\n plt.show() # display\n\n plt.matshow(A, aspect='auto', cmap='magma')\n plt.colorbar()\n plt.show()",
"def plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None):\n import lepm.plotting.plotting as leplt\n return leplt.plot_real_matrix(M, name='', outpath=None, fig='auto', climv=None, cmap=\"coolwarm\", show=False, close=True,\n fontsize=None)",
"def matrix(self, data):\n matrix_keys = ['cmap', 'vmin', 'vmax']\n matrix_config = self.config.filter(keys=matrix_keys, prefix='matrix_')\n\n vmin, vmax = self._parse_vrange(data)\n matrix_config['vmin'] = vmin\n matrix_config['vmax'] = vmax\n\n matrix = self.ax.matshow(data, **matrix_config)\n\n label_keys = ['size', 'color', 'bbox', 'format']\n label_config = self.config.filter(keys=label_keys, prefix='label_')\n label_format = label_config.pop('format')\n label_color = label_config.pop('color')\n\n min_value, max_value = np.nanmin(data), np.nanmax(data)\n for y, row in enumerate(data):\n for x, value in enumerate(row):\n normalized_value = (value - min_value) / (max_value - min_value)\n cell_color = plt.get_cmap(matrix_config['cmap'])(normalized_value)\n cell_brightness = np.mean(cell_color[:3])\n color = label_color if cell_brightness < 0.5 else invert_color(label_color)\n\n if isinstance(label_format, str):\n formatter = label_format\n elif isinstance(label_format, dict):\n formatter = ''\n for dtype, dtype_formatter in label_format.items():\n if isinstance(value, dtype):\n formatter = dtype_formatter\n\n text = format(value, formatter)\n\n self.subplot.add_text(text=text, x=x, y=y, color=color, **label_config)\n\n return [matrix]",
"def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')",
"def conf_matrix_plotter(model, X_t_vec, y_t):\n fig, ax = plt.subplots()\n\n fig.suptitle(str(model))\n\n plot_confusion_matrix(model, X_t_vec, y_t, ax=ax, cmap=\"plasma\");",
"def plot_stability_matrix(self, file_name=None):\n size = len(self.seq) / 2.5\n plt.figure(figsize=(size, 2.5))\n plt.imshow(self.matrix,\n interpolation='none',\n cmap=plt.get_cmap('YlOrRd'))\n plt.yticks(range(4), ['A', 'C', 'G', 'U'], fontsize=12)\n plt.xticks(range(len(self.seq)), fontsize=12)\n if file_name is None:\n plt.show()\n else:\n plt.savefig(file_name,\n bbox_inches='tight',\n transparent=True,\n pad_inches=0)\n plt.close()",
"def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph markup\n print(tbl.show())\n print(tbl.loggraph())",
"def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot",
"def plot_diffusion_matrix(nodes, matrix, filename, show_labels=False):\n\n logging.info(\"Plotting figure as \" + str(filename))\n fig, axes = plt.subplots(1)\n axes.imshow(matrix, cmap=\"PuBu\")\n if show_labels == True:\n pass\n plt.show()\n fig.savefig(filename + \".pdf\", format=\"pdf\")",
"def settabular(self, *args, **kwargs):\n return _coordsys.coordsys_settabular(self, *args, **kwargs)",
"def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,\n cmap='RdBu_r', show=True, colorbar=True,\n xlabel=True, ylabel=True):\n return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,\n tlim=tlim, ax=ax, cmap=cmap, show=show,\n colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)"
]
| [
"0.68654674",
"0.66270876",
"0.6347964",
"0.63052076",
"0.6269812",
"0.6202961",
"0.6155504",
"0.6137438",
"0.61339086",
"0.61286443",
"0.61217624",
"0.6113645",
"0.6092139",
"0.60756713",
"0.59949434",
"0.59656054",
"0.5892865",
"0.5877521",
"0.57820016",
"0.5773821",
"0.5742696",
"0.5726114",
"0.5697656",
"0.5667071",
"0.56480205",
"0.5639697",
"0.56368715",
"0.56223345",
"0.5603177",
"0.5588813"
]
| 0.6933746 | 0 |
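A minimal usage sketch for the plot_table record above, assuming that function is already in scope. The imports shown (numpy, matplotlib.pyplot, matplotlib.colors.Normalize) are the ones the snippet relies on but does not include; the example matrix and output filename are arbitrary choices made only so the snippet runs.

    # Usage sketch: assumes plot_table from the record above is defined.
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import Normalize  # required inside plot_table

    # Render a small matrix as a colour-coded table and save it.
    mat = np.array([[0.9, -0.3], [-0.3, 0.5]])
    fig = plot_table(mat, width=0.2, ratio=3)
    fig.savefig("matrix_table.png", bbox_inches="tight")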
Pull fresh data from Open AQ and replace existing data. | def refresh():
DB.drop_all()
DB.create_all()
# TODO Get data from OpenAQ, make Record objects with it, and add to db
DB.session.commit()
return 'Data refreshed!' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'",
"def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'",
"def reload_data(self):\n self._avro_payload.reload_data()",
"def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()",
"def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())",
"def update_historical_data():\n print('updating historical data')\n for sp in SupplyPoint.objects.filter(supplypointwarehouserecord__isnull=True).exclude(type__code=SupplyPointCodes.ZONE):\n update_historical_data_for_supply_point(sp)",
"def pull(self):",
"def reloadData(self):\n self.dto.readFromData()\n print(\"Record reloaded.\")",
"def update_data(self, data):\n start_time = data.index[-1].strftime(\"%Y-%m-%dT%H:%M:%S.000000Z\")\n temp_data = self.gather_data(start=start_time)\n temp_data = self._list_to_df(temp_data)\n if (len(temp_data) > 1):\n # temp_data[0] is the same as data[-1]\n out_data = data.append(temp_data[1:])\n return out_data",
"def refresh(self):\r\n self.metadata = self.db.read(self.path).json()",
"def update_from_qai(self) -> dict:\n qaisession = self.qaisession\n if qaisession is None or not qaisession.is_logged_in():\n return dict(ok=False, msg=\"User not logged in\")\n # get the locally stored timestamp data from our database\n cur_tsdata = self.get_ts_data()\n try:\n newds = qai_helper.QAIDataset(None, cur_tsdata)\n except RuntimeError as err:\n return dict(ok=False, msg=\"QAI access error: {}\".format(str(err)))\n # load those parts from QAI that are out of date\n update_dct = qaisession.clever_update_qai_dump(newds)\n # if any value is True, then we did get something from QAI...\n num_updated = sum(update_dct.values())\n if num_updated > 0:\n try:\n self._db_has_changed = self.load_qai_data(newds, update_dct)\n except TypeError as err:\n return dict(ok=False, msg=\"database error: {}\".format(str(err)))\n return dict(ok=True, msg=\"Successfully updated {} tables for QAI\".format(num_updated))",
"def _refresh_data(data_format):\n\n cached_filename = f'{date.today()}.{data_format}'\n\n if BUCKET.blob(cached_filename).exists():\n bucket_data = BUCKET.blob(cached_filename).download_as_string()\n\n if data_format == 'json':\n return json.loads(bucket_data)\n else:\n return bucket_data \n\n df = _get_data()\n\n if data_format == 'csv':\n data = df.to_csv()\n res = data\n else:\n data = df.to_json(orient='records', date_format='iso')\n res = json.loads(data)\n \n BUCKET.blob(cached_filename).upload_from_string(\n data,\n content_type='application/json'\n )\n \n return res",
"def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data",
"def __update_ohlcv(self, action, new_data): \n if self.data is None:\n end_time = datetime.now(timezone.utc)\n start_time = end_time - self.ohlcv_len * delta(self.bin_size)\n #logger.info(f\"start time fetch ohlcv: {start_time}\")\n #logger.info(f\"end time fetch ohlcv: {end_time}\")\n d1 = self.fetch_ohlcv(self.bin_size, start_time, end_time)\n if len(d1) > 0:\n d2 = self.fetch_ohlcv(allowed_range[self.bin_size][0],\n d1.iloc[-1].name + delta(allowed_range[self.bin_size][0]), end_time)\n\n self.data = pd.concat([d1, d2]) \n else:\n self.data = d1\n \n else:\n self.data = pd.concat([self.data, new_data]) \n\n # exclude current candle data \n re_sample_data = resample(self.data, self.bin_size)[:-1]\n \n if self.data.iloc[-1].name == re_sample_data.iloc[-1].name:\n self.data = re_sample_data.iloc[-1 * self.ohlcv_len:, :]\n\n if self.last_action_time is not None and \\\n self.last_action_time == re_sample_data.iloc[-1].name:\n return\n\n open = re_sample_data['open'].values\n close = re_sample_data['close'].values\n high = re_sample_data['high'].values\n low = re_sample_data['low'].values\n volume = re_sample_data['volume'].values \n\n try:\n if self.strategy is not None: \n self.strategy(open, close, high, low, volume) \n self.last_action_time = re_sample_data.iloc[-1].name\n except FatalError as e:\n # Fatal error\n logger.error(f\"Fatal error. {e}\")\n logger.error(traceback.format_exc())\n\n notify(f\"Fatal error occurred. Stopping Bot. {e}\")\n notify(traceback.format_exc())\n self.stop()\n except Exception as e:\n logger.error(f\"An error occurred. {e}\")\n logger.error(traceback.format_exc())\n\n notify(f\"An error occurred. {e}\")\n notify(traceback.format_exc())",
"def update_original_data(self):\n pass",
"def update_data():\n pass",
"def pull(self):\n raise NotImplementedError()",
"def last_buy(self):\n multi_data = []\n while not self.infoQueue.empty():\n multi_data.append(self.infoQueue.get_nowait())\n self.redisHandle.set_multiple_data(multi_data)\n print(\"flush all data\")",
"def test_cache_change_empty_data(self):\n self.connection.query = mock.MagicMock(return_value=())\n self.assertTrue(self.host_updater.refresh_cache())",
"def get_FIREX_AQ_data(debug=False, RtnAllData=True,\n FilterPollutedAirMasses=True,\n RmObsBelowGround=True,\n UpdateTimeeZone2LocalTime=True,\n FilterByTimeOfDay=True,\n SetFlaggedDataToNaN=True,\n stime='10:00', etime='15:00'):\n firex_vars = Get_FIREXAQ_variable_dict()\n keys = firex_vars.keys()\n # Read FIREX-AQ data\n path = '/mnt/lustre/groups/chem-acm-2018/shared_data/FIREX-AQ'\n # NOTE: if the merge file will be read if var='thru'\n# df0 = read_FIREXAQ_files(path, var='thru')\n df0 = get_FIREX_AQ_from_ICT_files(UseMergeFile=False)\n\n # Convert timezone and apply restrictions on data\n if UpdateTimeeZone2LocalTime:\n df0.index = df0.index.tz_localize('UTC').tz_convert('US/Pacific')\n if FilterByTimeOfDay:\n if not UpdateTimeeZone2LocalTime:\n print('WARNING: Selecting time of day in UTC, not local time')\n df0 = df0.between_time(stime, etime)\n # Filter out polluted air mass\n if FilterPollutedAirMasses:\n df0 = df0[df0['CO_DACOM_DISKIN'] < 100. ]\n # Filter out flagged data?\n if RmObsBelowGround:\n df0 = df0[df0['MSL_GPS_Altitude_YANG'] > 0. ]\n # Flag the data here that\n if SetFlaggedDataToNaN:\n FlagValue = -999999.000000\n for col in df0.columns:\n df0.loc[ df0[col] == FlagValue, col] = np.NaN\n\n # Return entire dataset or just a single species?\n if RtnAllData:\n return df0\n else:\n for var in keys:\n print( var )\n df = pd.concat([ df0[firex_vars[var]['firex']],\n df0['Latitude_YANG'],\n df0['Longitude_YANG'],\n df0['MSL_GPS_Altitude_YANG'] ],\n axis=1 )\n df.columns = [var,'Latitude','Longitude','Altitude']\n df = df[ df[var] > 0. ]\n if debug:\n print( df )\n\n # Save species to dictionary\n dfs[var] = df.copy()\n print(df)",
"def _pull(self) -> None:\n raise NotImplementedError() # pragma: no cover",
"def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = True\r\n query_cache.set(self.iden, self.data)",
"def test_append_updated_record_to_queue_same_data(small_app):\n pid = PersistentIdentifier.get(\"literature\", 11883)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n append_updated_record_to_queue(None, record, record, \"records-hep\", \"hep\")\n\n assert str(record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id",
"def reset_data(self):\n self.data = []",
"def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')",
"def reload_cache(self):\n self.data = self.read_data_cache()",
"def fill_db(self, data):\n check_input_params(data, self.DB)\n self.db = data[self.DB]",
"def refresh():\n DB.drop_all()\n DB.create_all()\n samples = pull_pm('Los Angeles', 'pm25')\n for sample in samples:\n measure = Record(datetime = str(sample[0]), value = sample[1])\n DB.session.add(measure)\n DB.session.commit()\n return 'Data refreshed!'",
"async def _pull_now(self) -> None:\n raise NotImplementedError()",
"def reset_data(self):\n self.data = None"
]
| [
"0.7107583",
"0.6050733",
"0.597681",
"0.58857965",
"0.5744607",
"0.5734073",
"0.55982405",
"0.5576293",
"0.5548099",
"0.5489543",
"0.54826975",
"0.54653",
"0.54617256",
"0.53852135",
"0.5350215",
"0.5341329",
"0.5318105",
"0.53093445",
"0.5303601",
"0.5300558",
"0.52949953",
"0.52924526",
"0.52854276",
"0.5252673",
"0.52469295",
"0.5242298",
"0.5185137",
"0.5183661",
"0.5170153",
"0.51595134"
]
| 0.65043575 | 1 |
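The refresh record above leaves the OpenAQ fetch as a TODO. Below is a hedged sketch of how the surrounding pieces (a Flask-SQLAlchemy Record model and the refresh route) typically fit together; fetch_measurements is a hypothetical stand-in for the real OpenAQ call, and the model's column names and the database URI are assumptions, not the record author's actual code.

    # Minimal sketch, assuming Flask and Flask-SQLAlchemy are installed.
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy

    APP = Flask(__name__)
    APP.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///aq.sqlite3"
    DB = SQLAlchemy(APP)

    class Record(DB.Model):
        # Assumed schema: one row per (timestamp, measurement) pair.
        id = DB.Column(DB.Integer, primary_key=True)
        datetime = DB.Column(DB.String(32))
        value = DB.Column(DB.Float, nullable=False)

    def fetch_measurements():
        # Hypothetical stand-in for the OpenAQ call the TODO refers to.
        return [("2017-08-09T00:00:00Z", 5.4), ("2017-08-09T01:00:00Z", 6.1)]

    @APP.route("/refresh")
    def refresh():
        DB.drop_all()
        DB.create_all()
        for utc, value in fetch_measurements():
            DB.session.add(Record(datetime=utc, value=value))
        DB.session.commit()
        return "Data refreshed!"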
Get the destination pathname from a source pathname | def path_src_to_dest(src_pathname, dest_filename_suffix=None):
src_relpath = Path(src_pathname).relative_to(config["topdir"])
dest_pathname = Path(config["outdir"]).joinpath(src_relpath)
if dest_filename_suffix:
dest_pathname = dest_pathname.with_suffix(dest_filename_suffix)
return dest_pathname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDestination(source):\n\ti = len(source)-1\n\tif source[i] == '/':\n\t\tsource = source[0:i - 1]\n\twhile i >= 0:\n\t\tif source[i] == '/':\n\t\t\tbreak\n\t\ti -= 1\n\tdestination = source[0: i]\n\treturn destination",
"def bestrelpath(self, dest):\n try:\n if self == dest:\n return os.curdir\n base = self.common(dest)\n if not base: # can be the case on windows\n return str(dest)\n self2base = self.relto(base)\n reldest = dest.relto(base)\n if self2base:\n n = self2base.count(self.sep) + 1\n else:\n n = 0\n lst = [os.pardir] * n\n if reldest:\n lst.append(reldest)\n target = dest.sep.join(lst)\n return target\n except AttributeError:\n return str(dest)",
"def src_to_dst(self,src_uri):\n m=re.match(self.src_root+\"(.*)$\",src_uri)\n if (m is None):\n raise \"FIXME - Does not match\"\n rel_path=m.group(1)\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace('/',os.sep)\n return(self.dst_root+rel_path)",
"def get_zipfile_path(url, destination):\n return os.path.join(destination, url.split('/')[-1])",
"def extract_file_name_from_source_full_path(source_full_path):\n destination_file_name = os.path.basename(source_full_path)\n return destination_file_name",
"def directory_slash(destination):\n\n if destination[-1] != '/':\n return destination + '/'\n\n return destination",
"def rel(dest, curdir):\n\n sc = splitpath(curdir)\n sd = splitpath(dest)\n\n while len(sc) > 0 and len(sd) > 0:\n if sc[0] != sd[0]:\n break\n sc = sc[1:]\n sd = sd[1:]\n\n if len(sc) == 0 and len(sd) == 0:\n out = \"\"\n elif len(sc) == 0:\n out = apply(join, sd)\n elif len(sd) == 0:\n out = apply(join, map(lambda x: os.pardir, sc))\n else:\n out = apply(join, map(lambda x: os.pardir, sc) + list(sd))\n\n # make sure the path is suitable for html consumption\n return out",
"def convertPath (source, target, filename):\n\tfrom os.path import join as joinPath\n\tfrom os import sep\n\n\t# Get the source path informations\n\tdirSrc = filenameSplit (source)[1]\n\n\t# Get the target path informations\n\tdiskDst, dirDst, nameDst, extDst = filenameSplit (target)\n\n\t# Get the current file informations\n\tdummy, dirFil, nameFil, extFil = filenameSplit (filename)\n\n\t# Build the target path\n\tdir_ = normalizePath(dirDst + sep + dirFil[len(dirSrc):len(dirSrc) + len(dirFil)-len(dirSrc)])\n\n\t# Add the target filename\n\tname = convertFilename (nameDst,nameFil)\n\n\t# Add the target extension\n\text = convertFilename (extDst,extFil)\n\n\treturn diskDst + joinPath(dir_, name + ext)",
"def relpath(d1, d2):\n assert d1.startswith(d2)\n return d1[len(d2):].lstrip('/')",
"def path_dest(life):\n\tif not life['path']:\n\t\treturn None\n\t\n\t_existing_chunk_map = brain.get_flag(life, 'chunk_path')\n\tif _existing_chunk_map:\n\t\treturn _existing_chunk_map['end']\n\t\n\treturn tuple(life['path'][len(life['path'])-1])",
"def convertPath(srcpath, dstdir):\n bits = srcpath.split(\"/\")\n bits.pop(0)\n # Strip out leading 'unsigned' from paths like unsigned/update/win32/...\n if bits[0] == 'unsigned':\n bits.pop(0)\n return os.path.join(dstdir, *bits)",
"def _CreateSanitizedDestination(\n self, source_file_entry, source_path_spec, destination_path):\n file_system = source_file_entry.GetFileSystem()\n path = getattr(source_path_spec, u'location', None)\n path_segments = file_system.SplitPath(path)\n\n # Sanitize each path segment.\n for index, path_segment in enumerate(path_segments):\n path_segments[index] = u''.join([\n character if character not in self._DIRTY_CHARACTERS else u'_'\n for character in path_segment])\n\n return (\n os.path.join(destination_path, *path_segments[:-1]), path_segments[-1])",
"def make_relative_path(source, dest, dest_is_directory=True):\n source = os.path.dirname(source)\n if not dest_is_directory:\n dest_filename = os.path.basename(dest)\n dest = os.path.dirname(dest)\n else:\n dest_filename = None\n dest = os.path.normpath(os.path.abspath(dest))\n source = os.path.normpath(os.path.abspath(source))\n dest_parts = dest.strip(os.path.sep).split(os.path.sep)\n source_parts = source.strip(os.path.sep).split(os.path.sep)\n while dest_parts and source_parts and dest_parts[0] == source_parts[0]:\n dest_parts.pop(0)\n source_parts.pop(0)\n full_parts = [\"..\"] * len(source_parts) + dest_parts\n if not dest_is_directory and dest_filename is not None:\n full_parts.append(dest_filename)\n if not full_parts:\n # Special case for the current directory (otherwise it'd be '')\n return \"./\"\n return os.path.sep.join(full_parts)",
"def goal_(s):\n a, b = path.split(s)\n return path.join(a, b[len('goal_'):])",
"def dst_to_src(self,dst_file):\n rel_path=os.path.relpath(dst_file,start=self.dst_root)\n if (rel_path == '.'):\n rel_path=''\n else:\n rel_path= '/'+rel_path\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace(os.sep,'/')\n return(self.src_root+rel_path)",
"def destination(self) -> str:\n for a in self.args:\n if a[0] != '-':\n return a\n try:\n return self.kwargs['dest']\n except KeyError:\n for a in self.args:\n if a.startswith('--'):\n dest = a.lstrip('-').replace('-', '_')\n if dest.isidentifier():\n return dest\n raise AttributeError(F'The argument with these values has no destination: {self!r}')",
"def determine_destination_name(\n destination_folder_name,\n destination_file_name,\n):\n destination_name = combine_folder_and_file_name(\n destination_folder_name, destination_file_name)\n return destination_name",
"def get_parsed_destination(dest_str):\n # Interpret strings of form \"project-XXXX\" (no colon) as project. If\n # we pass these through to resolve_path they would get interpreted\n # as folder names...\n if is_container_id(dest_str):\n return (dest_str, None, None)\n\n # ...otherwise, defer to resolver.resolve_path. This handles the\n # following forms:\n #\n # /FOLDER/\n # /ENTITYNAME\n # /FOLDER/ENTITYNAME\n # [PROJECT]:\n # [PROJECT]:/FOLDER/\n # [PROJECT]:/ENTITYNAME\n # [PROJECT]:/FOLDER/ENTITYNAME\n return try_call(resolve_path, dest_str)",
"def destination(self) -> str:\n return pulumi.get(self, \"destination\")",
"def get_path(self, src, dst):\r\n shortest_paths = self.awareness.shortest_paths\r\n if self.seletPathIndex==CONF.k_paths:\r\n self.seletPathIndex=0\r\n try:\r\n path= shortest_paths.get(src).get(dst)[self.seletPathIndex]\r\n self.seletPathIndex += 1\r\n return path\r\n except:\r\n return shortest_paths.get(src).get(dst)[0]",
"def get_shortest_path(self, src, dst):\n \n return self.get_sorted_paths(src, dst)[0]",
"def get_dest_filepath(self, filepath, destpath, flattencount):\n fp = self.split_path(filepath)\n if flattencount > 0:\n if len(fp) > flattencount:\n dpath = os.path.abspath(os.path.join(destpath, *fp[flattencount:]))\n else:\n dpath = os.path.abspath(os.path.join(destpath, fp[-1]))\n else:\n dpath = os.path.abspath(os.path.join(destpath, *fp))\n return dpath",
"def get_destination(metadata: Metadata):\n\n func = f\"{__name__}.get_destination\"\n\n metadata[\"destination\"] = metadata[\"full_clipname\"].replace(\n f\"/{app.capture_folder_name}/\", f\"/{app.destination_folder_name}/\"\n )\n\n if path.exists(metadata[\"destination\"]):\n metadata[\"renamed\"] = \"True\"\n print(f\"file exists:{metadata['destination']}\")\n directory = path.dirname(metadata[\"destination\"])\n filename = path.splitext(path.basename(metadata[\"destination\"]))[0]\n extension = path.splitext(path.basename(metadata[\"destination\"]))[1]\n number = 1\n dest = path.join(directory, f\"{filename}-{number:0>2d}{extension}\")\n metadata[\"destination\"] = dest\n print(f\"Will now test with:{dest}\")\n while path.exists(dest):\n number += 1\n dest = path.join(directory, f\"{filename}-{number:0>2d}{extension}\")\n print(f\"Will now test with:{dest}\")\n\n metadata[\"destination\"] = dest\n\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The destination is: {metadata['destination']} - was renamed: {metadata['renamed']}\",\n )\n\n return metadata",
"def directory_resolve_home(destination):\n\n if destination[:1] == '~':\n home = expanduser(\"~\")\n return home + destination.strip('~')\n\n return destination",
"def _get_local_dest(self, path: Path) -> Path:\n dest = \"\"\n\n if str(path).startswith(\"~\"):\n path = path.relative_to(\"~\")\n\n if self.category == \"global\":\n dest = f\"{self.local_base}/global/{path}\"\n elif self.category == \"local\":\n dest = f\"{self.local_base}/local/{path}\"\n else:\n dest = f\"{self.local_base}/custom/{path}\"\n\n return Path(dest)",
"def get_path(self, path):\n return abspath(join(self.origin, *path))",
"def get_image_path(source_path):\n\n split = source_path.split('\\\\')\n # get filename\n filename = split[-1].lstrip()\n # get folder name\n folder = split[-3]\n # get full data path\n current_path = folder + '/IMG/' + filename\n return current_path",
"def extracted_path(self):\n return os.path.join(self.destination_directory, self.__extracted_name__)",
"def _relativize(base: str, current: str) -> str:\n if current.startswith(base):\n return current.replace(base, \"\", 1)\n return current",
"def _read_sourced_path(self, line):\n # type: (str)->tp.Optional[str]\n if line.startswith('source '):\n sline = [x.strip() for x in line.split()]\n sline.pop(0)\n path = ' '.join(sline)\n if not os.path.isabs(path):\n current_root = self._root_interfaces_path\n if os.path.isfile(current_root):\n current_root = os.path.dirname(current_root)\n path = os.path.join(current_root, path)\n return path\n return None"
]
| [
"0.7815231",
"0.66032153",
"0.65959716",
"0.6490605",
"0.63670933",
"0.629784",
"0.61708474",
"0.6148273",
"0.6114234",
"0.6095512",
"0.60766137",
"0.6070404",
"0.6042116",
"0.6041628",
"0.60336053",
"0.60242707",
"0.60152113",
"0.60108036",
"0.59943426",
"0.5941626",
"0.5933208",
"0.5911396",
"0.5868981",
"0.5864846",
"0.57982224",
"0.578046",
"0.57368636",
"0.5731535",
"0.5725226",
"0.5673999"
]
| 0.6820696 | 1 |
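A standalone usage sketch of the pathlib steps used by path_src_to_dest above. The config dict layout and the example paths are assumptions chosen only to make the snippet runnable; they mirror how the record indexes config["topdir"] and config["outdir"].

    # Same pathlib steps as path_src_to_dest, shown standalone.
    from pathlib import Path

    config = {"topdir": "/project/docs", "outdir": "/project/site"}  # assumed layout

    src = Path("/project/docs/guide/intro.md")
    rel = src.relative_to(config["topdir"])        # guide/intro.md
    dest = Path(config["outdir"]).joinpath(rel)    # /project/site/guide/intro.md
    dest = dest.with_suffix(".html")               # /project/site/guide/intro.html
    print(dest)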
Process a markdown file and copy it to the destination | def process_file_markdown(src_pathname):
dest_pathname = path_src_to_dest(src_pathname, '.html')
logging.info("Processing Markdown file: %s -> %s" %
(str(src_pathname), str(dest_pathname)))
ensure_dest_dir(dest_pathname)
with open(dest_pathname, 'w', encoding='UTF-8') as f:
outstr = docgen.generate.generate_doc(str(src_pathname),
verbose=config['verbose'],
inlinecss=True,
inlinewave=True,
asdiv=False)
f.write(outstr)
return dest_pathname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_page_markdown(self, markdown, page, config, files):\n listext = self.config['ext']\n src_file_path = page.file.abs_src_path\n prepath, ext = os.path.splitext(src_file_path)\n lang = ext.lstrip('.')\n filename = page.file.name\n if ext in listext:\n new_markdown = \"# {0}\\n\\n```{1}\\n\".format(filename, lang) + markdown + \"\\n```\"\n return new_markdown\n else:\n return markdown",
"def import_editor_markdown_file(mdfile_dir,\n output_content_dir,\n output_dirname,\n create_empty_mdfile=False):\n\n # Create output directory if it does not exist\n if not os.path.isdir(output_content_dir):\n os.makedirs(output_content_dir)\n\n # Make sure source directory exists\n if not os.path.isdir(mdfile_dir):\n print 'Cannot find directory:', mdfile_dir\n return\n\n # Compute output directory\n output_markdown_dir = os.path.join(output_content_dir, output_dirname)\n\n # Create empty target structure directory\n if os.path.isdir(output_markdown_dir):\n shutil.rmtree(output_markdown_dir)\n os.makedirs(output_markdown_dir)\n\n # We know that input directory has only one markdown file with\n # the same name (content.md)\n input_mdfile = os.path.join(mdfile_dir, 'content.md')\n output_mdfile = os.path.join(output_markdown_dir, 'content.md')\n\n # Copy markdown file or create empty one if requested\n if os.path.isfile(input_mdfile):\n shutil.copy(input_mdfile, output_mdfile)\n else:\n if create_empty_mdfile:\n print 'Generating empty markdown file.'\n with open(output_mdfile, 'w') as f:\n f.write('# Empty Markdown\\n')\n else:\n # If there is nothing to import then return\n print 'Cannot find markdown file in {}', mdfile_dir\n return\n\n # Copy markdown images if needed\n input_mdimages_dir = os.path.join(mdfile_dir, 'md-imgs')\n if os.path.isdir(input_mdimages_dir):\n output_mdimages_dir = os.path.join(output_markdown_dir, 'md-imgs')\n shutil.copytree(input_mdimages_dir, output_mdimages_dir)",
"def process_markdown(input_markdown, output_name, latex_img_dir = \"./\", input_path = \"./\", thumb_size=64):\n\tmd = markdown.Markdown( extensions=[ 'meta'\n\t , 'codehilite'\n\t , 'tables'\n\t , 'def_list'\n\t , 'footnotes'\n\t , ResourceExtractor({ \"resource_dir\": output_name\n\t , \"relative_path\": input_path\n\t })\n\t , AbstractExtractor()\n\t , ToCExtractor()\n\t , MathJaxExtension()\n\t , LaTeX({ \"latex_img_dir\": latex_img_dir\n\t , \"input_path\": input_path\n\t })\n\t ]\n\t )\n\t\n\t# Basic HTML conversion\n\thtml = md.convert(input_markdown)\n\t\n\t# Generate table of contents\n\ttoc = md.toc\n\t\n\t# Choose document title (default to the output name)\n\ttitle = output_name\n\t# Use the first heading if possible\n\tif len(toc) > 0:\n\t\ttitle = toc[0][1]\n\t# Better yet, get the explicitly given metadata\n\ttitle = md.Meta.get(\"title\", [title])[0]\n\t\n\t# Choose document subtitle (only available from metadata)\n\tsubtitle = md.Meta.get(\"subtitle\", [None])[0]\n\t\n\t# Get the image from the metadata\n\timg = md.Meta.get(\"img\", [None])[0]\n\timg_alt = md.Meta.get(\"img_alt\", [title])[0]\n\t\n\t# The abstract should be taken to be the first paragraph.\n\tabstract = md.abstract if md.abstract is not None else \"\"\n\t\n\t# Get the list of tags\n\ttags = md.Meta.get(\"tags\", [])\n\t\n\t# Get the list of files to include\n\tincludes = md.Meta.get(\"include\", [])\n\t\n\t# Get the show option\n\tshow = md.Meta.get(\"show\", [\"True\"])[0] == \"True\"\n\t\n\tfiles = md.resources\n\t\n\t# Add the article image to the list of files and create a thumbnail if\n\t# possible.\n\tif img is not None and img.startswith(\"file://\"):\n\t\timg = os.path.join(input_path, img[len(\"file://\"):])\n\t\timg_output_name = \"%s/%s\"%(output_name,\n\t\t unique(os.path.basename(img),\n\t\t [f.split(\"/\")[-1] for (_,f) in files]))\n\t\t\n\t\timg_thumbnail = \"%s.thumb.png\"%img\n\t\t\n\t\tp = Popen( [\"convert\"\n\t\t , img\n\t\t , \"-thumbnail\", \"%dx%d\"%(thumb_size,thumb_size)\n\t\t , img_thumbnail]\n\t\t , stdin = None\n\t\t , stdout = sys.stderr\n\t\t , stderr = sys.stderr\n\t\t )\n\t\tif p.wait() != 0:\n\t\t\traise Exception(\"Creating img thumbnail failed.\")\n\t\t\n\t\tfiles.append((img_thumbnail, img_output_name))\n\t\timg = img_output_name\n\t\n\t# Generate meta-data\n\tmeta_data = {\n\t\t\"url\" : output_name,\n\t\t\"title\" : title,\n\t\t\"subtitle\" : subtitle,\n\t\t\"img\" : img,\n\t\t\"img_alt\" : img_alt,\n\t\t\"abstract\" : abstract,\n\t\t\"tags\" : tags,\n\t\t\"show\" : show,\n\t}\n\t\n\treturn html, toc, meta_data, files, includes",
"def process(text, output_dir, file_name, json_output):\n\t\n\t# Process HTML\n\tprocessed_text_html = process_html(text)\n\t# Write processed HTML output \n\t#pre_proc.create_text_file(output_dir + \"/html_\" + file_name + \"_pre.html\", processed_text_html)\n\n\t# Convert HMTL to MD\n\ttext_md = pre_proc.extract_text_md(processed_text_html)\n\n\t# Process MD\n\tprocessed_text_md = process_md(text_md)\n\t\n\tif(json_output):\n\t\t# Convert MD to JSON\n\t\tprocessed_json = pre_proc.convert_md_to_json(processed_text_md, file_name)\n\t\t# Write processed JSON output \n\t\tpre_proc.create_binary_file(output_dir + \"/\" + file_name + \".json\", processed_json)\n\telse:\n\t\t# Write processed MD output \n\t\tpre_proc.create_text_file(output_dir + \"/\" + file_name + \".md\", processed_text_md)",
"def convert_md(md_file, output_file, contents, numbered):\n\n\tscript = ['pandoc', '-s', md_file, '-o', output_file]\n\tscript += ['-c', os.path.join(SCRIPT_DIR, 'themes', 'base.css')]\n\tscript += ['-B', os.path.join(SCRIPT_DIR, 'themes', 'header.html')]\n\n\t# Check the markdown to see if we need to include MathJax\n\tmaths = False if re.search('\\\\n\\\\$\\\\$(.*?)\\\\$\\\\$\\\\n', read_file(md_file),\n\t flags=re.MULTILINE | re.DOTALL) is None else True\n\n\tif numbered:\n\t\tscript.append('--number-sections')\n\n\tif contents:\n\t\tscript.append('--toc')\n\n\tif maths:\n\t\tscript.append('--mathjax')\n\n\tscript += ['--self-contained', '--highlight-style=haddock']\n\n\twith cd(os.path.dirname(md_file)):\n\t\tprint('Converting %s to %s using Pandoc...' % (os.path.basename(md_file), os.path.basename(output_file)))\n\t\tcheck_output(script) # Runs the script on the OS and raises an exception on failure\n\n\tinclude_fonts(output_file) # Include Google fonts\n\tif contents or maths:\n\t\tinclude_js(output_file, maths)\n\t\tadd_contents(output_file)",
"def convert_file(fname):\n md = markdown.Markdown(extensions=['extra'], tab_length=2)\n with open(fname, \"r\") as f:\n content = ''.join(f.readlines())\n return md.convert(content)",
"def process(self,line):\n\n pattern_str = f\"src=.?[\\s\\\"].*?[\\s\\\"]\"\n p = re.compile(pattern_str)\n for m in p.finditer(line):\n\n file = m.group(0).split(\"src=\")[1][1:-1]\n if file.startswith(\"http\"):\n continue\n\n new_file = self._copy_file(file)\n\n re.sub(file,new_file,line)\n\n return line",
"def ler_markdown(nome_do_arquivo):\n return open(\"teste.md\").read()",
"def run():\r\n\r\n # Parse options and adjust logging level if necessary\r\n options, logging_level = parse_options()\r\n if not options: sys.exit(2)\r\n logger.setLevel(logging_level)\r\n logger.addHandler(logging.StreamHandler())\r\n\r\n # Run\r\n markdown.markdownFromFile(**options)",
"def render_markdown_from_file(f):\n s = StringIO()\n markdownFromFile(input=f, output=s, **MARKDOWN_KWARGS)\n html = s.getvalue()\n s.close()\n\n return html",
"def convert_md_file(self, md_file_path, config_file_path, heading_required=True, remove_title=True):\n try:\n # Check file exists\n content = open(md_file_path, encoding=\"UTF-8\").read()\n except FileNotFoundError:\n raise CouldNotFindMarkdownFileError(md_file_path, config_file_path)\n\n custom_processors = self.converter.processor_defaults()\n if remove_title:\n custom_processors.add(\"remove-title\")\n self.converter.update_processors(custom_processors)\n\n result = None\n try:\n result = self.converter.convert(content)\n except VertoError as e:\n raise VertoConversionError(md_file_path, e) from e\n\n if heading_required:\n if result.title is None:\n raise NoHeadingFoundInMarkdownFileError(md_file_path)\n\n if len(result.html_string) == 0:\n raise EmptyMarkdownFileError(md_file_path)\n check_converter_required_files(result.required_files, md_file_path)\n check_converter_glossary_links(result.required_glossary_terms, md_file_path)\n if result.heading_tree:\n check_heading_tree(result.heading_tree, md_file_path)\n return result",
"def render_md(filepath, file):\n try:\n with open(path.join(str(filepath[0]), str(file))) as fd:\n mkdwn = fd.read()\n html = markdown(mkdwn)\n except Exception:\n html = None\n\n return html",
"def __writeToFile(self, mdFile):\n with open(mdFile, 'a') as writer:\n for line in self.__markdownOutput: \n writer.write(line)",
"def outputMarkdown(self, mdFile):\n if os.path.exists(mdFile):\n os.remove(mdFile)\n\n self.__writeToFile(mdFile)",
"def do_single_file_preprocess(pdf_file):",
"def __html__(self, file_path:str) -> str:\n with open(f\"{file_path}\", \"r\") as mdfile: # Parse markdown file\n text = mdfile.read()\n html = self.md.convert(text) # Convert the markdown content text to hmtl\n return html",
"def read(self, source_path):\r\n\r\n self._md = Markdown(extensions=self.extensions)\r\n with pelican_open(source_path) as text:\r\n content = self._md.convert(text)\r\n\r\n metadata = self._parse_metadata(self._md.Meta)\r\n return content, metadata",
"def copy_text(input_text, output_text):\n with open(input_text, 'r') as in_text, open(output_text, 'a') as out_text:\n out_text.write(in_text.read())",
"def latex2wp():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_path')\n #parser.add_argument('destination')\n args = parser.parse_args()\n\n source = None\n blocks = None\n paragraphs = []\n\n source = from_file(args.input_path)\n source = document(source)\n source = split_inline(source)\n blocks = split_blocks(source)\n fragments = process(blocks)\n\n html = refs('\\n'.join(fragments))\n print(html)",
"def doTheJob(input_file):\n\n Parse.parseFile(input_file)\n Write.writeFile(input_file.replace(\".txt\", \".html\"))",
"def read_markdown_text_file(file_name):\n \n with open (file_name, 'r', encoding='utf-16') as fp:\n htmlmarkdown = markdown.markdown(fp.read())\n return htmlmarkdown",
"def read(self, source_path):\n\n self._source_path = source_path\n self._md = Markdown(extensions=self.extensions)\n\n # Custom handling of ^include.html^\n self._md.inlinePatterns['include_html'] = IncludeHtmlPattern(self)\n\n with pelican_open(source_path) as text:\n content = self._md.convert(text)\n\n metadata = self._parse_metadata(self._md.Meta)\n return content, metadata",
"def render_markdown_from_file(f):\n return clean_markdown_html(djblets_markdown.render_markdown_from_file(\n f, **MARKDOWN_KWARGS))",
"def process_md(text_md):\n\tprocessed_text_md = ( pre_proc.replace_br(text_md)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_false_titles)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_blank_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_cid)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_with_dash)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_hyphen)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_et_al)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_beta)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_vs)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_enye)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_ellipsis)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_subtraction)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_colon)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_dashes)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_marks)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_title_questions)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_useless_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_whitespaces)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_repeated_strings)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t)\n\treturn processed_text_md",
"def markdown_post(post):\n post['entry'] = markdown(post['entry'].replace(\"\\n\",\" \\n\"), output=\"html5\")\n return post",
"def main():\n # Get the date and time in a formatted string.\n today = datetime.datetime.today()\n date_formatted = today.strftime(\"%Y-%m-%d\")\n time_formatted = today.strftime(\"%H:%M:%S\")\n\n # Form the file name and path.\n file_name = date_formatted+\"-post.md\"\n file_path = os.path.join(POSTS_DIR, file_name)\n\n # Make the new header.\n header = HEADER_TEMPLATE.format(date_formatted, time_formatted)\n\n with open(file_path, 'w') as f:\n f.write(header)\n \n os.system(EDITOR+\" \"+file_path)",
"def markdown(text):\n text = gfm(text)\n text = markdown_lib.markdown(text)\n return text",
"def copy_files(self):\n files = ['LICENSE.md', 'CONTRIBUTING.md']\n this_dir = sh.pwd().strip()\n for _file in files:\n sh.cp(\n '{0}/templates/{1}'.format(this_dir, _file),\n '{0}/'.format(self.book.textdir)\n )",
"def process_file(config_filename, output_filename):\n\n config_dir = os.path.dirname(config_filename)\n\n with open(output_filename, 'w') as outfile:\n\n # Write out the HTML header\n with open(HEADER_HTML, 'r') as f:\n outfile.write(f.read())\n\n with open(config_filename, 'r') as f:\n # Change directory to the config file\n os.chdir(config_dir)\n # Now change to the directory of files\n # This way we can just access the files with the file names\n os.chdir('../files')\n\n book = yaml.load(f)\n for entry in book:\n chapter_file = entry['file']\n print chapter_file\n with open(chapter_file) as infile:\n div = \"<div id='%s' class='mkdown-text'>\\n\" % entry['name']\n outfile.write(div)\n outfile.write(infile.read())\n outfile.write(\"</div>\\n\")\n\n outfile.write(\"</html>\")",
"def convert_to_markdown(self, text: str) -> str:"
]
| [
"0.6226209",
"0.6111119",
"0.6070878",
"0.58925706",
"0.58658713",
"0.58396715",
"0.58019733",
"0.5768846",
"0.5747689",
"0.5685798",
"0.5676189",
"0.566404",
"0.56561",
"0.56402737",
"0.55311203",
"0.552163",
"0.55025846",
"0.55008787",
"0.549469",
"0.54747707",
"0.54745865",
"0.5455402",
"0.5450182",
"0.5447678",
"0.5363974",
"0.53478587",
"0.53355896",
"0.5335348",
"0.53325087",
"0.5322719"
]
| 0.75130004 | 0 |
Process all files. The specific processing action depends on the file type. | def process_all_files():
src_files = get_doc_files()
for src_pathname in src_files:
if src_pathname.suffix in MARKDOWN_EXTENSIONS:
process_file_markdown(src_pathname)
elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:
process_file_copytodest(src_pathname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)",
"def processFile(handler, fileType, inputFiles, inputDir, outputDir):\n\tgetResult = lambda fileType, inputFiles: \\\n\t\t(fileType, -1, 'there are more one {0} files'.format(fileType)) \\\n\t\tif len(inputFiles) > 1 else \\\n\t\t(fileType, *(handler(inputFiles[0], inputDir, outputDir)))\n\n\n\tsendNotification(*(getResult(fileType, inputFiles)))\n\treturn inputFiles",
"def process_based_on_type(file_path):\n\t# Is this a file?\n\tif os.path.isfile(file_path):\n\t\tprocess_file(file_path)\n\t# Or is it a directory?\n\telif os.path.isdir(file_path):\n\t\tprocess_directory(file_path)",
"def updateBaseFiles(self):\n for filename, filetype in self._get_base_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename) \n elif filetype is 'Properties':\n lines, write_out = self._update_properties_file(lines,filename)\n else:\n raise TypeError, \"Unknown base file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)",
"def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1",
"def process( self ):\n\t\t\n\t\tprint( self._query[\"header\"], file = self._file )\n\t\tself._file.flush()\n\n\t\tfor root, dirs, files in os.walk(self._directory):\n\t\t\tpath = root.split(os.sep)\n\n\t\t\tif( root.endswith(\"logFiles\") and ( root.find(\"template\") == -1 ) ):\n\t\t\t\tLogProcessor._process_dir(root, self._file_list, self._columns, self._file, self._meta)",
"def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)",
"def process_modules(self) -> typing.NoReturn:\n\t\tfor moduleName in self.moduleNameSet:\n\t\t\tdetected_encoding = detect_encoding(moduleName)\n\n\t\t\tprint(f\"Processing {moduleName} ({detected_encoding})\")\n\n\t\t\twith open(moduleName, 'r+', encoding=detected_encoding) as fileStream:\n\t\t\t\t# Store the content of the file\n\t\t\t\tfileContent: str = fileStream.read()\n\t\t\t\t# Sets the file's current position at the offset, the position of the read/write pointer within the file\n\t\t\t\tfileStream.seek(0, 0)\n\t\t\t\t# Truncates the file's size\n\t\t\t\tfileStream.truncate()\n\n\t\t\t\t# Process regex patterns\n\t\t\t\tfor regexDict in regexDictList:\n\t\t\t\t\tfileContent = self.process_function(regexDict, fileContent)\n\n\t\t\t\t# Rewrite the processed content of the file\n\t\t\t\tfileStream.write(fileContent)",
"def run(self):\n # FILE INPUT\n if self.text_type == \"file\":\n self.process_files()\n\n # STRING INPUT\n else:\n self.process_strings()\n\n if self.json:\n self.save_json()\n\n if self.errors:\n print(\"\\nThe following file(s) could not be opened:\")\n for error in self.errors:\n print(f\"\\t{error}\")",
"def process(self):\n level = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n for (n, input_file) in enumerate(self.input_files):\n self.logger.info(\"INPUT FILE %i / %s\", n, input_file.pageId or input_file.ID)\n file_id = make_file_id(input_file, self.output_file_grp)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n self.add_metadata(pcgts)\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n page_image, page_xywh, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n self.logger.info('Page \"%s\" uses %f DPI', page_id, dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n \n if level == 'page':\n self.process_page(page, page_image, page_xywh, zoom,\n input_file.pageId, file_id)\n else:\n if level == 'table':\n regions = page.get_TableRegion()\n else: # region\n regions = page.get_AllRegions(classes=['Text'], order='reading-order')\n if not regions:\n self.logger.warning('Page \"%s\" contains no text regions', page_id)\n for region in regions:\n region_image, region_xywh = self.workspace.image_from_segment(\n region, page_image, page_xywh, feature_filter='binarized')\n if level == 'region':\n self.process_region(region, region_image, region_xywh, zoom,\n input_file.pageId, file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n self.logger.warning('Page \"%s\" region \"%s\" contains no text lines',\n page_id, region.id)\n for line in lines:\n line_image, line_xywh = self.workspace.image_from_segment(\n line, region_image, region_xywh, feature_filter='binarized')\n self.process_line(line, line_image, line_xywh, zoom,\n input_file.pageId, region.id,\n file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_path = os.path.join(self.output_file_grp, file_id + '.xml')\n pcgts.set_pcGtsId(file_id)\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n self.logger.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.output_file_grp, out.local_filename)",
"def ComputeFileTypes(self):\n for rel_path, file_data in self._files.iteritems():\n if 'ftype' in file_data:\n continue\n ftype = self._file_type_decoder.GetType(rel_path)\n if ftype:\n file_data['ftype'] = ftype",
"def _iterate_over_files(self):\n stats = Statistics()\n\n args = arguments.Args()\n\n for file in args.files:\n\n if isimage(file):\n before_size = stats.calculate_before_optimization(file)\n\n puts(\"%s %s\" % (\n e(\"==>\"),\n os.path.basename(file))\n )\n\n if \"--lossy\" in args.flags:\n Optimize.lossy(file)\n if \"--lossless\" in args.flags:\n Optimize.lossless(file)\n after_size = stats.calculate_after_optimization(file)\n\n puts(\"%s %s (%s)\" % (\n p(\"<==\"),\n os.path.basename(file),\n s(after_size) if after_size < before_size else after_size\n ))\n\n stats.show_statistics()",
"def main():\n\targuments_sent = sys.argv\n\tif len(arguments_sent) > 1:\n\t\tfile_path = arguments_sent[1]\n\t\tprocess_based_on_type(file_path)",
"def process_files(compress, files):\n [compress.add_file(file) for file in files]\n\n compress.execute() # upload files to iLovePDF\n compress.download() # download resultant file\n print(\"Compression saved {}% of disk space.\".format(\n PDFWorkshop.__percentage_storage_saved(compress))\n )\n compress.delete_current_task()",
"def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))",
"def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)",
"def process(self, file_list, shapefiles_dir, output_dir):\n for datatype in self.data_types:\n for data_file in file_list:\n data_file = str(data_file)\n original_filename = os.path.basename(data_file)\n cut_fname = original_filename[:len(self.date_format)+2]\n observation_date = datetime.strptime(cut_fname, self.date_format).strftime('%Y%m%d')\n try:\n input_file = HE5(self.data_directory, original_filename)\n if ('nc4' in data_file):\n input_file = NC4(self.data_directory, original_filename)\n data = input_file.read(datatype.ds_name)\n except Exception as e:\n print(e)\n continue\n\n rasterdata = input_file.genTif(data, datatype.resolution)\n\n for shpfile in Path(shapefiles_dir).rglob('*.shp'):\n shapefile = Shapefile(shapefiles_dir, shpfile.name)\n csvfile = shapefile.read_shape_file(datatype, rasterdata, data, self.minVal)\n csvfile = np.array(csvfile)\n\n output = OutputFile(output_dir, shapefile.getDirName(), self.name, datatype, observation_date)\n output.save(csvfile)\n\n del rasterdata",
"def process_data(*args, **kwargs):\n\n filepath = kwargs[\"filepath\"]\n func = kwargs[\"func\"]\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(datafile)\n print('{}/{} files processed.'.format(i, num_files))",
"def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences",
"def parse_files():\n pfuncs = [ # parse py files : add #\n parse_test_files,\n parse_model_files,\n parse_url_files,\n parse_route_files,\n parse_settings_files,\n parse_setup_files,\n ]\n\n while PY_FILES:\n for _ in range(len(pfuncs)):\n a_func = pfuncs.pop()\n a_func()\n break",
"def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()",
"def process_file(self, file, target_dir):\n raise NotImplementedError(\"Process file method not implemented\")",
"def convert_files_sequential(self) -> None:\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n self.convert_file(os.path.join(\n self.audios_dir, file), self.output_format)",
"def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)",
"def process_file(file_name):\n pass # delete this line and replace with your code here",
"def _process(self):\n # choose the correct transform model before processing TI data\n self._select_transform()\n\n # process type first, fail early\n self._process_type()\n\n # process type specific data\n if isinstance(self.transform, GroupTransformModel):\n self._process_group()\n elif isinstance(self.transform, IndicatorTransformModel):\n self._process_indicator()\n\n # self.process_associations(self.transform.associations)\n self._process_associated_group(self.transform.associated_groups)\n self._process_attributes(self.transform.attributes or [])\n self._process_security_labels(self.transform.security_labels or [])\n self._process_tags(self.transform.tags or [])\n\n # date added\n self._process_metadata_datetime('dateAdded', self.transform.date_added)\n\n # last modified\n self._process_metadata_datetime('lastModified', self.transform.last_modified)\n\n # xid\n self._process_metadata('xid', self.transform.xid)",
"def process(self):\n self.extract()\n self.transform()\n self.load()",
"def process_file(self):\n self._processing_logger.log_info('Start processing')\n self.parsing_start_time = datetime.datetime.now()\n if os.path.exists(self.tmp_stat_file_path) \\\n and not HcsParsingUtils.active_processing_exceed_timeout(self.tmp_stat_file_path):\n self._processing_logger.log_info('This file is processed by another parser, skipping...')\n return 2\n self.create_tmp_stat_file()\n hcs_index_file_path = self.hcs_root_dir + MEASUREMENT_INDEX_FILE_PATH\n time_series_details = self._extract_time_series_details(hcs_index_file_path)\n self.generate_ome_xml_info_file()\n xml_info_tree = ET.parse(self.ome_xml_info_file_path).getroot()\n plate_width, plate_height = self._get_plate_configuration(xml_info_tree)\n wells_tags = self.read_wells_tags()\n if wells_tags:\n self._processing_logger.log_info(\"Tags \" + str(wells_tags))\n if not TAGS_PROCESSING_ONLY and not EVAL_PROCESSING_ONLY:\n if not self._localize_related_files():\n self._processing_logger.log_info('Some errors occurred during copying files from the bucket, exiting...')\n return 1\n else:\n self._processing_logger.log_info('Localization is finished.')\n local_preview_dir = os.path.join(self.tmp_local_dir, 'preview')\n hcs_local_index_file_path = get_path_without_trailing_delimiter(self.tmp_local_dir) \\\n + MEASUREMENT_INDEX_FILE_PATH\n for sequence_id, timepoints in time_series_details.items():\n self._processing_logger.log_info('Processing sequence with id={}'.format(sequence_id))\n sequence_index_file_path = self.extract_sequence_data(sequence_id, hcs_local_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {}'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_index_file_path, local_preview_dir, sequence_id))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful...')\n return 1\n sequence_overview_index_file_path, wells_grid_mapping = self.build_sequence_overview_index(sequence_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {} \"{}\"'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_overview_index_file_path, local_preview_dir,\n sequence_id, 'overview_data.ome.tiff'))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful: well preview generation failure')\n return 1\n self.write_dict_to_file(os.path.join(local_preview_dir, sequence_id, 'wells_map.json'),\n self.build_wells_map(sequence_id, wells_grid_mapping, wells_tags))\n if LOCALIZE_USE_PIPE == \"true\":\n cloud_transfer_result = os.system('pipe storage cp -f -r \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n else:\n cloud_transfer_result = os.system('aws s3 sync \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n if cloud_transfer_result != 0:\n self._processing_logger.log_info('Results transfer was not successful...')\n return 1\n self._write_hcs_file(time_series_details, plate_width, plate_height)\n if not EVAL_PROCESSING_ONLY:\n tags_processing_result = self.try_process_tags(xml_info_tree, wells_tags)\n if TAGS_PROCESSING_ONLY:\n if wells_tags:\n for sequence_id, timepoints in time_series_details.items():\n path = os.path.join(self.hcs_img_service_dir, sequence_id, 'wells_map.json')\n self.write_dict_to_file(path, self.update_wells_json(path, wells_tags))\n return tags_processing_result\n if not TAGS_PROCESSING_ONLY:\n eval_processing_result = self.try_process_eval()\n if 
EVAL_PROCESSING_ONLY:\n return eval_processing_result\n self.create_stat_file()\n return 0",
"def io_files(self, iterable, ext=None, func=None):\n for input_path in iterable:\n output_path, temp_file = self.check_output_path(input_path, ext)\n\n try:\n func(input_path, temp_file)\n except Exception as e:\n if self._force_continue is True:\n self.handle_error(e, input_path)\n else:\n raise e\n\n self.overwrite_output_path(input_path, output_path, temp_file)",
"def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)"
]
| [
"0.6923511",
"0.6782931",
"0.66985416",
"0.6443352",
"0.641794",
"0.64164966",
"0.6404735",
"0.63886476",
"0.62717456",
"0.62286955",
"0.62142897",
"0.61688447",
"0.6158451",
"0.6150643",
"0.61071146",
"0.6042156",
"0.60409087",
"0.60171324",
"0.60055137",
"0.59826595",
"0.59662753",
"0.59433204",
"0.5888666",
"0.58813334",
"0.5877026",
"0.58626693",
"0.58545196",
"0.58542556",
"0.58078444",
"0.57879984"
]
| 0.73723686 | 0 |
Plots single sided FFT | def plotFFT(filename):
fs_rate, signal = wavfile.read(filename)
len_audio = len(signal.shape)
print(signal.shape)
print(signal[:][0])
if len_audio == 2:
signal = signal.sum(axis=1) / 2
N = signal.shape[0]
FFT = abs(scipy.fft(signal))
FFT_side = FFT[range(N//2)]
freqs = scipy.fftpack.fftfreq(signal.size, 1.0/fs_rate)
fft_freqs = np.array(freqs)
freqs_side = freqs[range(N//2)] # one side frequency range
plt.plot(freqs_side, abs(FFT_side), "b") # plotting the single-sided fft spectrum
plt.xlabel('Frequency (Hz)')
plt.ylabel('Single-sided Amplitude')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_fft(self):\r\n\r\n self.ipx = int(self.imageData.shape[1]/2.)\r\n\r\n self.ipy = int(self.imageData.shape[2]/2.)\r\n\r\n nearf = np.absolute(self.DF[0:(self.freqs.shape[0]/2)-1,self.ipx-2:self.ipx+2,self.ipy-2:self.ipy+2])\r\n\r\n mpl.plot(self.freqs[0:(self.freqs.shape[0]/2)-1], np.mean(np.mean(nearf,axis=1), axis=1),\r\n\r\n 'ko-', markersize=2.5)\r\n\r\n mpl.plot(self.freqs[self.freq_point], np.mean(np.mean(nearf,axis=1), axis=1)[self.freq_point], 'ro', markersize=5)\r\n\r\n nearf = np.absolute(self.DF[0:(self.freqs.shape[0]/2)-1,-6:-1,-6:-1])\r\n\r\n mpl.plot(self.freqs[0:(self.freqs.shape[0]/2)-1], np.mean(np.mean(nearf,axis=1), axis=1),\r\n\r\n 'c-', markersize=2.5)\r\n\r\n mpl.title('FFT center of image and corner')",
"def plot_fft(x, y, th=1e-4):\n n = x.size\n Lx = x[-1]-x[0]\n yf = np.fft.rfft(y)\n xf = np.fft.rfftfreq(n, d=Lx/n)\n fig = plt.figure(figsize=[9, 9])\n ax = fig.add_subplot(211)\n ax.plot(x, y)\n plt.title('1) first component of ODE solution')\n\n ax = fig.add_subplot(223)\n yf = yf / (n/2)\n ii = (np.abs(yf) > th)\n ii[0] = False\n plt.plot(xf[ii], np.abs(yf[ii]))\n T0 = 1.0/np.mean(xf*np.abs(yf))\n plt.title('2) power spectrum')\n plt.draw()\n plt.pause(2)\n return T0",
"def FourierPlot(tas):\n detrend = signal.detrend(tas)\n L = len(tas)\n freqs = np.fft.fftfreq(L)\n tas_fft = np.fft.fft(detrend)\n R = tas_fft.real\n Im = tas_fft.imag\n mag = np.sqrt(R**2+Im**2)\n plt.plot(1/freqs,mag)",
"def plot_spectrum(wavetable: np.ndarray) -> None:\n ps = np.abs(np.fft.fft(wavetable)) ** 2\n\n time_step = 1 / 44100\n freqs = np.fft.fftfreq(wavetable.size, time_step)\n idx = np.argsort(freqs)\n\n plt.plot(freqs[idx], ps[idx])\n plt.show()",
"def fft_viz(image: np.ndarray, shift: bool = True) -> None:\n plt.imshow(img_fft(image, shift=shift), cmap='gray')",
"def show_spectrum(h, title=\"\"):\n H = fft2(h)\n\n # Remember to plot the abs of the fft2(h)\n plt.imshow(np.abs(H))\n plt.gray()\n plt.title(title)\n plt.show()",
"def plot_fft(self, theory=None, suffix='', label=None,\n substrate=None, surface=None, **kwargs):\n raise NotImplementedError\n import matplotlib.pyplot as plt\n c = coordinated_colors()\n if substrate is None and surface is None:\n raise TypeError(\"FFT reflectivity needs substrate or surface\")\n F = self.fresnel(substrate=substrate, surface=surface)\n #Qc = sqrt(16*pi*substrate)\n Qc = 0\n Qmax = max(self.Q)\n T = np.linspace(Qc, Qmax, len(self.Q))\n z = np.linspace(0, 2*pi/Qmax, len(self.Q)//2)\n if hasattr(self, 'R'):\n signal = np.interp(T, self.Q, self.R/F(self.Q))\n A = abs(numpy.fft.fft(signal - np.average(signal)))\n A = A[:len(A)//2]\n plt.plot(z, A, '.-', color=c['light'],\n label=self.label(prefix=label,\n gloss='data',\n suffix=suffix))\n if theory is not None:\n Q, R = theory\n signal = np.interp(T, Q, R/F(Q))\n A = abs(numpy.fft.fft(signal-np.average(signal)))\n A = A[:len(A)//2]\n plt.plot(z, A, '-', color=c['dark'],\n label=self.label(prefix=label,\n gloss='theory',\n suffix=suffix))\n plt.xlabel('w (A)')\n if substrate is None:\n name = \"air:%s\" % surface.name\n elif surface is None or isinstance(surface, Vacuum):\n name = substrate.name\n else:\n name = \"%s:%s\" % (substrate.name, surface.name)\n plt.ylabel('|FFT(R/R(%s))|' % name)",
"def fft_plot(x: np.ndarray, fs: Optional[int] = None,\n nfft: int = 2**18, onesided_flag: bool = None,\n mode: str = \"magnitude\", log_freq_flag: bool = False) -> go.Figure:\n\n # input validation\n assert x.ndim == 1, \"input must be a 1D array\"\n assert mode in [\"magnitude\", \"phase\", \"magnitude_phase\"], \\\n \"invalid mode, must be magnitude / phase / magnitude_phase\"\n if fs is None:\n fs = 2 * np.pi\n if onesided_flag is None:\n if all(np.isreal(x)):\n onesided_flag = True\n else:\n onesided_flag = False\n if log_freq_flag is True:\n assert onesided_flag is True, \\\n \"log scale can be plotted only if onesided_flag is True\"\n\n # calculate\n nfft = fft.next_fast_len(np.maximum(x.size, nfft))\n\n if onesided_flag:\n x_fft = fft.rfft(x, n=nfft)\n f_vec = fft.rfftfreq(nfft, 1/fs)\n else:\n x_fft = np.fft.fftshift(fft.fft(x, n=nfft))\n f_vec = np.fft.fftshift(fft.fftfreq(nfft, 1/fs))\n\n mag = 10*np.log10(np.abs(x_fft)**2)\n phase = np.angle(x_fft) * 180 / (np.pi)\n\n # plot\n freq_title = \"Frequency [rad]\" if fs == 2*np.pi else \"Frequency [Hz]\"\n\n if mode == \"magnitude\":\n fig = px.line(x=f_vec, y=mag, log_x=log_freq_flag)\n fig.update_xaxes(title_text=freq_title)\n fig.update_yaxes(title_text=\"Magnitude [dB]\")\n elif mode == \"phase\":\n fig = px.line(x=f_vec, y=phase, log_x=log_freq_flag)\n fig.update_xaxes(title_text=freq_title)\n fig.update_yaxes(title_text=\"Phase [degrees]\")\n elif mode == \"magnitude_phase\":\n fig = make_subplots(\n rows=2, cols=1,\n shared_xaxes=True)\n fig.add_trace(go.Scatter(x=f_vec, y=mag), row=1, col=1)\n fig.add_trace(go.Scatter(x=f_vec, y=phase), row=2, col=1)\n fig.update_xaxes(title_text=freq_title)\n if log_freq_flag:\n fig.update_xaxes(type=\"log\")\n fig.update_yaxes(title_text=\"Magnitude [dB]\", row=1, col=1)\n fig.update_yaxes(title_text=\"Phase [degrees]\", row=2, col=1)\n fig.update_layout(showlegend=False)\n\n fig.show()\n\n return fig",
"def plot(self):\n\t\tself.plotOfSpect()",
"def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def plot_timefrequency(z, time, f, signal=None, method=\"stft\"):\n\n if method == \"stft\":\n figure_title = \"Short-time Fourier Transform Magnitude\"\n fig, ax = plt.subplots()\n for i in range(len(time)):\n ax.plot(f, z[:, i], label=\"Segment\" + str(np.arange(len(time))[i] + 1))\n ax.legend()\n ax.set_title(\"Signal Spectrogram\")\n ax.set_ylabel(\"STFT Magnitude\")\n ax.set_xlabel(\"Frequency (Hz)\")\n\n elif method == \"cwt\":\n figure_title = \"Continuous Wavelet Transform Magnitude\"\n elif method == \"wvd\":\n figure_title = \"Wigner Ville Distrubution Spectrogram\"\n fig = plt.figure()\n plt.plot(time, signal)\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Signal\")\n\n elif method == \"pwvd\":\n figure_title = \"Pseudo Wigner Ville Distribution Spectrogram\"\n\n fig, ax = plt.subplots()\n spec = ax.pcolormesh(time, f, z, cmap=plt.get_cmap(\"magma\"), shading=\"auto\")\n plt.colorbar(spec)\n ax.set_title(figure_title)\n ax.set_ylabel(\"Frequency (Hz)\")\n ax.set_xlabel(\"Time (sec)\")\n return fig",
"def plotSpectrum(y,Fs):\n n = len(y) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n \n plt.plot(frq,abs(Y),'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')",
"def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()",
"def plotDFT(x):\n \n X = DFTdirect(x)\n plt.plot([c.re for c in x], [c.im for c in x], 'ro')\n plt.plot([c.re for c in X], [c.im for c in X], 'bo')\n plt.show()",
"def plot_fourier_ampl(fourier_ica_obj, meg_data, W_orig,\n fnout=None, show=True):\n\n\n # ------------------------------------------\n # import necessary modules\n # ------------------------------------------\n from matplotlib import pyplot as plt\n from matplotlib import gridspec as grd\n\n\n # ------------------------------------------\n # generate sources for plotting\n # ------------------------------------------\n fourier_ampl = fourier_ica_obj.get_fourier_ampl(meg_data, W_orig)\n\n\n # ------------------------------------------\n # collect some general information\n # ------------------------------------------\n ncomp = fourier_ampl.shape[0]\n nbins = fourier_ampl.shape[1]\n sfreq_bins = nbins/(fourier_ica_obj.fhigh - fourier_ica_obj.flow)\n\n # define axis/positions for plots\n xaxis_fourier = np.arange(nbins)/sfreq_bins + fourier_ica_obj.flow\n\n\n # ------------------------------------------\n # loop over all activations\n # ------------------------------------------\n plt.ioff()\n plt.figure('Fourier amplitude', figsize=(5, 14))\n nplot = np.min([10, ncomp])\n\n gs = grd.GridSpec(nplot, 1)\n for icomp in range(nplot):\n\n if icomp == nplot-1:\n spines = ['bottom']\n else:\n spines = []\n\n # ----------------------------------------------\n # plot Fourier amplitudes\n # ----------------------------------------------\n p1 = plt.subplot(gs[icomp, 0])\n plt.xlim(fourier_ica_obj.flow, fourier_ica_obj.fhigh)\n plt.ylim(0.0, 1.0)\n adjust_spines(p1, spines, labelsize=13)\n if icomp == nplot-1:\n plt.xlabel('freq [Hz]')\n elif icomp == 0:\n p1.set_title(\"Fourier amplitude (arbitrary units)\")\n\n p1.bar(xaxis_fourier, fourier_ampl[icomp, :], 0.8, color='b', )\n\n # add some information\n IC_number = 'IC#%d' % (icomp+1)\n p1.text(fourier_ica_obj.flow-5, 0.4, IC_number, color='black', rotation=90)\n\n # save image\n if fnout:\n plt.savefig(fnout + '.png', format='png')\n\n # show image if requested\n if show:\n plt.show()\n\n plt.close('Fourier amplitude')\n plt.ion()",
"def plotWavelet(self):\n\n a = 0\n if(self.axQT is not None):\n self._bxPlot()\n a += 1\n if(self.bxQT is not None):\n self._cxPlot()\n a += 1\n\n if(a > 0):\n return\n\n self._setupPlot()\n self._axPlot()\n self._bxPlot()\n self._cxPlot()\n self._dxPlot()\n self._endingPlot()",
"def plot_freq_spec(data, title):\n plt.title(title)\n\n def plot_freq_spec(axis, line, label):\n n = len(axis)\n fft = fftpack.fft(axis) / n\n fft = fft[range(int(n / 2))]\n plt.plot(range(int(n / 2)), abs(fft), line, label=label)\n plot_freq_spec(data[:, 0], 'r-', label='x')\n plot_freq_spec(data[:, 1], 'g-', label='y')\n plot_freq_spec(data[:, 2], 'b-', label='z')",
"def plot_spectrum(self, sp1d, title=None, color='k'):\n if title is not None:\n self.set_title(title)\n self.set_axis_label(''.join(['Wavelength [', sp1d.wunit.name, ']']),\n 'x')\n self.set_axis_label('Counts', 'y')\n self.axplot.plot(sp1d.wlen, sp1d.counts, color)\n self.axplot.axis('tight')\n self.fig.canvas.draw()\n return",
"def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")",
"def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()",
"def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()",
"def plot(self, inline=True, fname=None, show_colorbar=False):\n color_norm = matplotlib.colors.Normalize(\n vmin=self.decibel_limits[0], vmax=self.decibel_limits[1]\n )\n\n plt.imshow(self.spectrogram[::-1], cmap=\"Greys\", norm=color_norm)\n\n # pick values to show on time and frequency axes\n yvals = self.frequencies.round(-2).astype(int)\n xvals = self.times.round(2)\n y_idx = [int(ti) for ti in np.linspace(0, len(yvals), 8)]\n y_idx[-1] -= 1\n plt.yticks(len(yvals) - np.array(y_idx), yvals[y_idx])\n x_idx = [int(ti) for ti in np.linspace(0, len(xvals), 6)]\n x_idx[-1] -= 1\n plt.xticks(x_idx, xvals[x_idx])\n\n # add axes labels\n plt.ylabel(\"frequency (Hz): mel scale\")\n plt.xlabel(\"time (sec)\")\n\n if show_colorbar:\n plt.colorbar()\n\n # if fname is not None, save to file path fname\n if fname:\n plt.savefig(fname)\n\n # if not saving to file, check if a matplotlib backend is available\n if inline:\n if os.environ.get(\"MPLBACKEND\") is None:\n warnings.warn(\"MPLBACKEND is 'None' in os.environ. Skipping plot.\")\n else:\n plt.show()",
"def display_fft(self, N=4096):\n if len(self.series) % N != 0:\n return\n\n h = self.series[-N:]\n H = fft(h)\n\n # the squared magnitude of the fft is an estimate of the\n # power spectral density\n\n # http://documents.wolfram.com/applications/timeseries/\n # UsersGuidetoTimeSeries/1.8.3.html\n # http://en.wikipedia.org/wiki/Power_spectral_density\n freq = range(N / 2 + 1)\n sdf = [Hn * Hn.conjugate() for Hn in H]\n sdf = [sdf[f].real for f in freq]\n loglog(freq, sdf)\n xlabel(\"frequency\")\n ylabel(\"power\")\n show()",
"def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()",
"def plotfft(s, fmax):\n\n fs = abs(np.fft.fft(s))\n f = np.linspace(0, fmax // 2, len(s) // 2)\n return (f[1:len(s) // 2].copy(), fs[1:len(s) // 2].copy())",
"def plot_three(spectrum, thresh=1):\n plt.figure(figsize=(10, 4))\n plt.subplot(1,3,1)\n spectrum.plot()\n plt.subplot(1,3,2)\n plot_angle(spectrum, thresh=thresh)\n plt.subplot(1,3,3)\n wave = spectrum.make_wave()\n wave.unbias()\n wave.normalize()\n wave.segment(duration=0.01).plot()\n display(wave.make_audio())",
"def plot_flux( source, ctr_freq, bandwidth, nchan, format1 = False, show = False, **kwargs ):\n\n freqs = psr_u.chan_to_freq( ctr_freq, bandwidth, nchan )\n fluxes = []\n for f in freqs:\n flux = getFlux( f, source, format1 )\n fluxes.append( flux )\n\n if show:\n # Set up figure and axes\n fig = plt.figure()\n ax = fig.add_subplot( 111, facecolor = 'w' )\n\n xText = ax.set_xlabel( r\"Frequency (GHz)\" )\n yText = ax.set_ylabel( r\"Flux (Jy)\" )\n title = ax.set_title( r\"The change in flux (Jy) as a function of frequency (GHz)\" )\n ax.plot( freqs, fluxes, **kwargs )\n plt.show()\n return ax\n else:\n return fluxes",
"def plot_cf(self, **options):\n n = len(self.hs)\n xs = np.arange(-n//2, n//2)\n hs = np.roll(self.hs, len(self.hs) // 2)\n plt.plot(xs, hs.real, label='real', **options)\n plt.plot(xs, hs.imag, label='imag', **options)\n plt.legend()",
"def plot_chans(freq=True):\n f,ax = plt.subplots(4,3)\n for ant in range(12):\n snap.write_int('rst',1)\n snap.write_int('antenna',ant)\n snap.write_int('rst',0)\n\n time.sleep(ACC_LEN/(512*200e6)*1e3)\n arr = struct.unpack('>256Q',snap.read('spectrum',8*256))\n \n ax[ant%4][int(ant/4)].semilogy(FREQ,arr,'.-',lw=1)\n ax[ant%4][int(ant/4)].set_xlim(FREQ.max(), FREQ.min())\n ax[ant%4][int(ant/4)].set_title('Antenna %s'%ANT_LABELS[ant])\n plt.show()"
]
| [
"0.72081864",
"0.7085793",
"0.70096654",
"0.666678",
"0.663691",
"0.6634006",
"0.6609489",
"0.65550005",
"0.6514818",
"0.64428234",
"0.64358807",
"0.64106315",
"0.6407862",
"0.64054",
"0.6353617",
"0.6329611",
"0.6270725",
"0.62528634",
"0.624896",
"0.62402976",
"0.62365526",
"0.6206095",
"0.61966586",
"0.61935556",
"0.61861205",
"0.6183823",
"0.6174345",
"0.6166825",
"0.6156081",
"0.6146449"
]
| 0.7207237 | 1 |
To Train Instrument Detection Initialize a neural Net with 100 1000 frequencies as input for each audio file , | def trainNet(): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)",
"def test():\r\n le = preprocessing.LabelEncoder()\r\n le.fit([\"Door Knocking\",\"Shower Running\",\"Toilet Flushing\",\"Vacuum Cleaning\",\"Keyboard Typing\", # encode class labels as numeric id values\r\n \"Coughing\",\"Neutral\"])\r\n \r\n if torch.cuda.is_available():\r\n device = \"cuda:0\"\r\n use_cuda = True\r\n else:\r\n device = \"cpu\"\r\n use_cuda = False\r\n \r\n myModel, start_epoch, train_hist = loadCheckpoint(31, use_cuda)\r\n \r\n #myModel = myModel.double()\r\n myModel = myModel.to(device, dtype=torch.double)\r\n next(myModel.parameters()).device # Check that it is on Cuda\r\n \r\n file_names = []\r\n class_ids = []\r\n max_s = 1\r\n sr = 44100 \r\n for entry in os.scandir(\"test wavs/\"): # for each folder corresponding to a class in dataset\r\n class_id = entry.name # get class numeric id according to label encoder\r\n relative_path = \"test wavs/\"+entry.name # get path location of data sample for loading audio\r\n file_names.append(relative_path) # append to list\r\n class_ids.append(class_id)\r\n\r\n max_s = 1\r\n sr = 44100\r\n X_test = [] \r\n for i in range(len(file_names)):\r\n audio = LoadAudio.load(file_names[i]) # load audio file\r\n audio = LoadAudio.resample(audio, sr) # resample audio\r\n audio = LoadAudio.mono(audio) # make audio stereo\r\n audio = LoadAudio.resize(audio, max_s) # resize audio \r\n sgram = LoadAudio.spectrogram(audio, n_mels=128, n_fft=1024, hop_len=None) # create spectrogram \r\n sgram = LoadAudio.hpssSpectrograms(audio,sgram)\r\n sgram_tensor = torch.tensor(sgram)\r\n X_test.append(sgram_tensor)\r\n\r\n pred = np.array([])\r\n for i in range(len(X_test)):\r\n inputs = X_test[i]\r\n # Normalize the inputs\r\n inputs_m, inputs_s = inputs.mean(), inputs.std()\r\n inputs = (inputs - inputs_m) / inputs_s\r\n inputs = inputs.unsqueeze(0)\r\n inputs = inputs.double()\r\n \r\n # Get predictions\r\n outputs = myModel(inputs)\r\n\r\n # Get the predicted class with the highest score\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n pred = np.append(pred, le.inverse_transform(predicted.detach().cpu().numpy()))\r\n \r\n\r\n df = pd.DataFrame(pred, columns=[\"Predicted\"]) # save predictions as a datafram column\r\n df['True'] = class_ids # save true class as a datafram column\r\n print(\"\\nPredicted:\", df)",
"def train(self, trainfile):",
"def run(self):\n from audio import AudioRecorder\n\n loader = SingleInputLoader(128)\n recorder = AudioRecorder()\n\n with tf.Session() as sess:\n model = create_default_model('record', 128, loader)\n model.restore(sess, 'train/best-weights')\n \n while True:\n print('Listening...')\n audio, width = recorder.record()\n audio = np.array(audio)\n\n #calculate the power spectrum of the audio and of sampling rate 16000 \n input_ = preprocess.calculatePowerSpectrogram(audio, 16000)\n\n loader.set_input(input_)\n [decoded] = model.step(sess, loss=False, update=False, decode=True)\n\n decoded_ids_paths = [Test.extract_decoded_ids(path) for path in decoded]\n \n for decoded_path in decoded_ids_paths:\n decoded_ids = next(decoded_path)\n decoded_str = self.idsToSentence(decoded_ids)\n print('Predicted: {}'.format(decoded_str))",
"def load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] != 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': \"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train",
"def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################",
"def train():\n pass",
"def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')",
"def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)",
"def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()",
"def train(self, num_batches: int):",
"def train_naive(): # add arguments as needed\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train():\n # YOUR TRAINING CODE GOES HERE",
"def train():\n counts = {size: dict() for size in NGRAM_SIZES}\n for word in tqdm.tqdm(word_iterator(\"resources/datasets\")):\n if word == \"\":\n continue\n for size in NGRAM_SIZES:\n for token in ngrams(word, 2 * size):\n left, right = token[:size], token[size:]\n counts[size].setdefault(left, dict())\n counts[size][left].setdefault(right, 0)\n counts[size][left][right] += 1\n model = {size: dict() for size in NGRAM_SIZES}\n for size in NGRAM_SIZES:\n for left in counts[size]:\n total = sum(counts[size][left].values())\n model[size][left] = dict()\n for right in counts[size][left]:\n model[size][left][right] = math.log(\n counts[size][left][right] / total)\n with open(MODEL_FILENAME, \"wb\") as file:\n pickle.dump(model, file)",
"def train(self, trainFilenames):\n\n\t\tstartIndex = len(self.documents)\n\t\tendIndex = startIndex + len(trainFilenames)\n\t\tself.documents += trainFilenames\n\n\t\tX = [[i] for i in range(startIndex, endIndex)]\n\t\tY = [isAroused(f) for f in trainFilenames]\n\n\t\tself.knn.fit(np.array(X), np.array(Y))",
"def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()",
"def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)",
"def train(**kwargs):\n\n # Roll out the parameters\n patch_size = kwargs[\"patch_size\"]\n image_data_format = kwargs[\"image_data_format\"]\n generator_type = kwargs[\"generator_type\"]\n dset = kwargs[\"dset\"]\n batch_size = kwargs[\"batch_size\"]\n n_batch_per_epoch = kwargs[\"n_batch_per_epoch\"]\n nb_epoch = kwargs[\"nb_epoch\"]\n model_name = kwargs[\"model_name\"]\n save_weights_every_n_epochs = kwargs[\"save_weights_every_n_epochs\"]\n visualize_images_every_n_epochs = kwargs[\"visualize_images_every_n_epochs\"]\n use_mbd = kwargs[\"use_mbd\"]\n label_smoothing = kwargs[\"use_label_smoothing\"]\n label_flipping_prob = kwargs[\"label_flipping_prob\"]\n use_l1_weighted_loss = kwargs[\"use_l1_weighted_loss\"]\n prev_model = kwargs[\"prev_model\"]\n discriminator_optimizer = kwargs[\"discriminator_optimizer\"]\n n_run_of_gen_for_1_run_of_disc = kwargs[\"n_run_of_gen_for_1_run_of_disc\"]\n MAX_FRAMES_PER_GIF = kwargs[\"MAX_FRAMES_PER_GIF\"]\n\n # batch_size = args.batch_size\n # n_batch_per_epoch = args.n_batch_per_epoch\n # nb_epoch = args.nb_epoch\n # save_weights_every_n_epochs = args.save_weights_every_n_epochs\n # generator_type = args.generator_type\n # patch_size = args.patch_size\n # label_smoothing = False\n # label_flipping_prob = False\n # dset = args.dset\n # use_mbd = False\n\n # Check and make the dataset\n # If .h5 file of dset is not present, try making it\n if not os.path.exists(\"../../data/processed/%s_data.h5\" % dset):\n print(\"dset %s_data.h5 not present in '../../data/processed'!\" % dset)\n if not os.path.exists(\"../../data/%s/\" % dset):\n print(\"dset folder %s not present in '../../data'!\\n\\nERROR: Dataset .h5 file not made, and dataset not available in '../../data/'.\\n\\nQuitting.\" % dset)\n return\n else:\n if not os.path.exists(\"../../data/%s/train\" % dset) or not os.path.exists(\"../../data/%s/val\" % dset) or not os.path.exists(\"../../data/%s/test\" % dset):\n print(\"'train', 'val' or 'test' folders not present in dset folder '../../data/%s'!\\n\\nERROR: Dataset must contain 'train', 'val' and 'test' folders.\\n\\nQuitting.\" % dset)\n return\n else:\n print(\"Making %s dataset\" % dset)\n subprocess.call(['python3', '../data/make_dataset.py', '../../data/%s' % dset, '3'])\n print(\"Done!\")\n\n epoch_size = n_batch_per_epoch * batch_size\n\n init_epoch = 0\n\n if prev_model:\n print('\\n\\nLoading prev_model from', prev_model, '...\\n\\n')\n prev_model_latest_gen = sorted(glob.glob(os.path.join('../../models/', prev_model, '*gen*.h5')))[-1]\n prev_model_latest_disc = sorted(glob.glob(os.path.join('../../models/', prev_model, '*disc*.h5')))[-1]\n prev_model_latest_DCGAN = sorted(glob.glob(os.path.join('../../models/', prev_model, '*DCGAN*.h5')))[-1]\n # Find prev model name, epoch\n model_name = prev_model_latest_DCGAN.split('models')[-1].split('/')[1]\n init_epoch = int(prev_model_latest_DCGAN.split('epoch')[1][:5]) + 1\n\n # Setup environment (logging directory etc), if no prev_model is mentioned\n general_utils.setup_logging(model_name)\n\n # img_dim = X_full_train.shape[-3:]\n img_dim = (256, 256, 3)\n\n # Get the number of non overlapping patch and the size of input image to the discriminator\n nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_data_format)\n\n try:\n\n # Create optimizers\n opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n if discriminator_optimizer == 'sgd':\n opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)\n elif discriminator_optimizer == 'adam':\n 
opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # Load generator model\n generator_model = models.load(\"generator_unet_%s\" % generator_type,\n img_dim,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n generator_model.compile(loss='mae', optimizer=opt_discriminator)\n\n # Load discriminator model\n discriminator_model = models.load(\"DCGAN_discriminator\",\n img_dim_disc,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n discriminator_model.trainable = False\n\n DCGAN_model = models.DCGAN(generator_model,\n discriminator_model,\n img_dim,\n patch_size,\n image_data_format)\n\n if use_l1_weighted_loss:\n loss = [l1_weighted_loss, 'binary_crossentropy']\n else:\n loss = [l1_loss, 'binary_crossentropy']\n\n loss_weights = [1E1, 1]\n DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)\n\n discriminator_model.trainable = True\n discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)\n\n # Load prev_model\n if prev_model:\n generator_model.load_weights(prev_model_latest_gen)\n discriminator_model.load_weights(prev_model_latest_disc)\n DCGAN_model.load_weights(prev_model_latest_DCGAN)\n\n # Load and rescale data\n print('\\n\\nLoading data...\\n\\n')\n X_full_train, X_sketch_train, X_full_val, X_sketch_val = data_utils.load_data(dset, image_data_format)\n check_this_process_memory()\n print('X_full_train: %.4f' % (X_full_train.nbytes/2**30), \"GB\")\n print('X_sketch_train: %.4f' % (X_sketch_train.nbytes/2**30), \"GB\")\n print('X_full_val: %.4f' % (X_full_val.nbytes/2**30), \"GB\")\n print('X_sketch_val: %.4f' % (X_sketch_val.nbytes/2**30), \"GB\")\n\n # Losses\n disc_losses = []\n gen_total_losses = []\n gen_L1_losses = []\n gen_log_losses = []\n\n # Start training\n print(\"\\n\\nStarting training\\n\\n\")\n for e in range(nb_epoch):\n # Initialize progbar and batch counter\n # progbar = generic_utils.Progbar(epoch_size)\n batch_counter = 0\n gen_total_loss_epoch = 0\n gen_L1_loss_epoch = 0\n gen_log_loss_epoch = 0\n start = time.time()\n for X_full_batch, X_sketch_batch in data_utils.gen_batch(X_full_train, X_sketch_train, batch_size):\n # Create a batch to feed the discriminator model\n X_disc, y_disc = data_utils.get_disc_batch(X_full_batch,\n X_sketch_batch,\n generator_model,\n batch_counter,\n patch_size,\n image_data_format,\n label_smoothing=label_smoothing,\n label_flipping_prob=label_flipping_prob)\n # Update the discriminator\n disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)\n # Create a batch to feed the generator model\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)\n y_gen[:, 1] = 1\n # Freeze the discriminator\n discriminator_model.trainable = False\n # Train generator\n for _ in range(n_run_of_gen_for_1_run_of_disc-1):\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n # Add losses\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n # 
Unfreeze the discriminator\n discriminator_model.trainable = True\n # Progress\n # progbar.add(batch_size, values=[(\"D logloss\", disc_loss),\n # (\"G tot\", gen_loss[0]),\n # (\"G L1\", gen_loss[1]),\n # (\"G logloss\", gen_loss[2])])\n print(\"Epoch\", str(init_epoch+e+1), \"batch\", str(batch_counter+1), \"D_logloss\", disc_loss, \"G_tot\", gen_loss[0], \"G_L1\", gen_loss[1], \"G_log\", gen_loss[2])\n batch_counter += 1\n if batch_counter >= n_batch_per_epoch:\n break\n gen_total_loss = gen_total_loss_epoch/n_batch_per_epoch\n gen_L1_loss = gen_L1_loss_epoch/n_batch_per_epoch\n gen_log_loss = gen_log_loss_epoch/n_batch_per_epoch\n disc_losses.append(disc_loss)\n gen_total_losses.append(gen_total_loss)\n gen_L1_losses.append(gen_L1_loss)\n gen_log_losses.append(gen_log_loss)\n check_this_process_memory()\n print('Epoch %s/%s, Time: %.4f' % (init_epoch + e + 1, init_epoch + nb_epoch, time.time() - start))\n # Save images for visualization\n if (e + 1) % visualize_images_every_n_epochs == 0:\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"training\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Get new images from validation\n X_full_batch, X_sketch_batch = next(data_utils.gen_batch(X_full_val, X_sketch_val, batch_size))\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"validation\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Plot losses\n data_utils.plot_losses(disc_losses, gen_total_losses, gen_L1_losses, gen_log_losses, model_name, init_epoch)\n # Save weights\n if (e + 1) % save_weights_every_n_epochs == 0:\n gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n generator_model.save_weights(gen_weights_path, overwrite=True)\n disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n discriminator_model.save_weights(disc_weights_path, overwrite=True)\n DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)\n\n except KeyboardInterrupt:\n pass",
"def train(self):\r\n self.speaker2index_and_index2speaker()\r\n \"\"\"Initialize history matrix\"\"\"\r\n self.history = np.random.normal(loc=0, scale=0.1, size=(len(self.s2i), config.train.class_history))\r\n \"\"\"\"\"\"\r\n \"\"\"\"\"\"\r\n iterations = 0\r\n \"\"\"Get train/test\"\"\"\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"CTC loss\"\"\"\r\n # self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='mean')\r\n self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='none')\r\n for epoch in range(config.train.num_epochs):\r\n \"\"\"Make dataloader\"\"\"\r\n train_data = Dataset({'files': train, 'mode': 'train', 'metadata_help': metadata_help})\r\n train_gen = data.DataLoader(train_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=train_data.collate, drop_last=True)\r\n val_data = Dataset({'files': val, 'mode': 'train', 'metadata_help': metadata_help})\r\n val_gen = data.DataLoader(val_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=val_data.collate, drop_last=True)\r\n\r\n for batch_number, features in enumerate(train_gen):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n input_lengths = features['input_lengths']\r\n target_lengths = features['target_lengths']\r\n metadata = features[\"metadata\"]\r\n batch_speakers = [x['speaker'] for x in metadata]\r\n self.G = self.G.train()\r\n\r\n #ipdb.set_trace()\r\n \"\"\"Make input_lengths and target_lengths torch ints\"\"\"\r\n input_lengths = input_lengths.to(torch.int32)\r\n target_lengths = target_lengths.to(torch.int32)\r\n phones = phones.to(torch.int32)\r\n\r\n outputs = self.G(spectrograms)\r\n\r\n outputs = outputs.permute(1, 0, 2) # swap batch and sequence length dimension for CTC loss\r\n\r\n loss = self.ctc_loss(log_probs=outputs, targets=phones,\r\n input_lengths=input_lengths, target_lengths=target_lengths)\r\n\r\n \"\"\"Update the loss history\"\"\"\r\n self.update_history(loss, batch_speakers)\r\n if epoch >= config.train.regular_epochs:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[0])\r\n else:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[1])\r\n loss = loss * loss_weights\r\n\r\n # Backward and optimize.\r\n self.reset_grad()\r\n # loss.backward()\r\n loss.sum().backward()\r\n self.g_optimizer.step()\r\n\r\n if iterations % self.log_step == 0:\r\n print(str(iterations) + ', loss: ' + str(loss.sum().item()))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('loss', loss.sum().item(), iterations)\r\n\r\n if iterations % self.model_save_step == 0:\r\n \"\"\"Calculate validation loss\"\"\"\r\n val_loss = self.val_loss(val=val_gen, iterations=iterations)\r\n print(str(iterations) + ', val_loss: ' + str(val_loss))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('val_loss', val_loss, iterations)\r\n \"\"\"Save model checkpoints.\"\"\"\r\n if iterations % self.model_save_step == 0:\r\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(iterations))\r\n torch.save({'model': self.G.state_dict(),\r\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\r\n 
print('Saved model checkpoints into {}...'.format(self.model_save_dir))\r\n\r\n iterations += 1",
"def main():\n\n # Load the data and scale\n x_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_audio.npy\")[:,:,0]\n y_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_labels.npy\")\n x_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_audio.npy\")[:,:,0]\n y_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_labels.npy\")\n\n x_train = (x_train.astype('float32') + 32768) / 65536\n x_test = (x_test.astype('float32') + 32768) / 65536\n\n # Train and test the models\n train(x_train, y_train, x_test, y_test)",
"def DNN_Spectral_Mapping(args):\r\n PATH_ROOT = os.getcwd()\r\n os.chdir(PATH_ROOT)\r\n\r\n # noisy_train ; input of DNN\r\n path_dnn_noisy_train = os.path.join(PATH_ROOT, args.input_noisy_train)\r\n dnn_magnitude_noisy_train,_,sr = perform_stft(path_dnn_noisy_train, args)\r\n # dnn_magnitude_noisy_train= splice_frames(dnn_magnitude_noisy_train.T, args.left_context, args.right_context).T\r\n\r\n # clean_train ; output of DNN\r\n path_dnn_clean_train = os.path.join(PATH_ROOT, args.input_clean_train)\r\n dnn_magnitude_clean_train,_,_ = perform_stft(path_dnn_clean_train, args)\r\n\r\n # noise_train\r\n path_noise = os.path.join(PATH_ROOT, args.input_noise)\r\n dnn_magnitude_noise_train,_,_ = perform_stft(path_noise, args)\r\n\r\n path_clean_test = os.path.join(PATH_ROOT , args.input_clean_test)\r\n (sr, clean_test) = wav.read(path_clean_test)\r\n\r\n # noisy_test\r\n path_noisy_test = os.path.join(PATH_ROOT, args.input_noisy_test)\r\n (sr, noisy_test) = wav.read(path_noisy_test)\r\n dnn_magnitude_noisy_test, dnn_phase_noisy_test, _ = perform_stft(path_noisy_test, args)\r\n # magnitude_noisy_test= splice_frames(magnitude_noisy_test.T, args.left_context, args.right_context).T\r\n\r\n X_train = np.log(dnn_magnitude_noisy_train.T**2)\r\n y_train = np.log(dnn_magnitude_clean_train.T**2)\r\n X_test = np.log(dnn_magnitude_noisy_test.T**2)\r\n\r\n # DNN training stage\r\n #####################################################################################\r\n k.clear_session()\r\n def get_dnn_model(X_train, y_train, args):\r\n # LeakyReLU, PReLU, ELU, ThresholdedReLU, SReLU\r\n model = Sequential()\r\n model.add(Dense(args.n_hidden, input_dim=X_train.shape[1], init='glorot_normal')) # glorot_normal,he_normal\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(args.n_hidden, init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(args.n_hidden, init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(units=y_train.shape[1], init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n model.add(Activation('linear'))\r\n\r\n model.compile(loss='mse',\r\n optimizer='adam',\r\n metrics=['mse'])\r\n # model.summary()\r\n return model\r\n\r\n model = get_dnn_model(X_train, y_train, args)\r\n with tf.device('/gpu:0'):\r\n model_info = model.fit(X_train, y_train, batch_size=args.n_batch, epochs=args.n_epoch)\r\n # plot_model_history(model_info)\r\n print(\"Training complete.\")\r\n\r\n # Enhancement stage\r\n #####################################################################################\r\n magnitude_estimated_clean = model.predict(X_test).T\r\n magnitude_estimated_clean = np.exp(np.sqrt(magnitude_estimated_clean))\r\n # magnitude_estimated_clean = magnitude_estimated_clean.astype('int16')\r\n\r\n # magnitude_estimated_clean=norm(magnitude_estimated_clean)\r\n #Reconstruction\r\n stft_reconstructed_clean = merge_magphase(magnitude_estimated_clean, dnn_phase_noisy_test)\r\n signal_reconstructed_clean =librosa.istft(stft_reconstructed_clean, hop_length=args.hop_size, window=args.window)\r\n signal_reconstructed_clean = signal_reconstructed_clean.astype('int16')\r\n 
#####################################################################################\r\n output_path_estimated_noisy_test = os.path.join(PATH_ROOT, args.output_file)\r\n wav.write(output_path_estimated_noisy_test,sr,signal_reconstructed_clean)\r\n\r\n # Display signals, spectrograms\r\n show_signal(clean_test,noisy_test,signal_reconstructed_clean,sr)\r\n show_spectrogram(clean_test,noisy_test, signal_reconstructed_clean, sr, args.num_FFT,args.hop_size)\r\n # =============================================================================\r\n # PESQ\r\n # =============================================================================\r\n # PATH_MATLAB='\"C:/Program Files/MATLAB/R2014a/bin/matlab.exe\"'\r\n\r\n # PATH_MATLAB1 = os.path.join(PATH_ROOT , 'PESQ_MATLAB/execute_pesq.m')\r\n # from pymatbridge import Matlab\r\n # mlab = Matlab()\r\n # mlab = Matlab(executable=PATH_MATLAB)\r\n # mlab.start()\r\n\r\n # #PATH_MATLAB1 = os.path.join(PATH_ROOT , \"PESQ_MATLAB\",\"execute_pesq.m\")\r\n # result_PESQ = mlab.run_func(PATH_MATLAB1, {'arg1': sr})\r\n # noisy_original_PESQ = result_PESQ['result'][0][0]\r\n # enhanced_PESQ = result_PESQ['result'][1][0]\r\n # mlab.stop()\r\n\r\n # snr=args.input_noisy_test\r\n # name=snr[53:-9]\r\n # print(\"[%s]\\n Original: %.2f\\n Spectral-Mapping\\t: %.2f\"%(name,noisy_original_PESQ,enhanced_PESQ))\r",
"def __train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")",
"def __init__(self, args):\n \n super(MicroNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 1, kernel_size=1)\n self.conv2 = nn.Conv2d(1, 29, kernel_size=5)\n self.maxpool2 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv3 = nn.Conv2d(29, 59, kernel_size=3)\n self.maxpool3 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv4 = nn.Conv2d(59, 74, kernel_size=3)\n self.maxpool4 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv2_drop = nn.Dropout2d()\n self.conv3_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(1184, 300)\n self.fc2 = nn.Linear(300, args.num_classes)\n self.conv0_bn = nn.BatchNorm2d(3)\n self.conv1_bn = nn.BatchNorm2d(1)\n self.conv2_bn = nn.BatchNorm2d(29)\n self.conv3_bn = nn.BatchNorm2d(59)\n self.conv4_bn = nn.BatchNorm2d(74)\n self.dense1_bn = nn.BatchNorm1d(300)",
"def train(self, n, filename):\n self.n = n\n for line in open(filename):\n samp = line.rstrip('\\n')\n# samp = '~' + samp + '~'\n for i in range(len(samp) - n):\n w = samp[i:i + n]\n self.counts[w] += 1\n self.total_count += 1",
"def train(self, batch):\n pass"
]
| [
"0.68972456",
"0.6880117",
"0.6807847",
"0.66880417",
"0.668026",
"0.66636807",
"0.6658675",
"0.6646357",
"0.6613935",
"0.65864086",
"0.65287066",
"0.6460703",
"0.63921046",
"0.63921046",
"0.63921046",
"0.63921046",
"0.63921046",
"0.63910156",
"0.63789463",
"0.6351578",
"0.633823",
"0.6334179",
"0.63334006",
"0.63245445",
"0.6323549",
"0.63138926",
"0.62621087",
"0.62591743",
"0.62401384",
"0.6239875"
]
| 0.7181619 | 0 |
Additional standard variables that can optionally be used in templates. | def standard_variables(self):
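        # Expose the current local and UTC time to templates under a single 'time' key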
std_vars = {
'time': {
'local': datetime.datetime.now(),
'utc': datetime.datetime.utcnow()
}
}
return std_vars | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_vars(self):\n if not self._has(\"vars\"):\n if self._has(\"p\"):\n self._.vars = self._.p.variables()\n elif self._has(\"q\"):\n self._.vars = self._.q.variables()\n elif self._has(\"P\"):\n self._.vars = variables(self._.P)\n elif self._has(\"Q\"):\n self._.vars = variables(self._.Q)\n self._.vars_ordered = len(self._.vars) <= 1",
"def variables(self):\n return ()",
"def inject_dict_for_all_templates():\n global_vars = {\"settings\": settings}\n\n if is_authenticated():\n global_vars[\"user\"] = get_authenticated_user()\n\n return global_vars",
"def _add_fundamental_variables(self):\n var = self.add_variable('configFile', (\"-c\", \"--configfile\"), \n dict(help=\"config file\"),\n envvar='ICAT_CFG', optional=True,\n type=lambda f: Path(f).expanduser())\n var.postprocess = _post_configFile\n var = self.add_variable('configSection', (\"-s\", \"--configsection\"), \n dict(help=\"section in the config file\", \n metavar='SECTION'), \n envvar='ICAT_CFG_SECTION', optional=True, \n default=defaultsection)\n var.postprocess = _post_configSection",
"def common_vars(environ):\n fields = getFields(environ)\n tooltips = getTooltips()\n usersign = environ['tiddlyweb.usersign']\n if not 'fields' in usersign:\n usersign['fields'] = {'name': ''}\n captcha = process_captcha(environ)\n query = environ['tiddlyweb.query']\n\n return {'fields':fields, 'tooltips':tooltips, 'usersign':usersign, 'captcha':captcha, 'query':query}",
"def setvariables(self, request, contextvars, thevars):\n postdata = {}\n if request.POST:\n postdata = dict(request.POST.dict())\n for var in thevars:\n if postdata.get(\"custom_\"+var):\n contextvars[var] = postdata.get(\"custom_\"+var)\n else:\n try:\n contextvars[var] = thevars[var]\n except Exception:\n pass\n return contextvars",
"def _add_basic_variables(self):\n self.add_variable('url', (\"-w\", \"--url\"), \n dict(help=\"URL to the web service description\"),\n envvar='ICAT_SERVICE')\n if self.ids:\n if self.ids == \"mandatory\":\n idsopt = False\n elif self.ids == \"optional\":\n idsopt = True\n else:\n raise ValueError(\"invalid value '%s' for argument ids.\" \n % self.ids) \n self.add_variable('idsurl', (\"--idsurl\",), \n dict(help=\"URL to the ICAT Data Service\"),\n envvar='ICAT_DATA_SERVICE', optional=idsopt)\n self.add_variable('checkCert', (\"--check-certificate\",), \n dict(help=\"don't verify the server certificate\"), \n type=flag, default=True)\n self.add_variable('http_proxy', (\"--http-proxy\",), \n dict(help=\"proxy to use for http requests\"),\n envvar='http_proxy', optional=True)\n self.add_variable('https_proxy', (\"--https-proxy\",), \n dict(help=\"proxy to use for https requests\"),\n envvar='https_proxy', optional=True)\n self.add_variable('no_proxy', (\"--no-proxy\",), \n dict(help=\"list of exclusions for proxy use\"),\n envvar='no_proxy', optional=True)",
"def set_vars(self, vars):\n if vars is False:\n self._.vars_ordered = False\n else:\n self._.vars = tuple(vars) + tuple(x for x in self._.vars\n if x not in vars)\n self._.vars_ordered = True",
"def set_vars():\n return dict()",
"def __init__(self, *args, **kwargs):\n if not kwargs.get('no_django', False):\n overrides = dict([(k, getattr(middleware, k, None),) for k in django_variables])\n kwargs.update(overrides)\n super(Template, self).__init__(*args, **kwargs)",
"def combined_vars(self):\n return self._return_if('_combined_vars')",
"def variables_declared (self) :\r\n\t\treturn {}",
"def __init__(self, *args, **kwargs):\r\n if not kwargs.get('no_django', False):\r\n overrides = {k: getattr(edxmako, k, None) for k in DJANGO_VARIABLES}\r\n overrides['lookup'] = edxmako.LOOKUP['main']\r\n kwargs.update(overrides)\r\n super(Template, self).__init__(*args, **kwargs)",
"def init_vars(self):\n # type: () -> None\n raise NotImplementedError",
"def _fill_user_specific_attributes(self, template_dictionary):\n template_dictionary[KEY_INCLUDE_TOOLTIP] = False\n template_dictionary[KEY_WRAP_CONTENT_IN_MAIN_DIV] = True\n template_dictionary[KEY_CURRENT_TAB] = 'none'\n\n return template_dictionary",
"def init_locals(self):\n pass",
"def template_extra_functions(self):\n\t\treturn []",
"def defaultTemplateParameter(self):\n self.tplparam['BODY'] = self.content\n self.tplparam['FLASH'] = (self.flash or '').replace('\"', r'\\\"')\n self.tplparam['PYMFRAMEVERSION'] = self.pymframeversion\n self.tplparam['USER'] = self.session.getAttribute(self.settings.authenvar)\n self.tplparam['RIGHTS'] = repr(self.session.getAttribute('rights'))\n self.tplparam['MENU'] = self.routing.getMenu(self.path,self.checkRights)\n self.tplparam['PATH'] = self.path",
"def add_optional_vars(self, varsdict):\n if isinstance(varsdict, dict):\n for key, val in varsdict.items():\n logger.debug(\"Adding: (%s: %s)\", key, val)\n self.vars[key] = val",
"def extra_context(self):\n from django.conf import settings\n\n return {\n \"site_name\": (lambda r: settings.LEONARDO_SITE_NAME\n if getattr(settings, 'LEONARDO_SITE_NAME', '') != ''\n else settings.SITE_NAME),\n \"debug\": lambda r: settings.TEMPLATE_DEBUG\n }",
"def global_variables(request):\n data = {\n 'DEBUG': settings.DEBUG,\n }\n return data",
"def _var(self, name=None, context=None):\n\t\tif name is None: name = None\n\t\tif context is None: context = self.context\n\t\tif (not name):\n\t\t\treturn context.getVariables().keys()\n\t\telif True:\n\t\t\treturn context.getVariables().get(name)",
"def all_common_variables(request):\n articles = Article.objects.all()\n random_article = Article.objects.order_by('?')[0:4]\n return {\n 'articles':articles,\n 'random_article':random_article,\n }",
"def variables_used (self) :\r\n\t\treturn []",
"def extra_options():\n extra_vars = {\n 'PrgEnv': [None, 'PrgEnv module to load, e.g., cray to load PrgEnv-cray, or None for automatic determination', CUSTOM],\n 'PrgEnv_load': [True, 'Load the PrgEnv module (if True) or just set the corresponding environment variable (if False)', CUSTOM],\n 'PrgEnv_family': [None, 'Declare to be a member of the PrgEnv family (if \\'PrgEnv\\), of the cpeToolchain family (if \\'cpeToolchain\\') or manually unload all known PrgEnv and cpe* modules (if None, needed when LMOD is not used)', CUSTOM],\n 'CPE_compiler': [None, 'Versionless compiler module to load, or None for automatic determination', CUSTOM],\n 'CPE_version': [None, 'Version of the CPE, if different from the version of the module', CUSTOM],\n 'CPE_load': [ 'first', 'First load the cpe module (if \\'first\\'), after the PrgEnv module (if \\'after\\'), load it at the end (if \\'last\\'), or do not load the cpe module (if None)', CUSTOM],\n 'cray_targets': [[], 'Targetting modules to load', CUSTOM],\n #'optional_example_param': [None, \"Example optional custom parameter\", CUSTOM],\n }\n return Bundle.extra_options(extra_vars)",
"def set_locals(self):\n\n if required is not None:\n self.required = set(required)\n if additional_definitions is not None:\n self.additional_definitions = additional_definitions",
"def global_variables():\n item_catalog_app.jinja_env.globals[\"ALL_CATEGORIES\"] = act.all_categories()\n __logged_in_user__ = act.user(\n pointer=login_session.get(\"user_id\")\n )\n item_catalog_app.jinja_env.globals[\"USER\"] = __logged_in_user__\n g.USER = __logged_in_user__",
"def test_with_global(self):\n t = Template(\n '{% load djblets_utils %}'\n '{% block main %}'\n '{% block inner %}'\n '{% definevar \"myvar\" global %}{{num}}{% enddefinevar %}'\n '{% endblock %}'\n '{% endblock %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[123]')",
"def default_environment():\n return dict(_VARS)",
"def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']"
]
| [
"0.62640846",
"0.60485345",
"0.59114224",
"0.5826683",
"0.58106107",
"0.5753299",
"0.5740808",
"0.5725044",
"0.5718518",
"0.5712702",
"0.56729376",
"0.5661352",
"0.5655612",
"0.5645327",
"0.56257856",
"0.55843884",
"0.5579836",
"0.5536659",
"0.550683",
"0.5505651",
"0.5472047",
"0.5397502",
"0.53900856",
"0.53839475",
"0.5367724",
"0.5362785",
"0.5335399",
"0.53201157",
"0.53024197",
"0.52954537"
]
| 0.65202695 | 0 |
Converts the feature matrix from numpy array to KaldiMatrix or KaldiCompressedMatrix. | def _convert_data(self, data):
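        # Plain numpy arrays are cast to the configured save precision, then wrapped and optionally compressed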
if isinstance(data, np.ndarray):
data = data.astype(float_save(), copy=False)
if self.compress:
return KaldiCompressedMatrix.compress(data, self.compression_method)
return KaldiMatrix(data)
if isinstance(data, KaldiMatrix):
if self.compress:
return KaldiCompressedMatrix.compress(data, self.compression_method)
return data
if isinstance(data, KaldiCompressedMatrix):
if not self.compress:
return data.to_matrix()
return data
raise ValueError("Data is not ndarray or KaldiMatrix") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convert_to_features(self, img: np.ndarray) -> np.ndarray:",
"def load_feature_matrix(src):\n feat_mat = smat_util.load_matrix(src)\n if isinstance(feat_mat, np.ndarray):\n feat_mat = np.ascontiguousarray(feat_mat)\n elif isinstance(feat_mat, smat.spmatrix):\n feat_mat = feat_mat.tocsr()\n feat_mat.sort_indices()\n return feat_mat",
"def _conv_array_to_sparse(arr):\n if has_scipy():\n from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix\n else:\n from cuml.internals.import_utils import (\n dummy_function_always_false as scipy_sparse_isspmatrix,\n )\n if scipy_sparse_isspmatrix(arr):\n ret = cupyx.scipy.sparse.csr_matrix(arr.tocsr())\n elif cupyx.scipy.sparse.isspmatrix(arr):\n ret = arr\n elif isinstance(arr, cudf.DataFrame):\n ret = _conv_df_to_sparse(arr)\n elif isinstance(arr, np.ndarray):\n cupy_ary = rmm_cupy_ary(cp.asarray, arr, dtype=arr.dtype)\n ret = cupyx.scipy.sparse.csr_matrix(cupy_ary)\n\n elif isinstance(arr, cp.ndarray):\n ret = cupyx.scipy.sparse.csr_matrix(arr)\n else:\n raise ValueError(\"Unexpected input type %s\" % type(arr))\n return ret",
"def load_label_matrix(src, for_training=False):\n assert isinstance(src, str), \"src for load_label_matrix must be a str\"\n dtype = np.float32\n feat_mat = smat_util.load_matrix(src)\n feat_mat = feat_mat.tocsc() if for_training else feat_mat.tocsr()\n return feat_mat.astype(dtype)",
"def feature_matrix(self):\n return self._feat_matrix",
"def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )",
"def cgmat2np(cgkit_mat):\n arr = np.array(cgkit_mat.toList())\n if len(arr) == 9:\n arr.shape = 3, 3\n elif len(arr) == 16:\n arr.shape = 4, 4\n else:\n raise ValueError(\"unknown shape\")\n return arr.T",
"def create_Tf_matrix(\n corpus,\n filename_npz=\"../data/tfidf/data_tf.npz\",\n filename_features=\"../data/tfidf/data_feature_names.pkl\",\n):\n\n vectorizer = CountVectorizer(max_features=len(corpus))\n X = vectorizer.fit_transform(corpus)\n print(\"-Vectorized matrix, \", X.toarray().shape)\n print(\" first line:\")\n print(X.toarray()[0])\n print(\"- Nombre de features :\" + str(len(vectorizer.get_feature_names())))\n print(vectorizer.get_feature_names()[0:10], \" ...\")\n\n data = pd.DataFrame(vectorizer.get_feature_names())\n data.to_pickle(filename_features)\n print(\"tf feature names - saved\")\n sparse.save_npz(filename_npz, X)\n print(\"tf matrix:\", filename_npz, \" - saved\")",
"def flatten_numpy(ndarray):\n return np.reshape(ndarray, (-1,), 'F')",
"def to_matrix(array):\n return Matrix(array.tolist())",
"def features_to_array(features_table, scaler):\n\n # Check arguments\n X = features_to_unscaled_matrix(features_table)\n return scaler.transform(X)",
"def createFeatureMatrix(self,batch):\n \n feature_dim = self.__flags.no_inner_unit * self.__flags.no_outer_unit\n data = np.zeros((len(batch), self.__flags.embedding_dim, 2 * feature_dim), dtype=np.float32)\n\n count = 0\n for obj in batch:\n m1 = self.__object2Matrix(obj)\n m2 = self.__object2Matrix(obj)\n data[count, :self.__flags.embedding_dim, :feature_dim] = m1\n data[count, :self.__flags.embedding_dim, feature_dim:2 * feature_dim] = m2\n count += 1\n scores = np.zeros(len(batch), dtype=np.float32)\n\n return (data,scores)",
"def lps_to_ijk_transformation_matrix_array(self):\n if self.lps_to_ijk_transformation_matrix is None:\n return None\n if self.__lps_to_ijk_transformation_matrix_array__ is None:\n import numpy as np\n self.__lps_to_ijk_transformation_matrix_array__ = np.array(self.lps_to_ijk_transformation_matrix, dtype=np.float)\n return self.__lps_to_ijk_transformation_matrix_array__",
"def features_to_unscaled_matrix(features_table):\n\n # Check arguments\n if features_table is None:\n raise ValueError('Cannot convert features table: None')\n\n if isinstance(features_table, str):\n features_table = pd.read_csv(features_table, sep='\\t', header=0)\n\n if not isinstance(features_table, pd.DataFrame):\n raise ValueError(\n 'Argument \"features_table\" must be a Pandas DataFrame or a string path to a features file that can be '\n 'loaded into a DataFrame: Found type \"{}\"'.format(type(features_table)))\n\n # Load\n X = features_table[list(GT_FEATURES)].copy()\n\n # Cast all features to float64\n X['SVTYPE'] = X['SVTYPE'].apply(lambda label: GT_SVTYPE_TO_NUMERIC[label]) # SVTYPE label numeric representation\n X = X.astype(np.float64)\n\n # Return feature matrix\n return X",
"def _float_matrix2numpy(self, java_float_matrix):\n columns_input = java_float_matrix.toArray()\n split = lambda lst, sz: [numpy.fromiter(lst[i:i+sz],dtype=numpy.float)\n for i in range(0, len(lst), sz)]\n cols = split(columns_input, java_float_matrix.rows)\n matrix = numpy.ma.column_stack(cols)\n return matrix",
"def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)",
"def __get_feature_mat(self, (cluster, articleID)):\n feat = self.feature_cache.get((cluster, articleID))\n\n if feat is None:\n feat = np.outer(self.user_feat[cluster],\n self.article_feat[articleID])\n self.feature_cache[(cluster, articleID)] = feat\n\n return feat",
"def get_float_vector_from_cntk_array(inputArray):\n tensorShape = inputArray.shape\n orderedWeights = np.zeros(inputArray.size, dtype=np.float)\n if (len(tensorShape) == 4):\n i = 0\n for filter in range(tensorShape[0]):\n for row in range(tensorShape[2]):\n for column in range(tensorShape[3]):\n for channel in range(tensorShape[1]):\n orderedWeights[i] = inputArray[filter][channel][row][column]\n i += 1\n # Reshape to (filters * rows, columns, channels)\n orderedWeights = orderedWeights.reshape(\n tensorShape[0] * tensorShape[2], tensorShape[3], tensorShape[1])\n elif (len(tensorShape) == 3):\n i = 0\n for row in range(tensorShape[1]):\n for column in range(tensorShape[2]):\n for channel in range(tensorShape[0]):\n orderedWeights[i] = inputArray[channel][row][column]\n i += 1\n # Reshape to (rows, columns, channels)\n orderedWeights = orderedWeights.reshape(\n tensorShape[1], tensorShape[2], tensorShape[0])\n elif (len(tensorShape) == 2):\n i = 0\n for row in range(tensorShape[1]):\n for column in range(tensorShape[0]):\n orderedWeights[i] = inputArray[column][row]\n i += 1\n # Reshape to (rows, 1, channels)\n orderedWeights = orderedWeights.reshape(\n tensorShape[1], 1, tensorShape[0])\n elif (len(tensorShape) == 1):\n i = 0\n for columnValue in inputArray:\n orderedWeights[i] = columnValue\n i += 1\n # Reshape to (1, 1, channels)\n orderedWeights = orderedWeights.reshape(1, 1, inputArray.size)\n else:\n print(\"Error: Input array has incorrect dimensions\")\n return None\n\n return np.ravel(orderedWeights)",
"def img_to_mat(path): \n\timg = Image.open(path)\n\timg2 = ImageOps.grayscale(img)\n\n\treturn np.array(img2)",
"def get_topic_matrix(self):\n print('get topic matrix')\n\n topic_words_dict = self.config['topic_words']\n\n topic_matrix = np.empty((0, self.wordvec.embedding_dim))\n\n topic_id = 0\n for topic in topic_words_dict.keys():\n topic_words = topic_words_dict[topic]\n topic_vector = self.wordvec.avg_words_vector(topic_words)\n\n topic_matrix = np.append(topic_matrix, topic_vector, axis=0)\n\n self.id2topic[str(topic_id)] = topic\n topic_id += 1\n\n return topic_matrix",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features).tocoo()\n return sparse_to_tensor(features)",
"def const_to_matrix(self, value, convert_scalars=False):\n # Lists and 1D arrays become column vectors.\n if isinstance(value, list) or \\\n isinstance(value, np.ndarray) and value.ndim == 1:\n value = np.asmatrix(value, dtype='float64').T\n # First convert sparse to dense.\n elif sp.issparse(value):\n value = value.todense()\n return np.asmatrix(value, dtype='float64')",
"def matrix_features(self):\n return self._matrix_features",
"def _handle_feature(fea):\n if len(fea.shape) == 1:\n fea = np.array([fea]).T\n\n return fea",
"def build_feature_matrix(self, dataset):\n # Create the dictionary of feature functions if it is not created\n if len(features.features_fun_dict) == 0:\n i = 0\n for o in getmembers(features):\n if isfunction(o[1]):\n features.features_fun_dict[i] = o[1]\n i += 1\n features.num_features = len(features.features_fun_dict)\n\n matrix = np.zeros([dataset.shape[0], features.num_features])\n\n # For each sample in dataset, call every feature function and store its value\n for i in range(dataset.shape[0]):\n for j in range(features.num_features):\n args = getargspec(features.features_fun_dict[j]).args\n if len(args) == 2:\n matrix[i, j] = features.features_fun_dict[j](dataset[i], self.inv_vocab)\n else:\n matrix[i, j] = features.features_fun_dict[j](dataset[i])\n\n # Return sparse matrix with the features (needed by the classifier)\n return csr_matrix(matrix)",
"def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images",
"def kookurrenz_matrix(text, stoppwoerter, nachbarn_anzahl, häufigkeits_liste, vectorizer=TfidfVectorizer, gleiches_wort_null=False):\n vocab = vokabular_erstellen(häufigkeits_liste)\n nachbarn = nachbarn_aller_woerter(text, size=nachbarn_anzahl)\n c_vectorizer = vectorizer(stop_words=stoppwoerter, vocabulary=vocab)\n term_document_matrix = c_vectorizer.fit_transform(nachbarn)\n term_term_matrix = (term_document_matrix.T * term_document_matrix)\n \n if gleiches_wort_null:\n term_term_matrix.setdiag(0)\n \n \n ###\n # EVTL. AENDERN!!\n ###\n dense_term_term_matrix = term_term_matrix.todense() \n\n return dense_term_term_matrix\n #return term_term_matrix",
"def img_to_array(img, dim_ordering='default'):\n if dim_ordering == 'default':\n dim_ordering = K.image_dim_ordering()\n if dim_ordering not in {'th', 'tf'}:\n raise ValueError('Unknown dim_ordering: ', dim_ordering)\n # Numpy array x has format (dim1, dim2, dim3, channel)\n # or (channel, dim1, dim2, dim3)\n # nipy image has format (I don't know)\n if isinstance(img, np.ndarray):\n x = img.astype(K.floatx())\n else:\n x = img.get_data().astype(K.floatx())\n if len(x.shape) == 4:\n if dim_ordering == 'th':\n x = x.transpose(3, 0, 1, 2)\n elif len(x.shape) == 3:\n if dim_ordering == 'th':\n x = x[np.newaxis, ...]\n else:\n x = x[..., np.newaxis]\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x",
"def transform(self, X):\n if isinstance(self.featurizers, list):\n return csr_matrix(general_list(X, self.featurizers))\n\n _X = self.featurizers(X)\n return csr_matrix(_X)",
"def to_knx(self, value: Any) -> DPTArray:\n return self._climate_mode_transcoder.to_knx(value)"
]
| [
"0.55183977",
"0.5278449",
"0.5202684",
"0.5126736",
"0.50405765",
"0.50346196",
"0.4987234",
"0.49555334",
"0.49138463",
"0.4908572",
"0.4899938",
"0.4893329",
"0.48353204",
"0.48254713",
"0.479957",
"0.47969282",
"0.47604373",
"0.47432718",
"0.47428906",
"0.47403488",
"0.47377408",
"0.47372612",
"0.47309026",
"0.47266644",
"0.47230065",
"0.4719191",
"0.46814185",
"0.46702194",
"0.46667653",
"0.4655822"
]
| 0.6106754 | 0 |
Returns and optionally stores the distance matrix for a given network. Nodes in the given nodelist are arranged in the matrix according to their order in that list. Without the floyd parameter, simple BFS is used for APSP calculation. Be aware that this only works for unweighted networks. Since the implementation uses a Numpy matrix for storing the distances, less memory is needed compared to the NetworkX implementation. | def get_distance_matrix_from_graph(network, nodelist):
if nodelist:
apspnodes = nodelist
else:
apspnodes = network.nodes()
mapping = {}
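    # Map each node of interest to its row/column index in the distance matrix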
for index, node in enumerate(apspnodes):
mapping.update({node:index})
nodeset = set(apspnodes)
n = len(apspnodes)
D = numpy.zeros((n,n))
for node in apspnodes:
level = 0
levelnodes = {node}
seen = {}
while levelnodes:
worklist = levelnodes
            levelnodes = set()
for target in worklist:
if target not in seen:
if target in nodeset:
D[mapping[node], mapping[target]] = level
seen[target] = level
try:
levelnodes.update(network[target])
except KeyError:
print "Error: The specified node '%s' could not be found in the network" % target
sys.exit(1)
level = level + 1
return D, mapping | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_node_distance_matrix(\n self, datapoint: np.ndarray, som_array: np.ndarray\n ) -> np.ndarray:\n # algorithms on the full matrix\n if self.distance_metric == \"euclidean\":\n return np.linalg.norm(som_array - datapoint, axis=2)\n\n # node-by-node algorithms\n distmat = np.zeros((self.n_rows, self.n_columns))\n if self.distance_metric == \"manhattan\":\n for node in self.node_list_:\n distmat[node] = dist.cityblock(\n som_array[node[0], node[1]], datapoint\n )\n\n elif self.distance_metric == \"mahalanobis\":\n for node in self.node_list_:\n som_node = som_array[node[0], node[1]]\n cov = np.cov(\n np.stack((datapoint, som_node), axis=0), rowvar=False\n )\n cov_pinv = np.linalg.pinv(cov) # pseudo-inverse\n distmat[node] = dist.mahalanobis(datapoint, som_node, cov_pinv)\n\n elif self.distance_metric == \"tanimoto\":\n # Note that this is a binary distance measure.\n # Therefore, the vectors have to be converted.\n # Source: Melssen 2006, Supervised Kohonen networks for\n # classification problems\n # VERY SLOW ALGORITHM!!!\n threshold = 0.5\n for node in self.node_list_:\n som_node = som_array[node[0], node[1]]\n distmat[node] = dist.rogerstanimoto(\n binarize(\n datapoint.reshape(1, -1),\n threshold=threshold,\n copy=True,\n ).ravel(),\n binarize(\n som_node.reshape(1, -1), threshold=threshold, copy=True\n ).ravel(),\n )\n\n elif self.distance_metric == \"spectralangle\":\n for node in self.node_list_:\n distmat[node] = np.arccos(\n np.divide(\n np.dot(som_array[node[0], node[1]], datapoint),\n np.multiply(\n # TODO check if an axis needs to be set here\n np.linalg.norm(som_array),\n np.linalg.norm(datapoint),\n ),\n )\n )\n\n return distmat",
"def get_full_weight_matrix_and_minimal_distances(G,\n sink_nodes,\n use_inverse_distance_as_adjacency = False,\n return_distance_matrix = False,\n ):\n\n nodes = set(list(G.nodes()))\n N = G.number_of_nodes()\n transient_nodes = list(nodes - set(sink_nodes))\n d = dict(nx.all_pairs_dijkstra_path_length(G))\n D = np.zeros((N,N))\n\n for i in range(N-1):\n for j in range(i+1,N):\n D[i,j] = d[i][j]\n D[j,i] = d[j][i]\n\n A = nx.adjacency_matrix(G).toarray()\n A = A.astype(float)\n W = A.copy()\n\n if use_inverse_distance_as_adjacency:\n W[A>0] = 1/A[A>0]\n else:\n W[A>0] = 1\n\n min_distances = D[:,sink_nodes].min(axis=1)\n\n if return_distance_matrix:\n return A, W, min_distances, D\n else:\n return A, W, min_distances",
"def get_distance_matrix(self):\n names = self.get_named_leaves()\n num_names = len(names)\n dist_mat = np.zeros((num_names, num_names), dtype='float')\n for i, j in itertools.combinations(range(num_names), 2):\n node1, node2 = self.node_names[names[i]], self.node_names[names[j]]\n dist = self.node_distance(node1, node2)\n dist_mat[i,j] = dist\n dist_mat[j,i] = dist\n return names, dist_mat",
"def adj_matrix(G,nodelist=None,weight='weight'):\n return nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight)",
"def calculate_whole_dist_matrix(nodes, links, mode, ED):\n # Used in uncertainty calculation. Only consider tree distance mode.\n mode = 'td-mapping'\n G=nx.Graph()\n G.add_nodes_from([0, len(nodes)])\n for i in range(0, len(links)):\n E_dist = np.linalg.norm(nodes[links[i][0]][2]-nodes[links[i][1]][2])\n G.add_edge(links[i][0],links[i][1],weight=E_dist)\n dist = np.zeros((len(nodes), len(nodes)))\n for i in range(0, len(nodes)):\n for j in range(0, len(nodes)):\n if mode == \"td-mapping\":\n dist[i,j] = nx.shortest_path_length(G, source=i, target=j, weight='weight')\n if mode == \"ed-mapping\":\n dist[i,j] = np.linalg.norm(nodes[i][[0,2]]-nodes[j][[0,2]])\n if mode == \"et-mapping\":\n dist[i,j] = (1-ED)*nx.shortest_path_length(G, source=i, target=j, weight='weight')+ED*np.linalg.norm(nodes[i][[0,2]]-nodes[j][[0,2]])\n return dist",
"def getDistanceMatrix(self):\n return self.distmat.as_matrix()",
"def _freespace_matrix(distance):\n\n return np.array([[1., distance], [0., 1.]])",
"def from_numpy_matrix(self, matrix, node_names=None, directed=False, *args, **kwargs):\n\t\tN = list()\n\t\tE = dict()\n\t\tneighbours = dict()\n\n\t\t# Assert Square Adjacency Matrix\n\t\tif matrix.shape[0] != matrix.shape[1]:\n\t\t\traise ValueError('Adjacency Matrix not square')\n\n\t\t#matrix = matrix.A\n\n\t\tN = list( np.arange(matrix.shape[0]) )\n\t\tfor i, row in enumerate(matrix,start=0):\n\t\t\tneighbours[i] = []\n\t\t\tfor j, value in enumerate(row,start=0):\n\t\t\t\t# the diagonal is (must be) always zero (distance = 0)\n\t\t\t\tif i==j:\n\t\t\t\t\tcontinue\n\t\t\t\t# infinite distance doesn't have to be calculated\n\t\t\t\telif value == np.inf:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tE[ (i,j) ] = float(value)\n\t\t\t\t\tneighbours[i].append(j)\n\n\t\treturn Dijkstra(N, E, neighbours, node_names, directed, *args, **kwargs)",
"def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))",
"def create_hop_matrix(G, max_hops, node_list):\n distances = dict(nx.all_pairs_dijkstra_path_length(G))\n hop_matrix = torch.zeros(max_hops, G.number_of_nodes(), G.number_of_nodes(), dtype=torch.int)\n for hop in range(max_hops):\n for i, node_from in enumerate(node_list):\n for j, node_to in enumerate(node_list):\n if node_to in distances[node_from].keys() and distances[node_from][node_to] == hop:\n hop_matrix[hop,i,j] = 1\n return hop_matrix",
"def adjacency_matrix():\n file_path = PROJECT_PATH + \"/geographycal_data/adjacency_matrix/Howgrp.txt\"\n router = Router(adjacency_metrix=file_path)\n # router.write2vtk(router.graph, \"adjacency_matrix\")\n # nx.draw(router.graph)\n # plt.show()\n # adjacency matrix\n A = nx.adjacency_matrix(router.graph, weight=None).toarray()\n # ... and its spectrum\n nx.adjacency_spectrum(router.graph, weight=None)\n # weighted adjacency\n W = nx.adjacency_matrix(router.graph)\n # D\n I = np.reshape(np.ones(12), (-1, 1))\n D = np.matmul(A, I)\n # combinatorial graph Laplacian L = D - A\n L = nx.laplacian_matrix(router.graph, weight=None)\n # ... and his spectrum\n nx.laplacian_spectrum(router.graph, weight=None)\n # weighted Laplacian\n Y = nx.laplacian_matrix(router.graph)\n\n # Note\n sumD = np.matmul(I.transpose(), D)\n sumD = sumD[0][0]\n sumA = 0\n for row in np.nditer(A):\n for e in np.nditer(row):\n sumA += e\n\n # Fielder vector\n fiedler_vector = nx.fiedler_vector(router.graph, weight=None)\n\n # Matrix Double index Sum\n\n def D_app(F):\n return D * F\n\n def A_app(F):\n AF = np.zeros(len(F))\n for i, e_i in enumerate(F):\n for j, e_j in enumerate(F):\n if (A[i][j] != 0):\n AF[i] += F[j]\n return AF",
"def from_sparse_matrix(self, matrix, node_names=None, directed=False, *args, **kwargs):\n\t\tN = list()\n\t\tE = dict()\n\t\tneighbours = dict()\n\n\t\t# Assert Square Adjacency Matrix\n\t\tif matrix.shape[0] != matrix.shape[1]:\n\t\t\traise ValueError('Adjacency Matrix not square')\n\n\t\tN = list( np.arange(matrix.shape[0]) )\n\t\tneighbours = {i:[] for i in np.arange(matrix.shape[0])}\n\t\t#\n\t\trows,cols = matrix.nonzero()\n\t\tfor i,j in zip(rows,cols):\n\t\t\t# the diagonal is (must be) always zero (distance = 0)\n\t\t\tif i==j:\n\t\t\t\tcontinue\n\t\t\t# infinite distance doesn't have to be calculated\n\t\t\telif matrix[i,j] == np.inf:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tE[ (i,j) ] = float(matrix[i,j])\n\t\t\t\tneighbours[i].append(j)\n\n\t\treturn Dijkstra(N, E, neighbours, node_names, directed, *args, **kwargs)",
"def compute_dist_matrix(X1, X2, distance):\n N, M = X1.shape[0], X2.shape[0]\n dist_matrix = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n dist_matrix[i][j] = dist(X1[i], X2[j], distance=distance)\n return dist_matrix",
"def _create_distance_matrix(mesh):\n l = len(mesh.faces)\n\n faces = polygons(mesh.faces, mesh.vertices, mesh.face_normals, mesh.area_faces)\n # map from edge-key to adjacent faces\n adj_faces_map = {}\n # find adjacent faces by iterating edges\n for index, face in enumerate(faces):\n for edge in face.edge_keys:\n if (edge[0] > edge[1]):\n new_edge = (edge[1], edge[0])\n else:\n new_edge = (edge[0], edge[1])\n if new_edge in adj_faces_map:\n adj_faces_map[new_edge].append(index) # 一对多\n else:\n adj_faces_map[new_edge] = [index]\n\n # helping vectors to create sparse matrix later on\n row_indices = []\n col_indices = []\n Gval = [] # values for matrix of angular distances\n Aval = [] # values for matrix of geodesic distances\n # iterate adjacent faces and calculate distances\n for edge, adj_faces in adj_faces_map.items():\n if len(adj_faces) == 2:\n i = adj_faces[0]\n j = adj_faces[1]\n # 一条边连接的两个面\n Gtemp = _geodesic_distance(mesh, faces[i], faces[j], edge) # 测地距离\n Atemp = _angular_distance(mesh, faces[i], faces[j]) # 角距离 # 其实是余弦距离\n Gval.append(Gtemp)\n Aval.append(Atemp)\n row_indices.append(i)\n col_indices.append(j)\n # add symmetric entry\n Gval.append(Gtemp)\n Aval.append(Atemp)\n row_indices.append(j)\n col_indices.append(i)\n\n elif len(adj_faces) > 2:\n print(\"Edge with more than 2 adjacent faces: \" + str(adj_faces) + \"!\")\n\n Gval = numpy.array(Gval)\n Aval = numpy.array(Aval)\n # delta是去全局变量,外部传入的\n values = delta * Gval / numpy.mean(Gval) + \\\n (1.0 - delta) * Aval / numpy.mean(Aval)\n\n # create sparse matrix\n distance_matrix = scipy.sparse.csr_matrix(\n (values, (row_indices, col_indices)), shape=(l, l))\n return distance_matrix",
"def make_adjacency_matrix(X, metric=\"correlation\", n_neighbors=6, n_jobs=1):\n knn = NearestNeighbors(n_neighbors=n_neighbors,\n metric=metric,\n algorithm=\"brute\",\n n_jobs=n_jobs,\n ).fit(X)\n\n adjacency_matrix = knn.kneighbors_graph(X,\n mode=\"distance\",\n ).toarray()\n\n return adjacency_matrix",
"def getDistanceMatrix(self):\n v = self.getVectors()\n vLis = v.keys()\n N = len(v.keys())\n D = np.zeros([N, N], dtype=np.float32)\n print(N)\n for i in range(N):\n print(\"%d/%d\" %(i, N))\n D[i, i] = 1\n for j in range(i + 1, N):\n dist = self.cosin_sim_pairs(v[vLis[i]], v[vLis[j]])\n D[i, j] = dist\n D[j, i] = dist\n return D",
"def _compute_adjacency_matrix(self):\n\n # Set up a quick-reference index to map cells to indexes\n for i, cell in enumerate(self.sim.cells):\n self._cell_indexes[cell] = i\n\n if all([self.sim.hub.cells == [self.sim.damaged],\n self.sim.damaged not in self.sim.cells]):\n # Add the \"damaged\" virtual cell to the index if we need it\n self._cell_indexes[self.sim.damaged] = len(self.sim.cells)\n\n node_count = len(list(self._cell_indexes.keys()))\n g_sparse = np.zeros((node_count, node_count), dtype=float)\n g_sparse[:] = np.inf\n\n for cluster in self.sim.clusters + [self.sim.hub]:\n cluster_tour = cluster.tour\n i = len(cluster_tour.vertices) - 1\n j = 0\n while j < len(cluster_tour.vertices):\n start_vertex = cluster_tour.vertices[i]\n stop_vertex = cluster_tour.vertices[j]\n\n start_pt = cluster_tour.points[start_vertex]\n stop_pt = cluster_tour.points[stop_vertex]\n distance = np.linalg.norm(stop_pt - start_pt)\n\n start_seg = cluster_tour.objects[start_vertex]\n stop_seg = cluster_tour.objects[stop_vertex]\n\n start_index = self._cell_indexes[start_seg]\n stop_index = self._cell_indexes[stop_seg]\n\n g_sparse[start_index, stop_index] = distance\n\n i = j\n j += 1\n\n g_sparse = sp.csgraph_from_dense(g_sparse, null_value=np.inf)\n return g_sparse",
"def _get_tsp_matrix(graph: networkx.Graph) -> np.ndarray:\n number_of_nodes = len(graph)\n matrix = np.zeros((number_of_nodes, number_of_nodes))\n for i in nx.all_pairs_dijkstra_path_length(graph, weight=\"weight\"):\n distance_dist = i[1]\n for j in distance_dist.items():\n matrix[i[0] - 1][j[0] - 1] = j[1]\n matrix[j[0] - 1][i[0] - 1] = matrix[i[0] - 1][j[0] - 1]\n return matrix",
"def compute_distance(self, transpose=False):\n\n # Calculate distance matrix\n if transpose:\n distance_matrix = pdist(self.matrix.T, self.distance)\n else:\n distance_matrix = pdist(self.matrix, self.distance)\n\n # Remove NaNs\n distance_matrix[np.isnan(distance_matrix)] = 1.0\n\n return distance_matrix",
"def raw_google_matrix(G, nodelist=None, weight='weight'):\n import numpy as np\n\n M = nx.to_numpy_matrix(G, nodelist=nodelist, dtype=np.float32,\n weight=weight)\n n, m = M.shape # should be square\n assert n == m and n > 0\n # Find 'dangling' nodes, i.e. nodes whose row's sum = 0\n dangling = np.where(M.sum(axis=1) == 0)\n # add constant to dangling nodes' row\n for d in dangling[0]:\n M[d] = 1.0 / n\n # Normalize. We now have the 'raw' Google matrix (cf. example on p. 11 of\n # Langville & Meyer (2006)).\n M = M / M.sum(axis=1)\n return M",
"def pathfinder(Xs, numnodes=None, valid=False, td=None):\n if numnodes == None:\n numnodes = len(set(flatten_list(Xs)))\n \n # From https://github.com/evanmiltenburg/dm-graphs\n def MST_pathfinder(G):\n \"\"\"The MST-pathfinder algorithm (Quirin et al. 2008) reduces the graph to the\n unions of all minimal spanning trees.\"\"\"\n NG = nx.Graph()\n NG.add_nodes_from(list(range(numnodes)))\n edges = sorted( ((G[a][b]['weight'],a,b) for a,b in G.edges()),\n reverse=False) # smaller distances are more similar\n clusters = {node:i for i,node in enumerate(G.nodes())}\n while not edges == []:\n w1,a,b = edges[0]\n l = []\n # Select edges to be considered this round:\n for w2,u,v in edges:\n if w1 == w2:\n l.append((u,v,w2))\n else:\n break\n # Remaining edges are those not being considered this round:\n edges = edges[len(l):]\n # Only select those edges for which the items are not in the same cluster\n l = [(a,b,c) for a,b,c in l if not clusters[a]==clusters[b]]\n # Add these edges to the graph:\n NG.add_weighted_edges_from(l)\n # Merge the clusters:\n for a,b,w in l:\n cluster_1 = clusters[a]\n cluster_2 = clusters[b]\n clusters = {node:cluster_1 if i==cluster_2 else i\n for node,i in clusters.items()}\n return NG\n\n if valid and not td:\n raise ValueError('Need to pass DataModel when generating \\'valid\\' pathfinder()')\n \n N = float(len(Xs))\n distance_mat = np.zeros((numnodes, numnodes))\n for item1 in range(numnodes):\n for item2 in range(item1+1,numnodes):\n Tij = 0\n dijk = 0\n for x in Xs:\n if (item1 in x) and (item2 in x):\n Tij += 1\n dijk = dijk + (abs(x.index(item1) - x.index(item2)) / float(len(x)))\n try:\n dij = dijk * (N / (Tij**2))\n except:\n dij = 0.0 # added constraint for divide-by-zero... this will ensure that no edge will exist between i and j\n distance_mat[item1, item2] = dij\n distance_mat[item2, item1] = dij\n\n #graph = scipy.sparse.csgraph.minimum_spanning_tree(distance_mat)\n graph = nx.to_numpy_array(MST_pathfinder(nx.Graph(distance_mat)))\n\n # binarize and make graph symmetric (undirected)... some redundancy but it's cheap\n #graph = np.where(graph.todense(), 1, 0)\n graph = np.array(np.where(graph, 1, 0))\n for rownum, row in enumerate(graph):\n for colnum, val in enumerate(row):\n if val==1:\n graph[rownum,colnum]=1\n graph[colnum,rownum]=1\n\n if valid:\n graph = makeValid(Xs, graph, td)\n \n #return np.array(graph).astype(int)\n return graph",
"def adj_matrix(self):\n return nx.adj_matrix(self.network)",
"def weighted_jaccard_distance_matrix(X, w, n_jobs=1):\n vint = np.vectorize(int)\n X_int = vint(X*100)\n print \"starting to make distance matrix\"\n distance_matrix = pairwise_distances(X_int, w=w, metric=weighted_jaccard,n_jobs=n_jobs)\n print \"done making distance matrix\"\n return distance_matrix",
"def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists",
"def feature_calculator(args, graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2)+1)\n adjacency_matrix = sparse.coo_matrix((values, (index_1,index_2)),shape=(node_count,node_count),dtype=np.float32)\n degrees = adjacency_matrix.sum(axis=0)[0].tolist()\n degs = sparse.diags(degrees, [0])\n normalized_adjacency_matrix = degs.dot(adjacency_matrix)\n target_matrices = [normalized_adjacency_matrix.todense()]\n powered_A = normalized_adjacency_matrix\n if args.window_size > 1:\n for power in tqdm(range(args.window_size-1), desc = \"Adjacency matrix powers\"):\n powered_A = powered_A.dot(normalized_adjacency_matrix)\n to_add = powered_A.todense()\n target_matrices.append(to_add)\n target_matrices = np.array(target_matrices)\n return target_matrices",
"def get_adjacency_matrix(self):\n\n # Get dimension of future matrix\n dim = max([node.value for node in self.nodes])\n\n # Initialize square matrix of zeros\n # Matrix is square and indexes by from, to node values\n adjacency_matrix = [[0 for _ in range(dim+1)] for _ in range(dim+1)]\n\n # Insert edge value at the from, to coordinates\n # That is, fully identify each \"from, edge, to\" triplet\n for edge in self.edges:\n row = edge.node_from.value\n col = edge.node_to.value\n val = edge.value\n\n adjacency_matrix[row][col] = val\n\n # Return matrix of edge values indexed by from, to node values\n return adjacency_matrix",
"def _build_downhill_matrices(self, weight=0.6667):\n\n from scipy import sparse as sparse\n \n\n down_neighbour = np.empty(self.tri.npoints, dtype=np.int)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n down_array = np.ones(size)\n accu_array = np.ones(size)\n\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n \n accuMCOO = sparse.coo_matrix( (accu_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.accumulatorMat = accuMCOO.tocsr() \n\n self._build_adjacency_matrix_1()\n self._build_adjacency_matrix_2()\n \n self.downhillMat = weight * self.adjacency1 + (1.0-weight) * self.adjacency2\n\n # A1 = self.downhillMat\n # A2 = self.downhillMat.dot(self.downhillMat)\n # A2a = A1 + A2\n # A4 = A2.dot(A2)\n # A4a = A2a + A2.dot(A2a)\n # A8 = A4.dot(A4)\n # A8a = A4a + A4.dot(A4a)\n # A16 = A8.dot(A8)\n # A16a = A8a + A8.dot(A8a)\n\n # self.downhillMat16 = A16\n # self.downhillMat8 = A8\n # self.downhillMat16a = A16a\n # self.downhillMat8a = A8a\n\n # We make it optional to build these as they are not sparse \n # This cleans up previously stored matrices\n\n self.downhillCumulativeMat = None\n self.sweepDownToOutflowMat = None\n \n return",
"def Distmatrix(self):\n self.Dismatrix = np.zeros((self.nodenum, self.nodenum))\n for i in range(len(self.Dismatrix)):\n for j in range(len(self.Dismatrix)):\n self.Dismatrix[i, j] = sf.dist(self.y[i], self.x[i], self.y[j], self.x[j])\n self.Dismatrix[j, i] = self.Dismatrix[i, j]",
"def compute_distance_matrix(input1, input2, metric='euclidean'):\n # check input\n assert isinstance(input1, torch.Tensor)\n assert isinstance(input2, torch.Tensor)\n assert input1.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(\n input1.dim()\n )\n assert input2.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(\n input2.dim()\n )\n assert input1.size(1) == input2.size(1)\n\n if metric == 'euclidean':\n distmat = euclidean_squared_distance(input1, input2)\n elif metric == 'cosine':\n distmat = cosine_distance(input1, input2)\n else:\n raise ValueError(\n 'Unknown distance metric: {}. '\n 'Please choose either \"euclidean\" or \"cosine\"'.format(metric)\n )\n\n return distmat",
"def _update_distances(dist_matrix, node1, node2, new_cluster):\n # Initialize new distance matrix.\n node_label = pd.Index([str(new_cluster)])\n new_labels = dist_matrix.axes[0].drop([node1, node2]).append(node_label)\n new_dist_matrix = pd.DataFrame(np.nan, index=new_labels, columns=new_labels)\n \n # Fill in distance matrix\n # First copy over values that stay the same\n for row in new_dist_matrix.axes[0].drop(node_label):\n for col in new_dist_matrix.axes[1].drop([node_label[0], row]):\n new_dist_matrix.at[row, col] = dist_matrix.at[row, col]\n new_dist_matrix.at[col, row] = dist_matrix.at[row, col]\n \n # Distance from other OTU, k, to new node, i-j (wiki EQ 3):\n # d(i-j, k) = .5 * (dist(i, k) + dist(j, k) - dist(i, j))\n for k in new_dist_matrix.axes[1].drop(node_label):\n dist = .5 * (dist_matrix.at[k, node1]\n + dist_matrix.at[k, node2]\n - dist_matrix.at[node1, node2])\n new_dist_matrix.at[node_label, k] = dist\n new_dist_matrix.at[k, node_label] = dist\n \n # Return the distance matrix.\n return new_dist_matrix"
]
| [
"0.6378258",
"0.63765",
"0.6248437",
"0.61726147",
"0.5803808",
"0.57704085",
"0.57417667",
"0.5732218",
"0.5680238",
"0.567566",
"0.56677747",
"0.56518614",
"0.56441486",
"0.56118584",
"0.5568117",
"0.5558283",
"0.55483097",
"0.55252606",
"0.5483473",
"0.54501635",
"0.544721",
"0.54273564",
"0.54199684",
"0.54149234",
"0.541451",
"0.5410689",
"0.540619",
"0.5403153",
"0.53947467",
"0.5371241"
]
| 0.67686623 | 0 |
Handle GET requests for single game | def retrieve(self, request, pk=None):
try:
# `pk` is a parameter to this function, and
# Django parses it from the URL route parameter
# http://localhost:8000/games/2
#
# The `2` at the end of the route becomes `pk`
game = Game.objects.get(pk=pk)
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data)
except Exception as ex:
return HttpResponseServerError(ex) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_game(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n if not game:\n raise endpoints.NotFoundException('Game Not Found')\n else:\n if game.game_over:\n return game.to_form(message=\"This Game has ended!\")\n else:\n return game.to_form(message=\"Game in Progress!\")",
"def get_game(self, request):\n return games_ctrl.get_game(request.urlsafe_game_key)",
"def load_game(request, game_id):\r\n\ttry:\r\n\t\tstate = State.objects.filter(game__id=game_id, player__user=request.user).first()\r\n\t\treturn JsonResponse(state.current_state, safe=False)\r\n\texcept Exception as e:\r\n\t\treturn JsonResponse(data={}, status=500)",
"def get(self, request):\n\n queries = request.GET.dict()\n user = UserValidator.validate_user(request.user.id)\n\n if user is None:\n return JsonResponse({\n \"message\": \"Invalid credentials.\",\n }, status=400)\n\n if user is None:\n return JsonResponse({\n \"message\": \"Invalid credentials.\",\n }, status=400)\n\n try:\n game = Game.value_of(queries[\"game\"].lower())\n\n except (KeyError, ValueError, Exception):\n game = None\n\n try:\n sort = queries[\"sort\"].lower()\n\n if sort not in [\"wins\", \"total\",]:\n raise ValueError(\"invalid key value\")\n\n except (KeyError, ValueError, Exception):\n sort = \"wins\"\n\n entries = GameModel.objects.values(\"player\").filter(is_deleted=False)\n\n if game is not None:\n entries = entries.filter(game_played=game)\n game = game.value\n else:\n game = \"All\"\n\n entries = entries.annotate(\n wins=(Count(\"player\", filter=Q(did_win=True))),\n total=(Count(\"player\"))\n )\n\n if sort == \"wins\":\n entries = entries.order_by(\"-wins\")\n elif sort == \"total\":\n entries = entries.order_by(\"-total\")\n\n board = ScoreboardView.get_board_from_db_rows(entries)\n\n token = Token.get_tokens_for_user(user)\n\n return JsonResponse({\n \"game\": game,\n \"board\": board,\n \"access\": token[\"access\"],\n \"refresh\": token[\"refresh\"],\n })",
"def game_detail(req, game_id=None):\n\n data = Game.query.get(game_id)\n if data is None:\n raise NotFound()\n\n return render_response('game_detail.html', game=data)",
"def do_GET(self):\n global st_point, cur_request\n if time.time() - st_point < 1 and cur_request > args.MAX_REQ:\n self.send_response(429)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n time.sleep(0.2)\n return\n elif time.time() - st_point > 1:\n st_point = time.time()\n cur_request = 1\n self.func_PARSE()\n if self.parsed_url[2] in [\"/ping\", \"/cats\"]:\n self.func_DO()\n else:\n self.send_response(400)\n text=\"<h1 align=center>Bad request</h1>\"\n self.func_PRINT(text)",
"def do_GET(self):\n path = self.path.split('/')\n if len(path) == 3:\n key.key_events(path[2])\n self.send_head()",
"def status(request, game_id):\n try: \n game = Game.objects.get(pk=game_id)\n except Game.DoesNotExist:\n return HttpResponse(\"Game can't be found\")\n return render_to_response('ms/status.html' , RequestContext(request, {'g': game}))",
"def game_home():\n game = current_user.get_active_game()\n if game is None:\n game = current_user.get_last_ended_game()\n if game is None:\n flash('If you want to join a game, click the Join button.')\n return redirect(url_for('home'))\n else:\n flash('These are the results from your most recent ended game.')\n return redirect(url_for('game_summary', game_id=game.game_id))\n response_data = {\n 'game': game,\n 'north_playername': User.get_username_by_id(game.player_north),\n 'south_playername': User.get_username_by_id(game.player_south),\n 'east_playername': User.get_username_by_id(game.player_east),\n 'west_playername': User.get_username_by_id(game.player_west),\n 'player_direction': DirectionsEnum.NORTH, # Default to north, will check below\n 'hand': None,\n 'cards': {\n 'spades': [],\n 'hearts': [],\n 'clubs': [],\n 'diamonds': []\n },\n 'trick': None,\n 'tricks_taken': None,\n 'enable_bidding': False,\n }\n if game.player_is_direction(current_user.user_id, DirectionsEnum.SOUTH):\n response_data['player_direction'] = DirectionsEnum.SOUTH\n if game.player_is_direction(current_user.user_id, DirectionsEnum.EAST):\n response_data['player_direction'] = DirectionsEnum.EAST\n if game.player_is_direction(current_user.user_id, DirectionsEnum.WEST):\n response_data['player_direction'] = DirectionsEnum.WEST\n if game.state == GameStateEnum.FILLING:\n # No need to fetch hand or trick data, as game hasn't started yet\n return render_template('game.html', **response_data)\n elif game.state == GameStateEnum.IN_PROGRESS:\n # Fetch current Hand data\n hand = game.get_latest_hand()\n response_data['hand'] = hand\n response_data['ns_score'], response_data['ew_score'] = hand.get_score_from_previous_hand()\n cards = hand.get_playable_cards_for_user(current_user.user_id)\n cards.sort(key=lambda x: x.card)\n for suit, letter in [('spades', 'S'), ('hearts', 'H'), ('clubs', 'C'), ('diamonds', 'D')]:\n response_data['cards'][suit] = [card for card in cards if card.card.endswith(letter)]\n if None not in [\n hand.north_bid,\n hand.south_bid,\n hand.east_bid,\n hand.west_bid\n ]:\n # All bids have been placed. Fetch trick data.\n response_data['trick'] = hand.get_latest_trick()\n response_data['next_play_direction'] = response_data['trick'].get_next_play_direction()\n response_data['tricks_taken'] = {key.value: value for key, value in hand.get_total_tricks_taken().items()}\n else:\n # Waiting on at least one bid\n response_data['enable_bidding'] = game.can_user_place_bid(current_user.user_id, hand)\n return render_template('game.html', **response_data)\n else:\n # Shouldn't arrive at this state. Log it.\n flash('An unknown error occurred. Please try again.')\n logger.error(\n 'Game with id [%s] in bad state while user [%s] attempted to display game home.',\n game.game_id,\n current_user.username\n )\n return redirect(url_for('home'))",
"def do_GET(self):\r\n self._send_handler_response('GET')",
"def get_game(request):\n active_game = str(uuid4())\n GAMES[active_game] = Board()\n return {'id': active_game}",
"def render_GET(self, request):\n timestamp = int(self.url_matches[\"timestamp\"])\n \n if request.api_mode == \"prod\":\n mode_string = \"I'm production baby!\"\n elif request.api_mode == \"test\":\n mode_string = \"I'm in testing mode. :(\"\n else:\n mode_string = \"I have no clue what mode I'm in.\"\n \n response = \"PONG! Right back at ya. %s \" % mode_string\n response = response + \" (Timestamp Val: %d) \" % timestamp\n response = response + \"(API: %s, Version: %s, Mode: %s)\" % (request.api_name,\n request.api_version,\n request.api_mode)\n webapi.write_json(request, response)",
"def get_game(game_id):\n\n game = filter(lambda t: t[\"id\"] == game_id, games)\n if len(game) == 0:\n abort(404)\n return jsonify({\"game\": game[0]})",
"def retrieve(self, request, pk=None):\n try:\n game_type = GameType.objects.get(pk=pk)\n serializer = GameTypeSerializer(game_type, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def retrieve(self, request, pk=None):\n try:\n # `pk` is a parameter to this function, and Django parses it from the URL route parameter\n # http://localhost:8000/gamers/2\n #\n # The `2` at the end of the route becomes `pk`\n gamer = Gamer.objects.get(pk=pk)\n serializer = GamerSerializer(gamer, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def do_GET(self):\r\n if not self._client_allowed():\r\n return\r\n\r\n try:\r\n (_, _, path, query, _) = urlparse.urlsplit(self.path)\r\n params = urlparse.parse_qs(query)\r\n # Give each handler a chance to respond.\r\n for prefix, handler in self._GET_handlers:\r\n if self._maybe_handle(prefix, handler, path, params):\r\n return\r\n # If no path specified, default to showing the list of all runs.\r\n if path == '/':\r\n self._handle_runs('', {})\r\n return\r\n\r\n self._send_content('Invalid GET request %s' % self.path, 'text/html')\r\n except (IOError, ValueError):\r\n pass # Printing these errors gets annoying, and there's nothing to do about them anyway.\r\n #sys.stderr.write('Invalid GET request %s' % self.path)\r",
"def get(self, request, **kwargs):\n player = get_player_from_request(request)\n if player is None or player.room is not None:\n return redirect(\"rooms:redirect\")\n\n rooms = Room.objects.all()\n return render(request, self.template_name, {\"rooms\": rooms})",
"def game_view(game_id):\n response = make_response(json.jsonify({\"error\": \"Not found\"}), 404)\n updated_after = int(request.values.get(\"updatedAfter\", 0))\n\n game = queries.get_game(game_id)\n if game:\n\n # Grab the game state\n state = game.get_game_state()\n\n # If nothing has changed, let's send a minimal response\n if state[\"time_updated\"] <= updated_after:\n state = {\"game_id\": state[\"game_id\"], \"time_updated\": state[\"time_updated\"]}\n\n # Otherwise, send the full response\n # We tack on the projected board state here to make it quickly\n # accessible to the player's client -- maybe there is a better way?\n else:\n projected_board = game.board.project()\n state[\"projected\"] = projected_board.spots\n\n response = json.jsonify(game=state)\n\n return response",
"def get(self):\n room_id = sanitize(self.request.get('r'));\n if not room_id:\n \t#Redirect visitor to new room with random id\n room_id = generate_random(8)\n redirect = '/?r=' + room_id\n self.redirect(redirect)\n logging.info('Redirecting visitor to base URL to ' + redirect)\n return\n\n user = None\n initiator = 0\n #Get room from datastore\n room = Room.get_by_key_name(room_id)\n if not room: \n # New room.\n user = generate_random(8)\n room = Room(key_name = room_id)\n room.add_user(user)\n initiator = 0\n elif room and room.get_occupancy() == 1: \n # 1 occupant.\n user = generate_random(8)\n room.add_user(user)\n initiator = 1\n else:\n # 2 occupants (full).\n path = os.path.join(os.path.dirname(__file__), 'full.html')\n self.response.out.write(template.render(path, { 'room_id': room_id }));\n logging.info('Room ' + room_id + ' is full');\n return\n\n token = channel.create_channel(make_client_id(room, user))\n pc_config = make_pc_config('')\n logging.info('Room id: ' + str(room_id))\n template_values = {'token': token,\n 'me': user,\n 'room_id': room_id,\n 'initiator': initiator,\n 'pc_config': pc_config\n }\n path = os.path.join(os.path.dirname(__file__), 'index.html')\n self.response.out.write(template.render(path, template_values))\n logging.info('User ' + user + ' added to room ' + room_id);\n logging.info('Room ' + room_id + ' has state ' + str(room))",
"def get():\n return jsonify(baby='knight2'), 200",
"def game():\n\tif \"username\" in session:\n\t\treturn render_template(\"index.html\")\n\telse:\n\t\treturn redirect(url_for(\"default\"))",
"def get(self, request):\n pass",
"def get_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game:\n if game.game_over:\n return game.to_form('Game is over.')\n else:\n return game.to_form('Make a move!')\n else:\n raise endpoints.NotFoundException('Game not found!')",
"def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')",
"def player(hass, request):\n return request.param(hass)",
"def test_api_new_game(self):\n\n with self.client as client:\n response = client.get('/api/new-game')\n response_json = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"gameId\", response_json)\n parsed_json = json.loads(response_json)\n self.assertEqual(type(parsed_json[\"gameId\"]), str)\n self.assertEqual(type(parsed_json[\"board\"]), list)\n self.assertEqual(type(parsed_json[\"board\"][0]), list)\n\n # write a test for this route",
"def get(self, request, **kwargs):\n player = get_player_from_request(request)\n if player is None:\n return redirect(\"rooms:create_user\")\n elif player.room is None:\n return redirect(\"rooms:overview_room\")\n elif player.room.game is not None:\n return player.room.game.get_redirect_route()\n else:\n return redirect(\"rooms:room\", room=player.room)",
"def get(self):\n\n max_level = ''\n star_obj = ''\n\n # - get user\n user = users.get_current_user()\n if user:\n login_key = users.create_logout_url(self.request.uri)\n gate = 'Logout'\n user_name = user.nickname()\n game = Game.get_by_id(user.user_id())\n if game:\n\t \tmax_level = game.max_level\n\t \tstar_obj = game.star_obj\n\n else: # - logged out\n login_key = users.create_login_url(self.request.uri)\n gate = 'Sign in to Save Progress'\n user_name = ''\n\n template_values = {\n 'login_key': login_key,\n 'gate': gate,\n 'user_name': user_name,\n 'max_level': max_level,\n 'star_obj': star_obj,\n }\n\n path = os.path.join(os.path.dirname(__file__), 'index.html')\n self.response.out.write(template.render(path, template_values))",
"def get(request, season):\n try:\n secret_token = settings.DATA_SECRET_TOKEN\n except AttributeError:\n secret_token = None\n\n if secret_token is not None:\n\n if 'Http-Auth-Token' not in request.headers:\n print('no token header')\n print(request.headers)\n return HttpResponseForbidden()\n\n auth = request.headers['Http-Auth-Token'].split()\n\n if len(auth) != 2:\n print('header wrong length')\n return HttpResponseForbidden()\n\n if auth[0].lower() != \"pool-token\":\n print('no pool-token')\n return HttpResponseForbidden()\n\n token = auth[1]\n\n if secret_token is None or token != secret_token:\n print('mismatch token')\n return HttpResponseForbidden()\n\n if season == 'all' or season == 0:\n matches = Match.objects.all()\n else:\n season_obj = get_object_or_404(Season, number=season)\n matches = Match.objects.filter(season=season_obj)\n\n data = [match.serialize() for match in matches]\n return JsonResponse(data, safe=False)",
"def do_GET(self):\n\n files = { \"/index.html\": \"index.html\",\n \"/\" : \"index.html\",\n \"/timeline-min.js\": \"timeline-min.js\",\n \"/timeline.js\": \"timeline.js\",\n \"/timeline.css\": \"timeline.css\"\n }\n if self.path in files:\n self._ServeFile(files[self.path])\n return\n\n if self.path.startswith(\"/api/data\"):\n self._ServeData()\n return\n\n self.send_error(404,'File Not Found: %s' % self.path)"
]
| [
"0.66888034",
"0.66363895",
"0.64767647",
"0.647073",
"0.6434573",
"0.6400234",
"0.63553363",
"0.634393",
"0.6320819",
"0.6303164",
"0.6297841",
"0.62595314",
"0.6243499",
"0.62432027",
"0.6229112",
"0.6198054",
"0.61873883",
"0.6175521",
"0.61560243",
"0.61315876",
"0.6109407",
"0.6091886",
"0.60548675",
"0.6050281",
"0.604659",
"0.60042787",
"0.5970749",
"0.5968759",
"0.59533",
"0.5941383"
]
| 0.6785095 | 0 |
Handle DELETE requests for a single game | def destroy(self, request, pk=None):
try:
game = Game.objects.get(pk=pk)
game.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
except Game.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_game(request, game_id):\n user = request.user\n game = Game.objects.get(id=game_id)\n\n if not user.is_staff and not user in game.moderators.all():\n return HttpResponseRedirect('/game_details/' + game_id + '/')\n\n # Not only we have to delete object from database, but also all files related to it\n gamename = game.name\n path = settings.MEDIA_ROOT\n game.delete()\n \n system('rm -rf ' + path + settings.JUDGES_SOURCES_DIR + '/' + gamename + '/')\n system('rm -rf ' + path + settings.JUDGES_BINARIES_DIR + '/' + gamename + '/')\n system('rm -rf ' + path + settings.RULES_DIR + '/' + gamename + '/')\n\n return HttpResponseRedirect('/')",
"def delete_upload(request):\r\n\tgame_id = request.GET['id']\r\n\tgame = Game.objects.get(id = game_id)\r\n\tif(request.user.profile == game.developer):\r\n\t\tif request.method == 'POST':\r\n\t\t\tgame.delete()\r\n\t\t\tprint('game deleted')\r\n\t\t\treturn redirect('developer_dashboard')\r\n\t\telse:\r\n\t\t\treturn render(request, 'confirm_delete.html', {'game':game})\r\n\telse:\r\n\t\treturn redirect('home')",
"def delete_game(game_id):\n\n game = filter(lambda t: t[\"id\"] == game_id, games)\n if len(game) == 0:\n abort(404)\n games.remove(game[0])\n return jsonify({\"result\": True})",
"def delete_game(game_id):\n try:\n is_admin = True if \"admin\" in session else False\n if is_admin:\n mongo.db.terms.remove({\"game_fk\": ObjectId(game_id)})\n mongo.db.games.remove({\"_id\": ObjectId(game_id)})\n flash(\"Game successfully deleted\", category=\"success\")\n return redirect(url_for(\"get_games\"))\n else:\n flash(\"You do not have permission to manage supported games\",\n category=\"error\")\n return redirect(url_for(\"get_terms\"))\n except KeyError:\n flash(Markup(\"Please <a href='login'>\"\n \"login</a> to delete a game\"), category=\"error\")\n return redirect(url_for(\"get_terms\"))",
"def delete_game(sid, msg):\n uuid = msg['uuid']\n game = Game.objects.get(uuid=uuid)\n game.delete()",
"async def delete_game(self, game_id):\n game = await self.get_game(game_id)\n await ex.conn.execute(\"DELETE FROM blackjack.games WHERE gameid = $1\", game_id)\n await ex.conn.execute(\"DELETE FROM blackjack.currentstatus WHERE userid = $1\", game[1])\n await ex.conn.execute(\"DELETE FROM blackjack.currentstatus WHERE userid = $1\", game[2])\n log.console(f\"Game {game_id} deleted.\")",
"def delete(self, request , pk=None): \n return Response({'message':'DELETE'})",
"def delete_board(request):\n required_fields = ['user_id', 'game_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['game_id'])) \\\n or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here delete the game board from user's saved profile\n if not db.delete_game(data['user_id'], data['game_id']):\n return Response({'error': str('Error when deleting the game!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})",
"def delete(self, _id):",
"def api_delete_dish(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Delete the dish.\n Dish.objects.get(id=id).delete()\n\n close_old_connections()\n \n return HttpResponse('Deleted.')",
"def delete_game(game_id):\n\n game = Game.query.get(game_id)\n db.session.delete(game)\n db.session.commit()",
"def delete(self):\n self.request().delete()",
"def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json",
"def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})",
"def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)",
"def delete():",
"def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)",
"def delete(self, *args, **kwargs):\n return self.handle_delete_request()",
"def handle_delete(self, api, command):\n return self._make_request_from_command('DELETE', command)",
"def test_delete(self):\n uid = self._game.uid\n self._game.save(self._ds)\n self._game.delete(self._ds)\n self._game = None\n self.assertIsNone(self._ds.get_strict_controller(Game, uid))\n with self.assertRaises(ValueError):\n self._ds.get_controller(Game, uid)",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"def _delete(self, url):\n return self._request(url, method=\"DELETE\")",
"def _delete_request(self, url):\n url = self.baseUrl + url\n logger.debug(\"DELETE %s\", url)\n with self.session.delete(url) as req:\n try:\n result = req.json()\n except json.decoder.JSONDecodeError as exc:\n raise errors.PluginError(\"no JSON in API response\") from exc\n if result[\"result\"] == \"success\":\n return result[\"data\"]\n raise errors.PluginError(\n \"error in API request: {} / {}\".format(\n result[\"error\"][\"code\"], result[\"error\"][\"description\"]\n )\n )",
"def delete_player(player_id):\n if request.method == 'GET':\n Player.query.filter_by(player_id=player_id).delete()\n db.session.commit()\n return getAllPlayers()",
"def delete(self, url):\n return self.request(url, \"DELETE\")",
"def matchRequest_delete(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Find the request\n\t\toRequest = MatchRequest.get(data['id'])\n\t\tif not oRequest:\n\t\t\treturn Services.Effect(error=(1104, 'match_request:%s' % data['id']))\n\n\t\t# If the deleter is not the initiator or opponent\n\t\tif sesh['thrower']['_id'] != oRequest['initiator'] and \\\n\t\t\tsesh['thrower']['_id'] != oRequest['opponent']:\n\t\t\treturn Services.Effect(error=1000)\n\n\t\t# Delete it\n\t\tif not oRequest.delete():\n\t\t\treturn Services.Effect(error=1102)\n\n\t\t# If the initiator retracted their request\n\t\tif sesh['thrower']['_id'] == oRequest['initiator']:\n\n\t\t\t# Let the opponent know\n\t\t\tSync.push('auth', 'requests-%s' % oRequest['opponent'], {\n\t\t\t\t\"type\": \"match_request_delete\",\n\t\t\t\t\"id\": data['id']\n\t\t\t})\n\n\t\t# Else the opponent rejected the request\n\t\telse:\n\n\t\t\t# Let the initiator know\n\t\t\tSync.push('auth', 'request-%s' % data['id'], {\n\t\t\t\t\"type\": \"rejected\"\n\t\t\t})\n\n\t\t# Return OK\n\t\treturn Services.Effect(True)",
"def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_status\":\"error\"}",
"def post_delete():\n req_data = request.get_json()\n print('This is the request itself \\n', req_data)\n print(req_data['name'])\n flask_wms.delete_entry(req_data['name'])\n return 'Request recieved, delete method'",
"def delete_data(request, result_id):\n result = TestResult.objects.get(id=result_id)\n result.delete()\n gun = result.bullet.gun\n return HttpResponseRedirect(reverse('gun', args=[gun.id]))",
"def event_delete(req):\n event_id = req.match_dict['event_id']\n try:\n db_conn.event_delete(event_id)\n json = {'deleted': True}\n except Exception as e:\n json = {'errors': [str(e)]}\n return req.Response(json=json)"
]
| [
"0.7457039",
"0.72932535",
"0.7260357",
"0.69909835",
"0.6901931",
"0.6847532",
"0.6820824",
"0.67531025",
"0.66829526",
"0.65518534",
"0.6548844",
"0.65336424",
"0.6498319",
"0.64814496",
"0.6447743",
"0.64435804",
"0.6435766",
"0.64318997",
"0.6413047",
"0.6384403",
"0.63819087",
"0.6377246",
"0.6365519",
"0.6361566",
"0.6344953",
"0.63424397",
"0.6342349",
"0.6335645",
"0.6327962",
"0.63106203"
]
| 0.75383687 | 0 |
Managing gamers signing up for games | def signup(self, request, pk=None):
# A gamer wants to sign up for an game
if request.method == "POST":
# The pk would be `2` if the URL above was requested
game = Game.objects.get(pk=pk)
# Django uses the `Authorization` header to determine
# which user is making the request to sign up
gamer = Gamer.objects.get(user=request.auth.user)
try:
# Determine if the user is already signed up
registration = Gamer.objects.get(
game=game, gamer=gamer)
return Response(
{'message': 'Gamer already follows this game.'},
status=status.HTTP_422_UNPROCESSABLE_ENTITY
)
except Gamer.DoesNotExist:
# The user is not signed up.
registration = Gamer()
registration.game = game
registration.gamer = gamer
registration.save()
return Response({}, status=status.HTTP_201_CREATED)
# User wants to leave a previously joined game
elif request.method == "DELETE":
# Handle the case if the client specifies a game
# that doesn't exist
try:
game = Game.objects.get(pk=pk)
except Game.DoesNotExist:
return Response(
{'message': 'User is not following this game.'},
status=status.HTTP_400_BAD_REQUEST
)
# Get the authenticated user
gamer = Gamer.objects.get(user=request.auth.user)
try:
# Try to delete the signup
registration = Follower.objects.get(
game=game, gamer=gamer)
registration.delete()
return Response(None, status=status.HTTP_204_NO_CONTENT)
except Follower.DoesNotExist:
return Response(
{'message': 'Not currently following this game.'},
status=status.HTTP_404_NOT_FOUND
)
# Calculate the averge and return it.
# If you don't know how to calculate averge, Google it.
# If the client performs a request with a method of
# anything other than POST or DELETE, tell client that
# the method is not supported
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def signup():",
"def sign_up(self):\n self.log(\"Bot player signing up.\")\n self.subscribe_to_quorum_channel()\n while True:\n url = (\n \"{host}/participant/{self.worker_id}/\"\n \"{self.hit_id}/{self.assignment_id}/\"\n \"debug?fingerprint_hash={hash}&recruiter=bots:{bot_name}\".format(\n host=self.host,\n self=self,\n hash=uuid.uuid4().hex,\n bot_name=self.__class__.__name__,\n )\n )\n try:\n result = requests.post(url)\n result.raise_for_status()\n except RequestException:\n self.stochastic_sleep()\n continue\n\n if result.json()[\"status\"] == \"error\":\n self.stochastic_sleep()\n continue\n\n self.on_signup(result.json())\n return True",
"def signup(request):\r\n\tif request.user.is_authenticated:\r\n\t\t# Redirect user to home if already logged in\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\tif request.method == 'POST':\r\n\t\tform = SignUpForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tuser = form.save()\r\n\t\t\tuser.refresh_from_db() # Retreive the newly saved object\r\n\t\t\tuser.is_active = False\r\n\t\t\tuser.profile.is_developer = form.cleaned_data.get('is_developer')\r\n\t\t\tuser.save()\r\n\t\t\t# Get current domain name and generate the user token\r\n\t\t\tcurrent_site = get_current_site(request)\r\n\t\t\tencodeded_uid = urlsafe_base64_encode(force_bytes(user.pk))\r\n\r\n\t\t\t# Create email subject and body\r\n\t\t\tsubject = 'Activate Your PlayMe Account'\r\n\t\t\tmessage = render_to_string('account_activation_email.html', {\r\n\t\t\t\t'user': user,\r\n\t\t\t\t'domain': current_site.domain,\r\n\t\t\t\t'uid': encodeded_uid.decode('utf-8'),\r\n\t\t\t\t'token': account_activation_token.make_token(user),\r\n\t\t\t})\r\n\t\t\tuser.email_user(subject, message)\r\n\t\t\treturn redirect('account_activation_sent')\r\n\telse:\r\n\t\tform = SignUpForm()\r\n\treturn render(request, 'registration/signup.html', {'form': form})",
"async def signups(self, ctx: commands.Context):\n if ctx.invoked_subcommand is None:\n if ctx.subcommand_passed is None:\n # No subcommand passed at all\n return await ctx.send(f\"Use '{self.prefix(ctx)}help signups' for more information.\")\n else:\n # Invalid subcommand passed\n return await ctx.send(\"No such game exists.\")\n else:\n if ctx.channel.id != self.data[ctx.guild.id]['channel']:\n raise GamesError(\"Games can only be played in the designated channel.\")",
"def register_galaxy(self):\n driver = self._get_webdriver()\n\n try:\n driver.get(self.GALAXY_URL)\n driver.find_element_by_link_text(\"Login or Register\").click()\n driver.find_element_by_id(\"register-toggle\").click()\n driver.find_element_by_name(\"email\").send_keys(self.GALAXY_EMAIL)\n driver.find_element_by_name(\"password\").send_keys(self.GALAXY_PASSWORD)\n driver.find_element_by_name(\"confirm\").send_keys(self.GALAXY_PASSWORD)\n driver.find_element_by_name(\"username\").send_keys(\"irida-test\")\n driver.find_element_by_name(\"create\").click()\n\n driver.get(self.GALAXY_URL)\n driver.find_element_by_link_text(\"Login or Register\").click()\n driver.find_element_by_name(\"login\").send_keys(self.GALAXY_EMAIL)\n driver.find_element_by_name(\"password\").send_keys(self.GALAXY_PASSWORD)\n driver.find_element_by_xpath(\"//button[@name='login']\").click()\n driver.find_element_by_name(\"login\").click()\n except selenium_exceptions.NoSuchElementException:\n pass\n finally:\n driver.quit()",
"def on_start(self):\n # self.signup()",
"def signup(self):\n # sign up\n new_username = generate_username()\n success = signup_helper(self, new_username)\n if success:\n # go to AuthenticatedTasks\n self.user.username = new_username\n self.interrupt()",
"def add_gamer_profile():\n user_id = current_user.get().id\n user = User.objects.get_or_404(id=user_id)\n\n # should we validate object_id here?\n\n # auto populate with g.formdata\n game_form = GamerForm(g.formdata)\n game_form.populate_obj(user.gamer)\n\n if request.files and request.files['avatar']:\n filename = Avatars.save(request.files['avatar'])\n user.gamer.avatar = Avatars.url(filename)\n\n # manually handle games\n user.gamer.games = [Game(id=_id)\n for _id in request.form.getlist('games')]\n\n user.save()\n return user",
"def sign_up(self):\n print('-=' * 12 + \" Sigh Up \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n username= input(\"User name: \")\n register_flag = self.auth.register(mob_num, password, username)\n if register_flag:\n print(\"Done registering, sign in NOW.\")\n return self.logging_page()\n else:\n print(\"This mobile number is already registered.\\n\" + '-=' * 30)\n options = {1: self.sign_up, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n return self._take_option(options, print_out)",
"async def signups_helper(self, ctx, game: str, minimum: int=2, maximum: int=50, rounds: int=1) -> bool:\n guild = ctx.guild.id #`guild` is actually the guild's id, but using guild to shorten the variable\n # Check if there is an existing game\n self._existing_game(ctx)\n\n # Creation of embed to start signups\n embed = discord.Embed(title=f\"Game of '{game.capitalize()}' by {ctx.author}\",\n description=f\"Sign up by reacting 🙋♂️ to this message!\\n{rounds} Rounds\\nMinimum Players: {minimum}\\nMaximum Players: {maximum}\",\n color=discord.Colour(random.randint(0, 16777215)))\n embed.add_field(name=\"Current Signups\", value='None', inline=True)\n embed.set_footer(text=f\"React ▶️ to close signups and start the game or react ⏹️ to cancel the game.\\nOnly the host or server moderators can start or cancel the game.\")\n self.games_info[guild][0] = await ctx.send(embed=embed)\n\n reactions = ('🙋♂️', '▶️', '⏹️')\n for emoji in reactions:\n await self.games_info[guild][0].add_reaction(emoji)\n self.games_info[guild][1] = True\n \n # Not sure if it is a bug, but somehow the bot when it reacts the stop button,\n # can stop the game. No idea how, but just to resolve it:\n await asyncio.sleep(1)\n\n # Wait for signal to start or cancel game\n def stop_signups_check(reaction, user:discord.Member):\n return (reaction.emoji in ['▶️', '⏹️']\n and reaction.message.id == self.games_info[guild][0].id\n and (user.id == ctx.author.id \n or ctx.channel.permissions_for(user).manage_guild))\n while True:\n signal, user = await self.bot.wait_for('reaction_add', check=stop_signups_check)\n if signal.emoji == '▶️':\n player_count = len(self.games_info[guild][2])\n # Check if number of players fits the requirement\n if player_count >= minimum and player_count <= maximum:\n self.games_info[guild][1] = False # Ensure that number of players don't change\n await ctx.send(f\"Request by {user}: Starting Game\")\n return True\n else:\n await ctx.send(f\"Recevied request to start game by {user}, but number of players does not meet requirement.\")\n elif signal.emoji == '⏹️':\n await ctx.send(f\"Game cancelled by {user}.\")\n self.games_info[guild] = gamesDict()\n return False\n else:\n raise Exception # Shouldn't happen by the nature of the above code",
"def signup(self, request, user):\n pass",
"def signup(request):\n\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n # Save the form\n form.save()\n # Create the user\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n # also make sure that the user gets into the STAFF,\n # otherwise he/she may not see the admin pages\n user = authenticate(username=username, \n password=raw_password,\n is_staff=True)\n user.is_staff = True\n user.save()\n # Add user to the \"RegistryUser\" group\n gQs = Group.objects.filter(name=\"seeker_user\")\n if gQs.count() > 0:\n g = gQs[0]\n g.user_set.add(user)\n # Log in as the user\n login(request, user)\n return redirect('home')\n else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})",
"def signup(**kwargs):\n\n pass",
"def _register(self):\n self._log(self.botlog, 'Registering as %s' % self.nickname)\n self._send('USER %s B C :%s' % (self.ident, self.realname))\n self._send('NICK %s' % self.nickname)",
"def signup(request):\r\n return {}",
"def testing_setup_finished_game():\n black_user = request.form['black_email']\n white_user = request.form['white_email']\n setup_finished_game_internal(black_user, white_user)\n return ''",
"def check_google_token():\n #get token from login page and google's token rigamorole\n gtoken = request.form.get(\"idtoken\")\n #validate token\n g_profile = apiapijoyjoy.validate_google_token(gtoken)\n #collect user info from google\n name = g_profile['given_name']\n lname = g_profile['family_name']\n email = g_profile['email']\n # start a session\n session[\"user_id\"] = email\n user = User.query.filter_by(email=email).first()\n #create flags for Flask to return to google's scripts and take frontend action accordingly\n if user:\n return \"FLASK SEES USER\"\n else:\n #create new user in SQLAlchemy using info above from Google. BUT WAIT WHAT?! CODE REVIEW PLS!\n new_user = User(email=email, name=name, lname=lname)\n db.session.add(new_user)\n db.session.commit()\n return \"FLASK SEES NO USER\"",
"def join_game(game):\n game = int(game)\n if 0 > game or game > len(games):\n return \"Not a valid gameBike\"\n if games.join_game(game):\n return \"Registration done\"\n else:\n return \"Not valid registration\"",
"def sign_up():\n #POST - the info coming from the sign-up-form\n\n #get username and password that was filled in sign-up form\n #if username exits - flash \"username taken\" and redirct to /sign-up-form\n\n #else save the new user to the database - user table, flash success message\n #and redirect back to /more-details/cat_id",
"async def new_game():\n if enough_players():\n GAME.new_game()\n await update_players()",
"async def _sign_in(self, ctx: Context, *, ignored: str = None):\n\n guild: discord.Guild = ctx.guild\n channel: discord.TextChannel = ctx.channel\n author: discord.Member = ctx.author\n\n if await self.config.guild(guild).get_raw(\"cycle\", \"number\") is not None:\n return await ctx.send(_(\"You can't do that. The game has already started!\"))\n\n if not await self.config.guild(guild).signups_on():\n return await ctx.send(_(\"Sign-ups are closed!\"))\n\n if not await self.check_total(guild):\n return await ctx.send(_(\"Maximum allowed players signed up!\"))\n\n if await self.config.guild(guild).signup_channel() == channel.id:\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role not in author.roles:\n try:\n await author.add_roles(player_role)\n await self.update_total(ctx, override=1)\n except discord.Forbidden:\n return await ctx.send(\n _(\n \"I either don't have permissions to manage\"\n \" roles or the `{}` role is above my highest role!\"\n ).format(player_role.name)\n )\n\n await self.remove_extra_roles(ctx, [\"spec\", \"repl\"])\n\n await ctx.message.add_reaction(CHECK_MARK)",
"async def _signups_fishing(self, ctx: commands.Context, num_rounds: PositiveInt=5):\n # Start signups:\n status = await self.signups_helper(ctx, 'fishing', rounds=num_rounds)\n if not status:\n return None\n\n scoreboard = defaultdict(int)\n for i in range(num_rounds):\n await asyncio.sleep(5 * random.random() + 5)\n await ctx.send(f\"Round {i+1} of {num_rounds}: There is a tug on the fishing rod! Type 'catch' to catch the fish!\")\n\n def catch_check(message):\n return (message.content.lower() == \"catch\" \n and message.author in self.games_info[ctx.guild.id][2])\n try:\n message = await self.bot.wait_for('message', check=catch_check, timeout=7)\n scoreboard[message.author] += 1\n result = f\"{message.author} caught the fish!\\n\"\n except asyncio.TimeoutError:\n result = \"Nobody caught the fish!\\n\"\n if i == num_rounds - 1:\n await ctx.send(result + \"Ending the game...\")\n else:\n await ctx.send(result + \"Moving to the next round...\")\n \n return await self.finish_game(ctx, scoreboard)",
"def signup_process(request):\r\n params = request.params\r\n email = params.get('email', None)\r\n\r\n if not email:\r\n # if still no email, I give up!\r\n return {\r\n 'errors': {\r\n 'email': 'Please supply an email address to sign up.'\r\n }\r\n }\r\n else:\r\n email = email.lower()\r\n\r\n # first see if the user is already in the system\r\n exists = UserMgr.get(email=email)\r\n if exists:\r\n return {\r\n 'errors': {\r\n 'email': 'The user has already signed up.'\r\n }\r\n }\r\n\r\n new_user = UserMgr.signup_user(email, 'signup')\r\n if new_user:\r\n # then this user is able to invite someone\r\n # log it\r\n AuthLog.reactivate(new_user.username)\r\n\r\n # and then send an email notification\r\n # @todo the email side of things\r\n settings = request.registry.settings\r\n\r\n # Add a queue job to send the user a notification email.\r\n tasks.email_signup_user.delay(\r\n new_user.email,\r\n \"Enable your Bookie account\",\r\n settings,\r\n request.route_url(\r\n 'reset',\r\n username=new_user.username,\r\n reset_key=new_user.activation.code\r\n )\r\n )\r\n\r\n # And let the user know they're signed up.\r\n return {\r\n 'message': 'Thank you for signing up from: ' + new_user.email\r\n }\r\n else:\r\n return {\r\n 'errors': {\r\n 'email': 'There was an unknown error signing up.'\r\n }\r\n }",
"def testing_create_game():\n black_user = request.form['black_email']\n white_user = request.form['white_email']\n stones = json.loads(request.form['stones'])\n create_game_internal(black_user, white_user, stones)\n return ''",
"def auto_register(request,backend=None,error_msgs=''):\r\n # Check if a username is provided\r\n username_form = forms.AutoRegisterForm()\r\n if request.method == 'POST' and request.POST.get('username'):\r\n name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')\r\n username_form = forms.AutoRegisterForm(request.POST)\r\n if username_form.is_valid():\r\n username = username_form.cleaned_data['username']\r\n try:\r\n interface.get_user_without_password(username)\r\n error_msgs ='That username is already in use.'\r\n except DoesNotExistError:\r\n request.session['saved_username'] = request.POST['username']\r\n backend = request.session[name]['backend']\r\n return redirect('socialauth_complete', backend=backend)\r\n name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')\r\n backend=request.session[name]['backend']\r\n return render_to_response('accounts/auto_register.html', {'backend' : backend, 'error_msgs' : error_msgs, 'username_form' : username_form}, RequestContext(request))",
"def registration_submission(request):\n username = request.POST.get('username', '')\n firstname = request.POST.get('firstname', '')\n lastname = request.POST.get('lastname', '')\n email = request.POST.get('email', '')\n password = request.POST.get('password', '')\n if len(User.objects.filter(username=username)) != 0: #pylint: disable=E1101\n return registration(request, \"Try again, the username %s %s.\"\n % (username, \"is already taken\"))\n if len(User.objects.filter(email=email)) != 0: #pylint: disable=E1101\n return registration(request, \"Try again, %s %s.\"\n % (\"there is already an account with that email\", email))\n user = User.objects.create_user(username=username, email=email, password=password,\n first_name=firstname, last_name=lastname) #pylint: disable=E1101\n first_char = user.character_set.create(character_name=\"pin#1111\", character_pin=\"1111\")\n first_char.is_logged = True\n first_char.save()\n Level_num.objects.create(user=user, user_point=0, user_level=1)\n Game_saved.objects.create(user=user, adventure_saved=\"\", task_saved=\"\")\n user = auth.authenticate(username=username, password=password)\n auth.login(request, user)\n return HttpResponseRedirect('/')",
"def create_login():\n user = {}\n username = input(\"We see that you are a new user! Please create a username!\\n\")\n password = input(\"Please input your password \\n\")\n clear_screen()\n print(\"Username accepted. Now generating g-number:\\n\")\n g_num = randint(00000000,99999999)\n print(\"Your new G-Number is: G\", g_num)\n user[username] = [username, password, g_num, 0.0]\n return user #Return type: dictionary",
"def user():",
"def register_user():\n pass",
"def game_created(self, pname, game):\n logging.debug('Game Created:')\n logging.debug(game)\n g = self.games.get(game['matchid'], None)\n if g:\n g.roomid = game['roomid']\n g.tableindex = game['tableindex']\n self.comm.game_ready(g)"
]
| [
"0.67609954",
"0.650206",
"0.61219937",
"0.59892434",
"0.59672654",
"0.59607625",
"0.59262365",
"0.5910292",
"0.5818596",
"0.58168954",
"0.57337415",
"0.5712152",
"0.5684307",
"0.5663185",
"0.55891526",
"0.55803233",
"0.55419314",
"0.5534772",
"0.5530431",
"0.5478124",
"0.5475521",
"0.54641074",
"0.546295",
"0.54623586",
"0.5461272",
"0.54603666",
"0.5443106",
"0.54410887",
"0.5440438",
"0.54353064"
]
| 0.6540942 | 1 |
Returns lists of random preference orders for a specified of buyers. Arguments | def get_preferences(buyer_count=5):
buyer_wants = np.random.randint(1, 4, buyer_count)
seller_count = sum(buyer_wants)
buyer_start = range(seller_count)
seller_start = range(buyer_count)
buyer_prefs = list()
seller_prefs = list()
for i in range(buyer_count):
random.shuffle(buyer_start)
buyer_prefs.append(buyer_start[:])
for i in range(seller_count):
random.shuffle(seller_start)
seller_prefs.append(seller_start[:])
return buyer_prefs, seller_prefs, buyer_wants | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink",
"def get_matches(buyer_prefs, seller_prefs, buyer_counts):\n\n buyer_prefs = copy.deepcopy(buyer_prefs)\n seller_prefs = copy.deepcopy(seller_prefs)\n\n buyer_matches = []\n for i in range(len(buyer_prefs)):\n buyer_matches.append([])\n\n seller_matches = [None] * len(seller_prefs)\n\n while None in seller_matches:\n\n for buyer, buyer_pref in enumerate(buyer_prefs):\n\n sellers_needed = buyer_counts[buyer] - len(buyer_matches[buyer])\n for i in range(sellers_needed):\n\n seller = buyer_pref.pop(0)\n prev_buyer = seller_matches[seller]\n\n if prev_buyer is None:\n # If seller hasn't been matched yet, do it\n seller_matches[seller] = buyer\n buyer_matches[buyer].append(seller)\n\n else:\n # Seller has been matched... replace if new buyer preferred\n seller_pref = seller_prefs[seller]\n if seller_pref.index(buyer) < seller_pref.index(prev_buyer):\n seller_matches[seller] = buyer\n buyer_matches[prev_buyer].remove(seller)\n buyer_matches[buyer].append(seller)\n\n\n return seller_matches",
"def generate_customer_order():\n order_size = rand_order_size()\n corp_serv = corp_service()\n menu = corp_serv.get_menu_items()\n order = []\n for _ in range(order_size):\n choice = menu[randint(0, len(menu))]\n howmany = poisson(1.0) + 1\n order.append({\"name\": choice, \"quantity\": howmany})\n return order",
"def get_orders(tickers, limit):\n if type(tickers) != list:\n raise TypeError(\"tickers is malformed\")\n raise NotImplementedError(\"This is a template model\")",
"def make_prefs(player_names, seed):\n\n np.random.seed(seed)\n player_prefs = {\n name: np.random.permutation(\n [p for p in player_names if p != name]\n ).tolist()\n for name in player_names\n }\n\n return player_prefs",
"def get_all_orders():",
"def setup_game(num_players=1, num_decks=1):\n players = []\n for i in range (num_players):\n players.append(player(i+1))\n new_dealer = dealer(\"Dealer\")\n play_shoe = shoe(num_decks).shuffle_shoe()\n return players, new_dealer, play_shoe",
"def make_players(player_names, seed):\n\n np.random.seed(seed)\n players = [Player(name) for name in player_names]\n\n for player in players:\n player.set_prefs(\n np.random.permutation([p for p in players if p != player]).tolist()\n )\n\n return players",
"def _get_peppers(login):\n # Make the random sequence dependent on the user login\n random.seed(login)\n # noinspection PyUnusedLocal\n peppers = [random.randint(0, 9999999) for r in xrange(255)]\n\n # Jump to a request dependent state to shuffle the peppers.\n # This ensures that the shuffle is different from time to time\n random.jumpahead(int(time.time()))\n random.shuffle(peppers)\n\n # Yield the peppers one by one\n for pepper in peppers:\n yield pepper",
"def extract_buy_and_sell_orders(orders):\n buy_orders = []\n sell_orders = []\n for order in orders:\n if order.side == TradingClass.DatabaseHandlerUtils.Side.BUY:\n buy_orders.append(order)\n elif order.side == TradingClass.DatabaseHandlerUtils.Side.SELL:\n sell_orders.append(order)\n return buy_orders, sell_orders",
"def create_orders(self, new_weights):\n cur_weights = self.normalized_holdings()\n vols = ((new_weights - cur_weights) * self.total_wealth())[:-1]\n holdings = self.investor.portfolio\n tickers = sorted(holdings)\n prices = np.array([self.market.price_for(t) for t in tickers])\n\n # identify the correct prices for bid and ask transactions\n bid_asks = [p[(v < 0).astype(int)] for v, p in zip(vols, prices)]\n\n orders = []\n for v, ba, t in zip(vols, bid_asks, tickers):\n amt = np.abs((v / ba).astype(int))\n b_or_a = Bid if v > 0 else Ask\n if v != 0:\n orders.append(b_or_a(price=ba, amount=amt,\n ticker=t, other_party=self.investor))\n return orders",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset in pair:\n holder.append(pair)\n if asset == \"XBT\":\n asset = \"BTC\"\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n if asset[0]==\"X\" or asset[0]==\"Z\":\n asset = asset[1:]\n holder = []\n for pair in self.allPairs:\n if asset in pair:\n holder.append(pair)\n if asset == \"XBT\":\n asset = \"BTC\"\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n if asset[0]==\"X\" or asset[0]==\"Z\":\n asset = asset[1:]\n holder = []\n for pair in self.allPairs:\n if asset in pair:\n holder.append(pair)\n if asset == \"XBT\":\n asset = \"BTC\"\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def make_order_by_combination(self):\n order_by_list = []\n order_by = self.request.GET.get(\"order_by\", None)\n\n if order_by:\n order_by_list = [SORT_BY_REFERENCE_DICT[i.strip()]\n for i in order_by.split(\",\")]\n\n return order_by_list",
"def gen_orders(self, config):\n upper_bound = config.base + config.bound * MIN_TICK\n lower_bound = config.base - config.bound * MIN_TICK\n mid = config.base + random.randint(-config.bound + 1, config.bound - 1) * MIN_TICK\n direction = 1.0\n\n for i in range(config.samples):\n if i % config.variation == 0:\n if mid >= upper_bound:\n direction = -1.0\n elif mid <= lower_bound:\n direction = 1.0\n elif random.randint(0, 1) == 0:\n direction = -1.0\n else:\n direction = 1.0\n\n mid += direction * random.randint(1, 10) * MIN_TICK\n\n # if mid <= lower_bound:\n # mid = lower_bound\n\n # if mid >= upper_bound:\n # mid = upper_bound\n\n orders = []\n sell_price = mid + MIN_TICK\n buy_price = mid - MIN_TICK\n if direction < 0:\n qty = self.aggregate_bid_qty(sell_price)\n orders += [Order(secid=self.security,\n side=Side.SELL,\n price=round(sell_price, DECIMALS),\n qty=qty + 1)]\n orders += [Order(secid=self.security,\n side=Side.BUY,\n price=round(buy_price, DECIMALS),\n qty=1)]\n else:\n qty = self.aggregate_offer_qty(buy_price)\n orders += [Order(secid=self.security,\n side=Side.BUY,\n price=round(buy_price, DECIMALS),\n qty=qty+1)]\n orders += [Order(secid=self.security,\n side=Side.SELL,\n price=round(sell_price, DECIMALS),\n qty=1)]\n\n orders += OrderBookUtils.pad_book(self, self.depth_bids(), buy_price, Side.BUY)\n orders += OrderBookUtils.pad_book(self, self.depth_offers(), sell_price, Side.SELL)\n\n yield (orders, mid)",
"def rnd_pset(self):\n\t\treturn [rnd() * 10, rnd() * 10, rnd() * 12 * 15, rnd() * 12 * 15]",
"def getOrderedSetupList(whichTables = None):\n # if whichTables is None, then databaseDependenciesForSetup.keys() is used\n return socorro_pri.dependencyOrder(databaseDependenciesForSetup,whichTables)",
"def getPurchasableGenerators(self) -> list:\n pass",
"def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities",
"def random_order(store_name):\n conn = sqlite3.connect('/home/ubuntu/chai_cloud_deploy/db.sqlite3')\n cur = conn.cursor()\n\n # Get random customer email\n cur.execute(\"select max(cust_id) from Customer\")\n max_id = cur.fetchone()[0]\n cur.execute(\n \"select email from Customer where cust_id=?\", \n (random.randint(1,max_id),)\n )\n email = cur.fetchone()[0]\n\n # Generate random order list of 1-5 items\n cur.execute(\n \"select store_id from Store where name=?\",\n (store_name,)\n )\n store_id = cur.fetchone()[0]\n print \"store_id::::\", store_id\n cur.execute(\n \"select prod_id from Inventory where active=1 and store_id=?\",\n (store_id,)\n )\n raw_list = cur.fetchall()\n # edge case: less than 5 active items\n max_length = [5, len(raw_list)][len(raw_list) < 5]\n if not max_length:\n print \"[DEBUG] No active items in: %s. Cannot make orders.\" % \\\n store_name\n return 0\n prod_id_list = random.sample(\n [i[0] for i in raw_list],\n random.randint(1,max_length)\n )\n \n order_str = \"\" # string format for order list\n for prod_id in prod_id_list:\n # Get random option for each product\n cur.execute(\n \"select op_id from Option where prod_id=?\",\n (prod_id,)\n )\n op_id = random.choice([i[0] for i in cur.fetchall()])\n order_str += \"{'prod_id': %d, 'op_id':%d}, \" % (prod_id, op_id)\n order_str = \"[%s]\" % order_str\n \n # Create POST call\n command = \"http -f POST http://localhost/order order_list=\\\"\" + order_str + \\\n \"\\\" store_name=\" + store_name + \" email=\" + email\n #with open('randorder', 'w') as w:\n # w.write(command + \"\\n\")\n os.system(command)\n #os.system(\"chmod +x randorder\")\n #os.system(\"./randorder\")",
"def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)",
"def generateSupplies(self):\n typePart = ['wrench','resistor','bulb','mushroom','coin']\n chosenPart = []\n for i in range(3):\n randomPart = choice(typePart)\n chosenPart.append(randomPart)\n typePart.remove(randomPart)\n for part in chosenPart:\n amount = randint(1,3)\n self._supplies.append(Node(part,amount))",
"def orders(self):\n\t\tORDER_MAP = [\"Do Nothing\", \"Collect All\", \"Drop All\", \"Collect\", \"Drop\", \"Collect All But\", \"Drop All But\", \"Garrison\"]\n\t\treturn [(delay, Star(star_id, galaxy=self.galaxy), ORDER_MAP[order], num_ships)\n\t\t for delay, star_id, order, num_ships in self.data.o]",
"def getOrderList(self):\r\n\t\treturn self.pair.orders",
"def lottery():\n lottery_numbers = sample(range(1, 50), k=6)\n return sorted(lottery_numbers)",
"def get_all_orders():\n response = requests.get(\n settings.SHOPIFY_ORDERS_URL,\n auth=(settings.SHOPIFY_API_KEY, settings.SHOPIFY_PASSWORD),\n )\n return response.json()[\"orders\"]",
"def select_people(torrent, select_seeders=True, num=50):\n selector = torrent.seeders if select_seeders else torrent.leechers\n resp = random.sample(selector, min(num, len(selector)))\n return resp",
"def buy_priority_order(self, decision):\n provinces_left = decision.game.card_counts[c.province]\n if provinces_left <= self.cutoff1:\n return [None, c.estate, c.silver, c.duchy, c.province]\n elif provinces_left <= self.cutoff2:\n return [None, c.silver, c.duchy, c.gold, c.province]\n else:\n return [None, c.silver, c.gold, c.province]",
"def recommend_random(self, num:int)->list:\n results = random.sample(self.items, k = num)\n results = [i.lower() for i in results]\n return results"
]
| [
"0.60050654",
"0.56459045",
"0.5636287",
"0.5581696",
"0.5522628",
"0.54411304",
"0.5377607",
"0.5308835",
"0.52995884",
"0.52846533",
"0.52772397",
"0.52624565",
"0.5234923",
"0.5234923",
"0.5210814",
"0.5184855",
"0.5133689",
"0.5119745",
"0.5116166",
"0.50826824",
"0.50702626",
"0.5063043",
"0.5058474",
"0.50351983",
"0.5034521",
"0.50278145",
"0.5023068",
"0.50139284",
"0.50114256",
"0.4999791"
]
| 0.7248244 | 0 |
Matches each seller to a buyer with no unstable matches. Arguments | def get_matches(buyer_prefs, seller_prefs, buyer_counts):
buyer_prefs = copy.deepcopy(buyer_prefs)
seller_prefs = copy.deepcopy(seller_prefs)
buyer_matches = []
for i in range(len(buyer_prefs)):
buyer_matches.append([])
seller_matches = [None] * len(seller_prefs)
while None in seller_matches:
for buyer, buyer_pref in enumerate(buyer_prefs):
sellers_needed = buyer_counts[buyer] - len(buyer_matches[buyer])
for i in range(sellers_needed):
seller = buyer_pref.pop(0)
prev_buyer = seller_matches[seller]
if prev_buyer is None:
# If seller hasn't been matched yet, do it
seller_matches[seller] = buyer
buyer_matches[buyer].append(seller)
else:
# Seller has been matched... replace if new buyer preferred
seller_pref = seller_prefs[seller]
if seller_pref.index(buyer) < seller_pref.index(prev_buyer):
seller_matches[seller] = buyer
buyer_matches[prev_buyer].remove(seller)
buyer_matches[buyer].append(seller)
return seller_matches | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_stability(buyer_prefs, seller_prefs, seller_matches):\n\n for seller, buyer in enumerate(seller_matches):\n\n seller_pref = seller_prefs[seller]\n better_buyers = seller_pref[0:seller_pref.index(buyer)]\n\n for bb in better_buyers:\n matched_seller = seller_matches.index(bb)\n if buyer_prefs[bb].index(seller) < buyer_prefs[bb].index(matched_seller):\n # A buyer preferred by a seller over their match also prefers\n # the seller over their match\n return False\n\n return True",
"def get_candidates(beer):\n span = tracer.current_span()\n span.set_tags({'beer.name': beer.name, 'beer.hops': beer.hops})\n\n db = DonutStats.instance()\n\n # find our optimal sugar level Donuts above or below this level\n # will certainly not be a good match\n optimal_sugar_level = db.get_optimal_sugar_level(beer.hops)\n return db.get_by_sugar_level(optimal_sugar_level, limit=10)",
"def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])",
"def match(self, aggressor_side):\n\n # print(\"Matching on the following book:\")\n # self.print()\n trades = []\n for bid_i in range(len(self.bid) - 1, -1, -1):\n bid = self.bid[bid_i]\n size_offer = len(self.offer)\n offer_i = 0\n while offer_i < size_offer:\n offer = self.offer[offer_i]\n (crossed, remaining_qty) = OrderBookUtils.cross(bid, offer)\n if crossed:\n trade = Trade(price=offer.price, qty=offer.qty, aggressor=aggressor_side)\n stop = False\n if remaining_qty >= 0:\n offer.qty = remaining_qty\n trade.qty = bid.qty\n del self.bid[bid_i]\n stop = True\n if remaining_qty <= 0:\n bid.qty = abs(remaining_qty)\n del self.offer[offer_i]\n size_offer -= 1\n else:\n offer_i += 1\n trades += [trade]\n if stop:\n break\n else:\n return trades\n return trades",
"def test_list_products_filtered_by_seller_name(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?seller=testuser1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')",
"def test_get_order_items_buyer_info(self):\n pass",
"def test_market_1_1(self):\n\n def check_1_1(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 1]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_1(buyers=[9], sellers=[-4],\n expected_num_of_deals=0, expected_prices=[None, None])\n check_1_1(buyers=[9, 8], sellers=[-4],\n expected_num_of_deals=0, expected_prices=[None, None])\n check_1_1(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=1, expected_prices=[4, -4])\n check_1_1(buyers=[9, 8], sellers=[-4, -3],\n expected_num_of_deals=1, expected_prices=[8, -8])\n\n # ALL POSITIVE VALUES:\n check_1_1(buyers=[4, 3], sellers=[9, 8],\n expected_num_of_deals=1, expected_prices=[3, -3])\n\n # ALL NEGATIVE VALUES:\n check_1_1(buyers=[-4, -3], sellers=[-9, -8],\n expected_num_of_deals=0, expected_prices=[None, None])\n\n # LARGER EXAMPLE\n check_1_1(buyers=[19, 17, 15, 13, 11, 9], sellers=[-12, -10, -8, -6, -4, -2],\n expected_num_of_deals=4, expected_prices=[11, -11])",
"def seller(self, seller):\n\n self._seller = seller",
"def Trading(Seller,Buyer):\n if Seller.has_sold == False:\n if Buyer.like_buy >= Seller.like_sell:\n Seller.has_sold = True\n Buyer.has_bought = True\n Seller.sold_objects += 1\n Buyer.bought_objects += 1\n print('A trade has been made')\n else:\n Buyer.has_bought = False\n Seller.has_sold = False\n print('There was no deal')\n else:\n Buyer.has_bought = False",
"def create_seller(self, order_items_with_sellers):\n seller = {}\n\n for item in order_items_with_sellers:\n\n item_seller = item.pop(\"seller\")\n\n seller['seller_uid'] = item_seller['Order Item Seller Uid']\n seller['seller_unique_code'] = item_seller['Order Item Seller Code']\n seller['seller_name'] = item_seller['Order Item Seller Name']\n seller['seller_company'] = item_seller['Order Item Seller Company']\n seller['seller_email'] = item_seller['Order Item Seller Email']\n\n item['seller'] = copy.deepcopy(seller)\n seller.clear()\n\n return order_items_with_sellers",
"def buyer(self, buyer):\n\n self._buyer = buyer",
"def seller(self):\n if \"seller\" in self._prop_dict:\n return self._prop_dict[\"seller\"]\n else:\n return None",
"def ReflectingSeller(Seller):\n increase_step = 0.01\n\n if Seller.has_sold == True:\n Seller.like_sell *= (1+increase_step)\n elif Seller.like_sell * (1-increase_step) <= Seller.min_value and Seller.has_sold == False:\n Seller.like_sell = Seller.min_value\n else: \n Seller.like_sell *= (1-increase_step)\n Seller.has_sold = False #return to normal state",
"def ReflectingBuyer(Buyer):\n increase_step = 0.01\n\n if Buyer.has_bought == True:\n Buyer.like_buy *= (1-increase_step)\n elif Buyer.like_buy * (1+increase_step) >= Buyer.max_value and Buyer.has_bought == False:\n Buyer.like_buy = Buyer.max_value\n else:\n Buyer.like_buy *= (1+increase_step)\n Buyer.has_bought = False #return to normal state",
"def test_filter_transaction_by_receivers_success(self):\n self._attempt_list_storage.gateway_transaction_exists.return_value = False\n self._map_storage.coin_address_exists.return_value = True\n self._attempt_list_storage.find_by_trigger.return_value = None\n transaction = Transaction(tx='723968', receivers=[self._gateway_managed_receiver])\n res = self._coin_transaction_consumer_impl.filter_transaction(transaction)\n self.assertTrue(res)\n self._map_storage.coin_address_exists.assert_called_once_with(self._gateway_managed_receiver.address)\n self._attempt_list_storage.find_by_trigger.assert_called_once_with(\n AttemptListTrigger(tx=transaction.tx, receiver=0, currency=\"coin\"))",
"def check_results(player_list, dealer):\n dealer_score = dealer.get_score()\n dealer_hand = dealer.get_hand()\n blackjack_winners = []\n winners = []\n losers = []\n pushers = []\n dealer_plays = True\n if dealer_score > 21:\n dealer_plays = False\n for player in player_list:\n player_score = player.get_score()\n player_hand = player.get_hand()\n if dealer_plays and check_blackjack(dealer_score, dealer_hand):\n if check_blackjack(player_score, player_hand):\n pushers.append(player)\n else:\n losers.append(player)\n elif dealer_plays:\n if player_score > dealer_score and not(player.check_bust()):\n if check_blackjack(player_score, player_hand):\n blackjack_winners.append(player)\n else:\n winners.append(player)\n elif player_score == dealer_score:\n pushers.append(player)\n else:\n losers.append(player)\n else:\n if check_blackjack(player_score, player.get_hand()):\n blackjack_winners.append(player)\n break\n elif player_score <= 21:\n winners.append(player)\n else:\n losers.append(player)\n return winners, losers, pushers, blackjack_winners",
"def test_market_1_0_1(self):\n\n def check_1_0_1(buyers: List[float], mediators: List[float], sellers: List[float],\n expected_num_of_deals: int, expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"mediator\", mediators),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 0, 1]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_0_1(buyers=[9, 8], mediators=[-5, -7], sellers=[-4, -3],\n expected_num_of_deals=1, expected_prices=[8, None, -8])",
"def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)",
"def rfilter_by_binding_sites(r_primers, allele1, allele2, nontargets):\n\n # This is the list of the rprimers to return.\n candidates = []\n\n for primer in r_primers:\n # The primer should have at most 1 binding site in each allele.\n if (len(binding_sites((allele1,), primer)) > 1\n or len(binding_sites((allele2,), primer)) > 1):\n continue\n\n # Move on if the primer has other potential binding sites in\n # nontarget sequences.\n\n if binding_sites(nontargets, primer, stop=1):\n continue\n\n if binding_sites(nontargets, primer.rev_comp(), stop=1):\n continue\n\n # If it made it all the way down here, the primer's binding sites\n # are acceptable.\n candidates.append(primer)\n\n return candidates",
"def test_get_order_buyer_info(self):\n pass",
"def available_keywords_of(self, items, donor, exclude_recipients=None,\n exclude_keywords=None):\n pk_map = OrderedDict((item.pk, item) for item in items)\n\n if not pk_map:\n return []\n\n ct = ContentType.objects.get_for_model(items[0])\n\n # Remove objects that link to the donor themselves to avoid\n # short-circiuting\n donor_ct = ContentType.objects.get_for_model(donor)\n reverse_donors = (Link.objects\n .filter(keyword__content_type=donor_ct,\n keyword__object_id=donor.pk,\n content_type=ct)\n .values_list('object_id', flat=True))\n for reverse_pk in reverse_donors:\n pk_map.pop(reverse_pk, None)\n\n # Make sure the donor won't link to itself\n if models_of_same_type(donor, items[0]):\n pk_map.pop(donor.pk, None)\n\n if exclude_recipients and models_of_same_type(exclude_recipients[0], items[0]): # NOQA\n for item in exclude_recipients:\n pk_map.pop(item.pk, None)\n\n # Select keywords that haven't been used up.\n number_field = models.F('weight') - models.F('links__count')\n keywords = (self\n .filter(content_type=ct, object_id__in=pk_map)\n .annotate(models.Count('links'))\n .annotate(number=number_field)\n .filter(number__gt=0)\n .order_by('object_id', '-number'))\n\n if exclude_keywords:\n keywords = keywords.exclude(\n pk__in=[x.pk for x in exclude_keywords])\n\n unique_by_object = {}\n for keyword in keywords:\n # Fill back the object in the keyword to avoid unnecessary queries\n keyword.content_object = pk_map[keyword.object_id]\n unique_by_object.setdefault(keyword.object_id, keyword)\n\n return unique_by_object.values()",
"def test_get_additional_seller_inputs(self):\n pass",
"def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0",
"def test_single_identical_bid(self):\n bids = [Cost(ITEM1, ACTOR1, 1640),\n Cost(ITEM2, ACTOR1, 1540),\n Cost(ITEM3, ACTOR1, 1140),\n Cost(ITEM4, ACTOR1, 1640),\n Cost(ITEM5, ACTOR1, 1740),\n Cost(ITEM1, ACTOR2, 1540),\n Cost(ITEM2, ACTOR2, 1240),\n Cost(ITEM3, ACTOR2, 1340),\n Cost(ITEM4, ACTOR2, 1640),\n Cost(ITEM5, ACTOR2, 1940),\n Cost(ITEM1, ACTOR3, 1740),\n Cost(ITEM2, ACTOR3, 1540),\n Cost(ITEM3, ACTOR3, 1040),\n Cost(ITEM4, ACTOR3, 1640),\n Cost(ITEM5, ACTOR3, 1740),\n Cost(ITEM1, ACTOR4, 1440),\n Cost(ITEM2, ACTOR4, 1640),\n Cost(ITEM3, ACTOR4, 1240),\n Cost(ITEM4, ACTOR4, 1640),\n Cost(ITEM5, ACTOR4, 1740),\n Cost(ITEM1, ACTOR5, 1640),\n Cost(ITEM2, ACTOR5, 1640),\n Cost(ITEM3, ACTOR5, 1140),\n Cost(ITEM4, ACTOR5, 1640),\n Cost(ITEM5, ACTOR5, 1640)]\n result = self.splitter.split(ITEMS[:5], ACTORS[:5], bids)\n expected = [(ITEM1, ACTOR3, None),\n (ITEM2, ACTOR5, None),\n (ITEM3, ACTOR4, None),\n (ITEM4, ACTOR1, None),\n (ITEM5, ACTOR2, None)]\n item_assignments_present(self, result, expected)",
"def test_obvious_auction(self):\n bids = [Cost(ITEM1, ACTOR1, 1000),\n Cost(ITEM2, ACTOR1, 1000),\n Cost(ITEM3, ACTOR1, 1000),\n Cost(ITEM4, ACTOR1, 5000),\n\n Cost(ITEM1, ACTOR2, 1000),\n Cost(ITEM2, ACTOR2, 1000),\n Cost(ITEM3, ACTOR2, 5000),\n Cost(ITEM4, ACTOR2, 1000),\n\n Cost(ITEM1, ACTOR3, 1000),\n Cost(ITEM2, ACTOR3, 5000),\n Cost(ITEM3, ACTOR3, 1000),\n Cost(ITEM4, ACTOR3, 1000),\n\n Cost(ITEM1, ACTOR4, 5000),\n Cost(ITEM2, ACTOR4, 1000),\n Cost(ITEM3, ACTOR4, 1000),\n Cost(ITEM4, ACTOR4, 1000)]\n\n result = self.splitter.split(ITEMS[:4], ACTORS[:4], bids)\n expected = [(ITEM1, ACTOR4, None),\n (ITEM2, ACTOR3, None),\n (ITEM3, ACTOR2, None),\n (ITEM4, ACTOR1, None)]\n item_assignments_present(self, result, expected)",
"def test_get_small_and_light_eligibility_by_seller_sku(self):\n pass",
"def generate_matched_orders(self, new_action, matched_queries):\n if self.sell_list and self.buy_list:\n break_flag = False\n if new_action == \"buy\":\n # for a new buy order, multipleq ueries from sell list are\n # matched as long as formula holds good\n max_buy_order = self.buy_list[-1]\n completed_sell_orders = 0\n for sell_order in self.sell_list:\n buy_qty = max_buy_order.order_qty\n if sell_order.stock_value <= max_buy_order.stock_value:\n sell_qty = sell_order.order_qty\n if buy_qty > sell_qty:\n completed_sell_orders += 1\n max_buy_order.order_qty = buy_qty - sell_qty\n matched_qty = sell_qty\n elif sell_qty == buy_qty:\n self.buy_list.pop()\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n else:\n self.buy_list.pop()\n sell_order.order_qty = sell_qty - buy_qty\n matched_qty = buy_qty\n break_flag = True\n matched_queries.append(\n \"%s %s %s %s\" % (sell_order.order_id,\n matched_qty,\n sell_order.stock_value,\n max_buy_order.order_id))\n else:\n break_flag = True\n if break_flag:\n break\n if completed_sell_orders:\n self.sell_list = self.sell_list[completed_sell_orders:]\n else:\n min_sell_order = self.sell_list[0]\n completed_buy_orders = 0\n # for a new sell order, multiple queries from buy list are\n # matched as long as formula holds good\n for index in range(len(self.buy_list)-1, -1, -1):\n break_flag = False\n buy_order = self.buy_list[index]\n sell_qty = min_sell_order.order_qty\n if min_sell_order.stock_value <= buy_order.stock_value:\n buy_qty = buy_order.order_qty\n if buy_qty > sell_qty:\n buy_order.order_qty = buy_qty - sell_qty\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n elif buy_qty == sell_qty:\n self.buy_list.pop()\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n else:\n completed_buy_orders -= 1\n min_sell_order.order_qty = sell_qty - buy_qty\n matched_qty = buy_qty\n matched_queries.append(\n \"%s %s %s %s\" % (min_sell_order.order_id,\n matched_qty,\n min_sell_order.stock_value,\n buy_order.order_id))\n else:\n break_flag = True\n if break_flag:\n break\n if completed_buy_orders:\n self.buy_list = self.buy_list[:completed_buy_orders]",
"def check_accepted_bid(self, username):\n good_bids = AuctionBids.objects.filter(good=self, user__username=username, accepted_by_seller=True)\n if good_bids.exists():\n return True\n return False",
"def evaluate_winners_and_losers(future_price):\n\n winners = []\n losers = []\n\n target_price = future_price.target_price\n try:\n actual_price_obj = Bitcoin_Price.objects.get(time=future_price.time_to_match_price)\n except:\n return # there is no bitcoin price for this time so this future_price cannot be evaluated\n actual_price = actual_price_obj.price\n price_is_less_than_target = actual_price < target_price\n price_is_equal_to_target = target_price == actual_price\n\n amounts = Received_Amount.objects.filter(\n amount__gt=0,\n prediction__future_price=future_price,\n time__lt=future_price.time_window_closes\n ).order_by('time', 'id')\n\n # Split into winners and losers\n for received_amount in amounts:\n guessed_correctly = (received_amount.prediction.price_will_be_less_than_target and price_is_less_than_target) or \\\n (not received_amount.prediction.price_will_be_less_than_target and not price_is_less_than_target)\n if guessed_correctly:\n # This is a winner\n returned_amount = {\n \"amount\": received_amount.amount,\n \"from_received_amount\": received_amount,\n \"to_prediction\": received_amount.prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()\n winners.append({\n \"received_amount\": received_amount,\n \"from_losers\": 0\n })\n elif price_is_equal_to_target:\n # Eligible for refund but not for winnings\n # TODO: If the received amount is not confirmed, it will still be\n # returned\n returned_amount = {\n \"amount\": received_amount.amount,\n \"from_received_amount\": received_amount,\n \"to_prediction\": received_amount.prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()\n else:\n # Record this so in the next step this can be allocated to winners\n losers.append({\n \"received_amount\": received_amount,\n \"to_winners\": 0,\n \"commission\": 0\n })\n\n for loser in losers:\n # Pay the winners\n for winner in winners:\n loser_funds_remaining = loser[\"received_amount\"].amount - loser[\"to_winners\"] - loser[\"commission\"]\n loser_is_broke = loser_funds_remaining == 0\n if loser_is_broke:\n break\n winner_received_from_losers = winner[\"from_losers\"]\n winner_total_owed_from_losers = winner[\"received_amount\"].amount * (1-COMMISSION)\n amount_remaining_to_pay_winner = winner_total_owed_from_losers - winner_received_from_losers\n if amount_remaining_to_pay_winner > 0:\n amount_to_pay_winner = min(amount_remaining_to_pay_winner, loser_funds_remaining * (1-COMMISSION))\n commission = amount_to_pay_winner / (1-COMMISSION) * COMMISSION\n loser[\"to_winners\"] = loser[\"to_winners\"] + amount_to_pay_winner\n loser[\"commission\"] = loser[\"commission\"] + commission\n winner[\"from_losers\"] = winner[\"from_losers\"] + amount_to_pay_winner\n returned_amount = {\n \"amount\": amount_to_pay_winner,\n \"from_received_amount\": loser[\"received_amount\"],\n \"to_prediction\": winner[\"received_amount\"].prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()\n\n commission_amount = {\n \"returned_amount\": returned_amount_obj,\n \"amount\": commission\n }\n commission_amount_obj = Commission_Amount(**commission_amount)\n commission_amount_obj.save()\n # Return any amount remaining after all the winners are paid\n loser_funds_remaining = loser[\"received_amount\"].amount - loser[\"to_winners\"] - loser[\"commission\"]\n if loser_funds_remaining > 0:\n returned_amount = {\n \"amount\": loser_funds_remaining,\n \"from_received_amount\": 
loser[\"received_amount\"],\n \"to_prediction\": loser[\"received_amount\"].prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()",
"def buy_beers(self):\n if self.location == \"Shop\":\n response = input(\"How many beers do you want to buy?\")\n while response not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",'all of them']:\n response = input(\"Please specify the number of beers\")\n if response == 'all of them':\n # little cheat\n self.beers += 10\n else:\n money = self.money - int(response)\n if money >= 0:\n self.beers += int(response)\n self.money = money\n else:\n print(\"You idiot don't have enough money for that many beers!\")\n else:\n print('Your are not at the Shop and the monkey might pee on you but you can\\' find beer here.')"
]
| [
"0.61104155",
"0.55909395",
"0.55698174",
"0.54890186",
"0.54622793",
"0.5429476",
"0.53873",
"0.52969694",
"0.5285045",
"0.5226048",
"0.51746196",
"0.51704836",
"0.5148838",
"0.5141742",
"0.5132229",
"0.51314723",
"0.5095579",
"0.5074115",
"0.50718343",
"0.5070093",
"0.50691104",
"0.50662524",
"0.5011699",
"0.4988893",
"0.49754003",
"0.49688166",
"0.4965097",
"0.49553716",
"0.4948838",
"0.49102306"
]
| 0.67571694 | 0 |
Check the stability of a list of matches. Stability is defined as a matching where no buyer prefers a seller they are not matched to who also prefers said buyer. Arguments | def check_stability(buyer_prefs, seller_prefs, seller_matches):
for seller, buyer in enumerate(seller_matches):
seller_pref = seller_prefs[seller]
better_buyers = seller_pref[0:seller_pref.index(buyer)]
for bb in better_buyers:
matched_seller = seller_matches.index(bb)
if buyer_prefs[bb].index(seller) < buyer_prefs[bb].index(matched_seller):
# A buyer preferred by a seller over their match also prefers
# the seller over their match
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_stability():\n\n residents = [Player(\"A\", [1, 2]), Player(\"B\", [2]), Player(\"C\", [2, 1])]\n hospitals = [Player(1, [\"C\", \"A\"], 3), Player(2, [\"A\", \"B\", \"C\"], 3)]\n match = HospitalResident(residents, hospitals)\n\n matching = match.solve()\n assert match.check_stability()\n\n (P_A, P_B, P_C), (P_1, P_2) = match.suitors, match.reviewers\n matching[P_1] = [P_C]\n matching[P_2] = [P_A, P_B]\n\n assert not match.check_stability()",
"def check_stability(self):\n\n blocking_pairs = []\n for resident in self.residents:\n for hospital in self.hospitals:\n if (\n _check_mutual_preference(resident, hospital)\n and _check_resident_unhappy(resident, hospital)\n and _check_hospital_unhappy(resident, hospital)\n ):\n blocking_pairs.append((resident, hospital))\n\n self.blocking_pairs = blocking_pairs\n return not any(blocking_pairs)",
"def test_check_stability():\n\n students = [Student(\"A\"), Student(\"B\"), Student(\"C\")]\n projects = [Project(\"P\", 2), Project(\"Q\", 2)]\n supervisors = [Supervisor(\"X\", 2), Supervisor(\"Y\", 2)]\n\n (a, b, c), (p, q), (x, y) = students, projects, supervisors\n\n p.set_supervisor(x)\n q.set_supervisor(y)\n\n a.set_prefs([p, q])\n b.set_prefs([q])\n c.set_prefs([q, p])\n\n x.set_prefs([c, a])\n y.set_prefs([a, b, c])\n\n game = StudentAllocation(students, projects, supervisors)\n\n matching = game.solve()\n assert game.check_stability()\n\n (a, b, c), (p, q) = game.students, game.projects\n\n matching[p] = [c]\n matching[q] = [a, b]\n\n assert not game.check_stability()",
"def test_check_stability():\n\n residents = [Resident(\"A\"), Resident(\"B\"), Resident(\"C\")]\n hospitals = [Hospital(\"X\", 2), Hospital(\"Y\", 2)]\n\n (a, b, c), (x, y) = residents, hospitals\n\n a.set_prefs([x, y])\n b.set_prefs([y])\n c.set_prefs([y, x])\n\n x.set_prefs([c, a])\n y.set_prefs([a, b, c])\n\n game = HospitalResident(residents, hospitals)\n\n matching = game.solve()\n assert game.check_stability()\n\n (a, b, c), (x, y) = game.residents, game.hospitals\n matching[x] = [c]\n matching[y] = [a, b]\n\n assert not game.check_stability()",
"def check_stability(self):",
"def test_stability(self):\n dir_urel = np.array([1, 0, 0])\n dir_chord = np.array([1, 0, 0])\n\n alpha = 4 * np.pi / 180\n\n # rotate the freestream\n dir_urel = algebra.rotation3d_y(alpha).T.dot(dir_urel)\n\n # Test free stream rotation\n lab = ['x', 'z']\n with self.subTest(msg='Freestream test'):\n assert dir_urel[2] == np.sin(alpha), 'z component of freestream not properly rotated'\n with self.subTest(msg='Freestream test'):\n assert dir_urel[0] == np.cos(alpha), 'x component of freestream not properly rotated'\n\n # Stability axes\n c_bs = local_stability_axes(dir_urel, dir_chord)\n\n # Checking X_s\n for ax in [0, 2]:\n with self.subTest(msg='X_s', ax=ax):\n assert c_bs.dot(np.eye(3)[0])[ax] > 0, f'{ax}_b component of X_s not correct'\n\n # Checking Z_s\n ax = 0\n with self.subTest(msg='Z_s', ax=ax):\n assert c_bs.dot(np.eye(3)[2])[ax] < 0, f'{ax}_b component of Z_s not correct'\n\n ax = 2\n with self.subTest(msg='Z_s', ax=ax):\n assert c_bs.dot(np.eye(3)[2])[ax] > 0, f'{ax}_b component of Z_s not correct'",
"def stealability(self):\n stealability_score = float(self.price) / float(self.weight)\n print (stealability_score)\n\n if stealability_score < 0.5:\n return 'Not so stealable...'\n elif stealability_score >= 0.5 and stealability_score < 1.0:\n return 'Kinda stealable.'\n else:\n return 'Very stealable!'",
"def _validateSpies(self, config, team, sabotaged):\r\n spies = [s for s in team if s in self.getSpies(config)]\r\n \"\"\"If there are more spies in our config than the number of sabotages made \r\n then return True, because this config is compatible with the sabotages made. \r\n Otherwise it is not compatible, so return False.\"\"\"\r\n return len(spies) >= sabotaged",
"def stability(self, board):\n # Stable stones\n computer_board = self.get_stable_stones(board, self.computer_num)\n computer_stable = sum(sum(computer_board == 100))\n opponent_board = self.get_stable_stones(board, self.opponent_num)\n opponent_stable = sum(sum(opponent_board == 100))\n\n # Unstable stones are the ones which can be flanked in the next move\n computer_board = self.get_unstable_stones(board, self.opponent_color, self.computer_num,\n self.opponent_num, computer_board)\n computer_unstable = sum(sum(computer_board == 200))\n opponent_board = self.get_unstable_stones(board, self.computer_color, self.opponent_num,\n self.computer_num, opponent_board)\n opponent_unstable = sum(sum(opponent_board == 200))\n # the reset is semi stable with weight 0, so it is not important\n computer_stability = computer_stable - computer_unstable\n opponent_stability = opponent_stable - opponent_unstable\n\n if computer_stable + opponent_stable != 0:\n return 100 * (computer_stable - opponent_stable) / (computer_stable + opponent_stable)\n else:\n return 0",
"async def test_one_warning_with_multiple_severities(self):\n self.vulnerabilities_json[\"vulnerabilities\"].extend(\n [\n {\n \"id\": \"SNYK-JS-AJV-584908\",\n \"severity\": \"medium\",\n \"from\": [*self.direct_dependency_path, \"[email protected]\", \"[email protected]\"],\n },\n {\n \"id\": \"SNYK-JS-AJV-584908\",\n \"severity\": \"low\",\n \"title\": \"Prototype Pollution\",\n \"from\": [\n *self.direct_dependency_path,\n \"[email protected]\",\n \"[email protected]\",\n \"[email protected]\",\n ],\n },\n ],\n )\n expected_entities = [\n {\n \"key\": self.direct_dependency_key,\n \"dependency\": self.direct_dependency,\n \"nr_vulnerabilities\": 3,\n \"example_vulnerability\": \"SNYK-JS-AJV-584908\",\n \"url\": \"https://snyk.io/vuln/SNYK-JS-AJV-584908\",\n \"example_path\": \"package.json@* ➜ [email protected] ➜ [email protected] ➜ \"\n \"[email protected] ➜ [email protected]\",\n \"highest_severity\": \"medium\",\n },\n ]\n response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)\n self.assert_measurement(response, value=\"1\", entities=expected_entities)",
"def check_results(player_list, dealer):\n dealer_score = dealer.get_score()\n dealer_hand = dealer.get_hand()\n blackjack_winners = []\n winners = []\n losers = []\n pushers = []\n dealer_plays = True\n if dealer_score > 21:\n dealer_plays = False\n for player in player_list:\n player_score = player.get_score()\n player_hand = player.get_hand()\n if dealer_plays and check_blackjack(dealer_score, dealer_hand):\n if check_blackjack(player_score, player_hand):\n pushers.append(player)\n else:\n losers.append(player)\n elif dealer_plays:\n if player_score > dealer_score and not(player.check_bust()):\n if check_blackjack(player_score, player_hand):\n blackjack_winners.append(player)\n else:\n winners.append(player)\n elif player_score == dealer_score:\n pushers.append(player)\n else:\n losers.append(player)\n else:\n if check_blackjack(player_score, player.get_hand()):\n blackjack_winners.append(player)\n break\n elif player_score <= 21:\n winners.append(player)\n else:\n losers.append(player)\n return winners, losers, pushers, blackjack_winners",
"def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])",
"def evaluate_sce_list(sce_list, strand_state_list, breaks):\n best_mismatch_distance = None\n best_ground_state = None\n best_is_valid = None\n best_sce_list = None\n for w_ground_state, c_ground_state in [(2, 0), (1, 1), (0, 2)]:\n w_state, c_state = w_ground_state, c_ground_state\n mismatch_distance = 0\n valid = True\n for i in range(len(breaks) - 1):\n start = breaks[i]\n end = breaks[i + 1]\n w_actual_state, c_actual_state = strand_state_list[i]\n for sce_pos, w_state_diff, c_state_diff in sce_list:\n if sce_pos == start:\n w_state += w_state_diff\n c_state += c_state_diff\n # Test whether this sequence of SCEs has led to an impossible ground state\n # (at least under the assumption that the cell is diploid).\n if (w_state < 0) or (c_state < 0):\n valid = False\n if (w_actual_state, c_actual_state) != (w_state, c_state):\n mismatch_distance += end - start\n if (best_mismatch_distance is None) or ((valid, -mismatch_distance) > (best_is_valid, -best_mismatch_distance)):\n best_is_valid = valid\n best_mismatch_distance = mismatch_distance\n best_ground_state = (w_ground_state, c_ground_state)\n best_sce_list = copy.copy(sce_list)\n return best_is_valid, best_ground_state, best_mismatch_distance",
"def test_stochatreat_only_misfits(probs):\n N = 1_000\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"block\": np.arange(N),\n }\n )\n treats = stochatreat(\n data=df,\n block_cols=[\"block\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby([\"treat\"])[\"id\"].count() / treats.shape[0]\n\n np.testing.assert_almost_equal(treatment_shares, np.array(probs), decimal=3)",
"def compute_stability_scores(self):\n self.mutations, self.scores, self.matrix = stability(\n self.seq,\n alphabet='ACGU',\n fold_vectorize=self.fold_vectorize)",
"def effectiveness_of_a_pair(list_of_historic_matches):\n was_late_list = []\n was_ht_late_list = []\n was_ft_late_list = []\n late_matches = 0\n ht_late_matches = 0\n ft_late_matches = 0\n if len(list_of_historic_matches) >= 5:\n for match in list_of_historic_matches:\n all_late = check_if_late_goal(match.match_goals_minutes)\n was_late_list.append(all_late[0])\n was_ht_late_list.append(all_late[1])\n was_ft_late_list.append(all_late[2])\n total_matches = len(list_of_historic_matches)\n for j in range(len(was_late_list)):\n if was_late_list[j]:\n late_matches += 1\n effectiveness = f\"{late_matches}/{total_matches}\"\n for j in range(len(was_ht_late_list)):\n if was_ht_late_list[j]:\n ht_late_matches += 1\n ht_effectiveness = f\"{ht_late_matches}/{late_matches}\"\n for j in range(len(was_ft_late_list)):\n if was_ft_late_list[j]:\n ft_late_matches += 1\n ft_effectiveness = f\"{ft_late_matches}/{late_matches}\"\n else:\n return [\"0/0\", \"0/0\", \"0/0\"]\n return [effectiveness, ht_effectiveness, ft_effectiveness]",
"def test_market_1_1(self):\n\n def check_1_1(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 1]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_1(buyers=[9], sellers=[-4],\n expected_num_of_deals=0, expected_prices=[None, None])\n check_1_1(buyers=[9, 8], sellers=[-4],\n expected_num_of_deals=0, expected_prices=[None, None])\n check_1_1(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=1, expected_prices=[4, -4])\n check_1_1(buyers=[9, 8], sellers=[-4, -3],\n expected_num_of_deals=1, expected_prices=[8, -8])\n\n # ALL POSITIVE VALUES:\n check_1_1(buyers=[4, 3], sellers=[9, 8],\n expected_num_of_deals=1, expected_prices=[3, -3])\n\n # ALL NEGATIVE VALUES:\n check_1_1(buyers=[-4, -3], sellers=[-9, -8],\n expected_num_of_deals=0, expected_prices=[None, None])\n\n # LARGER EXAMPLE\n check_1_1(buyers=[19, 17, 15, 13, 11, 9], sellers=[-12, -10, -8, -6, -4, -2],\n expected_num_of_deals=4, expected_prices=[11, -11])",
"async def test_random_redgame(players, strats, _):\n game = gamegen.samplegame(players, strats)\n rest = game.random_restriction()\n sched = gamesched.samplegamesched(game)\n sgame = schedgame.schedgame(sched)\n\n devgame1 = await sgame.get_deviation_game(rest)\n prof = devgame1.profiles()[\n np.all((devgame1.profiles() == 0) | ~np.isnan(devgame1.payoffs()), 1).nonzero()[\n 0\n ][0]\n ]\n assert prof in devgame1\n assert (\n devgame1.num_complete_profiles\n <= devgame1.num_profiles\n <= devgame1.num_all_profiles\n )\n\n devgame2 = await sgame.get_deviation_game(rest)\n assert hash(devgame1) == hash(devgame2)\n assert devgame1 == devgame2\n assert devgame1 + devgame2 == devgame2 + devgame1\n assert np.allclose(devgame1.get_payoffs(prof), devgame2.get_payoffs(prof))\n\n rrest = devgame1.random_restriction()\n assert devgame1.restrict(rrest) == devgame2.restrict(rrest)",
"def multiple_catalog_match(self, filter, cattype, matchlist):\n self.logger.warning(\"WARNING: multiple {} catalogs matched! Using the first.\".format(cattype))\n self.logger.warning(\"Observation filter: {}\".format(filter))\n self.logger.warning(\"Matched point source catalogs: {}\".format(matchlist))",
"def hit_or_stand(self, final_sums):\n if final_sums is None:\n final_sums = self.sum_cards()\n if final_sums[0] > 21: # should not happen\n print(\"BUST!\")\n return \"S\"\n highest_smaller_21 = final_sums[0]\n index = 1\n while index < len(final_sums) and final_sums[index] <= 21 :\n highest_smaller_21 = final_sums[index]\n index += 1\n index -= 1 # index of chosen sum in final_sums\n if highest_smaller_21 < 17 or (highest_smaller_21 == 17 and index >= 1): # smaller than 17 or soft 17\n return \"H\"\n else:\n return \"S\"",
"def inventory_report(prod_list):\n prod_list = list(set(prod_list))\n x = 0\n price = 0\n weight = 0\n flammability = 0\n stealability = 0\n for item in prod_list:\n x += 1\n price += item.price\n weight += item.weight\n flammability += item.flammability\n if stealability != 'Not so stealable...':\n stealability += 1\n\n avg_price = price / x\n avg_weight = weight / x\n avg_flammability = flammability / x\n print(f'There are {x} unique products in this list. The average price is {avg_price}, '\n f'average weight is {avg_weight},'\n f'and the average flammability is {avg_flammability}.')\n if stealability >= len(prod_list) / 2:\n print('Many of these items are highly stealable!')\n return avg_price, avg_weight, avg_flammability",
"def check_reward_volume_set(data, **_):\n metric = data[\"rewardVolume\"]\n passed = 0 < len(set(metric)) <= 2 and 0. in metric\n return metric, passed",
"def test_filtering_plans_by_metal_level_matches_only_silver(self):\n\n silver_plan_inputs = [\n {\n 'plan_id': '05276NA2900195',\n 'state': 'MI',\n 'metal_level': 'Silver',\n 'rate': '283.39',\n 'rate_area': '1'\n },\n {\n 'plan_id': '05276NA2900195',\n 'state': 'MI',\n 'metal_level': 'silver',\n 'rate': '283.39',\n 'rate_area': '1'\n }\n ]\n\n non_silver_plan_inputs = [\n {\n 'plan_id': '68493CI1477769',\n 'state': 'SC',\n 'metal_level': 'Bronze',\n 'rate': '214.57',\n 'rate_area': '21'\n },\n {\n 'plan_id': '09812TP4606635',\n 'state': 'NV',\n 'metal_level': 'Platinum',\n 'rate': '331.363599',\n 'rate_area': '1'\n },\n {\n 'plan_id': '11698OD6718414',\n 'state': 'SC',\n 'metal_level': 'Gold',\n 'rate': '269.54',\n 'rate_area': '8'\n },\n {\n 'plan_id': '70547DK6596753',\n 'state': 'FL',\n 'metal_level': 'Catastrophic',\n 'rate': '241.1',\n 'rate_area': '57'\n }\n ]\n\n for silver_plan in silver_plan_inputs:\n result = filter_plan_metal_level(silver_plan, DESIRED_METAL_LEVEL)\n self.assertEqual(True, result)\n\n for non_silver_plan in non_silver_plan_inputs:\n result = filter_plan_metal_level(\n non_silver_plan,\n DESIRED_METAL_LEVEL\n )\n self.assertEqual(False, result)",
"def assess_risk(black_marbles: list, white_marbles: list):\n danger = Evaluator.danger_positions\n black_risk = 0\n white_risk = 0\n for black_marble in black_marbles:\n letter, num = Board.convert_to_nums(black_marble)\n left = Board.convert_to_string(letter, num - 1)\n right = Board.convert_to_string(letter, num + 1)\n upL = Board.convert_to_string(letter + 1, num)\n upR = Board.convert_to_string(letter + 1, num + 1)\n downL = Board.convert_to_string(letter - 1, num - 1)\n downR = Board.convert_to_string(letter - 1, num)\n test_spots = [left, right, upL, upR, downL, downR]\n if black_marble in danger:\n black_risk += 2\n for spot in test_spots:\n if spot in white_marbles:\n black_risk += 5\n\n for white_marble in white_marbles:\n letter, num = Board.convert_to_nums(white_marble)\n left = Board.convert_to_string(letter, num - 1)\n right = Board.convert_to_string(letter, num + 1)\n upL = Board.convert_to_string(letter + 1, num)\n upR = Board.convert_to_string(letter + 1, num + 1)\n downL = Board.convert_to_string(letter - 1, num - 1)\n downR = Board.convert_to_string(letter - 1, num)\n test_spots = [left, right, upL, upR, downL, downR]\n if white_marble in danger:\n white_risk += 2\n for spot in test_spots:\n if spot in black_marbles:\n white_risk += 5\n return black_risk, white_risk",
"async def test_random_complete_dev(players, strats, _):\n game = gamegen.samplegame(players, strats)\n sched = gamesched.samplegamesched(game)\n sgame = schedgame.schedgame(sched)\n mix = sgame.random_sparse_mixture()\n supp = mix > 0\n dev_game = await sgame.get_deviation_game(supp)\n devs, jac = dev_game.deviation_payoffs(mix, jacobian=True)\n assert not np.isnan(devs).any()\n assert not np.isnan(jac[supp]).any()\n assert np.isnan(jac[~supp]).all()\n for role in range(sgame.num_roles):\n mask = role == sgame.role_indices\n dev_game = await sgame.get_deviation_game(supp, role_index=role)\n rdevs = dev_game.deviation_payoffs(mix)\n assert np.allclose(rdevs[supp], devs[supp])\n assert np.allclose(rdevs[mask], devs[mask])\n assert supp[~mask].all() or np.isnan(rdevs[~mask]).any()",
"def get_best_match(self, list):\n raise NotImplementedError",
"def test_check_ess_settings(self):\n ess_settings1 = {'gaussian': [self.servers[0]], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': [self.servers[0]]}\n ess_settings2 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings3 = {'gaussian': self.servers[0], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': self.servers[0]}\n ess_settings4 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings5 = {'gaussian': 'local', 'molpro': self.servers[1], 'qchem': self.servers[0]}\n\n ess_settings1 = check_ess_settings(ess_settings1)\n ess_settings2 = check_ess_settings(ess_settings2)\n ess_settings3 = check_ess_settings(ess_settings3)\n ess_settings4 = check_ess_settings(ess_settings4)\n ess_settings5 = check_ess_settings(ess_settings5)\n\n ess_list = [ess_settings1, ess_settings2, ess_settings3, ess_settings4, ess_settings5]\n\n for ess in ess_list:\n for soft, server_list in ess.items():\n self.assertTrue(soft in ['gaussian', 'molpro', 'qchem'])\n self.assertIsInstance(server_list, list)\n\n with self.assertRaises(SettingsError):\n ess_settings6 = {'nosoft': ['server1']}\n check_ess_settings(ess_settings6)\n with self.assertRaises(SettingsError):\n ess_settings7 = {'gaussian': ['noserver']}\n check_ess_settings(ess_settings7)",
"def IsValidInputType(self, list_of_matches):\n for entry in list_of_matches:\n if not entry:\n return False\n\n return True",
"def validate(self, s):\r\n\r\n nodes = [i for i, a in enumerate(s) if\r\n a != cf.SLEEP and self.network.get_node(i).energy >= (cf.COMMUNICATION_ENERGY + cf.SENSING_ENERGY) ] # get list of the active nodes\r\n\r\n for t in range(cf.NUM_TARGETS):\r\n no_sense_prob = 0\r\n for n in nodes:\r\n no_sense_prob += self.sensing_log_matrix[t][n]\r\n if no_sense_prob >= self.sensing_log_threshold:\r\n break\r\n\r\n if no_sense_prob < self.sensing_log_threshold:\r\n return False\r\n\r\n return True",
"def how_eligible(essay):\n eligibility_requirements = ['?', '\"', ',', '!']\n return len(set(filter((lambda x: x in eligibility_requirements), essay)))"
]
| [
"0.6328185",
"0.5939729",
"0.59161633",
"0.5770188",
"0.5550039",
"0.54295826",
"0.53119993",
"0.5306181",
"0.53002584",
"0.5294735",
"0.52435446",
"0.5045115",
"0.500675",
"0.49871248",
"0.49855766",
"0.4944164",
"0.494282",
"0.4937434",
"0.49179742",
"0.48819202",
"0.4844873",
"0.48421818",
"0.48402837",
"0.48385432",
"0.48325118",
"0.4828595",
"0.4817661",
"0.48173144",
"0.4810317",
"0.48095447"
]
| 0.70885044 | 0 |
Return pytz timezone given offset in minutes. If none found, then returns the EST as the default timezone. Javascript sends offset in minutes. 120 = GMT+0200, we're storing that in request.COOKIES.get('timezone_offset') minutes = request.COOKIES.get('timezone_offset') or 0 | def get_timezone_from_offset(minutes, default='US/Eastern'):
if minutes < 0:
minutes = -minutes
minus = True
else:
minus = False
min = minutes % 60
hours = (minutes - min) / 60
offset = "%s%02d%02d" % (minus and "+" or "-", hours, min)
# This is only method I found to determine timezone by offset.
# HACK: doing reversed(), to match offset 240, to EST instead of AST
for tz in reversed(pytz.common_timezones):
now = datetime.now(pytz.timezone(tz))
if now.strftime("%z") == offset:
return tz
return pytz.timezone(default) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_timezone_offset():\n timezone = get_localzone()\n offset_minutes = timezone.utcoffset(datetime.datetime.now()).total_seconds() // SECONDS_IN_MINUTE\n return parse_int(offset_minutes)",
"def get_timezone(time_zone=''):\n return pytz.timezone(time_zone)",
"def get_timezone(timezone: str=None) -> datetime.tzinfo:\n if timezone[0] in ('+', '-'):\n timezone = 'UTC' + timezone\n result = tz.gettz(timezone)\n if result is not None and not hasattr(result, '_timezone_'):\n setattr(result, '_timezone_', timezone[3:] if timezone.startswith('UTC') and len(timezone) > 3 else timezone)\n return result",
"def get_timezone():\n localTimezone = request.args.get('timezone')\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n userId = request.args.get('login_as')\n localTimezone = users[int(userId)]['timezone']\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n return app.config['BABEL_DEFAULT_TIMEZONE']",
"def timezone_offset():\r\n\r\n return _random.choice(\r\n [\r\n ['GMT+' + str(_random.randint(1, 12))],\r\n ['GMT'],\r\n ['GMT' + str(_random.randint(-12, -1))]\r\n ]\r\n )[0]",
"def get_timzone_offset(self, timezone):\n raise NotImplementedError",
"def _get_tz():\n return 'UTC'",
"def _parse_timezone(val):\n if not val:\n return\n\n if val == \"Z\" or val == \"+00:00\":\n return pytz.utc\n\n negative = val.startswith(\"-\")\n minutes = int(val[-2:])\n minutes += int(val[1:3]) * 60\n\n if negative:\n minutes = 0 - minutes\n return pytz.FixedOffset(minutes)",
"def get_timezone():\n return dates.get_timezone(_get_tz())",
"def timezone():\n\n return time.timezone",
"def _local_time_offset():\n if time.localtime().tm_isdst and time.daylight:\n return -time.altzone\n else:\n return -time.timezone",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def get_fixed_timezone(offset):\n if isinstance(offset, timedelta):\n offset = offset.total_seconds() // 60\n sign = \"-\" if offset < 0 else \"+\"\n hhmm = \"%02d%02d\" % divmod(abs(offset), 60)\n name = sign + hhmm\n return timezone(timedelta(minutes=offset), name)",
"def _tzoffset(tz, t):\n try:\n return _TZINFO[tz].info(t)[0]\n except Exception:\n if numericTimeZoneMatch(tz) is not None:\n return int(tz[0:3]) * 3600 + int(tz[0] + tz[3:5]) * 60\n else:\n return 0 # ??",
"def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()",
"def _get_tzinfo(zonelabel):\n return moment.tzinfo(zonelabel) if zonelabel else _get_global_tz()",
"def get_fixed_timezone(offset):\n\n if isinstance(offset, timedelta):\n offset = offset.total_seconds() // 60\n\n sign = \"-\" if offset < 0 else \"+\"\n hhmm = \"%02d%02d\" % divmod(abs(offset), 60)\n name = sign + hhmm\n\n return timezone(timedelta(minutes=offset), name)",
"def tz(self, tz):\n return timezones.maybe_get_tz('dateutil/' + tz)",
"def getTimezone(profile):\n try:\n return timezone(profile['timezone'])\n except:\n return None",
"def timezone():\n \n pass",
"def timezone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"timezone\")",
"def get_tz_offset(self) -> float:\n return self.AD.tz.utcoffset(self.datetime()).total_seconds() / 60",
"def _get_local_tz(module, timezone='UTC'):\n if platform.system() == 'Linux':\n timedatectl = get_bin_path('timedatectl')\n if timedatectl is not None:\n rcode, stdout, stderr = module.run_command(timedatectl)\n if rcode == 0 and stdout:\n line = _findstr(stdout, 'Time zone')\n full_tz = line.split(\":\", 1)[1].rstrip()\n timezone = full_tz.split()[0]\n return timezone\n else:\n module.warn('Incorrect timedatectl output. Timezone will be set to UTC')\n else:\n if os.path.exists('/etc/timezone'):\n timezone = get_file_content('/etc/timezone')\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n elif platform.system() == 'SunOS':\n if os.path.exists('/etc/default/init'):\n for line in get_file_content('/etc/default/init', '').splitlines():\n if line.startswith('TZ='):\n timezone = line.split('=', 1)[1]\n return timezone\n else:\n module.warn('Could not find /etc/default/init. Assuming UTC')\n\n elif re.match('^Darwin', platform.platform()):\n systemsetup = get_bin_path('systemsetup')\n if systemsetup is not None:\n rcode, stdout, stderr = module.execute(systemsetup, '-gettimezone')\n if rcode == 0 and stdout:\n timezone = stdout.split(':', 1)[1].lstrip()\n else:\n module.warn('Could not run systemsetup. Assuming UTC')\n else:\n module.warn('Could not find systemsetup. Assuming UTC')\n\n elif re.match('^(Free|Net|Open)BSD', platform.platform()):\n if os.path.exists('/etc/timezone'):\n timezone = get_file_content('/etc/timezone')\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n elif platform.system() == 'AIX':\n aix_oslevel = int(platform.version() + platform.release())\n if aix_oslevel >= 61:\n if os.path.exists('/etc/environment'):\n for line in get_file_content('/etc/environment', '').splitlines():\n if line.startswith('TZ='):\n timezone = line.split('=', 1)[1]\n return timezone\n else:\n module.warn('Could not find /etc/environment. Assuming UTC')\n else:\n module.warn('Cannot determine timezone when AIX os level < 61. Assuming UTC')\n\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n return timezone",
"def tzoffset(self):\n return _tzoffset(self._tz, self._t)",
"def tz_to_offset(timezone):\n return TIMEZONES.get(timezone, {'offset': 0})['offset']",
"def get_timezone():\n try:\n for line in open('/etc/sysconfig/clock'):\n field, value = line.split('=')\n if field.strip() == 'ZONE':\n return value.replace('\"', '').strip()\n return \"\"\n except IOError:\n return \"\"",
"def localtime_for_timezone(value, timezone):\r\n return adjust_datetime_to_timezone(value, settings.TIME_ZONE, timezone)",
"def shn_user_utc_offset():\n\n if auth.is_logged_in():\n return db(db.auth_user.id == session.auth.user.id).select(db.auth_user.utc_offset, limitby=(0, 1)).first().utc_offset\n else:\n try:\n offset = db().select(db.s3_setting.utc_offset, limitby=(0, 1)).first().utc_offset\n except:\n offset = None\n return offset",
"def get_tz_offset_seconds() -> float:\n import time\n import datetime\n tval = time.time()\n utc_offset = (datetime.datetime.fromtimestamp(tval) -\n datetime.datetime.utcfromtimestamp(tval)).total_seconds()\n return utc_offset",
"def timezone_offset_country():\r\n\r\n return _random.choice(\r\n [\r\n 'Eniwetoa',\r\n 'Hawaii',\r\n 'Alaska',\r\n 'Pacific',\r\n 'Mountain',\r\n 'Central',\r\n 'Eastern',\r\n 'Atlantic',\r\n 'Canada',\r\n 'Brazilia',\r\n 'Buenos Aries',\r\n 'Mid-Atlantic',\r\n 'Cape Verdes',\r\n 'Greenwich Mean Time',\r\n 'Dublin',\r\n 'Berlin',\r\n 'Rome',\r\n 'Israel',\r\n 'Cairo',\r\n 'Moscow',\r\n 'Kuwait',\r\n 'Abu Dhabi',\r\n 'Muscat',\r\n 'Islamabad',\r\n 'Karachi',\r\n 'Almaty',\r\n 'Dhaka',\r\n 'Bangkok, Jakarta',\r\n 'Hong Kong',\r\n 'Beijing',\r\n 'Tokyo',\r\n 'Osaka',\r\n 'Sydney',\r\n 'Melbourne',\r\n 'Guam',\r\n 'Magadan',\r\n 'Soloman Islands',\r\n 'Fiji',\r\n 'Wellington',\r\n 'Auckland',\r\n ]\r\n )"
]
| [
"0.72785103",
"0.6679715",
"0.6583705",
"0.65549064",
"0.6543888",
"0.6491711",
"0.64791346",
"0.64500356",
"0.6345579",
"0.63271415",
"0.6276202",
"0.6270148",
"0.62497365",
"0.6246483",
"0.6226378",
"0.62032956",
"0.62001926",
"0.61094964",
"0.6066218",
"0.60319906",
"0.60287976",
"0.60119766",
"0.59384507",
"0.5913351",
"0.5863933",
"0.58598965",
"0.58199596",
"0.5806329",
"0.57890356",
"0.57689124"
]
| 0.68559456 | 1 |
follow on an unknown url should return 404 | def test_follow_404(self):
response = self.client.get(reverse('shortener:follow', kwargs={'slug': "fails"}))
self.assertEqual(response.status_code, 404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def follow_redirects(self, url):\n try:\n return requests.get(url).url\n except requests.RequestException:\n return None",
"def test_following_non_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_404_url():\n def not_found(request):\n request.send_error(404)\n\n with test_server(handler=not_found, methods=(\"post\", \"get\"),\n port=\"random\") as server:\n stream = TweetStream(\"foo\", \"bar\", url=server.baseurl)\n assert_raises(ConnectionError, stream.next)\n\n stream = FollowStream(\"foo\", \"bar\", [1, 2, 3], url=server.baseurl)\n assert_raises(ConnectionError, stream.next)\n\n stream = TrackStream(\"foo\", \"bar\", [\"opera\"], url=server.baseurl)\n assert_raises(ConnectionError, stream.next)",
"def test_unfollowing_non_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_follow_non_existent_user(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.post(self.follow_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_404_url(self):\r\n url = 'http://lococast.net/archives/001'\r\n read = ReadUrl.parse(url)\r\n\r\n self.assertTrue(\r\n read.status == 404, \"The status is 404: \" + str(read.status))\r\n self.assertTrue(\r\n not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(\r\n read.content is None, \"Content should be none\")",
"def view(self, url):\r\n abort(404)",
"def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404",
"async def try_follow_redirect(text: str) -> Optional[str]:\n\n async with aiohttp.ClientSession() as cs:\n try:\n async with cs.get(text) as response:\n return str(response.url)\n except ValueError:\n return None",
"def error_404(error):\n return 'Bummer, there is nothing at this URL.'",
"def redirect(url):",
"def _is_follow_request(environ, result):\n r = Request(environ)\n if r.params.get(\"action\") == \"follow\":\n return True\n else:\n return False",
"def test_get_404(self):\n url = self.baseurl + \"/do-not-implement-this-page-it-is-not-found\"\n try:\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( False, \"Should have thrown an HTTP Error!\")\n except urllib2.HTTPError as e:\n self.assertTrue( e.getcode() == 404 , (\"404 Not FOUND! %d\" % e.getcode()))\n else:\n self.assertTrue( False, \"Another Error was thrown!\")",
"def test_follow(self):\n url = 'http://www.python.org/'\n link = Link.objects.create(slug='testslug', url=url)\n self.assertEqual(link.usage_count, 0)\n\n # try to open without logging in\n response = self.client.get(reverse('shortener:follow', kwargs={\n 'slug': link.slug}))\n self.assertRedirects(response, LOGIN_URL, 400)\n\n # follow the short url and get a redirect\n User.objects.create_user('testuser', email='[email protected]')\n self.client.login(username='testuser')\n response = self.client.get(reverse('shortener:follow', kwargs={\n 'slug': link.slug}))\n self.assertRedirects(response, url, 301, fetch_redirect_response=False)\n\n # re-fetch link so that we can make sure that usage_count incremented\n link = Link.objects.get(id=link.id)\n self.assertEqual(link.usage_count, 1)",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)",
"def view(self, url):\n abort(404)",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404",
"def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)",
"def _check_next_url(next):\n if '://' in next:\n return None\n return next",
"def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404",
"def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404"
]
| [
"0.6998348",
"0.6852071",
"0.65475863",
"0.64777684",
"0.6282614",
"0.6119723",
"0.6115893",
"0.61138743",
"0.61022586",
"0.60383093",
"0.6015655",
"0.5979148",
"0.5972463",
"0.5962162",
"0.59428495",
"0.5912951",
"0.59049004",
"0.58943534",
"0.58770907",
"0.5874943",
"0.5874943",
"0.5874943",
"0.5874943",
"0.5874943",
"0.5874943",
"0.5874943",
"0.58588535",
"0.5849121",
"0.58254755",
"0.58254755"
]
| 0.72807777 | 0 |
calling from_decimal() on letters raises an EncodingError | def test_encoding_non_int_fails(self):
self.assertRaises(EncodingError, base62.from_decimal, string.ascii_letters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_illgal_character(self):\n self.assertRaises(DecodingError, base62.to_decimal, '@@@@')",
"def test_decoding_non_str_fails(self):\n self.assertRaises(DecodingError, base62.to_decimal, sys.maxsize)",
"def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)",
"def test_check_digits_with_custom_alphabet_and_sign(self, number, base, expected):\n alphabet = (\"Z\", \"!\", \"T\", \"#\", \"F\", \"%\", \"S\", \"&\", \"E\", \"(\", \"0\")\n\n converted = positional.encode(number, base, alphabet=alphabet, sign_literal=\"@\")\n self.assertEqual(converted, expected)\n self.assertEqual(\n positional.decode(converted, base, alphabet=alphabet, sign_literal=\"@\"),\n number,\n )",
"def human2bytes(s):\n init = s\n num = \"\"\n while s and s[0:1].isdigit() or s[0:1] == '.':\n num += s[0]\n s = s[1:]\n num = float(num)\n letter = s.strip()\n for name, sset in SYMBOLS.items():\n if letter in sset:\n break\n else:\n if letter == 'k':\n # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs\n sset = SYMBOLS['customary']\n letter = letter.upper()\n else:\n raise ValueError(\"can't interpret %r\" % init)\n prefix = {sset[0]: 1}\n for i, s in enumerate(sset[1:]):\n prefix[s] = 1 << (i + 1) * 10\n return int(num * prefix[letter])",
"def human2bytes(s):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n letter = s[-1:].strip().upper()\n num = s[:-1]\n #assert num.isdigit() and letter in symbols\n #use below assert statement to handle sizes with decimal places\n assert float(num) and letter in symbols\n num = float(num)\n prefix = {symbols[0]: 1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i +1) *10\n return int(num * prefix[letter])",
"def human2bytes(s):\n if s is None:\n return None\n try:\n return int(s)\n except ValueError:\n symbols = 'BKMGTPEZY'\n letter = s[-1:].strip().upper()\n num = float(s[:-1])\n prefix = {symbols[0]: 1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n return int(num * prefix[letter])",
"def letter_to_base40(letter):\n letters = {'C': 3, 'D': 9, 'E': 15, 'F': 20, 'G': 26, 'A': 32, 'B': 38}\n if letter not in letters.keys():\n raise ValueError('invalid letter \\'{}\\''.format(letter))\n return letters[letter]",
"def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"",
"def test_normal_decimal_input(self):\r\n ws_leader = \"S. O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9",
"def human2bytes(s):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n letter = s[-1:].strip().upper()\n num = s[:-1]\n assert num.isdigit() and letter in symbols\n num = float(num)\n prefix = {symbols[0]:1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n return int(num * prefix[letter])",
"def human2bytes(s):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n letter = s[-1:].strip().upper()\n num = s[:-1]\n assert num.isdigit() and letter in symbols\n num = float(num)\n prefix = {symbols[0]:1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n return int(num * prefix[letter])",
"def transform_python(self, value):\r\n return Decimal128(value)",
"def transform_python(self, value):\r\n return Decimal128(value)",
"def test_nonsense_decimal(self):\n test_passes = False\n try:\n self.parser.extract_zt(\"ZT.\")\n test_passes = False\n except Exception as e:\n test_passes = True\n self.assertTrue(test_passes)",
"def unicode2ascii(_unicrap):\n xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',\n 0xc6:'Ae', 0xc7:'C',\n 0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',\n 0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',\n 0xd0:'Th', 0xd1:'N',\n 0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',\n 0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',\n 0xdd:'Y', 0xde:'th', 0xdf:'ss',\n 0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',\n 0xe6:'ae', 0xe7:'c',\n 0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',\n 0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',\n 0xf0:'th', 0xf1:'n',\n 0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',\n 0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',\n 0xfd:'y', 0xfe:'th', 0xff:'y',\n 0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',\n 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',\n 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',\n 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',\n 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:\"'\",\n 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',\n 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',\n 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',\n 0xd7:'*', 0xf7:'/'\n }\n\n s = \"\"\n for i in _unicrap:\n ordi = ord(i)\n if ordi in xlate:\n s += xlate[ordi]\n elif ordi >= 0x80:\n pass\n else:\n s += str(i)\n return s",
"def test_from_knx_wrong_parameter_too_small(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n with self.assertRaises(ConversionError):\n DPTString().from_knx(raw)",
"def test_convertsent(self):\n convert6 = cnv()\n\n convert6.setnum(\"also haben wir hundertunddrei nein hundert 4 tausend\")\n self.assertEqual(convert6.getnum(), 104000)\n\n convert6.setnum(\"also ein haben wir hundertunddrei nein tausend\")\n self.assertEqual(convert6.getnum(), 1000)\n\n convert6.setnum(\" \")\n self.assertEqual(convert6.getnum(), 0)\n\n convert6.setnum(\"fünfundzwanzig\")\n self.assertEqual(convert6.getnum(), 25)\n\n convert6.setnum(\"albert ein\")\n self.assertEqual(convert6.getnum(), 1)",
"def test_check_digits_with_wrong_alphabet(self, _, alpha):\n with self.assertRaises(exceptions.WrongArgumentValueError):\n positional.encode(42, 10, alphabet=alpha)",
"def test_from_knx_wrong_parameter_too_large(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n with self.assertRaises(ConversionError):\n DPTString().from_knx(raw)",
"def convert(self, value):\n return self.ASCIIToDecimal(value)",
"def __init__(self, format_char=\"I\"):\r\n\t\tself.format = ENDIANNESS + format_char",
"def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42",
"def decode(val):\n if isinstance(val, Decimal):\n return float(val)\n return val",
"def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"",
"def test_20_phonenumbers_UnicodeDecodeError(self):\n number_phone = self.samples[2]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)",
"def to_bytes(s):\n last = -1\n unit = s[last].lower()\n if unit.isdigit():\n # `s` is a integral number\n return int(s)\n if unit == 'b':\n # ignore the the 'b' or 'B' suffix\n last -= 1\n unit = s[last].lower()\n if unit == 'i':\n k = 1024\n last -= 1\n unit = s[last].lower()\n else:\n k = 1000\n # convert the substring of `s` that does not include the suffix\n if unit.isdigit():\n return int(s[0:(last + 1)])\n if unit == 'k':\n return int(float(s[0:last]) * k)\n if unit == 'm':\n return int(float(s[0:last]) * k * k)\n if unit == 'g':\n return int(float(s[0:last]) * k * k * k)\n if unit == 't':\n return int(float(s[0:last]) * k * k * k * k)\n if unit == 'p':\n return int(float(s[0:last]) * k * k * k * k * k)\n if unit == 'e':\n return int(float(s[0:last]) * k * k * k * k * k * k)\n if unit == 'z':\n return int(float(s[0:last]) * k * k * k * k * k * k * k)\n if unit == 'y':\n return int(float(s[0:last]) * k * k * k * k * k * k * k * k)",
"def test_from_knx_wrong_parameter2(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx(\"0x23\")",
"def _parse_bytes(s):\n if isinstance(s, (int, float)):\n return int(s)\n s = s.replace(\" \", \"\")\n if not any(char.isdigit() for char in s):\n s = \"1\" + s\n\n for i in range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:]\n\n try:\n n = float(prefix)\n except ValueError as e:\n raise ValueError(\n \"Could not interpret '%s' as a number\" % prefix\n ) from e\n\n try:\n multiplier = BYTE_SIZES[suffix.lower()]\n except KeyError as e:\n raise ValueError(\n \"Could not interpret '%s' as a byte unit\" % suffix\n ) from e\n\n result = n * multiplier\n return int(result)",
"def test_to_knx_too_long(self):\n with self.assertRaises(ConversionError):\n DPTString().to_knx(\"AAAAABBBBBCCCCx\")"
]
| [
"0.6712188",
"0.61095405",
"0.6011091",
"0.58963567",
"0.57924646",
"0.57552683",
"0.57357097",
"0.57234335",
"0.5707907",
"0.56947505",
"0.5693429",
"0.5693429",
"0.56900626",
"0.56900626",
"0.56581885",
"0.55696404",
"0.55681103",
"0.5540921",
"0.55063635",
"0.5486694",
"0.5474196",
"0.5468283",
"0.54644734",
"0.5456032",
"0.5413689",
"0.53231126",
"0.53152835",
"0.52614474",
"0.52611077",
"0.5257645"
]
| 0.64202726 | 1 |
trying to encode a character that is not within base62 raises an EncodingError | def test_illgal_character(self):
self.assertRaises(DecodingError, base62.to_decimal, '@@@@') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encoding_non_int_fails(self):\n self.assertRaises(EncodingError, base62.from_decimal, string.ascii_letters)",
"def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)",
"def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)",
"def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)",
"def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)",
"def _value(ch):\n\n try:\n return CHARSET.index(ch)\n except ValueError:\n raise ValueError(\"base62: Invalid character (%s)\" % ch)",
"def escapeEncode(s: unicode) -> unicode:\n ...",
"def encode(self, decoded):",
"def encode(e):\n if PY2 and isinstance(e, unicode):\n e = e.encode('utf-8')\n return e",
"def test_encoder(self):\n from sosbeacon.utils import number_encode\n\n number = 123\n encoded = number_encode(number)\n self.assertEqual(encoded, 'b6')",
"def _encode(self, upper):\n return upper",
"def encode(self, text):",
"def test_decoding_non_str_fails(self):\n self.assertRaises(DecodingError, base62.to_decimal, sys.maxsize)",
"def test_encode(self):\n assert url_encoder.encode(1) == 'TheStakeOut'\n assert url_encoder.encode(800) == 'TheStockTip-TheSeven'\n assert url_encoder.encode(99999) == 'MaleUnbonding-TheConversion-TheAndreaDoria'",
"def test_encode(self):\n pass # TODO(tlarsen)",
"def test_ascii_to_phred64(self):\r\n self.assertEqual(ascii_to_phred64('@'), 0)\r\n self.assertEqual(ascii_to_phred64('^'), 30)",
"def encode(self, char):\n\n if char == self.pair[0]:\n return self.pair[1]\n elif char == self.pair[1]:\n return self.pair[0]\n else:\n return char",
"def encode(num, alphabet=BASE62):\n if num == 0:\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n num, rem = divmod(num, base)\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)",
"def test_consistent_encoding_128(self):\n text = u\"abracadabra\" # pylint: disable=redundant-u-string-prefix\n self.assertEqual(\n CityHash128WithSeed(text), CityHash128WithSeed(text.encode(\"utf-8\"))\n )",
"def encode_email(email, key):\n return",
"def encode(self, strs):",
"def encode(self, strs):",
"def test_phred_to_ascii64(self):\r\n self.assertEqual(phred_to_ascii64(0), '@')\r\n self.assertEqual(phred_to_ascii64(30), '^')",
"def encode(key: T) -> int:\n \n if isinstance(key, str):\n result: int = 0\n p: int = 97 # p should roughly equal the number of characters in the input alphabet, we have 95 printable ASII chars\n m: int = 32361122672259149 # now that's a prime :), 19th in OEIS A118839\n p_pow: int = 1\n for c in key:\n result = (result + ord(c) * p_pow) % m\n p_pow = (p_pow * p) % m\n return result\n elif isinstance(key, int):\n return key\n else:\n raise Exception(f\"Cannot encode {type(key)} (Only strings and integers are supported)\")",
"def test_encodeWithErrors(self):\n text = u'Hello world'\n self.assertEqual(\n text.encode('imap4-utf-7', 'strict'),\n text.encode('imap4-utf-7'))",
"def _encode_key(self, key):\n return key.encode() if isinstance(key, str) else key",
"def encode(n):\n encode = []\n if n < 0:\n return ''\n while n >= 58:\n remainder = n % 58\n encode.append(LETTERS[remainder])\n n = n / 58\n if n:\n encode.append(LETTERS[n])\n return ''.join(reversed(encode))",
"def encode(s, storage=BIT_STORAGE, alpha=ALPHABET, char_func=unichr):\n n = s\n buf = ''\n while len(n) > 0:\n b = n[:storage]\n n = n[storage:]\n\n d = 11 - len(b)\n for i in range(d):\n b += '\\0'\n\n bs = BitString(data=b)\n\n for i in range(8):\n v = bs.readbits(storage).uint\n buf += char_func(alpha[v])\n\n return buf.rstrip(char_func(alpha[0]))",
"def __encode(self):\n\n for i, char in enumerate(self.__chars):\n self.__char2idx[char] = i\n self.__idx2char[i] = char",
"def encode2(s,n):\n r = [ chr(((ord(x)-97+n)%26)+97) if x!=' ' else x for x in s]\n return \"\".join(r)"
]
| [
"0.6859128",
"0.66699857",
"0.66699857",
"0.66699857",
"0.66699857",
"0.6384018",
"0.62883586",
"0.6282206",
"0.61720264",
"0.61659485",
"0.61273414",
"0.6121925",
"0.61131334",
"0.60798943",
"0.6070405",
"0.6053329",
"0.6049165",
"0.6011752",
"0.59600824",
"0.5955402",
"0.5942288",
"0.5942288",
"0.5926392",
"0.59245604",
"0.5917426",
"0.5913259",
"0.5893421",
"0.5874935",
"0.58670443",
"0.58529335"
]
| 0.6854086 | 1 |
Remove previous lines from the color bar | def cleanCb(axColorBar):
for line in axColorBar.axes.lines:
line.remove()
return axColorBar | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def suppressColorBar():\n dislin.nobar()",
"def removeColorbar(self):\n if self._colorbar is not None:\n self._colorbar.remove()\n self._colorbar = None",
"def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._checks)\n if hasattr(self, '_lines'):\n for l1, l2 in self._lines:\n self.ax.draw_artist(l1)\n self.ax.draw_artist(l2)",
"def _onRemove(self, event):\n index = self.colorlist.GetSelection()\n del self.graphColors[index]\n self._tupleListToStrings()\n if len(self.graphColors) > 0:\n self.colorlist.SetSelection(0)\n self._updateButtons(None)",
"def colorWipe(self, color):\r\n #color = Color(R,G,B)\r\n for i in range(self.strip.numPixels()):\r\n self.strip.setPixelColor(i, color)\r\n self.strip.show()",
"def undo_glycan(self):\n chid = self.chain.get()\n sequon = self.sequon.get()\n self.sequon_colors[sequon] = [.5, .5, .5]\n key = self.sequon.get()\n if key in self.linked_glycanMolecules:\n del self.linked_glycanMolecules[key]\n del self.linked_glycans[key]\n self.draw_glycoprotein(chid)\n self.draw_glycan(sequon)",
"def onChartRemoveSeries(self):\n self.chart().removeAllSeries()\n self.series = {}\n self.yaxis = {}\n self.pen = {}\n self.ymin = {}\n self.ymax = {}",
"def clear(self):\n self.canvas = [[self.style] * self.cols for _ in range(self.lines)]",
"def _clear(self):\n self._calculate_bar_width()\n # sys.stdout.write(\"\\033[K\")\n # to fix bug when logging to console\n print(\" \" * self._tw, end='\\r')\n # sys.stdout.write(\"\\033[K\")",
"def OnPanelEraseBg(self, event):\r\n\r\n pass",
"def clear_screen(self):\n if self.x:\n self.move_cur_up((self.prev_x+1)/self.get_col_width())\n self.clear_line(self.get_num_lines(self.prev_lines) +\n self.get_num_lines(['>' + self.prev_str + ' ']))\n #time.sleep(2)",
"def _erase_last_line(self, img):\n if not self.prev_y is None:\n prev_x = self._pos-1\n cv.line(img, (prev_x, 0), (prev_x, self.h), (255, 255, 255), 1)\n\n y1, y2 = self.prev_y\n cv.line(img, (prev_x, y1), (prev_x, y2), (0, 0, 0), 1)",
"def clear():\n # TODO: this should actually create a stack of output so I can test each screen\n lines.clear()",
"def OnEraseBackground(self, event):\r\n \r\n pass",
"def OnEraseBackground(self, event):\r\n \r\n pass",
"def remove_line(self, origin):\n current_tile = self.board[origin[0]][origin[1]]\n\n if current_tile.is_dot:\n temp = current_tile.next\n current_tile.next = None\n current_tile = temp\n\n # Remove color of all non dot tiles in line.\n while current_tile and current_tile.color and not current_tile.is_dot:\n temp = current_tile.next\n current_tile.color = None\n current_tile.next = None\n current_tile = temp",
"def clear(self):\n lines = self._lines\n image, bkg_image = self.image, self._image\n for line in lines: line.clear(image, bkg_image) #prej bkg_img\n self._cursor = 0",
"def clear_colors(self):\n for r in range(0, self.maze.get_nrows()):\n for c in range(0, self.maze.get_ncols()):\n self.set_color((r, c), 'white', draw=False)\n\n self.cvs.itemconfig(self.cvs_cells[self.maze.get_start_cell()],\n fill='green')\n self.cvs.itemconfig(self.cvs_cells[self.maze.get_end_cell()],\n fill='red')\n\n self.draw()",
"def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._buttons)\n if hasattr(self, \"_circles\"):\n for circle in self._circles:\n self.ax.draw_artist(circle)",
"def erase_in_display(self, how=0):\n super(CustomHistoryScreen, self).erase_in_display(how)\n\n if how == 3:\n self.reset_history()",
"def erase_plot(self, line_position=0):\n self.axplot.lines.pop(line_position).remove\n self.fig.canvas.draw()\n return",
"def delTcline(self, line):\n self._checkfigure()\n ld = self._get_linedict(line)\n for vline in ld['vlines']:\n vline.remove()\n ld['vlines'] = []",
"def off(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # set lines and coordinates to empty lists \n self.cell_lines = []\n self.xx_cells = []",
"def _clear_progress(self):\n if not self._progress_started:\n return\n clear_line = \" \" * self._term_size[0]\n print(f\"\\r{clear_line}\\r\", end=\"\")",
"def clear_complete_lines():\n global board\n\n\n nb = []\n fn = []\n for idl, line in enumerate(board):\n if 0 in line:\n # Not full\n nb.append(line)\n else:\n fn.append(idl)\n\n if fn:\n # Update the board information\n board = new_board_lines(len(fn)) + nb\n\n # clear\n d_line = [obj for obj in scene.objects if type(obj) is box and obj.y in fn]\n for _ in xrange(10):\n rate(20)\n for obj in d_line:\n obj.opacity -= 0.1\n for obj in d_line:\n obj.visible = 0\n\n\n # decline\n for n in fn:\n for obj in (obj for obj in scene.objects if type(obj) is box and obj.y < n):\n obj.y += 1\n\n return fn",
"def cells_off(self):\n self.plotter.cells_off(self.ax)\n self.fig.canvas.draw()",
"def OnEraseBackground(self, event):\r\n\r\n pass",
"def OnEraseBackground(self, event):\r\n\r\n pass",
"def _clear_last_lines(n=1):\n for _ in range(n):\n sys.stdout.write(CURSOR_UP_ONE)\n sys.stdout.write(ERASE_LINE)",
"def deleteCmap(self):\n self.cmap_list = TrackedList()"
]
| [
"0.71303684",
"0.660231",
"0.6472268",
"0.619576",
"0.6145884",
"0.61119395",
"0.6065317",
"0.604062",
"0.60274917",
"0.60210115",
"0.5998631",
"0.5994174",
"0.5974805",
"0.5931044",
"0.5931044",
"0.5913806",
"0.5910324",
"0.58925134",
"0.58901626",
"0.5881482",
"0.5880484",
"0.5873228",
"0.5866642",
"0.58269423",
"0.5825076",
"0.5819419",
"0.58117384",
"0.58117384",
"0.5801412",
"0.57942826"
]
| 0.7817924 | 0 |
Fills the sudoku with a prechosen list (can be read from file) and fills in the rest with "random" numbers (1-9) | def fill(self, file = None):
if file == None:
self.prefill = {}
else:
f = open('sudoku.txt')
self.prefill = f.read()
f.close()
for u in range(2* self.N**2,3* self.N**2):
for val in range(self.N**2):
self.setValue(val+1,0,0,self.unitlist[u][val])
# in each N^2 square of the sudoku
# for u in range(2*self.N*2, 3*self.N**2):
# posl = range(self.N**2) # Posibilities list
# val = randrange(len(posl))
# index = 0
# while val:
# self.setValue(val+1,0,0,self.unitlist[u][index])
# index+=1
# DEBUG
# print posl
# print val
# del posl[val]
# val = randrange(len(posl))
for u in self.prefill:
v = self.getValue(0,0,u)
s = self.units[u][2][self.prefill[u]-1]
self.setValue(self.prefill[u],0,0,u)
self.setValue(v,0,0,s) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_sudoku(lst_temp):\r\n temp_index = 0\r\n for row in range(9):\r\n for col in range(9):\r\n val = lst_temp[temp_index]\r\n if val != 0:\r\n change_value(val, Point(row, col))\r\n temp_index += 1",
"def generate_sudoku(self):\n\n # randomly generate the first row \n random_order_number = [x for x in range(1, 10)]\n random.shuffle(random_order_number)\n for x in range(9):\n value = random_order_number[x]\n this_cell = self.grid[0][x]\n this_cell.value = value\n self.remove_value(this_cell, 0, x, value)\n\n row = 1\n column = 0\n while row <9 and column < 9:\n time.sleep(0.05)\n # search for options\n # should only be done once for each cell\n this_cell = self.grid[row][column]\n if this_cell.options == None:\n this_cell.options = self.find_options(row, column, this_cell.grid)\n\n if not this_cell.options:\n # backtrace should only happen when there is no options for this cell\n row, column = self.backtrace(this_cell, row, column)\n\n else:\n # case 3: the number has options and the number returned from the cell is valid\n if this_cell.value != None:\n self.add_value(this_cell, row, column)\n this_cell.get_value_from_options()\n # when you switch the value for a value from the option, put the current value back into the row\n self.remove_value(this_cell, row, column, this_cell.value)\n if column == 8:\n row += 1\n column = 0\n else:\n column += 1\n try:\n self.print_detail(this_cell, row, column)\n except IndexError:\n pass",
"def solveSudoku(self, board: List[List[str]]) -> None:\n def dfs(idx):\n if idx == len(blankIdx):\n return True\n else:\n i, j = blankIdx[idx]\n for num in rg:\n num += 1\n if (num not in rows[i] and\n num not in cols[j] and\n num not in boxs[i//3][j//3]):\n board[i][j]=str(num)\n rows[i].add(num)\n cols[j].add(num)\n boxs[i//3][j//3].add(num)\n if dfs(idx+1):\n return True\n board[i][j] = blank\n rows[i].remove(num)\n cols[j].remove(num)\n boxs[i//3][j//3].remove(num)\n \n rg,blank = range(9), \".\"\n rows = [set() for _ in rg]\n cols = [set() for _ in rg]\n boxs = [[set() for _ in range(3)] for j in range(3)]\n blankIdx = list()\n for i in rg:\n for j in rg:\n if board[i][j]!=blank:\n ele = int(board[i][j])\n rows[i].add(ele)\n cols[j].add(ele)\n boxs[i//3][j//3].add(ele)\n else:\n blankIdx.append((i,j))\n dfs(0)",
"def solveSudoku(self, board: List[List[str]]) -> None:\n def getLocs(board):#初始化,获取需要填充的位置,记录为一个栈\n locs = []\n for row in range(9):\n for col in range(9):\n if board[row][col] == '.':\n locs.append((row, col))\n return locs\n\n def getMaps(board):#定义三个字典,跟踪9行、9列和9块的已填充数字,采用数据结构为defaultdict\n from collections import defaultdict as dd\n rowMap = [dd(int) for _ in range(9)]\n colMap = [dd(int) for _ in range(9)]\n blockMap = [dd(int) for _ in range(9)]\n for row in range(9):\n for col in range(9):\n if board[row][col] != '.':\n num = int(board[row][col])\n rowMap[row][num] += 1\n colMap[col][num] += 1\n bolckIndex = int(row/3)*3+int(col/3)\n blockMap[bolckIndex][num] += 1\n return rowMap, colMap, blockMap\n\n def fillBoard(board, locs):#递归填充剩余的数独空位置\n if not locs:\n return True\n row, col = locs.pop()#弹出一个待填充位置\n bolckIndex = int(row/3)*3+int(col/3)\n found = False\n for num in range(1, 10):\n if found:\n break\n if not rowMap[row][num] and not colMap[col][num] and not blockMap[bolckIndex][num]:\n ##如果当前行、当前列和当前块均不存在该数字,则将数字更新到相应行、列、块,并尝试填充\n rowMap[row][num] = 1\n colMap[col][num] = 1\n blockMap[bolckIndex][num] = 1\n board[row][col] = str(num)\n found = fillBoard(board, locs)#递归到下一层填充\n rowMap[row][num] = 0##状态回溯,将填充的位置清空\n colMap[col][num] = 0\n blockMap[bolckIndex][num] = 0\n if not found:##如果本轮都无法求解,则回溯到初始状态,继续从前面再填充\n locs.append((row, col))\n board[row][col] = '.'\n return found\n\n rowMap, colMap, blockMap = getMaps(board)\n locs = getLocs(board)\n fillBoard(board, locs)",
"def fill_board(self):\n slope = 0\n for i in range(0, len(self.row_map.keys())):\n for j in range(0, len(self.row_map.keys())):\n key = self.row_map[i + 1] + str(j + 1)\n value = int(self.raw_data[j + (8 * i + slope)])\n self.sudoku_board.update({key: value})\n slope += 1",
"def solveSudoku(board):\n # represents all numbers in a specific row, col, box\n # format: if (5,9) is in rows, that means row 5 contains digit 9\n\t\t# format: if (3, 2) is in cols, that means col 3 contains digit 2\n\t\t# format: if (0,2,8) is in boxes, that means box (0,2) contains 8\n\t\t# cellsToFill is a stack that holds all the (i,j) cells we need to fill\n rows, cols, boxes = set(), set(), set()\n cellsToFill = []\n m, n = len(board), len(board[0])\n \n def initDataSets():\n for i in range(m):\n for j in range(n):\n char = board[i][j]\n if char == '.':\n cellsToFill.append((i,j))\n else:\n addToDataSets((i, char), (j, char), (i//3, j//3, char))\n\n def addToDataSets(curRow, curCol, curBox):\n rows.add(curRow)\n cols.add(curCol)\n boxes.add(curBox)\n \n def removeFromDataSets(curRow, curCol, curBox):\n rows.remove(curRow)\n cols.remove(curCol)\n boxes.remove(curBox)\n \n def backtrack():\n if not cellsToFill:\n return True\n \n i, j = cellsToFill.pop()\n for char in '123456789':\n # check if the number is already in a row/col/box, if it is then skip to the next number\n curRow, curCol, curBox = (i, char), (j, char), (i//3, j//3, char)\n if curRow in rows or curCol in cols or curBox in boxes: continue\n \n # if not, add the number to the row/col/box\n addToDataSets(curRow, curCol, curBox)\n board[i][j] = char\n \n # start the recursive call for inserting the next number\n if (backtrack()):\n return True\n \n # backtrack wasn't successful, remove the number from the row/col/box\n removeFromDataSets(curRow, curCol, curBox)\n board[i][j] = '.'\n \n cellsToFill.append((i,j))\n return False\n \n initDataSets()\n print(board)\n backtrack()",
"def solveSudoku(self, board: List[List[str]]) -> None:\n n19 = set(list('123456789'))\n conn = defaultdict(set)\n center = [(i,j) for i in {1,4,7} for j in {1,4,7}]\n def get_conn(i,j):\n for x in range(0, 9):\n conn[(i,j)].add((x,j))\n conn[(i,j)].add((i,x))\n for ci, cj in center:\n if abs(i-ci)<=1 and abs(j-cj)<=1:\n for ii in range(-1,2):\n for jj in range(-1,2):\n ni, nj = ci + ii, cj + jj\n conn[(i,j)].add((ni, nj))\n break\n conn[(i,j)].discard((i,j))\n\n\n for i in range(9):\n for j in range(9):\n get_conn(i,j)\n\n def get_avail(i, j):\n choices = set(n19)\n for ni, nj in conn[(i,j)]:\n choices.discard(board[ni][nj])\n return choices\n\n to_fill = set()\n for i, row in enumerate(board):\n for j, v in enumerate(row):\n if v == '.':\n to_fill.add((i,j))\n\n def solve():\n if not to_fill:\n return True\n min_avail = n19\n ci, cj = None, None\n for i, j in to_fill:\n val = get_avail(i,j)\n if not val:\n return False\n if len(val) < len(min_avail):\n min_avail = val\n ci, cj = i, j\n to_fill.discard((ci, cj))\n for x in min_avail:\n board[ci][cj] = x\n if solve():\n return True\n board[ci][cj] = '.'\n to_fill.add((ci, cj))\n return False\n print(solve())",
"def parsePuzzle(fileName):\n data = []\n f = open(fileName, 'r')\n for line in f:\n splitLine = line.split(sep=\" \")\n row = []\n if len(splitLine) >= 9:\n for i in range(9):\n row.append(int(splitLine[i]))\n data.append(row)\n f.close()\n return SudokuPuzzle(data)",
"def init_board(file_name):\n board = parse_file(file_name)\n return SudokuBoard(len(board), board)",
"def make_sudoku(size):\r\n def mutate_list_1(lst, size):\r\n \"\"\"Helper function for removing part of a list from the beginning and add it to the end.\"\"\"\r\n count = 0\r\n while count < size:\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n count += 1\r\n return lst\r\n\r\n def mutate_list_2(lst):\r\n \"\"\"Helper function for removing element from the beginning of a list and add it to the end.\"\"\"\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n return lst\r\n\r\n count = 0\r\n matrix_length = size ** 2 # define a size of matrix\r\n matrix = [[] * matrix_length] # create an empty matrix\r\n matrix[0] = range(1, matrix_length + 1) # set a first row to a range from 1 to size ** 2\r\n while count < matrix_length - 1:\r\n l = matrix[count][:] # create a new list object that is a copy of previous row in a matrix\r\n if (count + 1) % size == 0: # check if a row in inner square of a matrix\r\n l = matrix[count - (size-1)][:] # if it is, l set to the first row of previous square\r\n matrix.append(mutate_list_2(l))\r\n else:\r\n matrix.append(mutate_list_1(l, size)) # mutate l and add it to the matrix\r\n count += 1\r\n\r\n\r\n return matrix",
"def __init__(self):\r\n self.rows = [[0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9]\r\n self.block1 = []\r\n self.block5 = []\r\n self.block9 = []\r\n self.puzzle = []\r\n self.score = 0\r\n self.difficulty = 1 # By default Easy difficulty\r\n\r\n \"\"\" Creating blocks using random number generator\"\"\"\r\n while len(self.block1) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block1:\r\n self.block1.append(r)\r\n\r\n while len(self.block5) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block5:\r\n self.block5.append(r)\r\n\r\n while len(self.block9) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block9:\r\n self.block9.append(r)\r\n x = 0\r\n for i in range(3):\r\n for j in range(3):\r\n self.rows[i][j] = self.block1[x]\r\n x = x+1\r\n x = 0\r\n for i in range(3, 6):\r\n for j in range(3, 6):\r\n self.rows[i][j] = self.block5[x]\r\n x = x+1\r\n x = 0\r\n for i in range(6,9):\r\n for j in range(6,9):\r\n self.rows[i][j] = self.block9[x]\r\n x = x+1\r\n \"\"\"Creating a valid solution\"\"\"\r\n self.createsolution(self.rows)",
"def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)",
"def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)",
"def create_sudoku(self)->list:\n grid = [[None for x in range(9)] for row in range(9)]\n for row in range(0,9):\n for column in range(0,9):\n if row <= 2 and column <=2:\n grid[row][column] = cell.Cell(0)\n elif row <= 2 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(1)\n elif row <= 2 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(2)\n elif 3 <= row <= 5 and column <= 2:\n grid[row][column] = cell.Cell(3)\n elif 3 <= row <= 5 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(4)\n elif 3 <= row <= 5 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(5)\n elif 6 <= row <= 8 and column <= 2:\n grid[row][column] = cell.Cell(6)\n elif 6 <= row <= 8 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(7)\n elif 6 <= row <= 8 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(8)\n return grid",
"def init():\n for i in range(COLS):\n for j in range(ROWS):\n BOARD[i][j] = int(random(2))",
"def solveSudoku(self, board: List[List[str]]) -> None:\n\n def deepCopy(src, tar):\n n = len(src)\n for i in range(n):\n for j in range(n):\n tar[i][j] = src[i][j]\n\n def getNums(board, x, y):\n used_nums_x = []\n used_nums_y = []\n used_nums_square = []\n for i in range(n):\n if board[i][y] != '.':\n used_nums_y.append(board[i][y])\n for j in range(n):\n if board[x][j] != '.':\n used_nums_x.append(board[x][j])\n\n x1 = (x // 3) * 3\n x2 = ((x // 3) + 1) * 3 - 1\n y1 = (y // 3) * 3\n y2 = ((y // 3) + 1) * 3 - 1\n\n for i in range(x1, x2 + 1):\n for j in range(y1, y2 + 1):\n if board[i][j] != '.':\n used_nums_square.append(board[i][j])\n\n used_nums = set(used_nums_x + used_nums_y + used_nums_square)\n nums = set([str(i) for i in range(1, 10)]) - used_nums\n return nums\n\n def helper(board, points, result):\n n = len(board)\n if len(points) == 0:\n deepCopy(board, result)\n return\n\n x, y = points[-1]\n nums = getNums(board, x, y)\n for num in nums:\n board[x][y] = num\n points.pop()\n helper(board, points, result)\n points.append((x, y))\n board[x][y] = '.'\n\n n = len(board)\n points = [(i, j) for i in range(n) for j in range(n) if board[i][j] == '.']\n result = [['0'] * n for _ in range(n)]\n helper(board, points, result)\n deepCopy(result, board)",
"def solveSudoku(self, board: List[List[str]]) -> None:\n row, col, part = [set() for _ in range(9)], [set() for _ in range(9)], [set() for _ in range(9)]\n blank = []\n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n row[i].add(board[i][j])\n col[j].add(board[i][j])\n part[i//3 * 3 + j//3].add(board[i][j])\n else:\n blank.append([i, j])\n def recursion(row, col, part, blank, board, count, n):\n if count == n:\n return True\n else:\n x, y = blank.pop()\n for c in range(1, 10):\n c = str(c)\n if c not in row[x] and c not in col[y] and c not in part[x//3 * 3 + y//3]:\n row[x].add(c)\n col[y].add(c)\n part[x//3 * 3 + y//3].add(c)\n board[x][y] = c\n count += 1\n check = recursion(row, col, part, blank, board, count, n)\n if check:\n return check\n row[x].remove(c)\n col[y].remove(c)\n part[x//3 * 3 + y//3].remove(c)\n board[x][y] = \".\"\n count -= 1\n blank.append([x,y])\n return False\n count, n = 0, len(blank)\n recursion(row, col, part, blank, board, count, n)",
"def read_sudokus():\n with open(\"sudoku.txt\", \"r\") as f:\n lines = f.readlines()\n sudoku_strs = []\n for line in lines:\n if line[0] == 'G':\n sudoku_strs.append(\"\")\n else:\n sudoku_strs[-1] += line.replace(\"\", \" \")[1:]\n sudokus = []\n for sudoku_str in sudoku_strs:\n sudokus.append(np.fromstring(sudoku_str, sep=' ',\n dtype=np.int).reshape((9, 9)))\n return sudokus",
"def sudoku_solver(filename):\n with open(filename, \"r\") as f:\n lines = f.read().splitlines()\n\n # format grid\n grid = []\n for line in lines:\n row = []\n for char in line.split(\" \"):\n row += [char if char == \"x\" else int(char)]\n grid.append(row)\n\n solution, flag = solve(grid)\n if flag:\n # display solution\n for row in solution:\n print(\" \" + str(row))\n else:\n print(\"Unsolvable\")",
"def read_puzzle(board, input_puzzle):\n if any(x in input_puzzle for x in [\".txt\", \".sud\"]):\n with open(input_puzzle, \"rU\") as f:\n line = f.readline().rstrip()\n else:\n line = input_puzzle\n for i in range(n):\n for j in range(n):\n if (line[i*n+j] in \"123456789\"):\n board[i][j] = [int(line[i*n+j])]\n return 0",
"def test_grid_values(self):\n sudoku_grid = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'\n sudoku_dict = {'C7': '4', 'I6': '123456789', 'E7': '123456789', 'E6': '123456789',\n 'F6': '8', 'H6': '3', 'C9': '123456789', 'E4': '123456789', 'E9': '8',\n 'G7': '5', 'A3': '3', 'H2': '123456789', 'G8': '123456789', 'B5': '123456789',\n 'G5': '123456789', 'G1': '123456789', 'A2': '123456789', 'F7': '2', 'F4': '7',\n 'B2': '123456789', 'H7': '123456789', 'G2': '123456789', 'I8': '123456789', 'H9': '9',\n 'B7': '123456789', 'E2': '123456789', 'F9': '123456789', 'I7': '3', 'F3': '6', 'I9': '123456789',\n 'D8': '123456789', 'G9': '123456789', 'F1': '123456789', 'D5': '123456789', 'B4': '3', 'H5': '123456789',\n 'I2': '123456789', 'A6': '123456789', 'G3': '2', 'H8': '123456789', 'H4': '2', 'A4': '123456789', 'A9': '123456789',\n 'D9': '123456789', 'I3': '5', 'E1': '7', 'C2': '123456789', 'F8': '123456789', 'B8': '123456789', 'A7': '6',\n 'C1': '123456789', 'D2': '123456789', 'C5': '123456789', 'H3': '123456789', 'B1': '9', 'I5': '1', 'A8': '123456789',\n 'A5': '2', 'F2': '123456789', 'A1': '123456789', 'D7': '9', 'G4': '6', 'H1': '8', 'C8': '123456789',\n 'E5': '123456789', 'C3': '1', 'C6': '6', 'D3': '8', 'D4': '1', 'D1': '123456789', 'I1': '123456789',\n 'B3': '123456789', 'B6': '5', 'G6': '9', 'I4': '123456789', 'F5': '123456789', 'B9': '1', 'C4': '8',\n 'E8': '123456789', 'D6': '2', 'E3': '123456789'}\n\n self.assertEqual(solution.grid_values(sudoku_grid), sudoku_dict)",
"def solveSudoku(self, board: List[List[str]]) -> None:\n\n def test(row, column, value) -> bool:\n for i in range(9):\n if board[i][column] == str(value):\n return False\n for j in range(9):\n if board[row][j] == str(value):\n return False\n startR = math.floor(row / 3)\n startC = math.floor(column / 3)\n for p in range(3):\n for q in range(3):\n if board[startR * 3 + p][startC * 3 + q] == str(value):\n return False\n return True\n\n i, j = 0, 0\n fixed = [[int] * 2]\n while i < 9:\n j = 0\n while j < 9:\n # 预记录所有预设值\n if board[i][j] != '.' and [i, j] not in fixed:\n fixed.append([i, j])\n # 在预设值位置\n elif [i, j] in fixed:\n None\n # 不在预设位置\n else:\n isFind = False\n # 从0到9进行尝试\n for k in range(1, 10):\n if test(i, j, k):\n board[i][j] = str(k)\n isFind = True\n break\n # 本次没有找到,退回非固定上一列,且上一列数字加一,\n if not isFind:\n while i >= 0:\n if [i, j] not in fixed:\n board[i][j] = \".\"\n # 回退一列\n j -= 1\n # 如果回退至第一列,换上一行继续\n if j < 0:\n i -= 1\n j = 8\n # 回退至起点\n if i < 0:\n break\n\n # 已经尝试该位置所有可能性,或者说明是预设位置\n if board[i][j] == '9' or [i, j] in fixed:\n continue\n # 否则该空位值加一,继续探索\n else:\n start = int(board[i][j]) + 1\n isFindBack = False\n for k in range(start, 10):\n if test(i, j, k):\n board[i][j] = str(k)\n isFindBack = True\n break\n if isFindBack:\n break\n j += 1\n i += 1\n print(board)",
"def solveSudoku(self, board: 'List[List[str]]') -> 'None':\n\n select = '.'\n row_set = []\n col_set = []\n arr_set = []\n\n for row in range(9):\n for col in range(9):\n if col == 0:\n row_set.append(set('123456789'))\n if row == 0:\n col_set.append(set('123456789'))\n if row % 3 == 0 and col % 3 == 0:\n arr_set.append(set('123456789'))\n\n if board[row][col].isdigit():\n row_set[row].remove(board[row][col])\n col_set[col].remove(board[row][col])\n arr_index = (row - row % 3) + col // 3\n arr_set[arr_index].remove(board[row][col])",
"def solveSudoku(self, board: List[List[str]]) -> None:\n # initialize the hashmaps\n for row in range(self.size):\n for col in range(self.size):\n value = board[row][col]\n if value != '.':\n self.rows[row].add(value)\n self.cols[col].add(value)\n self.cells[self.cell_idx(row, col)].add(value)\n \n # start backtracking at the first field\n self.backtrack(board, 0)\n return board",
"def grids_from_files(files):\n grids = []\n for filename in files:\n with open(filename) as f:\n filtered_file_data = \"\".join([char for char in f.read() if char in [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \".\"]])\n while len(filtered_file_data) >= 81:\n grids.append(Sudoku(filtered_file_data[:81]))\n filtered_file_data = filtered_file_data[81:]\n return grids",
"def solveSudoku(self, board: List[List[str]]) -> None:\n for i in range(9):\n for j in range(9):\n if board[i][j] == \".\":\n for k in range(1,10):\n if self.check(num,pos) == True:",
"def brute_force(sudoku_grid):\n temp_grid = copy.deepcopy(sudoku_grid)\n for i in range(9):\n for j in range(9):\n if len(temp_grid[i][j]) > 1:\n for item in sudoku_grid[i][j]:\n temp_grid[i][j] = item\n temp_grid = brute_force(temp_grid)\n if validator(temp_grid):\n return temp_grid\n return sudoku_grid",
"def random_puzzle(N=17):\n values = dict((s, digits) for s in squares)\n for s in shuffled(squares):\n if not assign(values, s, random.choice(values[s])):\n break\n ds = [values[s] for s in squares if len(values[s]) == 1]\n if len(ds) >= N and len(set(ds)) >= 8:\n return ''.join(values[s] if len(values[s]) == 1 else '.' for s in squares)\n return random_puzzle(N) ## Give up and make a new puzzle",
"def parse_sudokus():\n # Open the url with the sudokus for the challenge\n data = urllib2.urlopen('https://projecteuler.net/project/resources/p096_sudoku.txt')\n sudokus = [] # List to hold all sudokus\n current_sudoku = None # Current sudoku we are building\n current_sudoku_row = 0 # Current line of the current sudoku we are building\n for line in data:\n # Check if the line is the start of a new sudoku\n result = re.match(r'(Grid \\d\\d)', line.strip())\n if not result is None:\n # New sudoku\n current_sudoku = np.zeros((9,9), dtype=np.int8)\n current_sudoku_row = 0\n # store the new sudoku\n sudokus.append(current_sudoku)\n else:\n # Get the numbers\n result = re.match(r'(\\d{9})', line.strip())\n col_string = result.groups()[0]\n # Fill up sudoku\n for col in xrange(0, 9):\n current_sudoku[current_sudoku_row, col] = int(col_string[col])\n current_sudoku_row += 1\n return sudokus",
"def default_values():\r\n start_lists = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\r\n start = 0\r\n while start < 2:\r\n inde1 = random.randint(0, 3)\r\n inde2 = random.randint(0, 3)\r\n choice = random.randint(0, 7)\r\n if choice == 0:\r\n if start_lists[inde1][inde2] == 0:\r\n start_lists[inde1][inde2] = 4\r\n else:\r\n if start_lists[inde1][inde2] == 0:\r\n start_lists[inde1][inde2] = 2\r\n start = start + 1\r\n return start_lists"
]
| [
"0.677958",
"0.67431647",
"0.6112093",
"0.6093308",
"0.60833836",
"0.60346204",
"0.6033673",
"0.6002573",
"0.59757864",
"0.5961343",
"0.5908994",
"0.586101",
"0.5819024",
"0.5807154",
"0.5740937",
"0.5713412",
"0.5688407",
"0.56852025",
"0.5670065",
"0.5652325",
"0.5632649",
"0.5621392",
"0.56136376",
"0.5593789",
"0.55836046",
"0.55660415",
"0.5544601",
"0.5537992",
"0.5530892",
"0.55224645"
]
| 0.795898 | 0 |
Returns true if a tile exists, or false if it doesn't | def tile_exists(self, coords):
return not (
coords[0] < 0 or
coords[0] >= 25 or
coords[1] < 0 or
coords[1] >= 40) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tileExists(x, y):\n return _world.get((x, y))",
"def check_tile_availability(self, row, col):\n return self.board[row][col] == 0",
"def canTile(self):\n raise RuntimeError('Not implemented')\n \n return False",
"def check_tile_covers_land(self, tilename=None):\n land_tiles = self.list_tiles_covering_land()\n if self.check_tilename(tilename):\n tilename = self.tilename2short(tilename)\n return tilename in land_tiles",
"def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if prop.i == i and prop.j == j:\n return True\n return False",
"def has_persistent_tile(self):\n return self.persist_tile_on_forms and (self.case_tile_template or self.custom_xml)",
"def __isTileInExplored(self, tile):\n for eachTile in self.explored:\n if eachTile.coordinate == tile.coordinate:\n return True\n return False",
"def isTileBlank(tile):\n for b in tile:\n if b: return False\n return True",
"def tile_exists_utm(boundsSrc, boundsTile):\n\n\n boundsSrcBox = box(*boundsSrc)\n boundsTileBox = box(*boundsTile)\n\n return boundsSrcBox.intersects(boundsTileBox)",
"def has_neighbor(self, tile: 'games.saloon.tile.Tile') -> bool:\n return bool(tile and tile in self.get_neighbors())",
"def tile_checker(stage_tiles,\n player_new):\n tile = stage_tiles.get(\"{0},{1}\".format(player_new[0], player_new[1]), \"ocean\")\n # Check each possible terrain\n if tile == \"rock\" or tile == \"mountain\":\n valid = False\n color.write(\"You can't move into a {}!\\n\".format(tile),\"ERROR\")\n else:\n valid = True\n\n return valid",
"def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True",
"def check(self):\n return self.tile==\"\"",
"def exists(self):\n\t\tif self.hasUdim:\n\t\t\treturn len( self.udimPaths ) != 0\n\t\treturn super( textureFile, self ).exists",
"def try_to_use_existing_tile(self, tx, ty, tz):\n for image_format in self.image_formats:\n if os.path.exists(self.get_full_path(tx, ty, tz, format_extension[image_format])):\n return image_format\n return None",
"def valid_tile(self, i, j):\n if (i >= 0 and i < self.rows) and (j >= 0 and j < self.cols):\n return True\n return False",
"async def __is_final_tile(self) -> bool:\n tiles = self.__get_neutral_tiles()\n if len(tiles) == 1:\n player = self.get_current_player()\n await self.announcer.auto_reveal(player)\n await self.flip(tiles[0])\n return True",
"def hasPng(self):\n\t\tif self.isPng:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.png' ) ).exists",
"def any_empty_tiles(self):\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == 0:\n return True\n\n return False",
"def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None",
"def tile_fits(self, location, tile):\n x, y = location\n CONNECTIONS_TO_CHECK = [\n [(x+1, y), 'east', 'west'],\n [(x-1, y), 'west', 'east'],\n [(x, y+1), 'north', 'south'],\n [(x, y-1), 'south', 'north']\n ]\n\n for neighbor_loc, my_offset, their_offset in CONNECTIONS_TO_CHECK:\n neighbor_tile = self.board.get(neighbor_loc)\n if neighbor_tile and tile.edges._asdict()[my_offset] != neighbor_tile.edges._asdict()[their_offset]:\n return False\n return True",
"def exists(path: str) -> bool:\n tdb_uri = paths.tiledb_uri_from_path(path)\n try:\n tiledb.cloud.array.info(tdb_uri)\n return True\n except tiledb.cloud.TileDBCloudError:\n pass\n return False",
"def has_scn_tilecache(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).one()\n scn_json = query_result.ExtendedInfo\n ses.close()\n logger.debug(\"Closed the database session.\")\n\n tile_cache_calcd = False\n if scn_json is not None:\n json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()\n tile_cache_calcd = json_parse_helper.doesPathExist(scn_json, [\"tilecache\"])\n return tile_cache_calcd",
"def exists(self):\n try:\n select_template(self.get_paths())\n return True\n except TemplateDoesNotExist:\n return False",
"def does_exist(self, index):\n if index in self.map:\n return True\n return False",
"def tile_is_set(index, level_map):\n return level_map[index] != -1",
"def board_tiles_availability(self):\n for row in range(GameData.rows):\n for col in range(GameData.columns):\n if self.board[row][col] == 0:\n return False\n # Game is draw, no more moves left!\n return True",
"def test_room_has_tiles(self):\n self.assertGreaterEqual(self.room.tile_set.count(), 2)",
"def isPositionInRoom(self, pos):\n if pos in self.tiles:\n return True\n else:\n return False",
"def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False"
]
| [
"0.787533",
"0.72520393",
"0.68646425",
"0.6789231",
"0.67263234",
"0.6716644",
"0.6633904",
"0.6555963",
"0.6532886",
"0.64995193",
"0.64422536",
"0.64104074",
"0.6401476",
"0.63654876",
"0.6352619",
"0.6338171",
"0.627523",
"0.6262617",
"0.62086785",
"0.6158971",
"0.6143745",
"0.6138587",
"0.6128681",
"0.6120576",
"0.6102754",
"0.6076674",
"0.60715127",
"0.60409707",
"0.6037919",
"0.6036213"
]
| 0.742259 | 1 |
Scales a unit's display rectangle to screen coordinates. | def update_unit_rect(self, unit):
x, y = unit.tile_x, unit.tile_y
screen_x, screen_y = x*SIZE, y*SIZE
unit.rect.x = screen_x
unit.rect.y = screen_y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale(self, sx, sy):\n frameWidth *= sx\n frameHeight *= sy\n repaint()",
"def eval_screen_size():\n center_x = 32 // 2 * app_manager.get_map_width()\n center_y = 32 // 2 * app_manager.get_map_height()\n\n loc1_le = EPD(0x58DC60)\n loc1_te = EPD(0x58DC60 + 4)\n loc1_re = EPD(0x58DC60 + 8)\n loc1_be = EPD(0x58DC60 + 12)\n\n # screen position and location\n loc1_lv = f_dwread_epd(loc1_le)\n loc1_tv = f_dwread_epd(loc1_te)\n loc1_rv = f_dwread_epd(loc1_re)\n loc1_bv = f_dwread_epd(loc1_be)\n prev_sx = f_dwread_epd(EPD(0x0062848C))\n prev_sy = f_dwread_epd(EPD(0x006284A8))\n\n # centerview and update x, y\n SeqCompute([\n (loc1_le, SetTo, center_x),\n (loc1_te, SetTo, center_y),\n (loc1_re, SetTo, center_x),\n (loc1_be, SetTo, center_y)])\n f_dwwrite_epd(loc1_le, center_x)\n f_dwwrite_epd(loc1_te, center_y)\n f_dwwrite_epd(loc1_re, center_x)\n f_dwwrite_epd(loc1_be, center_y)\n DoActions(CenterView(1))\n cur_sx = f_dwread_epd(EPD(0x0062848C))\n cur_sy = f_dwread_epd(EPD(0x006284A8))\n\n # get size\n dx = center_x - cur_sx\n dy = center_y - cur_sy\n\n # restore screen\n screen_x = prev_sx + dx\n screen_y = prev_sy + dy\n SeqCompute([\n (loc1_le, SetTo, screen_x),\n (loc1_te, SetTo, screen_y),\n (loc1_re, SetTo, screen_x),\n (loc1_be, SetTo, screen_y)])\n DoActions(CenterView(1))\n\n # restore location\n SeqCompute([\n (loc1_le, SetTo, loc1_lv),\n (loc1_te, SetTo, loc1_tv),\n (loc1_re, SetTo, loc1_rv),\n (loc1_be, SetTo, loc1_bv)])\n\n EUDReturn([dx*2, dy*2])",
"def zoom_units(self, units, center=None):\n # calculate pixels per unit etc\n unitscm = units\n cmsunit = 1 / float(unitscm)\n pixscm = 28.346457\n pixsunit = pixscm * cmsunit\n unitswidth = self.width / float(pixsunit) # use as the width of the bbox\n unitsheight = self.height / float(pixsunit) # use as the height of the bbox\n # zoom it\n newbbox = bboxhelper.resize_dimensions(self.coordspace_bbox,\n newwidth=unitswidth,\n newheight=unitsheight)\n # center it\n if center:\n newbbox = bboxhelper.center(newbbox, center)\n self.custom_space(*newbbox, lock_ratio=True)",
"def _update_rect(self):\r\n self.rect.x = int(self.view_pt.x_to_scr(self.x) / self.z)\r\n self.rect.y = int(self.view_pt.y_to_scr(self.y) / self.z)",
"def scale_to_display(x, y, w, h):\n return int((x+1)*(w/2)), int((1-y)*(h/2))",
"def resizeToUnit(model, size = 1.0, showBounds = False):\n bounds = model.getBounds()\n center = bounds.getCenter()\n radius = bounds.getRadius() * 1.15 #have to add .15 for scaling 1:1 (why?)\n newsize=1.0 / radius * size\n model.setScale(newsize)\n #model.showBounds()",
"def draw(self, screen, size_block):\n pos = self.board.coordinate_to_position(self.coordinate)\n screen.blit(pygame.transform.scale(self.image, (size_block, size_block)), (pos[0], pos[1]))",
"def set_scales(self):\r\n self.canvas.update()\r\n self.dxmin = self.dmargin\r\n self.dymin = self.dmargin\r\n self.dxmax = self.canvas.winfo_width() - self.dmargin - 1\r\n self.dymax = self.canvas.winfo_height() - self.dmargin - 1\r\n\r\n # Flip the Y coordinates to invert the result.\r\n if self.y_is_flipped:\r\n self.dymin, self.dymax = self.dymax, self.dymin\r\n\r\n self.xscale = (self.dxmax - self.dxmin) / (self.wxmax - self.wxmin)\r\n self.yscale = (self.dymax - self.dymin) / (self.wymax - self.wymin)\r\n\r\n # Calculate 1 pixel in world coordinates.\r\n self.xpix = 1 / self.xscale\r\n self.ypix = 1 / self.yscale",
"def update(self):\n \n self.rect.x += self.change_x\n self.rect.y += self.change_y\n \n if self.rect.x < 0:\n self.rect.x = 0\n if self.rect.x > screen_width - 60:\n self.rect.x = screen_width - 60\n if self.rect.y < 0:\n self.rect.y = 0 \n \n if self.rect.y > screen_height - 60:\n self.rect.y = screen_height - 60",
"def scale(self, scale):\n self.coords = self.coords * scale\n return self",
"def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return",
"def scale(self, sx, sy):\n self._impl.scale(sx, sy)",
"def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)",
"def ConvertScreenToWorld(self, x, y):\r\n return b2.b2Vec2((x + self.viewOffset.x) / self.viewZoom,\r\n ((self.screenSize.y - y + self.viewOffset.y)\r\n / self.viewZoom))",
"def render(self, app, offset, scale):\n\n if self.alive:\n # make the rectangle call more clear\n x, y = self.position \n x_off, y_off = offset\n # (x, y, width, height)\n pygame.draw.rect(app.screen, (0, 0, 0), ((x-x_off)*scale, (y-y_off)*scale, scale, scale))",
"def verticalScale(self):\n self.model.refreshScreen()",
"def set_ui_scale():\n # TODO test on other OS and resolutions\n moniter_h = QtWidgets.QDesktopWidget().screenGeometry(-1).height()\n if sys.platform == 'win32':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.0\n else:\n scale = 1.0\n elif sys.platform == 'linux':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.23\n else:\n scale = 1.4\n elif sys.platform == 'darwin':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.25\n else:\n scale = 1.55\n return scale",
"def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)",
"def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)",
"def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()",
"def scale(self, from_min, from_max, to_min, to_max):\n for i in range(len(self.poses)):\n self.poses[i].position.scale(from_min[:3], from_max[:3], to_min[:3], to_max[:3])\n self.wrenches[i].scale(from_min[3:], from_max[3:], to_min[3:], to_max[3:])",
"def draw(self, screen, size_block):\n for co in self.get_all_coordinates():\n pos = self.board.coordinate_to_position(co)\n screen.blit(pygame.transform.scale(self.image, (size_block, size_block)), pos)",
"def scale(self):",
"def set_screen(self, size):\r\n self.screen = size",
"def __init__(self):\n self.size = width, height = pygame.display.Info().current_w, pygame.display.Info().current_h\n self.screen = pygame.display.set_mode(self.size)\n self.x = int((width - 910) / 2)\n self.y = int((height - 675) / 2)",
"def center_mario(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x, self.y = float(self.rect.x), float(self.rect.y)",
"def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)",
"def scale(self, sf):\n self.scale(sf, sf)",
"def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.x *= scale_factor\n self.y *= scale_factor\n self.width *= scale_factor\n self.height *= scale_factor\n\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.x *= scale_x\n self.y *= scale_y\n self.width *= scale_x\n self.height *= scale_y",
"def pixel_space(self):\n self.drawer.settransform()\n self.coordspace_bbox = [0, 0, self.width, self.height]\n self.coordspace_transform = (1, 0, 0,\n 0, 1, 0)"
]
| [
"0.6639929",
"0.65602237",
"0.64782333",
"0.64168495",
"0.63549197",
"0.62980515",
"0.62899816",
"0.6286555",
"0.6229439",
"0.6228933",
"0.6177914",
"0.61489844",
"0.6106066",
"0.6067261",
"0.605976",
"0.6058187",
"0.6015277",
"0.59871423",
"0.5972736",
"0.59648705",
"0.5963883",
"0.5961938",
"0.5937675",
"0.5907062",
"0.5888952",
"0.58798003",
"0.58780056",
"0.58601934",
"0.58575517",
"0.5845367"
]
| 0.7455723 | 0 |
Calculates color difference over all neighboring pixels over all color channels. The dissimilarity measure relies on the premise that adjacent jigsaw pieces in the original image tend to share similar colors along their abutting edges, i.e., the sum (over all neighboring pixels) of squared color differences (over all three color bands) should be minimal. Let pieces pi , pj be represented in normalized Lab space by corresponding W x W x 3 matrices, where W is the height/width of each piece (in pixels). | def dissimilarity_measure0(first_piece, second_piece, orientation="LR"):
rows, columns, _ = first_piece.shape()
color_difference = None
# piece.shape should be a 3-D matrix: the first dimension is the rows, the second the columns
# the third dimension is 3 for color images, 1 for grayscale and black-and-white images
# | L | - | R |
if orientation == "LR":
# for the left-right case, subtract the three channels of the right piece's leftmost column from the three channels of the left piece's rightmost column
color_difference = first_piece[:rows, columns - 1, :] - second_piece[:rows, 0, :]
# | T |
# |
# | D |
if orientation == "TD":
# for the top-down case, subtract the three channels of the bottom piece's top row from the three channels of the top piece's bottom row
color_difference = first_piece[rows - 1, :columns, :] - second_piece[0, :columns, :]
    # normalize to [0, 1] first, then square the per-channel differences with numpy
squared_color_difference = np.power(color_difference / 255.0, 2)
    # summing the squared differences across the channels gives each pixel's color-space distance (square root not yet applied)
color_difference_per_row = np.sum(squared_color_difference, axis=1)
    # sum the color-space distances over all pixels
total_difference = np.sum(color_difference_per_row, axis=0)
    # take the square root of the result
value = np.sqrt(total_difference)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_color_distance(rgb1, rgb2):\n\n color1_rgb = sRGBColor(rgb1[0], rgb1[1], rgb1[2])\n color2_rgb = sRGBColor(rgb2[0], rgb2[1], rgb2[2])\n\n color1_lab = convert_color(color1_rgb, LabColor)\n color2_lab = convert_color(color2_rgb, LabColor)\n\n delta_e = delta_e_cie2000(color1_lab, color2_lab)\n return delta_e",
"def c_q(pic, n_colors=16):\r\n try:\r\n im = np.array(Image.open(pic))[..., :3]\r\n except:\r\n raise Exception('Cannot load the image!')\r\n\r\n # changing the array into 2 dimensional array\r\n x, y, z = im.shape\r\n im_2d = im.reshape(x * y, z)\r\n\r\n # k-means clustering\r\n km_clstr = cluster.KMeans(n_clusters=n_colors)\r\n km_clstr.fit(im_2d)\r\n cluster_labels = km_clstr.labels_\r\n cluster_centers = km_clstr.cluster_centers_\r\n\r\n # creating 2d numpy array\r\n clus = cluster_centers[cluster_labels]\r\n\r\n # creating a list of colors in the array\r\n colors = [x[0] * 65536 + x[1] * 255 + x[0] for x in cluster_centers]\r\n\r\n # reconverting a new array to 3 dimensional\r\n new = clus.reshape(x, y, z)\r\n new2 = copy.deepcopy(new)\r\n # Floyd-Steinberg Dithering\r\n for y in range(len(new) - 1):\r\n for x in range(len(new[y]) - 1):\r\n # old values of pixel\r\n oldr = im[y][x][0]\r\n oldg = im[y][x][1]\r\n oldb = im[y][x][2]\r\n\r\n # getting quantization errors' values\r\n err_r = oldr - new[y][x][0]\r\n err_g = oldg - new[y][x][1]\r\n err_b = oldb - new[y][x][2]\r\n\r\n # changing neighbouring pixels according to algorithm\r\n # right neighbour\r\n test_r1 = im[y][x + 1][0] + err_r * 7 / 16.0\r\n test_g1 = im[y][x + 1][1] + err_g * 7 / 16.0\r\n test_b1 = im[y][x + 1][2] + err_b * 7 / 16.0\r\n test_pixel1 = test_r1 * 65536 + test_g1 * 256 + test_b1\r\n pixel1 = cluster_centers[colors.index(min(colors,\r\n key=lambda col:\r\n abs(col - test_pixel1)))]\r\n new2[y][x + 1][0] = pixel1[0]\r\n new2[y][x + 1][1] = pixel1[1]\r\n new2[y][x + 1][2] = pixel1[2]\r\n\r\n # bottom left-hand corner neighbour\r\n test_r2 = im[y][x + 1][0] + err_r * 3 / 16.0\r\n test_g2 = im[y][x + 1][1] + err_g * 3 / 16.0\r\n test_b2 = im[y][x + 1][2] + err_b * 3 / 16.0\r\n test_pixel2 = test_r2 * 65536 + test_g2 * 256 + test_b2\r\n pixel2 = cluster_centers[colors.index(min(colors,\r\n key=lambda col:\r\n abs(col - test_pixel2)))]\r\n new2[y + 1][x - 1][0] = pixel2[0]\r\n new2[y + 1][x - 1][1] = pixel2[1]\r\n new2[y + 1][x - 1][2] = pixel2[2]\r\n\r\n # bottom neighbour\r\n test_r3 = im[y][x + 1][0] + err_r * 5 / 16.0\r\n test_g3 = im[y][x + 1][1] + err_g * 5 / 16.0\r\n test_b3 = im[y][x + 1][2] + err_b * 5 / 16.0\r\n test_pixel3 = test_r3 * 65536 + test_g3 * 256 + test_b3\r\n pixel3 = cluster_centers[colors.index(min(colors,\r\n key=lambda col:\r\n abs(col - test_pixel3)))]\r\n new2[y + 1][x][0] = pixel3[0]\r\n new2[y + 1][x][1] = pixel3[1]\r\n new2[y + 1][x][2] = pixel3[2]\r\n\r\n # bottom right-hand corner neighbour\r\n test_r4 = im[y][x + 1][0] + err_r * 1 / 16.0\r\n test_g4 = im[y][x + 1][1] + err_g * 1 / 16.0\r\n test_b4 = im[y][x + 1][2] + err_b * 1 / 16.0\r\n test_pixel4 = test_r4 * 65536 + test_g4 * 256 + test_b4\r\n pixel4 = cluster_centers[colors.index(min(colors,\r\n key=lambda col:\r\n abs(col - test_pixel4)))]\r\n new2[y + 1][x + 1][0] = pixel4[0]\r\n new2[y + 1][x + 1][1] = pixel4[1]\r\n new2[y + 1][x + 1][2] = pixel4[2]\r\n\r\n # creating an image from the array\r\n new3 = Image.fromarray(new2.astype('uint8'))\r\n\r\n # saving the image as 'input_name' + '_new.png'\r\n new3.save('%s_new.png' % (str(pic)))",
"def checkQualityOfSection(originalPixels, newPixels):\n totalDistance = 0\n numPixels = 0\n for r in range(originalPixels.shape[0]):\n for c in range(originalPixels.shape[1]):\n if r == 0 or r == originalPixels.shape[0] - 1 or c == 0 or c == originalPixels.shape[1] - 1:\n continue\n totalDistance += colorDistance(originalPixels[r, c], newPixels[r, c])\n numPixels += 1\n return totalDistance/numPixels",
"def component_filter_by_color(components, img):\n new_component = []\n for component in components:\n component_left_neighbor = img[component[0].start:component[0].stop,\n max(component[1].start - 10, 0):component[1].start]\n component_right_neighbor = img[component[0].start:component[0].stop,\n component[1].stop:min(component[1].stop + 10, img.shape[1])]\n component_up_neighbor = img[max(component[0].start - 10, 0):component[0].start,\n component[1].start:component[1].stop]\n component_low_neighbor = img[component[0].stop:min(component[0].stop + 10, img.shape[0]),\n component[1].start:component[1].stop]\n left_white_ratio = np.sum(component_right_neighbor > 240) / (\n component_right_neighbor.shape[0] * component_right_neighbor.shape[1])\n right_white_ratio = np.sum(component_left_neighbor > 240) / (\n component_left_neighbor.shape[0] * component_left_neighbor.shape[1])\n up_white_ratio = np.sum(component_up_neighbor > 240) / (\n component_up_neighbor.shape[0] * component_up_neighbor.shape[1])\n low_white_ratio = np.sum(component_low_neighbor > 240) / (\n component_low_neighbor.shape[0] * component_low_neighbor.shape[1])\n if np.sum([left_white_ratio > 0.9, right_white_ratio > 0.9, up_white_ratio > 0.9, low_white_ratio > 0.9]) > 2:\n new_component.append(component)\n return new_component",
"def color_distance(RGB1, RGB2):\n d2_r = (RGB1[0] - RGB2[0]) ** 2\n d2_g = (RGB1[1] - RGB2[1]) ** 2\n d2_b = (RGB1[2] - RGB2[2]) ** 2\n return d2_r + d2_g + d2_b",
"def get_col_dist(image, hue_angel, saturation_interval_size, value_interval_size):\r\n\timage = rgb_to_hsv(image / 255.0)\r\n\r\n\t##convert the hue channel into angel degrees\r\n\timage[:, :, 0] = image[:, :, 0] * 360\r\n\r\n\t##to quantize the hue channel\r\n\timage[:, :, 0] = np.floor(image[:, :, 0] / hue_angel)\r\n\r\n\t##to quantize the saturation channel\r\n\timage[:, :, 1] = np.floor(image[:, :, 1] * 100 / saturation_interval_size)\r\n\r\n\t##to quantize the value channel\r\n\timage[:, :, 2] = np.floor(image[:, :, 2] * 100/ value_interval_size)\r\n\r\n\r\n\tcolor = [tuple(image[i, j, :]) for i in range(image.shape[0]) for j in range(image.shape[1])]\r\n\r\n\r\n\t## the hsv distributions for images\r\n\tdis = Counter(color)\r\n\r\n\t## normalize the distribution to account for different sizes of images\r\n\tdis = {i:dis[i]/(image.size/3) for i in dis}\r\n\r\n\tdis = dict(sorted(dis.items(), key = lambda x: x[1], reverse = True))\r\n\r\n\tcolor = {(i[0] * hue_angel, i[1], i[2]):dis[i] for i in dis}\r\n\r\n\tdis = np.array(list(dis.values()))\r\n\r\n\treturn dis, color",
"def compute_contrast(aoi):\n glcm = greycomatrix(aoi, distances=[5], angles=[0], levels=256,\n symmetric=True, normed=True)\n return (greycoprops(glcm, 'dissimilarity')[0, 0])",
"def split_necessity(self):\n return max(self._color_var_rel) * self.n_pix\n # return reduce(int.__mul__, (l-u for u,l in self.bounds)) * self.n_pix",
"def spreading_pressure_differences(adsorbed_mole_fractions):\n spreading_pressure_diff = numpy.zeros((n_components - 1, ))\n for i in range(n_components - 1):\n if i == n_components - 2:\n # automatically assert \\sum z_i = 1\n ads_mole_frac2 = 1.0 - numpy.sum(adsorbed_mole_fractions)\n else:\n ads_mole_frac2 = adsorbed_mole_fractions[i + 1]\n\n sp1 = isotherms[i].spreading_pressure_at(\n partial_pressures[i] / adsorbed_mole_fractions[i],\n branch=branch,\n )\n sp2 = isotherms[i + 1].spreading_pressure_at(\n partial_pressures[i + 1] / ads_mole_frac2,\n branch=branch,\n )\n spreading_pressure_diff[i] = sp1 - sp2\n\n return spreading_pressure_diff",
"def compute_colors(frames, ids_of_interest=None, scale_factor=1.0):\n\n distance_sample = ids_of_interest or frames[0].ids.tolist()\n if len(distance_sample) > 1000:\n distance_sample = np.random.choice(distance_sample, size=1000, replace=False).tolist()\n \n # First compute a distance matrix for the IDs for each frame\n outer_jaccard_distances = np.zeros((len(frames), len(frames)))\n inner_jaccard_distances = np.zeros((len(frames), len(frames)))\n for i in range(len(frames)):\n frame_1_neighbors = frames[i].field(Field.NEIGHBORS, distance_sample)\n for j in range(len(frames)):\n frame_2_neighbors = frames[j].field(Field.NEIGHBORS, distance_sample)\n # If the id set is the entire frame, there will be no outer neighbors\n # so we can just leave this at zero\n if ids_of_interest is not None and len(ids_of_interest):\n outer_jaccard_distances[i,j] = np.mean(inverse_intersection(frame_1_neighbors,\n frame_2_neighbors,\n List(distance_sample),\n True))\n inner_jaccard_distances[i,j] = np.mean(inverse_intersection(frame_1_neighbors,\n frame_2_neighbors,\n List(distance_sample),\n False))\n\n if ids_of_interest is not None and len(ids_of_interest):\n if len(ids_of_interest) == 1:\n distances = outer_jaccard_distances\n else:\n distances = 0.5 * (outer_jaccard_distances + inner_jaccard_distances)\n else:\n distances = inner_jaccard_distances\n \n # Compute clusteredness in each frame (only used to determine offset of colors)\n neighbor_dists = [np.log(1 + frame.distances(distance_sample, distance_sample).flatten()) for frame in frames]\n clusteredness = np.array([np.abs(ndists - np.mean(ndists)).mean() / np.maximum(np.max(ndists), 1e-3)\n for ndists in neighbor_dists])\n\n # Compute an ordering using hierarchical clustering\n ordering_indexes = _clustered_ordering(distances)\n # Put the most cluster-y embedding first\n first_index = np.argmax(clusteredness)\n ordering_position = np.argmax(ordering_indexes == first_index)\n ordering_indexes = np.concatenate([ordering_indexes[ordering_position:], ordering_indexes[:ordering_position]]).astype(int)\n\n # Arrange the colors around a color wheel in the L*a*b* color space.\n offset = clusteredness[first_index]\n reduced = _arrange_around_circle(distances, offset, ordering_indexes) #, max_dist=np.array(neighbor_dists).mean())\n\n # Generate colors in L*a*b* space and convert to HSL/HSV\n colors = []\n for point in reduced:\n scaled_point = np.array([point[0] * 100.0 * scale_factor,\n point[1] * 100.0 * scale_factor])\n lab = LabColor(70.0, scaled_point[1], scaled_point[0])\n rgb = convert_color(lab, HSLColor)\n colors.append((int(rgb.hsl_h), int(rgb.hsl_s * 100.0), int(rgb.hsl_l * 100.0)))\n\n return colors",
"def colorDistance(color1, color2):\n intColor1 = np.array(color1, dtype=int)\n intColor2 = np.array(color2, dtype=int)\n return sqrt(2 * (intColor1[0] - intColor2[0])**2 + 4 * (intColor1[1] - intColor2[1])**2 + 3 * (intColor1[2] - intColor2[2])**2)",
"def distance(rgb1, rgb2):\n diffs = np.array(rgb1) - np.array(rgb2)\n return math.sqrt(np.sum(diffs**2))",
"def spreading_pressure_differences(gas_mole_fractions):\n spreading_pressure_diff = numpy.zeros((n_components - 1, ))\n for i in range(n_components - 1):\n if i == n_components - 2:\n # automatically assert \\sum y_i = 1\n gas_mole_fraction_n = 1.0 - numpy.sum(gas_mole_fractions)\n else:\n gas_mole_fraction_n = gas_mole_fractions[i + 1]\n\n sp1 = isotherms[i].spreading_pressure_at(\n total_pressure * gas_mole_fractions[i] / adsorbed_mole_fractions[i],\n branch=branch,\n )\n sp2 = isotherms[i + 1].spreading_pressure_at(\n total_pressure * gas_mole_fraction_n / adsorbed_mole_fractions[i + 1],\n branch=branch\n )\n spreading_pressure_diff[i] = sp1 - sp2\n\n return spreading_pressure_diff",
"def diff_image_color(image_path0, image_path1):\n image0 = Image.open(image_path0)\n #color_image0 = get_histogram(image0)\n color_image0 = image0.histogram()\n cut_color_image0 = cut_histogram_min(color_image0)\n image1 = Image.open(image_path1)\n color_image1 = image1.histogram()\n #color_image1 = get_histogram(image1)\n cut_color_image1 = cut_histogram_min(color_image1)\n color_difference = bhattacharyya(color_image0, color_image1)\n return color_difference",
"def ss(image1, image2, hue_angel = 60, saturation_interval_size = 10, value_interval_size = 10):\r\n\r\n\tassert image1.shape[-1] == 3 and image2.shape[-1] == 3, \"only RGB images are accpted\"\r\n\tassert 1 <= saturation_interval_size <= 100, \"saturation_interval_size recommended to be between 1 and 100\"\r\n\tassert 1 <= value_interval_size <= 100, \"value_interval_size recommended to be between 1 and 100\"\r\n\r\n\tdis1, color1 = get_col_dist(image1, hue_angel, saturation_interval_size, value_interval_size)\r\n\tdis2, color2 = get_col_dist(image2, hue_angel, saturation_interval_size, value_interval_size)\r\n\r\n\t## to make sure the lengths of two distributions are the same\r\n\tif len(dis1) >= len(dis2):\r\n\r\n\t\tdis2 = np.pad(dis2, (0, len(dis1) - len(dis2)), \"constant\")\r\n\telse:\r\n\t\tdis1 = np.pad(dis1, (0, len(dis2) - len(dis1)), \"constant\")\r\n\r\n\t## the distribution difference\r\n\tdis_diff = (np.sum((dis1 - dis2) ** 2) / len(dis1)) ** 0.5\r\n\r\n\t\"\"\"\r\n\thue_diff = get_hue_diff(color1, color2)\r\n\r\n\tsaturation_diff = channel_sqrdiff(color1, color2, 2, 100 / saturation_interval_size)\r\n\r\n\tvalue_diff = channel_sqrdiff(color1, color2, 3, 100 / value_interval_size)\r\n\r\n\tcolor_difference = diff_aggregate(hue_diff, saturation_diff, value_diff,\r\n\t\tweights = (dis1 + dis2) / 2)\r\n\r\n\t\"\"\"\r\n\treturn dis_diff#, color_difference\r",
"def getDiffPercent(path, path2 ):\n global ans\n ans = []\n img = Image.open( path ) \n img2 = Image.open( path2 )\n\n width, height = img.size\n width2, height2 = img2.size\n \n diff = 0\n k = 0\n\n for i in range(width): \n for j in range(height):\n rgb = img.load()[i,j]\n rgb2 = img2.load()[i,j]\n \n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 0 and rgb[1] == 0 and rgb[2] == 0 ):\n k = k+1\n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 255 and rgb[1] == 255 and rgb[2] == 255 ):\n k = k+1 \n \n diff = diff + pixelDiff(rgb, rgb2)\n\n img.close()\n img2.close()\n \n mx = 3 * 255 * ( width * height - k)\n return 100*diff/mx",
"def contrast_per_pixel(im):\n from numpy import abs # pylint: disable=redefined-builtin\n from .util import get_diff_slices\n im = im.astype(float, copy=False)\n tmp = empty(im.shape)\n total = 0\n for slc_pos, slc_neg in get_diff_slices(im.ndim):\n tmp_x = tmp[slc_neg]\n abs(subtract(im[slc_pos], im[slc_neg], tmp_x), tmp_x)\n total += 2*tmp_x.sum()\n\n # Compute the scale\n # In the original paper this was im.size\n # This value is essentially im.size * (3**nim.dim-1) to account for number of neighbors\n scale = __get_total_neighbors(im.shape)\n return total / scale",
"def find_reddest_pixel(img):\n # HINTS/ADVICE-------------\n # Use a nested for loop here.\n #\n # BE CAREFUL DOING ARITHMETIC WITH UNSIGNED INTEGERS: \n # >>> a = np.array([2], dtype='uint8')\n # >>> b = np.array([3], dtype='uint8')\n # >>> a - b\n # array([255], dtype=uint8)\n #\n # Reminder:\n # numpy arrays have a \"shape\" attribute that stores the layout:\n # img.shape[0] - rows\n # img.shape[1] - columns\n # img.shape[2] - color channels\n\n max_redness = 0\n max_x = 0\n max_y = 0\n \n img = np.array(img, dtype = 'int32')\n for r in range(img.shape[0]):\n for c in range(img.shape[1]):\n red = img[r, c, 2]\n green = img[r, c, 1]\n blue = img[r, c, 0] \n redness = (red - green) + (red - blue)\n\n if redness > max_redness:\n max_redness = redness\n max_x = c\n max_y = r\n \n return (max_x, max_y)",
"def color_distance(color1, color2):\n dist_h = color1[0] - color2[0]\n dist_s = color1[1] - color2[1]\n dist_v = color1[2] - color2[2]\n\n return sqrt(dist_h * dist_h + dist_s * dist_s + dist_v * dist_v)",
"def distance(rgb1: Tuple[int, int, int], rgb2: Tuple[int, int, int]) -> float:\n r = rgb1[0] - rgb2[0]\n g = rgb1[1] - rgb2[1]\n b = rgb1[2] - rgb2[2]\n return math.sqrt(r**2 + g**2 + b**2)",
"def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false",
"def color_dist(c1, c2):\n return sum((a - b) ** 2 for a, b in zip(to_ycc(c1), to_ycc(c2)))",
"def remove_duplicates_by_matching():\n # 1) and 2)\n all_blobs = vision.pqr_r\n all_angles = np.zeros(0)\n right = np.transpose(vision.pqr_r)\n left = np.transpose(vision.pqr_l)\n\n if not right.size and not left.size:\n return (0, 0)\n\n if not right.size:\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_l, all_angles)\n\n if not left.size:\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_r, all_angles)\n\n\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n cand_r = np.zeros((3,1))\n if angle < 15:\n cand_r = np.append(cand_r, [[r[0]], [r[1]], [r[2]]], axis=1)\n cand_r = np.delete(cand_r, 0, axis=1)\n cand_r = np.transpose(cand_r)\n\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n dot = 0\n if angle > -15:\n dl = max(0.001, np.linalg.norm(l))\n for r in cand_r:\n dr = max(0.001, np.linalg.norm(r))\n dot = np.dot(r, l) / (dr * dl)\n print(dot)\n if dot > 0.9:\n continue\n \n if dot <= 0.9:\n all_blobs = np.append(all_blobs, [[l[0]], [l[1]], [l[2]]], axis=1)\n all_angles = np.append(all_angles, angle)\n\n # make even number of blobs if necessary\n #if all_blobs.shape[1] % 2:\n # all_blobs = np.delete(all_blobs, 0, axis=1)\n # all_angles = np.delete(all_angles, 0)\n\n\n\n return (all_blobs, all_angles)",
"def compareTo(self,imagefullpath):\n exc = ExtractColor2(self.k)\n bgrcolor = exc.getColorBGR(imagefullpath)\n\n score = 0\n for i in range(self.k):\n score += np.linalg.norm(bgrcolor[i] - self._ref_BGRcolor[i])/(np.sqrt(255*255*3))\n score /= self.k\n return 1 - score",
"def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)",
"def fitness(img_1, img_2):\n fitness = 0.0\n for y in range(0, img_1.size[1]):\n for x in range(0, img_1.size[0]):\n r1, g1, b1 = img_1.getpixel((x, y))\n r2, g2, b2 = img_2.getpixel((x, y))\n # get delta per color\n d_r = r1 - r2\n d_b = b1 - b2\n d_g = g1 - g2\n # measure the distance between the colors in 3D space\n pixel_fitness = math.sqrt(d_r * d_r + d_g * d_g + d_b * d_b )\n # add the pixel fitness to the total fitness (lower is better)\n fitness += pixel_fitness\n return fitness",
"def del2(im_input):\n\n # initialize output\n im_lap = np.zeros(im_input.shape)\n\n # process rows first\n D = np.diff(im_input, axis=0)\n S = np.zeros(im_input.shape)\n S[1:-1, :] = (D[1:, :] - D[0:-1, :]) / 2\n if im_input.shape[0] > 3:\n S[0, :] = 2 * S[1, :] - S[2, :]\n S[-1, :] = 2 * S[-2, :] - S[-3, :]\n elif im_input.shape[0] == 3:\n S[0, :] = S[1, :]\n S[-1, :] = S[1, :]\n else:\n S[0, :] = 0\n S[-1, :] = 0\n im_lap += S\n\n # process columns\n D = np.diff(im_input, axis=1)\n S = np.zeros(im_input.shape)\n S[:, 1:-1] = (D[:, 1:] - D[:, 0:-1]) / 2\n if im_input.shape[1] > 3:\n S[:, 0] = 2 * S[:, 1] - S[:, 2]\n S[:, -1] = 2 * S[:, -2] - S[:, -3]\n elif im_input.shape[1] == 3:\n S[0, :] = S[:, 1]\n S[:, -1] = S[:, 1]\n else:\n S[:, 0] = 0\n S[:, -1] = 0\n im_lap += S\n\n return im_lap / 2",
"def whatsgreen2(image):\n green = image.hueDistance(color= Color('green'), minvalue=40).binarize()\n return green",
"def similarity_two_images_color(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hist_image_1 = histogram_of_image_color(img1, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n hist_image_2 = histogram_of_image_color(img2, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n max_difference = max(2 * np.sum(hist_image_1), 2 * np.sum(hist_image_2))\n return 100 - 100 * np.sum(np.absolute(hist_image_1 - hist_image_2)) / max_difference",
"def findYellow(im): #Use the fast version! (findYellowFast(im)) \n #im = Image.open(imageName)\n pix = im.load() #load in pixel array \n #define HSV value ranges for yellow \n #for now just base of Hue - refine for actual yellows seen in field? \n minHue = 50/360.\n maxHue = 61/360.\n width, height = im.size #find the size of the image \n count = 0 #initialize a counter for yellow pixels. \n for i in range(width): \n for j in range(height): \n (r,g,b) = pix[i,j] #pull out the current r,g,b values \n (h,s,v) = rgb_to_hsv(r/255.,g/255.,b/255.) \n if minHue<=h and h<maxHue: \n count += 1 #add one to the count \n totalPix = width*height \n portion = float(count)/totalPix\n #print(portion)\n return portion"
]
| [
"0.59443426",
"0.5933423",
"0.59173316",
"0.58753294",
"0.5859361",
"0.5830542",
"0.5791157",
"0.57149744",
"0.5708682",
"0.5694798",
"0.5677042",
"0.5656942",
"0.5608543",
"0.5569199",
"0.5541749",
"0.554049",
"0.5519073",
"0.54866254",
"0.5473851",
"0.5472822",
"0.5465966",
"0.5460661",
"0.54021287",
"0.5400669",
"0.53678966",
"0.5355158",
"0.5350693",
"0.53417623",
"0.53275067",
"0.5326852"
]
| 0.61831427 | 0 |
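A compact restatement of the measure implemented by dissimilarity_measure0 in the record above (a sketch of that record's own definition, assuming pixel values are scaled to [0, 1] and each piece is a W x W x 3 array): for the left-right ("LR") orientation,

$$D_{LR}(p_i, p_j) = \sqrt{\sum_{r=1}^{W}\sum_{c=1}^{3}\bigl(p_i(r, W, c) - p_j(r, 1, c)\bigr)^{2}}$$

and the top-bottom ("TD") orientation compares the bottom row of p_i with the top row of p_j in the same way.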
Train & evaluate the VAE model | def train_vae(model, epochs: int, optimiser, loader_train: DataLoader, loader_test: DataLoader, beta: float,
device: torch.device, verbose: bool = True, save: bool = True):
# loss lists init
# (train)
total_train_loss = []
reconstruction_train_loss = []
kl_train_loss = []
# (test)
total_test_loss = []
reconstruction_test_loss = []
kl_test_loss = []
    for epoch in range(epochs):
        model.train()  # set mode to training (evaluate_vae switches the model to eval at the end of each epoch)
data = None
train_loss = 0
train_kl_loss = 0
train_rl_loss = 0
for batch_idx, data in enumerate(loader_train):
data, _ = data
data = data.to(device)
optimiser.zero_grad()
reconstr_batch, mu, logvar = model(data) # forward pass
rl, kld, beta = loss_function_vae(reconstr_batch, data, mu, logvar, beta)
loss = rl + beta * kld
loss.backward()
train_loss += loss.item()
train_kl_loss += kld.item()
train_rl_loss += rl.item()
optimiser.step()
if verbose:
if batch_idx % 100 == 0: # print loss every so often
print(f'Epoch: {epoch}, Iteration {batch_idx}, loss = {round(loss.item() / len(data), 4)}')
print()
# get average loss for the epoch
epoch_total_train_loss = train_loss / len(loader_train.dataset)
epoch_kl_train_loss = train_kl_loss / len(loader_train.dataset)
epoch_rl_train_loss = train_rl_loss / len(loader_train.dataset)
# evaluate model on test set at end of epoch
epoch_total_test_loss, epoch_kl_test_loss, epoch_rl_test_loss = evaluate_vae(beta=beta,
model=model,
loader_test=loader_test,
device=device)
# save test losses
total_test_loss.append(epoch_total_test_loss)
        reconstruction_test_loss.append(epoch_rl_test_loss)
        kl_test_loss.append(epoch_kl_test_loss)
# save train losses
total_train_loss.append(epoch_total_train_loss)
        reconstruction_train_loss.append(epoch_rl_train_loss)
        kl_train_loss.append(epoch_kl_train_loss)
# save the final model to disk
if save:
if epoch == epochs - 1:
with torch.no_grad():
torch.jit.save(torch.jit.trace(model, (data), check_trace=False),
'saved_models/VAE_model.pth')
return model, total_train_loss, reconstruction_train_loss, kl_train_loss, \
total_test_loss, reconstruction_test_loss, kl_test_loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(data_dir: str = './env/data',\n vae_dir: str = './vae/model',\n epochs: int = 20\n ) -> None:\n # set random seed and deterministic backend\n SEED = 123\n np.random.seed(SEED)\n torch.manual_seed(SEED)\n torch.cuda.manual_seed(SEED)\n torch.backends.cudnn.deterministic = True\n\n # use GPU if available\n cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if cuda else \"cpu\")\n\n # define input transformations\n transform_train = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((H, W)),\n transforms.RandomHorizontalFlip(p=.5),\n transforms.ToTensor(),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((H, W)),\n transforms.ToTensor(),\n ])\n\n # define train and test datasets\n dir_train = os.path.join(data_dir, 'train/')\n dir_test = os.path.join(data_dir, 'test/')\n dataset_train = GymDataset(dir_train, transform=transform_train)\n dataset_test = GymDataset(dir_test, transform=transform_test)\n dataset_test.load_batch(0) # 1 batch of data used for test set\n dataloader_test = torch.utils.data.DataLoader(dataset_test,\n batch_size=BATCH_SIZE,\n shuffle=False,\n collate_fn=collate_fn)\n\n # set save and optional load directories\n load_file = os.path.join(vae_dir, 'best.tar')\n try:\n state = torch.load(load_file)\n except FileNotFoundError:\n state = None\n\n # define and load model\n model = VAE(CHANNELS, LATENT_SIZE)\n if state is not None:\n model.load_state_dict(state['state_dict'])\n model.zero_grad()\n model.to(device)\n\n # optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = Adam(params, lr=LR, betas=(0.9, 0.999), eps=1e-6, weight_decay=0)\n if state is not None:\n optimizer.load_state_dict(state['optimizer'])\n\n # learning rate scheduling\n lr_scheduler = StepLR(optimizer, step_size=3, gamma=0.1)\n if state is not None:\n lr_scheduler.load_state_dict(state['scheduler'])\n\n # define test fn\n def test():\n \"\"\" One test epoch. 
\"\"\"\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for obs, _, _ in generate_obs(dataloader_test):\n obs = torch.stack(obs).to(device)\n obs_recon, mu, logsigma = model(obs)\n test_loss += loss_vae(obs_recon, obs, mu, logsigma).item()\n test_loss /= len(dataloader_test.dataset)\n return test_loss\n\n # train\n n_batch_train = len(dataset_train.batch_list)\n optimizer.zero_grad()\n\n cur_best = None\n\n tq_episode = tqdm_notebook(range(epochs))\n for epoch in tq_episode:\n\n model.train()\n loss_train = 0\n n_batch = 0\n\n tq_batch = tqdm_notebook(range(n_batch_train))\n for i in tq_batch: # loop over training data for each epoch\n\n dataset_train.load_batch(i)\n dataloader_train = torch.utils.data.DataLoader(dataset_train,\n batch_size=BATCH_SIZE,\n shuffle=True,\n collate_fn=collate_fn)\n\n tq_minibatch = tqdm_notebook(generate_obs(dataloader_train), total=len(dataloader_train), leave=False)\n for j, (obs, _, _) in enumerate(tq_minibatch):\n\n n_batch += 1\n\n # place on device\n obs = torch.stack(obs).to(device)\n\n # forward pass\n obs_recon, mu, logsigma = model(obs)\n\n # eval loss fn\n loss = loss_vae(obs_recon, obs, mu, logsigma)\n\n # backward pass\n loss.backward()\n\n # store loss value\n loss_train += loss.item()\n loss_train_avg = loss_train / (n_batch * BATCH_SIZE)\n\n # apply gradients and learning rate scheduling with optional gradient accumulation\n if (j + 1) % GRAD_ACCUMULATION_STEPS == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n tq_minibatch.set_postfix(loss_train=loss_train_avg)\n\n tq_batch.set_postfix(loss_train=loss_train_avg)\n\n # learning rate scheduling\n lr_scheduler.step()\n\n # evaluate on test set\n loss_test_avg = test()\n\n # checkpointing\n best_filename = os.path.join(vae_dir, 'best.tar')\n filename = os.path.join(vae_dir, 'checkpoint.tar')\n is_best = not cur_best or loss_test_avg < cur_best\n if is_best:\n cur_best = loss_test_avg\n\n save_checkpoint({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'precision': loss_test_avg,\n 'optimizer': optimizer.state_dict(),\n 'scheduler': lr_scheduler.state_dict()\n }, is_best, filename, best_filename)\n\n tq_episode.set_postfix(loss_train=loss_train_avg, loss_test=loss_test_avg)",
"def main(\n network_type: NetworkType = Argument(..., help=\"type of the VAE network\"),\n bottleneck_dim: int = Option(\n 16, \"--bottleneck_dim\", \"-n\", help=\"size of the VAE bottleneck\"\n ),\n lr: float = Option(0.001, \"--lr\", \"-r\", help=\"learning rate for training\"),\n batch_size: int = Option(..., \"--batch_size\", \"-b\", help=\"batch size for training\"),\n epochs: int = Option(..., \"--epochs\", \"-e\", help=\"epochs to train\"),\n device: str = Option(\n \"cpu\", \"--device\", \"-d\", help='device to train on, e.g. \"cuda:0\"'\n ),\n logdir: str = Option(\n \"./results\",\n \"--logdir\",\n \"-l\",\n help=\"directory to log the models and event file to\",\n ),\n):\n\n mnist_data = dataset.MyMNIST()\n\n if network_type == NetworkType.mlp:\n net = model.MLPVAE((1, 32, 32), bottleneck_dim)\n else:\n net = model.CNNVAE((1, 32, 32), bottleneck_dim)\n\n optim = torch.optim.Adam(net.parameters(), lr)\n vae_trainer = trainer.Trainer(net, mnist_data, optim, batch_size, device, logdir)\n vae_trainer.train(epochs)",
"def train_vqa(epochs, verbose=0):\r\n # Load Data\r\n with open(DATA_PATH, 'rb') as f:\r\n dataT = pickle.load(f)\r\n data = dataT[:80]\r\n test = dataT[80:100]\r\n with open(DATA_PATH2, 'rb') as f:\r\n dataT2 = pickle.load(f)\r\n test_out = dataT2[:20]\r\n with open(DATA_PATH3, 'rb') as f:\r\n dataT3 = pickle.load(f)\r\n test_out2 = dataT3[:20]\r\n\r\n\r\n # Initialize VQA Core\r\n print ('Initializing VQA Core!')\r\n core = VQAcore()\r\n\r\n # Initialize NPI Model\r\n print ('Initializing NPI Model!')\r\n npi = NPI(core, CONFIG, LOG_PATH, verbose=verbose)\r\n\r\n # Initialize TF Saver\r\n saver = tf.train.Saver()\r\n\r\n # Initialize TF Session\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # Start Training\r\n removed = {}\r\n errors = {}\r\n # for learning curve\r\n count = 0\r\n # tot_loss_def = []\r\n # tot_loss_arg = []\r\n # test_loss_def = []\r\n # test_loss_arg = []\r\n # test1_loss_def = []\r\n # test1_loss_arg = []\r\n # test2_loss_def = []\r\n # test2_loss_arg = []\r\n test_term_acct = []\r\n test_prog_acct = []\r\n test_arg_acct = []\r\n train_term_acct = []\r\n train_prog_acct = []\r\n train_arg_acct = []\r\n test1_term_acct = []\r\n test1_prog_acct = []\r\n test1_arg_acct = []\r\n test2_term_acct = []\r\n test2_prog_acct = []\r\n test2_arg_acct = []\r\n\r\n step = []\r\n for ep in range(1, epochs + 1):\r\n removed[ep] = 0\r\n for i in range(len(data)):\r\n # Reset NPI States\r\n npi.reset_state()\r\n\r\n # Setup Environment\r\n _, imgid, qid, qtype, steps = data[i]\r\n scene = Scene(imgid)\r\n x, y = steps[:-1], steps[1:]\r\n if len(x) == 0 or len(y) == 0:\r\n removed[ep] += 1\r\n continue\r\n count += 1\r\n\r\n # Run through steps, and fit!\r\n step_def_loss, step_arg_loss, term_acc, prog_acc, = 0.0, 0.0, 0.0, 0.0\r\n arg0_acc, arg1_acc, arg2_acc, num_args = 0.0, 0.0, 0.0, 0\r\n for j in range(len(x)):\r\n (prog_name, prog_in_id), arg, term = x[j]\r\n (_, prog_out_id), arg_out, term_out = y[j]\r\n\r\n # Update Environment if MOVE or WRITE\r\n if prog_in_id in EX_PROG_PID:\r\n scene.execute(prog_in_id, arg)\r\n\r\n # Get Environment, Argument Vectors\r\n env_in = [scene.get_env()]\r\n # env_in = [np.asarray(list(env_in.values())).transpose().flatten()]\r\n arg_in, arg_out = [get_args(arg, arg_in=True)], get_args(arg_out, arg_in=False)\r\n prog_in, prog_out = [[prog_in_id]], [prog_out_id]\r\n term_out = [1] if term_out else [0]\r\n\r\n # Fit!\r\n if prog_out_id in PAR_PROG_PID :\r\n loss, t_acc, p_acc, a_acc, _ = sess.run(\r\n [npi.arg_loss, npi.t_metric, npi.p_metric, npi.a_metrics, npi.arg_train_op],\r\n feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,\r\n npi.y_prog: prog_out, npi.y_term: term_out,\r\n npi.y_args[0]: [arg_out[0]], npi.y_args[1]: [arg_out[1]],\r\n npi.y_args[2]: [arg_out[2]]})\r\n step_arg_loss += loss\r\n term_acc += t_acc\r\n prog_acc += p_acc\r\n arg0_acc += a_acc[0]\r\n arg1_acc += a_acc[1]\r\n arg2_acc += a_acc[2]\r\n num_args += 1\r\n\r\n else:\r\n loss, t_acc, p_acc, _ = sess.run(\r\n [npi.default_loss, npi.t_metric, npi.p_metric, npi.default_train_op],\r\n feed_dict={npi.env_in: env_in, npi.arg_in: arg_in, npi.prg_in: prog_in,\r\n npi.y_prog: prog_out, npi.y_term: term_out})\r\n step_def_loss += loss\r\n term_acc += t_acc\r\n prog_acc += p_acc\r\n\r\n\r\n\r\n try:\r\n print (\"Epoch {0:02d} Step {1:03d} Default Step Loss {2:05f}, \" \\\r\n \"Argument Step Loss {3:05f}, Term: {4:03f}, Prog: {5:03f}, A0: {6:03f}, \" \\\r\n \"A1: {7:03f}, A2: {8:03}\".format(ep, i, step_def_loss / len(x), 
step_arg_loss / len(x), term_acc / len(x),\r\n prog_acc / len(x), arg0_acc / num_args, arg1_acc / num_args,\r\n arg2_acc / num_args))\r\n if count % 10 == 0:\r\n # Save Model\r\n tmp = stat.mean([arg0_acc / num_args, arg1_acc / num_args, arg2_acc / num_args])\r\n saver.save(sess, 'tasks/vqa/log/model.ckpt')\r\n train_arg_acct.append(tmp/len(x))\r\n train_prog_acct.append(prog_acc / len(x))\r\n train_term_acct.append(term_acc / len(x))\r\n step.append(count)\r\n a , b, c = test_vqa(test, npi, core, sess)\r\n test_arg_acct.append(c)\r\n test_prog_acct.append(b)\r\n test_term_acct.append(a)\r\n a, b, c = test_vqa(test_out, npi, core, sess)\r\n test1_arg_acct.append(c)\r\n test1_prog_acct.append(b)\r\n test1_term_acct.append(a)\r\n a, b, c = test_vqa(test_out2, npi, core, sess)\r\n test2_arg_acct.append(c)\r\n test2_prog_acct.append(b)\r\n test2_term_acct.append(a)\r\n except:\r\n print('main print failed')\r\n\r\n\r\n\r\n # Save Model\r\n saver.save(sess, 'tasks/vqa/log/model.ckpt')\r\n # print learning curve\r\n print('train term,prog,arg: ', test_term_acct[-1], test_prog_acct[-1], test_arg_acct[-1])\r\n print('test_inside term,prog,arg: ', test_term_acct[-1], test_prog_acct[-1], test_arg_acct[-1])\r\n print('test_out term,prog,arg: ', test1_term_acct[-1], test1_prog_acct[-1], test1_arg_acct[-1])\r\n print('test_out2 term,prog,arg: ', test2_term_acct[-1], test2_prog_acct[-1], test2_arg_acct[-1])\r\n\r\n plt.figure(figsize=(20, 5))\r\n plt.plot(step, train_term_acct, 'b', label='train_query_term')\r\n plt.plot(step, test_term_acct, 'm', label='test_query_term')\r\n plt.plot(step, test1_term_acct, 'c', label='test_count_term')\r\n plt.plot(step, test2_term_acct, 'k', label='test_exist_term')\r\n plt.legend()\r\n plt.xticks(step)\r\n plt.xlabel('step')\r\n plt.ylabel('acc')\r\n plt.title('learning curve for termination')\r\n plt.savefig(SAVE_PATH + 'acc_query_term')\r\n plt.close()\r\n plt.figure(figsize=(20, 5))\r\n plt.plot(step, train_prog_acct, 'b', label='train_query_prog')\r\n plt.plot(step, test_prog_acct, 'm', label='test_query_prog')\r\n plt.plot(step, test1_prog_acct, 'c', label='test_count_prog')\r\n plt.plot(step, test2_prog_acct, 'k', label='test_exist_prog')\r\n plt.legend()\r\n plt.xticks(step)\r\n plt.xlabel('step')\r\n plt.ylabel('acc')\r\n plt.title('learning curve for program')\r\n plt.savefig(SAVE_PATH + 'acc_query_prog')\r\n plt.close()\r\n plt.figure(figsize=(20, 5))\r\n plt.plot(step, train_arg_acct, 'b', label='train_query_arg')\r\n plt.plot(step, test_arg_acct, 'm', label='test_query_arg')\r\n plt.plot(step, test1_arg_acct, 'c', label='test_count_arg')\r\n plt.plot(step, test2_arg_acct, 'k', label='test_exist_arg')\r\n plt.legend()\r\n plt.xticks(step)\r\n plt.xlabel('step')\r\n plt.ylabel('acc')\r\n plt.title('learning curve for arguments')\r\n plt.savefig(SAVE_PATH + 'acc_query_arg')\r\n plt.close()\r\n # plt.hold\r\n # print learning curve\r\n # plt.plot(step, test_loss_def, 'r', label='test_inside_loss_def')\r\n # plt.plot(step, test_loss_arg, 'm', label='test_inside_loss_arg')\r\n # plt.legend()\r\n # plt.xticks(step)\r\n # plt.xlabel('step')\r\n # plt.ylabel('loss')\r\n # plt.title('learning curve')\r\n # plt.savefig(SAVE_PATH + 'learning_curve')\r\n # plt.close()\r",
"def train_and_eval(model_dir, model_type, train_steps, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, positive_labels, combination_method, method):\n \n index_map, weights = wvd.load(train_embeddings_file_name)\n #Get positive labels\n positive_labels = positive_labels.split(',')\n \n print(\"reading data...\")\n train_file_name = train_data \n df_train = pd.read_table(train_file_name, dtype={'node1':str, 'node2':str})\n df_train = df_train.sample(frac=1)\n\n # remove NaN elements\n df_train = df_train.dropna(how='any', axis=0)\n \n df_train[LABEL_COLUMN] = (\n df_train[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n print(\"model directory = %s\" % model_dir)\n \n train_x, _, train_y, _ = get_input(df_train, weights, index_map, combination_method)\n \n print(\"\\nBuilding model...\")\n m = build_estimator(model_dir, model_type, weights, index_map, combination_method)\n \n print(\"\\nTraining model...\")\n if model_type == \"regressor\":\n m.fit(train_x, train_y, n_epoch=train_steps, show_metric=True, snapshot_epoch=False)\n \n print(\"\\nTesting model...\")\n index_map, weights = wvd.load(test_embeddings_file_name)\n \n print(\"reading data...\")\n test_file_name = test_data\n df_test = pd.read_table(test_file_name, dtype={'node1':str, 'node2':str})\n df_test = df_test.sample(frac=1)\n\n # remove NaN elements\n df_test = df_test.dropna(how='any', axis=0)\n \n df_test[LABEL_COLUMN] = (\n df_test[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n \n if model_type == \"regressor\":\n test_x, test_original_y, test_index_y, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')\n node_sets = get_node_sets(test_original_x, test_original_y)\n \n print(\"\\nPredicting:\")\n model_predictions = m.predict(test_x)\n model_predictions = list(model_predictions)\n #Covert back to 1 and 0\n predictions = []\n model_predictions_probs = []\n for prediction in model_predictions:\n predictions.append(prediction[1]) #non-thresholded value of positve class\n model_predictions_probs.append(prediction[1])\n \n k = int(len([i for i in test_original_y if i == 1]) * 0.3)\n do_evaluations([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method)\n #Uncomment to log ranked links\n #log_predictions([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n # positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method,\n # outfilename=combination_method, method=method)",
"def train_vae(loader,\r\n device,\r\n stats_logger,\r\n lr=1e-3,\r\n schedule_lr=False,\r\n latent_dims=16,\r\n epochs=100,\r\n optimizer_name='adam',\r\n adam_beta1=0.5,\r\n loss_weights=None,\r\n extractor_lr=1e-5,\r\n clip_gradients=None,\r\n encoder_class=vae.Encoder,\r\n decoder_class=vae.Decoder,\r\n schedule_classes=None,\r\n beta_schedule_class=BetaSchedule):\r\n repr_dims = loader.dataset.dims\r\n\r\n encoder = encoder_class(repr_dims, latent_dims)\r\n decoder = decoder_class(repr_dims, latent_dims)\r\n model = vae.VAE(encoder, decoder).to(device)\r\n\r\n if schedule_lr:\r\n # LambdaLR multiplies the initial learning rate with the value\r\n # returned from lambda each epoch. If we want to directly use the\r\n # value returned from lambda as the learning rate, we can set an\r\n # initial learning rate of 1.\r\n initial_lr = lr\r\n lr = 1.0\r\n\r\n parameter_groups = [\r\n {'params': model.parameters(), 'lr': lr},\r\n ]\r\n\r\n if optimizer_name == 'adam':\r\n optimizer = optim.Adam(parameter_groups,\r\n lr=lr,\r\n betas=(adam_beta1, 0.999))\r\n elif optimizer_name == 'radam':\r\n from radam import RAdam\r\n optimizer = RAdam(parameter_groups,\r\n lr=lr,\r\n betas=(adam_beta1, 0.999))\r\n elif optimizer_name == 'rmsprop':\r\n optimizer = optim.RMSprop(parameter_groups,\r\n lr=lr)\r\n else:\r\n raise ValueError(f'Unknown optimizer {optimizer_name}')\r\n\r\n if schedule_lr:\r\n lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer,\r\n LRSchedule(initial_lr))\r\n\r\n if loss_weights is None:\r\n loss_weights = {}\r\n else:\r\n assert isinstance(loss_weights, dict), \\\r\n 'Loss weights must be a dictionary `loss_name -> weight`'\r\n\r\n if schedule_classes is None:\r\n schedule_classes = {}\r\n else:\r\n assert isinstance(schedule_classes, dict), \\\r\n 'schedules_classes must be a dictionary `loss_name -> schedule_class`'\r\n schedule_classes['KLD'] = beta_schedule_class\r\n\r\n loss_schedules = {name: schedule_class()\r\n for name, schedule_class in schedule_classes.items()}\r\n\r\n print('Training VAE on features...')\r\n for epoch in range(1, epochs + 1):\r\n print('Learning rate is {}'.format(optimizer.param_groups[0]['lr']))\r\n for name, schedule in loss_schedules.items():\r\n if name == 'KLD':\r\n # Special case for KLD's weight (beta)\r\n if isinstance(schedule, BetaSchedule):\r\n beta = schedule.get_beta(epoch - 1)\r\n loss_weights['KLD'] = beta\r\n print(f'Beta is {beta}')\r\n else:\r\n loss_weights[name] = schedule.get_beta(epoch - 1)\r\n\r\n if model.reg_loss.use_bayes_factor_vae0_loss:\r\n variances = (1 / model.reg_loss.log_precision.exp()).cpu().detach().numpy()\r\n print(variances[variances > 1])\r\n\r\n start_time = time.time()\r\n epoch_stats = train_epoch(loader,\r\n model,\r\n optimizer,\r\n device,\r\n epoch,\r\n 1,\r\n loss_weights,\r\n stats_logger,\r\n clip_gradients)\r\n end_time = time.time()\r\n print(f'Epoch took {end_time-start_time:.2f} seconds')\r\n stats_logger.append(epoch - 1, epoch_stats)\r\n\r\n if schedule_lr:\r\n lr_scheduler.step()\r\n\r\n return model",
"def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0",
"def evaluate_vae(beta, model, loader_test, device):\n model.eval() # switch to evaluation mode\n\n test_loss = 0\n test_kl_loss = 0\n test_rl_loss = 0\n\n for i, data in enumerate(loader_test):\n data, _ = data\n data = data.to(device)\n\n reconstr_batch, mu, logvar = model(data)\n\n test_rl, test_kld, test_beta = loss_function_vae(reconstr_batch, data, mu, logvar, beta)\n\n loss = test_rl + test_beta * test_kld\n\n test_kl_loss += test_kld.item()\n test_rl_loss += test_rl.item()\n test_loss += loss.item()\n\n test_loss /= len(loader_test.dataset)\n test_kl_loss /= len(loader_test.dataset)\n test_rl_loss /= len(loader_test.dataset)\n\n print(f\"====> Test set loss: {test_loss}\")\n\n return test_loss, test_kl_loss, test_rl_loss",
"def train_model(self, X_train_A, X_train_V, X_dev_A, X_dev_V):\n if self.fitted:\n print(\"\\nmodel already trained ---\", self.name)\n self.load_model()\n return \n \n X_train_V, _, _, _ = self.separate_V(X_train_V)\n X_dev_V, _, _, _ = self.separate_V(X_dev_V)\n \n X_train_A = np.vstack((X_train_A, X_dev_A))\n X_train_V = np.vstack((X_train_V, X_dev_V))\n \n if self.noisy:\n X_train_A_noisy = self.add_noise(X_train_A, self.noise)\n X_train_V_noisy = self.add_noise(X_train_V, self.noise)\n else:\n X_train_A_noisy = X_train_A\n X_train_V_noisy = X_train_V\n\n assert X_train_A_noisy.shape == X_train_A.shape\n assert X_train_V_noisy.shape == X_train_V.shape\n\n csv_logger = CSVLogger(os.path.join(self.save_dir, self.name, \"logger.csv\"))\n checkpoint = ModelCheckpoint(os.path.join(self.save_dir, self.name, \"weights-improvement-{epoch:02d}-{loss:.2f}.hdf5\"), monitor='loss', verbose=1, save_best_only=True, mode='min')\n callbacks_list = [csv_logger, checkpoint]\n\n self.autoencoder.fit([X_train_A_noisy, X_train_V_noisy],\n [X_train_A, X_train_V],\n epochs=self.epochs,\n batch_size=self.batch_size,\n shuffle=True,\n callbacks=callbacks_list)\n print(\"\\nmodel trained and saved ---\", self.name)\n self.save_model()",
"def train_and_eva():\n for sol in _solvers:\n for sub_u_rate in _sub_u_rates:\n print(\"now processing \" + sol + \" \" + str(sub_u_rate))\n pu_first_stage_training(sol, sub_u_rate)\n first_stage_test(sol, sub_u_rate)\n print(\"\\n\\n\")",
"def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers",
"def train_ev_ea(self):\n # Set data loader.\n data_loader = self.data_loader\n \n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device) # noise vector z\n \n start_iters = 0\n\n # Start training.\n print('Start encoder_a and encoder_v training...')\n start_time = time.time()\n \n ev_ea_c_iters = self.ev_ea_c_iters\n c_pre_iters = self.c_pre_iters\n \n C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(ev_ea_c_iters))\n \n encoder_a_path = os.path.join(self.model_save_dir, '{}-encoder_a.ckpt'.format(ev_ea_c_iters))\n \n encoder_v_path = os.path.join(self.model_save_dir, '{}-encoder_v.ckpt'.format(ev_ea_c_iters))\n \n \n if os.path.exists(C_path):\n self.C.load_state_dict(torch.load(C_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(C_path))\n \n self.encoder_a.load_state_dict(torch.load(encoder_a_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(encoder_a_path))\n \n self.encoder_v.load_state_dict(torch.load(encoder_v_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(encoder_v_path))\n else:\n C_pre_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(c_pre_iters))\n if os.path.exists(C_pre_path):\n self.C.load_state_dict(torch.load(C_pre_path, map_location=lambda storage, loc: storage))\n print('Load model pretrained checkpoints from {}'.format(C_pre_path))\n else:\n for i in range(0, c_pre_iters):\n # Fetch real images, attributes and labels.\n x_real, wrong_images, attributes, _, label_org = data_loader.train.next_batch(self.batch_size,10)\n\n\n x_real = x_real.to(self.device) # Input images.\n attributes = attributes.to(self.device) # Input attributes\n label_org = label_org.to(self.device) # Labels for computing classification loss.\n \n ev_x = self.encoder_v(x_real)\n cls_x = self.C(ev_x.detach())\n # Classification loss from only images for C training\n c_loss_cls = self.classification_loss(cls_x, label_org) \n # Backward and optimize.\n self.c_optimizer.zero_grad()\n c_loss_cls.backward()\n self.c_optimizer.step()\n \n if (i+1) % self.log_step == 0:\n loss = {}\n loss['c_loss_cls'] = c_loss_cls.item()\n prec1, prec5 = accuracy(cls_x.data, label_org.data, topk=(1, 5))\n loss['prec1'] = prec1\n loss['prec5'] = prec5\n log = \"C pretraining iteration [{}/{}]\".format(i+1, c_pre_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n torch.save(self.C.state_dict(), C_pre_path)\n print('Saved model pretrained checkpoints into {}...'.format(C_pre_path))\n \n for i in range(c_pre_iters, ev_ea_c_iters):\n # Fetch real images, attributes and labels.\n x_real, wrong_images, attributes, _, label_org = data_loader.train.next_batch(self.batch_size,10)\n\n\n x_real = x_real.to(self.device) # Input images.\n attributes = attributes.to(self.device) # Input attributes\n label_org = label_org.to(self.device) # Labels for computing classification loss.\n \n\n # =================================================================================== #\n # Train the domain-specific features discriminator \n # =================================================================================== #\n \n noise.normal_(0, 1)\n # Compute embedding of both images and attributes\n ea_a = self.encoder_a(attributes, noise)\n ev_x = self.encoder_v(x_real)\n \n \n ev_x_real = self.D_s(ev_x, attributes)\n ds_loss_real = -torch.mean(ev_x_real)\n \n \n ea_a_fake = self.D_s(ea_a, 
attributes)\n ds_loss_fake = torch.mean(ea_a_fake)\n \n # Compute loss for gradient penalty.\n alpha = torch.rand(ev_x.size(0), 1).to(self.device)\n ebd_hat = (alpha * ev_x.data + (1 - alpha) * ea_a.data).requires_grad_(True)\n \n ebd_inter = self.D_s(ebd_hat, attributes)\n ds_loss_gp = self.gradient_penalty(ebd_inter, ebd_hat)\n \n ds_loss = ds_loss_real + ds_loss_fake + self.lambda_gp * ds_loss_gp #+ ds_loss_realw\n #self.reset_grad_eb()\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n\n ds_loss.backward()\n self.ds_optimizer.step()\n if (i+1) % self.n_critic == 0:\n # =================================================================================== #\n # Train the encoder_a and C \n # =================================================================================== #\n ev_x = self.encoder_v(x_real)\n ev_x_real = self.D_s(ev_x, attributes)\n ev_loss_real = torch.mean(ev_x_real)\n \n cls_x = self.C(ev_x)\n c_loss_cls = self.classification_loss(cls_x, label_org)\n\n # Backward and optimize.\n ev_c_loss = ev_loss_real + c_loss_cls\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n ev_c_loss.backward()\n self.ev_optimizer.step()\n \n # =================================================================================== #\n # Train the encoder_v #\n # =================================================================================== #\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes,noise)\n ea_a_fake = self.D_s(ea_a, attributes)\n ea_loss_fake = -torch.mean(ea_a_fake)\n \n cls_a = self.C(ea_a)\n ebn_loss_cls = self.classification_loss(cls_a, label_org)\n \n\n # Backward and optimize.\n ea_loss = ea_loss_fake + ebn_loss_cls\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n ea_loss.backward()\n self.ea_optimizer.step()\n \n # Logging.\n loss = {}\n \n loss['ds/ds_loss_real'] = ds_loss_real.item()\n loss['ds/ds_loss_fake'] = ds_loss_fake.item()\n loss['ds/ds_loss_gp'] = ds_loss_gp.item()\n \n # Print out training information.\n if (i+1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n prec1, prec5 = accuracy(cls_x.data, label_org.data, topk=(1, 5))\n loss['prec1'] = prec1\n loss['prec5'] = prec5\n prec1e, prec5e = accuracy(cls_a.data, label_org.data, topk=(1, 5))\n loss['prec1e'] = prec1e\n loss['prec5e'] = prec5e\n log = \"Encoder_a and Encoder_v Training Elapsed [{}], Iteration [{}/{}]\".format(et, i+1, ev_ea_c_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n\n \n # Save model checkpoints.\n if (i+1) % self.model_save_step == 0:\n C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(i+1))\n torch.save(self.C.state_dict(), C_path)\n print('Saved model checkpoints into {}...'.format(C_path))\n \n encoder_a_path = os.path.join(self.model_save_dir, '{}-encoder_a.ckpt'.format(i+1))\n torch.save(self.encoder_a.state_dict(), encoder_a_path)\n print('Saved model checkpoints into {}...'.format(encoder_a_path))\n \n encoder_v_path = os.path.join(self.model_save_dir, '{}-encoder_v.ckpt'.format(i+1))\n torch.save(self.encoder_v.state_dict(), encoder_v_path)\n print('Saved model checkpoints into {}...'.format(encoder_v_path))",
"def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)",
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)",
"def train():\n # YOUR TRAINING CODE GOES HERE",
"def _train_and_evaluate(estimator, output_dir):\n \n \"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)",
"def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)",
"def evaluate():\n log.info('Loading dev data...')\n if args.version_2:\n dev_data = SQuAD('dev', version='2.0')\n else:\n dev_data = SQuAD('dev', version='1.1')\n (_, _), (data_file_name, _) \\\n = dev_data._data_file[dev_data._version][dev_data._segment]\n dev_data_path = os.path.join(dev_data._root, data_file_name)\n\n if args.debug:\n sampled_data = [dev_data[0], dev_data[1], dev_data[2]]\n dev_data = mx.gluon.data.SimpleDataset(sampled_data)\n log.info('Number of records in dev data: %d', len(dev_data))\n\n dev_data_features = preprocess_dataset(\n tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride, num_workers=args.num_workers,\n max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,\n feature_file=args.dev_dataset_file)\n\n dev_data_input = convert_full_features_to_input_features(dev_data_features)\n log.info('The number of examples after preprocessing: %d', len(dev_data_input))\n\n dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,\n num_workers=4, batch_size=args.test_batch_size,\n shuffle=False, last_batch='keep')\n\n log.info('start prediction')\n\n all_results = collections.defaultdict(list)\n\n epoch_tic = time.time()\n total_num = 0\n for (batch_id, data) in enumerate(dev_dataloader):\n data_list = list(split_and_load(data, ctx))\n for splited_data in data_list:\n example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data\n total_num += len(inputs)\n outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)\n example_ids = example_ids.asnumpy().tolist()\n for c, example_ids in enumerate(example_ids):\n result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),\n start_top_index=outputs[1][c].asnumpy().tolist(),\n end_top_log_probs=outputs[2][c].asnumpy().tolist(),\n end_top_index=outputs[3][c].asnumpy().tolist(),\n cls_logits=outputs[4][c].asnumpy().tolist())\n all_results[example_ids].append(result)\n if batch_id % args.log_interval == 0:\n log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))\n\n epoch_toc = time.time()\n log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,\n total_num / (epoch_toc - epoch_tic))\n\n log.info('Get prediction results...')\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n for features in dev_data_features:\n results = all_results[features[0].example_id]\n example_qas_id = features[0].qas_id\n score_diff, best_non_null_entry, nbest_json = predict_extended(\n features=features, results=results, n_best_size=args.n_best_size,\n max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,\n end_n_top=args.end_top_n)\n scores_diff_json[example_qas_id] = score_diff\n all_predictions[example_qas_id] = best_non_null_entry\n all_nbest_json[example_qas_id] = nbest_json\n\n output_prediction_file = os.path.join(args.output_dir, 'predictions.json')\n output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')\n output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')\n\n with open(output_prediction_file, 'w') as writer:\n writer.write(json.dumps(all_predictions, indent=4) + '\\n')\n with open(output_nbest_file, 'w') as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + '\\n')\n with open(output_null_log_odds_file, 'w') as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + '\\n')\n\n if os.path.exists(sys.path[0] + 
'/evaluate-v2.0.py'):\n arguments = [\n dev_data_path, output_prediction_file, '--na-prob-thresh',\n str(args.null_score_diff_threshold)\n ]\n if args.version_2:\n arguments += ['--na-prob-file', output_null_log_odds_file]\n subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)\n else:\n log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '\n 'Check index.rst for the detail.')",
"def eval(self):\n self.train(mode=False)",
"def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n left_vocab_filepath = os.path.join(args.model, 'left.vocab')\n left_vocab = Vocab(vocab_filepath=left_vocab_filepath)\n right_vocab_filepath = os.path.join(args.model, 'right.vocab')\n right_vocab = Vocab(vocab_filepath=right_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n if checkpoint['encoder']['model_type'] == 'transformer':\n encoder = TEncoder(input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n dropout=checkpoint['encoder']['dropout'],\n num_attention_heads=checkpoint['encoder']['num_attention_heads'])\n else:\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['decoder']['model_type'] == 'transformer':\n decoder = TDecoder(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n dropout=checkpoint['decoder']['dropout'],\n num_attention_heads=checkpoint['decoder']['num_attention_heads'])\n elif checkpoint['decoder']['with_attention']:\n decoder = Attention(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n pairs = putils.convert_to_seq_pairs(args.data)\n indexed_pairs = putils.index_pairs(pairs, left_vocab.char2idx,\n right_vocab.char2idx)\n if dataset_params['reverse']:\n indexed_pairs = [(y, x) for x, y in 
indexed_pairs]\n source_vocab = right_vocab\n target_vocab = left_vocab\n else:\n source_vocab = left_vocab\n target_vocab = right_vocab\n if args.random > 0:\n random.shuffle(indexed_pairs)\n for seq_num in range(args.random):\n seq = indexed_pairs[seq_num]\n print('-'*80)\n input_str = ' '.join(\n ''.join([source_vocab.idx2char[idx] for idx in seq[0] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n gold_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in seq[1] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n predicted_idxx = decode(seq[0], args.itemize, encoder, decoder,\n dataset_params['max_seq_len'])\n pred_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in predicted_idxx\n if idx not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n print('>', input_str)\n print('=', gold_str)\n print('<', pred_str)\n else:\n _evaluate(indexed_pairs, args.itemize, encoder, decoder,\n target_vocab.idx2char, dataset_params['max_seq_len'])",
"def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n source_vocab_filepath = os.path.join(args.model, 'source.vocab')\n source_vocab = Vocab(vocab_filepath=source_vocab_filepath)\n target_vocab_filepath = os.path.join(args.model, 'target.vocab')\n target_vocab = Vocab(vocab_filepath=target_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['with_attention']:\n decoder = Attention(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n indexes = putils.index_dataset(\n args.data, source_vocab.item2idx, target_vocab.item2idx,\n dataset_params['is_character_based'], dataset_params['max_seq_len'],\n dataset_params['is_reversed'])\n if args.random > 0:\n random.shuffle(indexes)\n for seq_num in range(args.random):\n seq = indexes[seq_num]\n print('-'*80)\n print('>', ' '.join([source_vocab.idx2item[idx]\n for idx in seq[0]]))\n print('=', ' '.join([target_vocab.idx2item[idx]\n for idx in seq[1]]))\n # TODO: add support for OOV\n predicted_idx, _ = _decode(seq[0], encoder, decoder,\n checkpoint['with_attention'],\n dataset_params['max_seq_len'])\n print('<', ' '.join([target_vocab.idx2item[idx]\n for idx in predicted_idx]))\n else:\n _evaluate(indexes, encoder, 
decoder, target_vocab, checkpoint,\n dataset_params)",
"def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()",
"def train():\n pass",
"def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)",
"def train(self):\n \n for kernel_name, kernel in self.kernel_dict.items():\n if self.verbose: print('Training with {:s} kernel'.format(kernel_name))\n model = BnpQedModel(self.x, self.y, kernel, self.labelFunc, \n self.labelLUT, self.mode, self.design)\n model.train(num_restarts=self.num_restarts, b=self.b) \n if self.verbose:\n print('Log Bayes factor in favor of discontinuity = {:0.2f}'.format(model.summary(b=self.b)['logbayesfactor']))\n print('Evidence: M_C = {:0.3f}, M_D = {:0.3f}'.format(model.summary(b=self.b)['evidence']['mc'], \n model.summary(b=self.b)['evidence']['md']))\n print('Posterior model probabilities: p(M_C|D) = {:0.3f}, p(M_D|D) = {:0.3f}'.format(model.summary(b=self.b)['pmp']['pmc'], \n model.summary(b=self.b)['pmp']['pmd']))\n print('') \n self.results[kernel_name] = model \n self.trained = True \n return self.results",
"def train(self, session, train_examples, dev_examples, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n if self.summary_flag:\n self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)\n\n logging.info(\"Train Loss File: {}\".format(self.train_loss_log))\n logging.info(\"Dev Loss File: {}\".format(self.dev_loss_log))\n best_score = 100000\n train_log = open(self.train_loss_log, \"w\")\n dev_log = open(self.dev_loss_log, \"w\")\n for epoch in range(self.n_epoch):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, self.n_epoch))\n dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)\n dev_log.write(\"{},{}\\n\".format(epoch + 1, dev_score))\n logging.info(\"Average Dev Cost: {}\".format(dev_score))\n logging.info(\"train F1 & EM\")\n f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)\n logging.info(\"Dev F1 & EM\")\n f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)\n if dev_score < best_score:\n best_score = dev_score\n print(\"New best dev score! Saving model in {}\".format(train_dir + \"/\" + self.model_name))\n self.saver.save(session, train_dir + \"/\" + self.model_name)\n\n return best_score",
"def fit(self, plot_loss=True):\n adam = optim.Adam(self.learning_rate)\n self.svi = SVI(\n self.vae_model, \n self.vae_guide, \n adam, \n Trace_ELBO()\n )\n # encoder_nn = self.vae_encoder()\n # decoder_nn = self.vae_decoder()\n rng_key, rng_key_samp, rng_key_init = random.split(self.rng_key, 3)\n\n self.gp_predictive = Predictive(self.gp.sample, num_samples=self.batch_size)\n\n # initialise with a sample batch\n sample_batch = self.gp_predictive(rng_key=rng_key_samp, x=self.x)\n \n svi_state = self.svi.init(rng_key_init, sample_batch['y'])\n test_loss_list = []\n\n for i in range(self.num_epochs):\n rng_key, rng_key_train, rng_key_test = random.split(rng_key, 3)\n t_start = time.time()\n\n _, svi_state = self.epoch_train(rng_key_train, svi_state)\n test_loss = self.eval_test(rng_key_test, svi_state)\n test_loss_list += [test_loss]\n\n print(\n \"Epoch {}: loss = {} ({:.2f} s.)\".format(\n i, test_loss, time.time() - t_start\n )\n )\n \n if np.isnan(test_loss): break\n\n if plot_loss:\n plt.figure()\n plt.plot(np.arange(0, self.num_epochs, 1)[0:len(test_loss_list)], test_loss_list)\n plt.xlabel(\"epochs\")\n plt.ylabel(\"test error\")\n plt.savefig('src/test/plots/vae_lost.png')\n plt.show()\n plt.close()\n\n # return optimal parameters for decoder\n return self.svi.get_params(svi_state)[\"decoder$params\"]",
"def _build_and_compile_vae(self):\n z_dimension = int(self.n_dims//2)+1\n\n self.encoder, inputs, z_mean, z_var = self._encoder(z_dimension, self.mcd)\n\n self.decoder, outputs = self._decoder(inputs, z_dimension, self.mcd)\n\n self.model = Model(inputs, outputs, name=\"VAE IMPUTER\")\n\n loss = self.vae_loss(self.n_dims, z_mean, z_var)\n\n self.model.compile(optimizer=self.optimizer, loss=loss)",
"def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break",
"def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)",
"def run_offenseval_task_a(training_data, test_data):\n #grid_search_svm(training_data, test_data)\n compare_classifiers(classifiers(), training_data, test_data, dev_stage=True)\n #compare_classifiers(classifiers(), training_data, test_data, dev_stage=False)"
]
| [
"0.71673036",
"0.7005656",
"0.68949646",
"0.6782185",
"0.6759987",
"0.666773",
"0.6665484",
"0.6631297",
"0.6623886",
"0.66078615",
"0.65956473",
"0.65770334",
"0.6567851",
"0.6563507",
"0.6548026",
"0.6535641",
"0.64974064",
"0.64903086",
"0.6463899",
"0.64631844",
"0.6426341",
"0.6415232",
"0.6412658",
"0.63972384",
"0.6382966",
"0.63604975",
"0.63602954",
"0.63528174",
"0.6349629",
"0.63389856"
]
| 0.72611 | 0 |
Get design by its id | def get_design_id(design_id: str):
    design = storage.get_design(UUID(design_id))
    if design is None:
        abort(400)
    return design.serialize() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_designs(self):",
"def get_dessert_by_id(dessert_id: int):\n return get_data_by_id(\"Desserts\", dessert_id)",
"def get_object(id):",
"def get_med_by_id(self):\n return \"SELECT * FROM medic WHERE id = %s\"",
"def get(self, id):\n return Matstamm.find_by_id(id)",
"def getDmd(self):\n for obj in aq_chain(self):\n if getattr(obj, 'id', None) == 'dmd': return obj",
"def design(self):\n return self[self.design_columns]",
"def design(self):\n return self[self.design_columns]",
"def get(self, _id):",
"def get_by_id(self, id: str) -> \"Dataset\":\n raise NotImplementedError",
"def getbyid(self, id):\n\n return esd.retrieve(id)",
"def get(self, id):\n if id == 'body':\n return document.body\n else:\n return self.instances[id]",
"def get(self, id):\n return Freigabe.find_by_id(id)",
"def get(id: str) -> DataSet:\n pass",
"def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()",
"def find_by_id(self, id_):\n return self.by_id.get(id_)",
"def get_technique_by_id(self, id_code):\r\n return self.fs.query([\r\n Filter('type', '=', 'attack-pattern'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])",
"def findItem(self, id):\n itemFound = None\n for curItem in self.scene.items():\n if not isinstance(curItem, DiagramItem):\n continue \n if curItem.itemId == int(id):\n itemFound = curItem\n break\n return itemFound",
"def get_software_by_id(self, id_code):\r\n malware_return = self.fs.query([\r\n Filter('type', '=', 'malware'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n tool_return = self.fs.query([\r\n Filter('type', '=', 'tool'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n if malware_return:\r\n return malware_return\r\n elif tool_return:\r\n return tool_return",
"def get_by_id(self, id: int):\n\n\t\traise NotImplemented",
"def get_physics_object_from_id(self, id):\n for p in self.physics_objects:\n if p.canvas_id == id:\n return p",
"def get_object(self, id_):\n return self._objects.get(id_, None)",
"def get_by_id(dataobj_id):\n results = list(get_data_dir().rglob(f\"{dataobj_id}-*.md\"))\n return results[0] if results else None",
"def get_definition(self, labware_id: str) -> LabwareDefinition:\n return self.get_definition_by_uri(\n LabwareUri(self.get(labware_id).definitionUri)\n )",
"def getCard(self,id):\n if not self.cardExists(id):\n return None\n return self.cards[id]",
"def via_dom_id(self, dom_id):\n try:\n return DOM.from_json([d for d in self._json\n if d[\"DOMId\"] == dom_id][0])\n except IndexError:\n log.critical(\"No DOM found for DOM ID '{0}'\".format(dom_id))",
"def find(id):\n return QueryBuilder(Card).find(id)",
"def read_design(designfile):\r\n designtype = None\r\n if re.search(r\"\\.adm$\", designfile, flags=re.I) is not None:\r\n designtype = XMLDesign\r\n elif re.search(r\"\\.xml$\", designfile, flags=re.I) is not None:\r\n designtype = XMLDesign\r\n elif re.search(r\"\\.json$\", designfile, flags=re.I) is not None:\r\n designtype = JSONDesign\r\n design = designtype(designfile)\r\n return design",
"def get_review(self, id_):\n cursor = self._connection.cursor()\n select_command = make_select_command(\"reviews\")\n select_command += \" WHERE id_ = ?\"\n cursor.execute(select_command, (id_,))\n for row in cursor:\n return expandable_from_tuple(row, FIELD_DESCRIPTIONS) \n return None",
"def get_title_by_id(id):\n\n # your code"
]
| [
"0.63311213",
"0.6309447",
"0.6089819",
"0.6040134",
"0.60139215",
"0.59313655",
"0.5839858",
"0.5839858",
"0.5813057",
"0.57999706",
"0.5788469",
"0.57393354",
"0.57166123",
"0.56829333",
"0.5643689",
"0.5634991",
"0.5632631",
"0.56263745",
"0.55991787",
"0.5594078",
"0.55876327",
"0.5566793",
"0.5552548",
"0.5551086",
"0.55125254",
"0.5486028",
"0.54770017",
"0.54491657",
"0.54482454",
"0.5436457"
]
| 0.7091185 | 0 |
Get locus of design by id | def get_locus_of_design_id(design_id):
    existing_design = storage.get_design(UUID(design_id))
    if existing_design is None:
        abort(400)
    result = execution.possible_positions(existing_design.robot, 40)
    output = list(map(lambda x:
                      {'position': {'x': x['position'][0], 'z': x['position'][1], 'y': x['position'][2]},
                       'configuration': x['configuration']}
                      , result))
    return jsonify(output) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_location_by_id(self, location_id):",
"def get_location(self, labware_id: str) -> LabwareLocation:\n return self.get(labware_id).location",
"def locate(self):\n return utils.get_object(\"crds\", self.observatory, \"locate\")",
"def get_by_id(cls, name):\n\t\treturn super(Locality, cls).get_by_id(cls.normalized_name(name))",
"def get_location_from_id(id):\n tree = ET.parse('./devset_topics.xml')\n root = tree.getroot()\n for item in root.findall('./topic'):\n if id == item[0].text:\n return item[1].text",
"def get_locus_by_taxon(id): # noqa: E501\n return 'do some magic!'",
"def get_software_by_id(self, id_code):\r\n malware_return = self.fs.query([\r\n Filter('type', '=', 'malware'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n tool_return = self.fs.query([\r\n Filter('type', '=', 'tool'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n if malware_return:\r\n return malware_return\r\n elif tool_return:\r\n return tool_return",
"def get_city(self, territory_id: str = \"\"):",
"def get_city(self, territory_id: str = \"\"):",
"def findLocationById(cls, id):\r\n return cls.query.filter_by(id=id).first()",
"def get_designs(self):",
"def getbyid(self, id):\n\n return esd.retrieve(id)",
"def get_locations(self, id_):\n with self._db_connection() as connection:\n return connection.get_locations(id_)",
"def getElementById(self, id) :\n\t\tif id in self.lid.keys() :\n\t\t\treturn self.lid[id]\n\t\telse :\n\t\t\treturn None",
"def find_location_by_id(self, id):\n for location in self.locations:\n if location.id == id:\n yield location",
"def get_locus_by_qtl(id): # noqa: E501\n return 'do some magic!'",
"def get_object(id):",
"def find_by_id(self, id_):\n return self.by_id.get(id_)",
"def get(self, oid: str, elasticube: str = None) -> Resource:\n for hierarchy in self.all(elasticube):\n if hierarchy._id == oid:\n return hierarchy\n\n return None",
"def get(self, _id):",
"def station_by_id(self, id):\n for station in self.stations:\n if id == station.stationid:\n return station\n\n print(str.format(\"Could not find station with '{0}'\",str(id)))",
"def lookup(self, siteId):\n # If it is an individual site, it will be in the object's dictionary\n siteId = self.siteIdWarn(siteId)\n if hasattr(self, siteId):\n site = getattr(self, siteId)\n if isinstance(site, IconSite):\n return site\n return None\n # If it is a series site, split the name up in to the series name and index\n # and return the site by-index from the series\n seriesName, seriesIndex = splitSeriesSiteId(siteId)\n if seriesName is None:\n return None\n series = getattr(self, seriesName, None)\n if not isinstance(series, IconSiteSeries) or seriesIndex >= len(series):\n return None\n return series[seriesIndex]",
"def resolve_from_local_lookup_table(self, id: str) -> GeoLocation:\n return self.local_lookup(id)",
"def get_entity_by_id(self, id):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers_ld)\n return r.json()",
"def get_locations_by_ids(self, id_list):",
"def get_study_info(self,std_id):\n raise NotImplementedError",
"def get_design_id(design_id: str):\n design = storage.get_design(UUID(design_id))\n if design is None:\n abort(400)\n return design.serialize()",
"def info(self, id):",
"def get_location(self, cp):\n LOCATION_URL = \"https://api-seloger.svc.groupe-seloger.com/api/v1/locations/search\"\n \n LOCATION_PAYLOAD = {\n \"latitude\": 0.0,\n \"limit\": 50,\n \"locationTypes\": 30,\n \"longitude\": 0.0,\n \"radius\": 0,\n \"searchTerm\": cp,\n \"type\": 0\n }\n \n r = requests.post(LOCATION_URL, data=json.dumps(LOCATION_PAYLOAD), headers=self.headers)\n return r.json()[0]['id']",
"def find_cluster(self, id):\n raise NotImplementedError"
]
| [
"0.6050606",
"0.5750205",
"0.56887347",
"0.5628116",
"0.56142986",
"0.55571204",
"0.548368",
"0.54005086",
"0.54005086",
"0.5387359",
"0.5373012",
"0.5343095",
"0.5323943",
"0.5312559",
"0.52997965",
"0.52954257",
"0.52212054",
"0.52206117",
"0.5204883",
"0.51911783",
"0.51818967",
"0.51782125",
"0.51693237",
"0.5142024",
"0.51379275",
"0.51211685",
"0.5108607",
"0.50988287",
"0.50949734",
"0.50943476"
]
| 0.664265 | 0 |
Judge whether the element exists; the return result is true or false. | def is_element_exist(self, locator):
    t1 = time.time()
    try:
        self.driver.find_element(locator)
        self.my_print("{0} Element: <{1}> is exist, Spend {2} seconds".format(success,locator, time.time() - t1))
        return True
    except TimeoutException:
        self.my_print("{0} Element: <{1}> is not exist, Spend {2} seconds".format(fail, locator, time.time() - t1))
        return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exist(self,list,a):\r\n\t\ti = 0\r\n\t\tfor elem in list:\r\n\t\t\tif (elem == a):\r\n\t\t\t\ti=i+1\r\n\t\tif (i>0):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False",
"def isExist(data):\n return True/False",
"def is_exist(self):\n try:\n if self.pathType == 'ID':\n self.driver.find_element_by_id(self.pathValue)\n return True\n if self.pathType == 'XPATH':\n self.driver.find_elements_by_xpath(self.pathValue)\n return True\n if self.pathType == 'CLASSNAME':\n self.driver.find_element_by_class_name(self.pathValue)\n return True\n if self.pathType == 'NAME':\n self.driver.find_element_by_name(self.pathValue)\n return True\n except NoSuchElementException:\n return False",
"def _is_element_present():\r\n return self.q(css=element_selector).present",
"def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None",
"def exist(self):",
"def isElementPresent(self,locator=\"\",locatorType='id', element=None):\n\n\n\n\n try:\n if locator:\n element = self.getElement(locator, locatorType)\n\n if element is not None:\n self.logger.info(\"Element found with locator \"+locator+\" LocatorType \"+locatorType)\n return True\n\n else:\n self.logger.info(\"Element not found with locator \" + locator + \" LocatorType \" + locatorType)\n return False\n\n except:\n print(\"Element not found\")\n return False",
"def exist(x):\n return x is not None",
"def est_present(element, t):\r\n for elt in t:\r\n if elt == element:\r\n return True\r\n return False",
"def elementIsPresent(self, element_tuple):\n try:\n self.CORE.find_element(*self.format_element(element_tuple))\n result = True\n except SeleniumExceptions.NoSuchElementException:\n result = False\n self.log_info(f\"Browser.elementIsPresent: {element_tuple} is {'' if result else 'not '}present\")\n return result",
"def _is_element_present():\r\n is_present = self.q(css=selector).present\r\n return is_present, is_present",
"def contains(self, element):\n pass",
"def exista(self, item):\n if item not in self._items:\n return False\n for x in self._items:\n if x == item:\n return True",
"def exists(self):\n\n return self.ids[-1] is not None",
"def exists(self, selector):\n return not self.main_frame.findFirstElement(selector).isNull()\n\n\n #TODO: Still not work.",
"def checkElementInArray(element,array):\n\t\n\texists = False\n\t\n\tfor i in array:\n\t\n\t\tif i == element:\n\t\t\texists = True\n\n\treturn exists",
"def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True",
"def does_element_exist(driver, selectors):\n try:\n driver.find_element_by_css_selector(selectors)\n except NoSuchElementException:\n return False\n return True",
"def does_exist(self, index):\n if index in self.map:\n return True\n return False",
"def GetElementIfIndexExists(self, arg0: 'unsigned long long', arg1: 'itkQuadEdgeMeshPointF3GQEULLULLBBT') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_GetElementIfIndexExists(self, arg0, arg1)",
"def is_element_present(self, web_element, timeout):\n try:\n WebDriverWait(self.web_driver, timeout).until(web_element)\n return True\n except TimeoutException:\n return False",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def GetElementIfIndexExists(self, arg0: 'unsigned long long', arg1: 'itkQuadEdgeMeshPointF2GQEULLULLBBT') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_GetElementIfIndexExists(self, arg0, arg1)",
"def exist_test(y, d):\n\tif y in d: \n\t\treturn True\n\telse: \n\t\td.add(y)\n\t\treturn False",
"def check_exists_by_xpath(self, xpath):\n try:\n self.driver.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True",
"def __contains__(self, elem):\n return elem in list(self)",
"def exists(self):\n return bool(self.get())",
"def has(self, tag, index):\n return self.get(tag, index) is not None",
"def wait_element(self, wait_time):\n time.sleep(wait_time)\n if self.is_exist():\n return True\n else:\n return False"
]
| [
"0.7326738",
"0.725836",
"0.7174447",
"0.71559846",
"0.6963649",
"0.6924973",
"0.68580633",
"0.6807948",
"0.678384",
"0.6759503",
"0.6753746",
"0.67457867",
"0.67321026",
"0.6707753",
"0.6610276",
"0.65590334",
"0.6483656",
"0.646101",
"0.6455775",
"0.6402263",
"0.63802975",
"0.6380118",
"0.6380118",
"0.6334145",
"0.6323046",
"0.62951887",
"0.6288487",
"0.62615025",
"0.62482333",
"0.623472"
]
| 0.7406134 | 0 |
Implicitly wait for all elements on the page. | def wait(self, secs):
    t1 = time.time()
    self.driver.implicitly_wait(secs)
    self.my_print("{0} Set wait all element display in {1} seconds, Spend {2} seconds".format(success,
                  secs,time.time() - t1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait(self):\n try:\n confirm_modal_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'btn-default'))\n WebDriverWait(self.web_element, 2).until(confirm_modal_dialog)\n except TimeoutException:\n confirm_ajs_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'ajs-cancel'))\n WebDriverWait(self.web_element, 2).until(confirm_ajs_dialog)",
"def web_elements(self):\n if isinstance(self._selector, tuple):\n return self._driver.find_elements(*self._selector)",
"def get_elements(self, selector):\n self._wait_element_localed(self.driver, selector)\n elements = self.driver.find_elements(*self._selector_to_by(selector))\n\n return elements",
"def find_elements(self, elements_locator: Tuple[By, str], wait_time=10, skip_exception=False) -> List[WebElement]:\n try:\n return WebDriverWait(self.driver, wait_time).until(EC.presence_of_all_elements_located(elements_locator),\n message=f\"Can't find elements with {elements_locator}\")\n except TimeoutException as err:\n if not skip_exception:\n print(f\"Elements was not found in {wait_time} seconds\")\n raise err\n return []",
"def wait_for_page_load(self):\n pass",
"def find_elements(self, elements: List[WebElement]) -> List[WebElement]:\n return elements",
"def elements(xpath_selection):\n driver = Driver().connect()\n return driver.find_elements_by_xpath(xpath_selection)",
"def wait_for_page_to_fully_load(self, **kwargs):\n ref_element_locator = kwargs.get('ref_element_locator', self.locators.DATATABLE_TABLE_ROWS)\n timeout = kwargs.get('timeout', 30)\n self.wait_for_elements(ref_element_locator)",
"def _find_elements(locator, timeout=1, type = By.XPATH):\n try:\n logger.debug(\"Looking for elements with locator [%s]\"%(locator))\n return WebDriverWait(driver, timeout).until(EC.visibility_of_all_elements_located((type, locator)))\n except TimeoutException:\n logger.warning(f\"No elements with locator {locator} were visible within {timeout} seconds\")\n return None",
"def load_elements(self):\n for path in self.element_paths:\n self.process_path(path)",
"def wait_until_elements_find(self, locator_type, locator):\n self.wait.until(EC.presence_of_element_located((locator_type, locator)))\n return self.driver.find_elements(by=locator_type, value=locator)",
"def look_for_elements(self, selector, locator=By.CSS_SELECTOR, timeout=5):\n try:\n logger.debug(\"Look for elements using '{}' locator and '{}' selector\".format(locator, selector))\n WebDriverWait(self.driver, timeout).until(EC.visibility_of_all_elements_located((locator, selector)))\n return self.driver.find_elements(locator, selector)\n except TimeoutException:\n logger.exception(\"Element '{}' is not appeared in {} seconds\".format(selector, timeout))\n allure.attach(body=self.driver.get_screenshot_as_png(),\n attachment_type=allure.attachment_type.PNG)\n allure.attach.file(source=PATH_TO_LOGS + \"chrome_logs.log\",\n attachment_type=allure.attachment_type.TEXT)\n raise AssertionError(\"Element '{}' is not appeared in {} seconds\".format(selector, timeout))",
"def wait_for_page_load(self, timeout=30):\n old_page = self.driver.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.driver, timeout).until(\n staleness_of(old_page)\n )",
"def init_page_elements(self):\n pass",
"def _wait_for_search_results(self):\n try:\n #Search results are positive\n WebDriverWait(self.driver, self.search_module_wait_time).until(EC.visibility_of_all_elements_located(self.PRODUCTS_IMG))\n except:\n #Search results are negative\n WebDriverWait(self.driver, self.search_module_wait_time).until(EC.visibility_of_element_located(self.EMPTY_ALERT))",
"def wait_until_loading_is_complete(self, locator=None):\n locator = lex_locators[\"body\"] if locator is None else locator\n try:\n self.selenium.wait_until_page_contains_element(locator)\n self.wait_for_aura()\n # this knowledge article recommends waiting a second. I don't\n # like it, but it seems to help. We should do a wait instead,\n # but I can't figure out what to wait on.\n # https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1\n time.sleep(1)\n\n except Exception:\n try:\n self.selenium.capture_page_screenshot()\n except Exception as e:\n self.builtin.warn(\"unable to capture screenshot: {}\".format(str(e)))\n raise",
"def element_wait(self, selector, secs=1):\n if \"=>\" not in selector:\n raise NameError(\"Positioning syntax errors, lack of '=>'.\")\n\n by = selector.split(\"=>\")[0].strip()\n value = selector.split(\"=>\")[1].strip()\n messages = 'Element: {0} not found in {1} seconds.'.format(selector, secs)\n\n if by == \"id\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.ID, value)), messages)\n elif by == \"name\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.NAME, value)), messages)\n elif by == \"class\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.CLASS_NAME, value)),messages)\n elif by == \"link_text\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.LINK_TEXT, value)),messages)\n elif by == \"xpath\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.XPATH, value)), messages)\n elif by == \"css\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.CSS_SELECTOR, value)),messages)\n else:\n raise NameError(\"Please enter the correct targeting elements,'id','name','class','link_text','xpaht','css'.\")",
"def wait_page_loaded(self, timeout=10):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as ec\n\n old_page = self.selenium.find_element(By.TAG_NAME, \"html\")\n yield\n # Wait for the next page to be loaded\n self.wait_until(ec.staleness_of(old_page), timeout=timeout)\n self.wait_page_ready(timeout=timeout)",
"def get_all_elements_with_xpath(_xpath=None):\n if _xpath:\n try:\n return WebDriverWait(driver, timeout).until(\n ec.presence_of_all_elements_located((By.XPATH, _xpath)))\n except TimeoutException:\n print(f'Element with xpath \"{_xpath}\" was not found in \"{timeout}\" seconds.')\n close_and_finish_execution()\n else:\n print(f'Attribute xpath is mandatory.')\n close_and_finish_execution()",
"def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()",
"def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)",
"def get_elements(self, by, criteria):\n # Need reuse criteria\n return self._find_by_locator().find_elements(by, criteria)",
"def get_descendant_elements(self, xpath) -> list:\n tmp_xpath = self._chain_xpath(xpath)\n tmp_loc = (By.XPATH, tmp_xpath)\n return self._wait.until(EC.visibility_of_all_elements_located(tmp_loc))",
"def invoke_all_and_wait(self):\n list_promise = []\n for thread in self.__list_thread:\n thread.start()\n list_promise.append(thread)\n for process in list_promise: process.join()",
"def wait_for_load(browser):\n loader = browser.find_element_by_class_name('ui-loader')\n while loader.is_displayed():\n time.sleep(0.1)",
"def seventh_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.eighth_page.wait_for_page()",
"def driverWait(driver, timeout, element, elementValue, login=0):\n try:\n WebDriverWait(driver, timeout).until(\n expected_conditions.presence_of_all_elements_located((element, elementValue)))\n except:\n if login:\n print(\"Login Failed.\", file=sys.stderr)\n print(\"Wrong username or password.\", file=sys.stderr)\n else:\n print(\"Connection timed out.\", file=sys.stderr)\n driver.quit()\n exit(0)",
"def wait_for_appear(self, timeout=None):\n wait_until(lambda: self.get_element(False) is not None,\n \"Element '%s' did not exit in <TIMEOUT>.\" % self._locator,\n timeout)",
"def fill_listing(self):\r\n self.driver.get(FORM)\r\n for i in range(len(self.all_links)):\r\n\r\n time.sleep(3)\r\n question_1 = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"mG61Hd\"]/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input\"\"\")\r\n question_1.send_keys(self.address_list[i])\r\n question_1.send_keys(Keys.TAB)\r\n question_2 = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"mG61Hd\"]/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input\"\"\")\r\n question_2.send_keys(self.price_list[i])\r\n question_2.send_keys(Keys.TAB)\r\n question_3 = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"mG61Hd\"]/div[2]/div/div[2]/div[3]/div/div/div[2]/div/div[1]/div/div[1]/input\"\"\")\r\n question_3.send_keys(self.all_info[i])\r\n question_3.send_keys(Keys.TAB)\r\n question_4 = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"mG61Hd\"]/div[2]/div/div[2]/div[4]/div/div/div[2]/div/div[1]/div/div[1]/input\"\"\")\r\n question_4.send_keys(self.all_links[i])\r\n send = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"mG61Hd\"]/div[2]/div/div[3]/div[1]/div/div/span/span\"\"\").click()\r\n time.sleep(3)\r\n self.driver.find_element_by_xpath(\"\"\"/html/body/div[1]/div[2]/div[1]/div/div[4]/a\"\"\").click()",
"def refresh(self):\n\n self.driver.implicitly_wait(5)\n self.driver.refresh()"
]
| [
"0.6774106",
"0.6140227",
"0.604411",
"0.60229385",
"0.58454835",
"0.5785627",
"0.57709783",
"0.5755594",
"0.57234937",
"0.57192695",
"0.56270385",
"0.5626797",
"0.5621867",
"0.5621245",
"0.55927444",
"0.555113",
"0.5505866",
"0.55023366",
"0.5485123",
"0.5483368",
"0.5418298",
"0.5408406",
"0.54032266",
"0.5395018",
"0.53942996",
"0.53801566",
"0.5371715",
"0.53530335",
"0.5340797",
"0.5333069"
]
| 0.6151248 | 1 |
Input a CSS selector, then use JavaScript to click the element. | def js_click(self, css):
    t1 = time.time()
    js_str = "$('{0}').click()".format(css)
    try:
        self.driver.execute_script(js_str)
        self.my_print("{0} Use javascript click element: {1}, Spend {2} seconds".format(success,js_str,time.time()-t1))
    except Exception:
        self.my_print("{0} Unable to use javascript click element: {1}, Spend {2} seconds".format(fail,
                      js_str, time.time() - t1))
        raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click(self) -> None:\n logging.info(f\"click element. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.style.border=\"2px solid red\";\n elm.click();\"\"\"\n self._execute_javascript(js)",
"def id_click(elem_id):\r\n css_click('#{}'.format(elem_id))",
"def click_css(page, css, source_index, require_notification=True):\r\n buttons = page.q(css=css)\r\n target = buttons[source_index]\r\n ActionChains(page.browser).click(target).release().perform()\r\n if require_notification:\r\n wait_for_notification(page)",
"def click_element(self, css_selector):\n self.wait.until(EC.element_to_be_clickable(\n (By.CSS_SELECTOR, css_selector)))\n self.driver.find_element_by_css_selector(css_selector).click()",
"def click(self, selector):\n el = self.locate_element(selector)\n el.click()",
"def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()",
"def select(self, target):",
"def click_display(self) -> None:\n logging.info(f\"Click on the displayed element. {self.desc}\")\n js = 'var elm = document.querySelector(\"' + self.css + '\");' \\\n ' if(elm != null){elm.style.border=\"2px solid red\";elm.click();}'\n self._execute_javascript(js)",
"def select(self):\r\n pass",
"def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc",
"def click(self, selector, index=0):\n self.find_css(selector).nth(index).click()",
"def do_select(self, line):\n xpath, option = split_args(line)\n e = self._find_element_by_xpath(xpath)\n select = Select(e)\n select.select_by_value(option)",
"def set_selector(*args):\n return _ida_segment.set_selector(*args)",
"def select(self):\n pass",
"def select(self):\n pass",
"def select_character(self):\n pass",
"def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )",
"def click(self):\r\n pass",
"def select(self,item):\r\n pass",
"def setMySelector(self, x, y, z, scale):\n selectorPos = (self.selector.getX(), self.selector.getY(), self.selector.getZ())\n if selectorPos != (x,y,z):\n self.selector.setPos(x,y,z)\n self.selector.show()\n self.selector.setScale(scale)\n return 1\n else:\n self.selector.setPos(-1,-1,-1)\n return 0\n #self.enableScrollWheelZoom = 0",
"def click_by_css(self, xpath):\n element = self.wait_for_displayed(xpath)\n if element:\n self.driver.find_element_by_css_selector(xpath).click()\n return True\n else:\n return False",
"def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True",
"def selector(**kwargs):\n return kwargs",
"def select(message, items=[], timeout=0, buttons=DIALOGBUTTON_OK):\n warnings.warn(\"This Method moved uwstyle.select()\", DeprecationWarning)\n return uwstyle.select(message, items, timeout, buttons)",
"def css_check(css_selector, wait_time=30):\r\n css_click(css_selector=css_selector, wait_time=wait_time)\r\n wait_for(lambda _: css_find(css_selector).selected)\r\n return True",
"def hxlselect():\n run_script(hxlselect_main)",
"def select(*args):",
"def select(*args):",
"def clickMe():\r\n global Format\r\n Format = typeToChoose.get()\r\n print(Format)\r\n action.configure(text='selected ' + Format) # show the selected item after clicked\r\n action.configure(state='disabled') # button disabled after clicked\r",
"def select_action(self):\n pass"
]
| [
"0.65767026",
"0.6216461",
"0.61339897",
"0.60989267",
"0.57894135",
"0.5623732",
"0.5614071",
"0.55613613",
"0.55136573",
"0.5502544",
"0.5444525",
"0.53893334",
"0.53802377",
"0.5371031",
"0.5371031",
"0.53598744",
"0.5336465",
"0.53346795",
"0.532027",
"0.5297092",
"0.52764547",
"0.5233463",
"0.52321213",
"0.5228836",
"0.52227736",
"0.52053493",
"0.517295",
"0.517295",
"0.51502126",
"0.51467824"
]
| 0.6484632 | 1 |
Rotate counter clockwise by given angle around given axis in local coordinate system. | def rotate_local(self, angle, axis=(0., 0., 1.)):
    self.rotation *= aa2q(angle, glm.vec3(axis)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rotate_global(self, angle, axis=(0., 0., 1.)):\n self.rotation = aa2q(angle, glm.vec3(axis)) * self.rotation",
"def rotate_clockwise(self, angle):\r\n angle = degrees_to_radians(angle)\r\n current_angle = atan(self.x / self.y)\r\n angle += current_angle\r\n\r\n length = self.length\r\n self.x = length*sin(angle)\r\n self.y = length*cos(angle)",
"def R_axis_angle(axis, angle):\n\n # Trig factors.\n ca = math.cos(angle)\n sa = math.sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = axis\n\n # Multiplications (to remove duplicate calculations).\n xs = x * sa\n ys = y * sa\n zs = z * sa\n xC = x * C\n yC = y * C\n zC = z * C\n xyC = x * yC\n yzC = y * zC\n zxC = z * xC\n\n # Update the rotation matrix.\n matrix = np.zeros((3, 3))\n matrix[0, 0] = x * xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y * yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z * zC + ca\n return matrix",
"def rotation_around_axis(self,axis,angle,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n # get the data\n ct,st = np.cos(angle),np.sin(angle)\n ux,uy,uz = axis\n\n # get the center of the molecule\n xyz0 = np.mean(xyz,0)\n\n # definition of the rotation matrix\n # see https://en.wikipedia.org/wiki/Rotation_matrix\n rot_mat = np.array([\n [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st],\n [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*uz*(1-ct) - ux*st],\n [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct) ]])\n\n # apply the rotation\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n self.update('x,y,z',xyz,**kwargs)\n\n return xyz0",
"def rotate(self, axis, theta):\n return NotImplemented",
"def rotate(self, axis, theta):\n v = Vector3(self) # ensure vector\n k = Vector3(axis.uv())\n return type(self)(\n cosd(theta) * v\n + sind(theta) * k.cross(v)\n + (1 - cosd(theta)) * k * (k.dot(v))\n )",
"def rotate (vect, angle, axis):\n\n cosine = np.cos (angle)\n sine = np.sin (angle)\n\n return (vect * cosine + \\\n sine * np.cross (axis, vect) + \\\n np.dot (axis, vect) * (1 - cosine) * axis)",
"def rotate_axis(self, axis: \"Vertex\", angle: float):\n self.vertices = list(\n Matrix44.axis_rotate(axis, angle).transform_vertices(self.vertices)\n )\n return self",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)",
"def axis_angle_rm(axis=np.array([1, 0, 0]), angle=-1.57):\n c = math.cos(angle)\n s = math.sin(angle)\n t = 1 - c\n x, y, z = axis[0], axis[1], axis[2]\n rotation_matrix = np.array(\n [\n [t*x*x + c, t*x*y - z*s, t*x*z + y*s],\n [t*x*y + z*s, t*y*y + c, t*y*z - x*s],\n [t*x*z - y*s, t*y*z + x*s, t*z*z + c]\n ])\n return rotation_matrix",
"def rotate_around_axis(vec, axis, angle):\n\n axis = normalise(axis)\n a = cos(angle / 2.0)\n b, c, d = -axis * sin(angle / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n rot_matrix = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n return vec.dot(rot_matrix)",
"def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])",
"def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self",
"def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])",
"def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return",
"def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)",
"def rotateAroundAxis(self, rotation_axis, angle):\n # For the mathematics look for: Rodrigues rotation formula.\n # http://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula\n unit_rotation_axis = rotation_axis.getNormalizedVector()\n\n rotated_vector = self.scalarMultiplication(np.cos(angle))\n\n tmp_vector = unit_rotation_axis.crossProduct(self)\n tmp_vector = tmp_vector.scalarMultiplication(np.sin(angle))\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n scalar_factor = self.scalarProduct(unit_rotation_axis) * (1.0 - np.cos(angle))\n tmp_vector = unit_rotation_axis.scalarMultiplication(scalar_factor)\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n return rotated_vector",
"def rotation(axis, angle):\n axis = np.asarray(axis)\n try:\n angle = angle[:,None]\n except:\n pass\n return np.hstack([np.asarray(axis)*np.sin(angle/2.),np.cos(angle/2.)])",
"def rotate_axis(self):\n try:\n self.obj.rotate(angle=self.rotation_speed * self.time_scale / self.refresh_rate, axis=vector(0, 1, 0))\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")",
"def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)",
"def rotate( self, degrees, axis ):\n # copy and normalize axis\n axis = Vector3( axis ).normalize()\n\n # get stub of self projected onto axis\n stub = Vector3( self ).project( axis )\n\n # subtract stub from self\n self -= stub\n\n # get new vector crossed with axis\n crossed = Vector3( axis ).cross( self )\n\n # trigify self and crossed to account for rotation\n crossed *= math.sin( math.radians(degrees) )\n self *= math.cos( math.radians(degrees) )\n\n # add crossed and stub components to self\n self += crossed\n self += stub\n \n return self",
"def rotate(x_or_y,degree):\r\n\r\n #axis=0 represents x-axis\r\n #axis=1 represents y-axis\r\n \r\n if x_or_y=='X' or x_or_y=='x':\r\n axis=0\r\n elif x_or_y=='Y' or x_or_y=='y':\r\n axis=1\r\n elif x_or_y==0:\r\n axis=0\r\n elif x_or_y==1:\r\n axis=1\r\n else:\r\n print(\"Illeagel argument in rotate_degree\")\r\n return\r\n\r\n #decide which pins to use accroding to the axis\r\n #info is for debug used it can be eliminated\r\n if axis==0:\r\n info=\"x-axis\"\r\n stepsPin=xCwPin;\r\n cwOrCcwPin=xCcwPin\r\n elif axis==1:\r\n info=\"y-axis\"\r\n stepsPin=yCwPin;\r\n cwOrCcwPin=yCcwPin\r\n\r\n if degree>0:\r\n info=info+\" rotate cw\"\r\n GPIO.output(cwOrCcwPin, True) #cw\r\n elif degree<0:\r\n info=info+\" rotate ccw\"\r\n GPIO.output(cwOrCcwPin, False) #ccw\r\n elif degree==0:\r\n return\r\n\r\n tmp=abs(degree)/0.36\r\n steps=round(tmp)\r\n\r\n info=info+\" for \"+str(degree)+\" degrees \"+str(steps)+\" steps\"\r\n\r\n i=0\r\n while i<steps:\r\n GPIO.output(stepsPin, True)\r\n time.sleep(0.001)\r\n GPIO.output(stepsPin, False)\r\n time.sleep(0.05)\r\n i=i+1\r\n #GPIO.output(cwOrCcwPin, True)\r\n\r\n if SHOW_ROTATE:\r\n print(info)",
"def rotate(self, angle, axis, position=None):\n if position is not None:\n pos = np.array(position)\n self.translate(-pos)\n self._rotate_about_origin(angle, axis)\n self.translate(pos)\n else:\n self._rotate_about_origin(angle, axis)",
"def rotation(theta, axis):\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])",
"def axis_angle(cls, axis: Union[tuple, Vector], angle: Number):\n if isinstance(axis, (list, tuple)):\n assert(len(axis) == 3)\n axis = Vector(*axis)\n\n assert(isinstance(axis, Vector))\n\n K = Matrix(3, 3)\n\n axis.normalize()\n\n x = axis[0, 0]\n y = axis[1, 0]\n z = axis[2, 0]\n\n K[0, 1] = -z\n K[0, 2] = y\n K[1, 2] = -x\n\n K[1, 0] = z\n K[2, 0] = -y\n K[2, 1] = x\n\n c = np.cos(angle)\n s = np.sin(angle)\n\n I = Matrix.identity(3)\n\n rot = I + (s * I + (1 - c) * K) * K\n\n return cls(rot)",
"def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)",
"def rotate(self, angle, axis, position=None):\n for bound in self._bounds:\n bound.rotate(angle, axis, position)",
"def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)",
"def rotate_vector ( angle, axis, old ):\n\n import numpy as np\n \n # Note that the axis vector should be normalized and we test for this\n # In general, the old vector need not be normalized, and the same goes for the result\n # although quite often in our applications they will be\n\n assert old.size == 3, 'Incorrect size of old'\n assert axis.size == 3, 'Incorrect size of axis'\n assert np.isclose(np.sum(axis**2),1.0), 'Non-unit vector {} {} {}'.format(*axis)\n\n c = np.cos ( angle )\n s = np.sin ( angle )\n proj = np.dot ( axis, old ) # The two vectors need not be perpendicular\n\n # Standard (Goldstein) rotation formula\n e = c * old + ( 1.0 - c ) * proj * axis + s * np.cross ( axis, old )\n\n return e"
]
| [
"0.73928547",
"0.71643406",
"0.70795226",
"0.6971528",
"0.6945819",
"0.6878461",
"0.6877881",
"0.68302655",
"0.6827688",
"0.6783266",
"0.66841495",
"0.66782784",
"0.6670662",
"0.66611797",
"0.6657584",
"0.66516477",
"0.6647359",
"0.65800184",
"0.65794575",
"0.65659213",
"0.65483534",
"0.6534614",
"0.64264566",
"0.6422118",
"0.63817966",
"0.6378122",
"0.63720995",
"0.6368396",
"0.63452154",
"0.6341374"
]
| 0.748749 | 0 |
Move by given displacement in global coordinate system. | def move_global(self, xyz):
self.position += xyz | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate(self, displacement):\n self._position = self._position + np.array(displacement)",
"def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction",
"def _moveCamera(self, displacement):\n if type(displacement) is not Vec2:\n displacement = Vec2(displacement)\n\n self.camCenter += displacement",
"def translate(self, displacement):\n self._center = self._center + np.array(displacement)\n self._position = self._position + np.array(displacement)",
"def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)",
"def move(self):\n vector = vectors[compass.index(self.heading)]\n x = self.position[0] + vector[0]\n y = self.position[1] + vector[1]\n self._check_move(x, self.plateau[0])\n self._check_move(y, self.plateau[1])\n return replace(self, position=(x, y))",
"def move(x,y):\r\n pass",
"def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])",
"def move(self):\n c = self.get_position()\n\n f = c['f']\n if f == 'NORTH':\n c['y'] += 1\n elif f == 'EAST':\n c['x'] += 1\n elif f == 'SOUTH':\n c['y'] -= 1\n elif f == 'WEST':\n c['x'] -= 1\n\n if self.valid_position(c):\n self.update_position(c)\n else:\n raise ValueError('InvalidPosition')",
"def move(self,dt):\n self.x_pos += self.x_vel*dt\n self.y_pos += self.y_vel*dt",
"def move(self, x, y):\n\n #log.info(\"MOVE x:%s y:%s\", x, y)",
"def move(self):\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed\n # Next, account for gravity\n (self.angle, self.speed) = addVectors((self.angle, self.speed), gravity)\n # Then, friction / drag\n self.speed *= drag",
"def move(self, p):\r\n self.position.setvalue(p)",
"def moveBasedOnCurrentMomentum(self):\n self.xPos-=self.xMomentum\n self.yPos-=self.yMomentum\n self.syncSpriteCoordinates()",
"def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()",
"def move(i, j):\n global X\n global Y\n X += i\n Y += j",
"def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)",
"def move(self):\n \n self.position = self.explore()",
"def do_move(self, rel=True):\n cmd = self.MGMSG_MOT_MOVE_ABSOLUTE\n if rel:\n cmd = self.MGMSG_MOT_MOVE_RELATIVE\n self.__send_short(cmd, self.__chan, 0x00)",
"def move(self,dt):\n raise NotImplementedError(\"Robot.move\")",
"def move(self):\n neg = -1 # neg used to change direction of velocity if ship is jumping or not\n\n if self.goForward:\n self.x_pos += self.x_velocity\n\n if self.x_pos > self.final_pos_x and self.goForward:\n self.goForward = False\n\n if self.y_velocity > 0:\n self.isJump = False\n neg = 1\n\n self.y_pos += (self.y_velocity ** 2) * neg / 15 # formula to simulate a real fall or jump\n self.y_velocity += 0.5",
"def move(solarsystem, asteroids, dt):\n calc_force(solarsystem, solarsystem, dt)\n calc_force(asteroids, solarsystem, dt)\n solarsystem['x'] += solarsystem['vx'] * dt\n solarsystem['y'] += solarsystem['vy'] * dt\n solarsystem['z'] += solarsystem['vz'] * dt\n\n asteroids['x'] += asteroids['vx'] * dt\n asteroids['y'] += asteroids['vy'] * dt\n asteroids['z'] += asteroids['vz'] * dt",
"def move(self):\n self.position += self.speed",
"def movement_step(self, distance, displacement, obs, for_subgroup=False):\n if for_subgroup:\n new_target = self.get_avg_location_of_self_subgroup(obs)\n else:\n new_target = self.get_current_location(obs)\n\n new_target[0] += distance * displacement[0]\n new_target[1] += distance * displacement[1]\n\n # cap map bounds of new target within map dimensions\n border_limit = 2 # target will not be set within border_limit distance of the edge of map\n if new_target[0] >= (self.max_map_height - border_limit):\n new_target[0] = (self.max_map_height - border_limit)\n if new_target[1] >= (self.max_map_width - border_limit):\n new_target[1] = (self.max_map_width - border_limit)\n if new_target[0] <= border_limit:\n new_target[0] = border_limit\n if new_target[1] <= border_limit:\n new_target[1] = border_limit\n\n self.set_target_destination(new_target)",
"def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move",
"def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move",
"def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y",
"def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED",
"def moving(self,newX,newY):\n LOGGER.debug(\"{} moved to {} | {}\".format(self.physic_id,newX,newY))\n lazzyUpdate().sendTrame(self.physic_id,{\"coordX\":newX,\"coordY\":newY})",
"def move(self, coordinates, direction):\n pass"
]
| [
"0.71599424",
"0.7121574",
"0.69739336",
"0.6910599",
"0.6829471",
"0.6662486",
"0.6624079",
"0.6605594",
"0.65653765",
"0.65229875",
"0.649036",
"0.6486678",
"0.6419891",
"0.64157003",
"0.64138556",
"0.6388829",
"0.63627565",
"0.6348988",
"0.63424224",
"0.6332199",
"0.6326108",
"0.6325241",
"0.63236845",
"0.63017774",
"0.62980485",
"0.62980485",
"0.6286511",
"0.62510353",
"0.62495035",
"0.6233895"
]
| 0.7544456 | 0 |
Rotate counter-clockwise by given angle around given axis in global coordinate system. | def rotate_global(self, angle, axis=(0., 0., 1.)):
self.rotation = aa2q(angle, glm.vec3(axis)) * self.rotation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def R_axis_angle(axis, angle):\n\n # Trig factors.\n ca = math.cos(angle)\n sa = math.sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = axis\n\n # Multiplications (to remove duplicate calculations).\n xs = x * sa\n ys = y * sa\n zs = z * sa\n xC = x * C\n yC = y * C\n zC = z * C\n xyC = x * yC\n yzC = y * zC\n zxC = z * xC\n\n # Update the rotation matrix.\n matrix = np.zeros((3, 3))\n matrix[0, 0] = x * xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y * yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z * zC + ca\n return matrix",
"def rotate_clockwise(self, angle):\r\n angle = degrees_to_radians(angle)\r\n current_angle = atan(self.x / self.y)\r\n angle += current_angle\r\n\r\n length = self.length\r\n self.x = length*sin(angle)\r\n self.y = length*cos(angle)",
"def rotate_local(self, angle, axis=(0., 0., 1.)):\n self.rotation *= aa2q(angle, glm.vec3(axis))",
"def rotation_around_axis(self,axis,angle,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n # get the data\n ct,st = np.cos(angle),np.sin(angle)\n ux,uy,uz = axis\n\n # get the center of the molecule\n xyz0 = np.mean(xyz,0)\n\n # definition of the rotation matrix\n # see https://en.wikipedia.org/wiki/Rotation_matrix\n rot_mat = np.array([\n [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st],\n [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*uz*(1-ct) - ux*st],\n [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct) ]])\n\n # apply the rotation\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n self.update('x,y,z',xyz,**kwargs)\n\n return xyz0",
"def axis_angle_rm(axis=np.array([1, 0, 0]), angle=-1.57):\n c = math.cos(angle)\n s = math.sin(angle)\n t = 1 - c\n x, y, z = axis[0], axis[1], axis[2]\n rotation_matrix = np.array(\n [\n [t*x*x + c, t*x*y - z*s, t*x*z + y*s],\n [t*x*y + z*s, t*y*y + c, t*y*z - x*s],\n [t*x*z - y*s, t*y*z + x*s, t*z*z + c]\n ])\n return rotation_matrix",
"def rotate_axis(self, axis: \"Vertex\", angle: float):\n self.vertices = list(\n Matrix44.axis_rotate(axis, angle).transform_vertices(self.vertices)\n )\n return self",
"def rotate (vect, angle, axis):\n\n cosine = np.cos (angle)\n sine = np.sin (angle)\n\n return (vect * cosine + \\\n sine * np.cross (axis, vect) + \\\n np.dot (axis, vect) * (1 - cosine) * axis)",
"def rotate(self, axis, theta):\n return NotImplemented",
"def rotate_axis(self):\n try:\n self.obj.rotate(angle=self.rotation_speed * self.time_scale / self.refresh_rate, axis=vector(0, 1, 0))\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")",
"def rotate(self, axis, theta):\n v = Vector3(self) # ensure vector\n k = Vector3(axis.uv())\n return type(self)(\n cosd(theta) * v\n + sind(theta) * k.cross(v)\n + (1 - cosd(theta)) * k * (k.dot(v))\n )",
"def rotation(axis, angle):\n axis = np.asarray(axis)\n try:\n angle = angle[:,None]\n except:\n pass\n return np.hstack([np.asarray(axis)*np.sin(angle/2.),np.cos(angle/2.)])",
"def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])",
"def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)",
"def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])",
"def rotate(x_or_y,degree):\r\n\r\n #axis=0 represents x-axis\r\n #axis=1 represents y-axis\r\n \r\n if x_or_y=='X' or x_or_y=='x':\r\n axis=0\r\n elif x_or_y=='Y' or x_or_y=='y':\r\n axis=1\r\n elif x_or_y==0:\r\n axis=0\r\n elif x_or_y==1:\r\n axis=1\r\n else:\r\n print(\"Illeagel argument in rotate_degree\")\r\n return\r\n\r\n #decide which pins to use accroding to the axis\r\n #info is for debug used it can be eliminated\r\n if axis==0:\r\n info=\"x-axis\"\r\n stepsPin=xCwPin;\r\n cwOrCcwPin=xCcwPin\r\n elif axis==1:\r\n info=\"y-axis\"\r\n stepsPin=yCwPin;\r\n cwOrCcwPin=yCcwPin\r\n\r\n if degree>0:\r\n info=info+\" rotate cw\"\r\n GPIO.output(cwOrCcwPin, True) #cw\r\n elif degree<0:\r\n info=info+\" rotate ccw\"\r\n GPIO.output(cwOrCcwPin, False) #ccw\r\n elif degree==0:\r\n return\r\n\r\n tmp=abs(degree)/0.36\r\n steps=round(tmp)\r\n\r\n info=info+\" for \"+str(degree)+\" degrees \"+str(steps)+\" steps\"\r\n\r\n i=0\r\n while i<steps:\r\n GPIO.output(stepsPin, True)\r\n time.sleep(0.001)\r\n GPIO.output(stepsPin, False)\r\n time.sleep(0.05)\r\n i=i+1\r\n #GPIO.output(cwOrCcwPin, True)\r\n\r\n if SHOW_ROTATE:\r\n print(info)",
"def rotateAroundAxis(self, rotation_axis, angle):\n # For the mathematics look for: Rodrigues rotation formula.\n # http://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula\n unit_rotation_axis = rotation_axis.getNormalizedVector()\n\n rotated_vector = self.scalarMultiplication(np.cos(angle))\n\n tmp_vector = unit_rotation_axis.crossProduct(self)\n tmp_vector = tmp_vector.scalarMultiplication(np.sin(angle))\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n scalar_factor = self.scalarProduct(unit_rotation_axis) * (1.0 - np.cos(angle))\n tmp_vector = unit_rotation_axis.scalarMultiplication(scalar_factor)\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n return rotated_vector",
"def rotate_around_axis(vec, axis, angle):\n\n axis = normalise(axis)\n a = cos(angle / 2.0)\n b, c, d = -axis * sin(angle / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n rot_matrix = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n return vec.dot(rot_matrix)",
"def rotate(self, angle, axis, position=None):\n for bound in self._bounds:\n bound.rotate(angle, axis, position)",
"def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])",
"def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)",
"def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return",
"def rotate( self, degrees, axis ):\n # copy and normalize axis\n axis = Vector3( axis ).normalize()\n\n # get stub of self projected onto axis\n stub = Vector3( self ).project( axis )\n\n # subtract stub from self\n self -= stub\n\n # get new vector crossed with axis\n crossed = Vector3( axis ).cross( self )\n\n # trigify self and crossed to account for rotation\n crossed *= math.sin( math.radians(degrees) )\n self *= math.cos( math.radians(degrees) )\n\n # add crossed and stub components to self\n self += crossed\n self += stub\n \n return self",
"def rotate(self, angle, axis, position=None):\n if position is not None:\n pos = np.array(position)\n self.translate(-pos)\n self._rotate_about_origin(angle, axis)\n self.translate(pos)\n else:\n self._rotate_about_origin(angle, axis)",
"def axis_angle(cls, axis: Union[tuple, Vector], angle: Number):\n if isinstance(axis, (list, tuple)):\n assert(len(axis) == 3)\n axis = Vector(*axis)\n\n assert(isinstance(axis, Vector))\n\n K = Matrix(3, 3)\n\n axis.normalize()\n\n x = axis[0, 0]\n y = axis[1, 0]\n z = axis[2, 0]\n\n K[0, 1] = -z\n K[0, 2] = y\n K[1, 2] = -x\n\n K[1, 0] = z\n K[2, 0] = -y\n K[2, 1] = x\n\n c = np.cos(angle)\n s = np.sin(angle)\n\n I = Matrix.identity(3)\n\n rot = I + (s * I + (1 - c) * K) * K\n\n return cls(rot)",
"def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)",
"def rotation(theta, axis):\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])",
"def rotate_vector ( angle, axis, old ):\n\n import numpy as np\n \n # Note that the axis vector should be normalized and we test for this\n # In general, the old vector need not be normalized, and the same goes for the result\n # although quite often in our applications they will be\n\n assert old.size == 3, 'Incorrect size of old'\n assert axis.size == 3, 'Incorrect size of axis'\n assert np.isclose(np.sum(axis**2),1.0), 'Non-unit vector {} {} {}'.format(*axis)\n\n c = np.cos ( angle )\n s = np.sin ( angle )\n proj = np.dot ( axis, old ) # The two vectors need not be perpendicular\n\n # Standard (Goldstein) rotation formula\n e = c * old + ( 1.0 - c ) * proj * axis + s * np.cross ( axis, old )\n\n return e",
"def rotate(x, y, angle):\n return x * cos(angle) - y * sin(angle), y * cos(angle) + x * sin(angle)"
]
| [
"0.71005154",
"0.70900816",
"0.6978643",
"0.69763106",
"0.68740296",
"0.6857633",
"0.6840003",
"0.6829036",
"0.67823285",
"0.67059314",
"0.6655236",
"0.6598192",
"0.65935904",
"0.65712935",
"0.65678674",
"0.65628415",
"0.6562488",
"0.65563333",
"0.6545525",
"0.6534826",
"0.65218365",
"0.65047723",
"0.6448723",
"0.64127296",
"0.6368335",
"0.6363238",
"0.63594866",
"0.63519424",
"0.63107646",
"0.627842"
]
| 0.7825282 | 0 |
Skip test completely if no Docker-based XNAT instance available | def docker_available(func=None):
def check_and_raise():
if 'setup_docker_xnat' in func.__name__:
print('Initializing XNAT.')
return
fp = op.abspath('.xnat.cfg')
print(fp, op.isfile(fp))
x = Interface(config=op.abspath('.xnat.cfg'))
try:
x.head('')
list(x.select.projects())
print('Docker instance found.')
except (ConnectionError, KeyError):
print('Skipping it.')
pytest.skip('Docker-based XNAT instance unavailable')
if func:
@wraps(func)
def newfunc(*args, **kwargs):
check_and_raise()
return func(*args, **kwargs)
return newfunc
else:
check_and_raise() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_host():\n return not in_docker()",
"def docker_allow_fallback():\n return False",
"def test_need_proxy(self):\n os.environ['no_proxy'] = 'blah.com,blah2.com'\n self.assertTrue(dockerv2.need_proxy('proxy.blah3.com'))\n self.assertFalse(dockerv2.need_proxy('proxy.blah.com'))",
"def test_node_start_no_detected_ips(\n self, m_docker_client, m_client, m_install_kube,\n m_warn_if_hostname_conflict, m_warn_if_unknown_ip,\n m_get_host_ips, m_check_system, m_os_makedirs, m_os_path_exists,\n m_sys_exit):\n # Set up mock objects\n m_get_host_ips.return_value = []\n\n # Set up arguments\n node_image = 'node_image'\n log_dir = './log_dir'\n ip = ''\n ip6 = 'aa:bb::zz'\n as_num = ''\n detach = False\n kubernetes = True\n\n # Call method under test\n node.node_start(\n node_image, log_dir, ip, ip6, as_num, detach, kubernetes\n )\n\n # Assert\n m_sys_exit.assert_called_once_with(1)",
"def test_not_ready_with_no_networking_and_broken_felix(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS, start_calico=False) as host1:\n # Start node without felix healthcheck endpoint.\n host1.start_calico_node(env_options=\"-e FELIX_HEALTHENABLED=false -e CALICO_NETWORKING_BACKEND=none\")\n\n # Run readiness checks against felix\n self.assertRaisesRegexp(CalledProcessError, \"calico/node is not ready: felix is not ready\", host1.execute,\n \"docker exec calico-node /bin/calico-node -felix-ready\")",
"def ping_docker():\n with Docker('unittest-36', image='python:3.6') as tun:\n return tun.call(python_version)[:2]",
"def test_list_docker_not_running(self, docker_ping):\n docker_ping.side_effect = Exception('Boom!')\n\n runner = CliRunner()\n result = runner.invoke(cli_node_list, [])\n\n # check exit code\n self.assertEqual(result.exit_code, 1)",
"def test_nrpe_configured(self):\n if not zaza.model.get_relation_id(\n self.application_name, 'nrpe',\n remote_interface_name='nrpe-external-master'):\n self.skipTest('The NRPE charm is not related to the '\n 'charm under test.')\n units = zaza.model.get_units(self.application_name)\n cmds = []\n for check_name in self.nrpe_checks:\n cmds.append(\n 'egrep -oh /usr/local.* /etc/nagios/nrpe.d/'\n 'check_{}.cfg'.format(check_name)\n )\n ret = self._retry_check_commands_on_units(cmds, units)\n if ret:\n logging.info(ret)\n self.assertIsNone(ret, msg=ret)",
"def test_connection_failure(aggregator, check, bad_instance):\n instance_tags = [\"supervisord_server:travis\"]\n with pytest.raises(Exception):\n check.check(bad_instance)\n aggregator.assert_service_check(\"supervisord.can_connect\", status=check.CRITICAL, tags=instance_tags, count=1)",
"def test_live_migration_src_check_compute_node_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_src_check,\n self.context, i_ref)\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])",
"def test_liveness(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_live, retries=30)",
"def verify_host(self):\n super().verify_host()\n if not self.use_docker:\n if self.tools.host_os != \"Linux\":\n raise UnsupportedHostError(self.supported_host_os_reason)",
"def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def docker_available(): # type: () -> bool\n return bool(get_docker_command())",
"def no_vnodes():\n return unittest.skipIf(not DISABLE_VNODES, 'Test disabled for vnodes')",
"def pass_sanity_checks():\n if not check_database_connection():\n return False\n\n # The IP address might take some time to arrive\n have_public_ip = False\n for i in range(10):\n if public_ip():\n have_public_ip = True\n break\n else:\n time.sleep(5)\n\n if not have_public_ip:\n return False\n\n if not access_s3():\n return False\n\n # If we get here we're good\n return True",
"def test_not_ready_with_broken_felix(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS, start_calico=False) as host1:\n # Start node without felix healthcheck endpoint.\n host1.start_calico_node(env_options=\"-e FELIX_HEALTHENABLED=false\")\n\n # Run readiness checks against felix\n self.assertRaisesRegexp(CalledProcessError, \"calico/node is not ready: felix is not ready\", host1.execute,\n \"docker exec calico-node /bin/calico-node -felix-ready\")",
"async def test_check(\n docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str\n):\n docker.containers.get = _make_mock_container_get(\n [\"homeassistant\", \"hassio_audio\", \"addon_local_ssh\"], folder\n )\n with patch.object(DockerInterface, \"is_running\", return_value=True):\n await coresys.plugins.load()\n await coresys.homeassistant.load()\n await coresys.addons.load()\n\n docker_config = CheckDockerConfig(coresys)\n coresys.core.state = CoreState.RUNNING\n assert not coresys.resolution.issues\n assert not coresys.resolution.suggestions\n\n # An issue and suggestion is added per container with a config issue\n await docker_config.run_check()\n\n assert len(coresys.resolution.issues) == 4\n assert Issue(IssueType.DOCKER_CONFIG, ContextType.CORE) in coresys.resolution.issues\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.ADDON, reference=\"local_ssh\")\n in coresys.resolution.issues\n )\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.PLUGIN, reference=\"audio\")\n in coresys.resolution.issues\n )\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.SYSTEM) in coresys.resolution.issues\n )\n\n assert len(coresys.resolution.suggestions) == 4\n assert (\n Suggestion(SuggestionType.EXECUTE_REBUILD, ContextType.CORE)\n in coresys.resolution.suggestions\n )\n assert (\n Suggestion(\n SuggestionType.EXECUTE_REBUILD, ContextType.PLUGIN, reference=\"audio\"\n )\n in coresys.resolution.suggestions\n )\n assert (\n Suggestion(\n SuggestionType.EXECUTE_REBUILD, ContextType.ADDON, reference=\"local_ssh\"\n )\n in coresys.resolution.suggestions\n )\n assert (\n Suggestion(SuggestionType.EXECUTE_REBUILD, ContextType.SYSTEM)\n in coresys.resolution.suggestions\n )\n\n assert await docker_config.approve_check()\n\n # IF config issue is resolved, all issues are removed except the main one. Which will be removed if check isn't approved\n docker.containers.get = _make_mock_container_get([])\n with patch.object(DockerInterface, \"is_running\", return_value=True):\n await coresys.plugins.load()\n await coresys.homeassistant.load()\n await coresys.addons.load()\n\n assert not await docker_config.approve_check()\n assert len(coresys.resolution.issues) == 1\n assert len(coresys.resolution.suggestions) == 1\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.SYSTEM) in coresys.resolution.issues\n )",
"def is_geth_running(self) -> bool:\r\n command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec \"admin.nodeInfo\"' % self.name\r\n result = self.ip.exec_command(command)\r\n return False if result.split(':')[0] == 'Fatal' else True",
"def test_not_exectuable(self):\n (status, output, imlog, makelog) = \\\n self.run_instmake_build(log_prefix=\"not-executable\",\n make_opts=[\"not-executable\"])\n\n self.assertEqual(status, util.SUCCESS, output)",
"def _is_docker(self) -> bool:\n from hubble.executor.helper import is_valid_docker_uri\n\n uses = getattr(self.args, 'uses', '')\n return is_valid_docker_uri(uses)",
"def using_xdist(request: pytest.FixtureRequest) -> bool:\n try:\n is_master = request.getfixturevalue(\"worker_id\") == \"master\"\n return not is_master\n except pytest.FixtureLookupError:\n return False",
"def test_not_in_domain(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._empty_domain_list = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)",
"def test_unavailable_server(cluster):\n node2 = cluster.instances[\"node2\"]\n global uuids\n node2.query(\n \"\"\"\n CREATE TABLE test0 UUID '{}'\n (id Int32) ENGINE = MergeTree() ORDER BY id\n SETTINGS storage_policy = 'web';\n \"\"\".format(\n uuids[0]\n )\n )\n node2.stop_clickhouse()\n try:\n # NOTE: you cannot use separate disk instead, since MergeTree engine will\n # try to lookup parts on all disks (to look unexpected disks with parts)\n # and fail because of unavailable server.\n node2.exec_in_container(\n [\n \"bash\",\n \"-c\",\n \"sed -i 's#http://nginx:80/test1/#http://nginx:8080/test1/#' /etc/clickhouse-server/config.d/storage_conf_web.xml\",\n ]\n )\n with pytest.raises(Exception):\n # HTTP retries with backup can take awhile\n node2.start_clickhouse(start_wait_sec=120, retry_start=False)\n assert node2.contains_in_log(\n \"Caught exception while loading metadata.*Connection refused\"\n )\n assert node2.contains_in_log(\n \"HTTP request to \\`http://nginx:8080/test1/.*\\` failed at try 1/10 with bytes read: 0/unknown. Error: Connection refused.\"\n )\n finally:\n node2.exec_in_container(\n [\n \"bash\",\n \"-c\",\n \"sed -i 's#http://nginx:8080/test1/#http://nginx:80/test1/#' /etc/clickhouse-server/config.d/storage_conf_web.xml\",\n ]\n )\n node2.start_clickhouse()\n node2.query(\"DROP TABLE test0 SYNC\")",
"def test_skip_container(self):\n result = utils.skip_container('bob', '')\n\n self.assertTrue(isinstance(result, bool))",
"def test_readiness(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1:\n retry_until_success(host1.assert_is_ready, retries=30)",
"def is_inside_im_container() -> bool:\n # TODO(*): Why not testing only STAGE?\n condition = (\n os.environ.get(\"STAGE\") == \"TEST\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_test\"\n ) or (\n os.environ.get(\"STAGE\") == \"LOCAL\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_local\"\n )\n return condition",
"def skip_if_no_network(func=None):\n\n def check_and_raise():\n if os.environ.get('PYXNAT_SKIP_NETWORK_TESTS'):\n raise SkipTest(\"Skipping since no network settings\")\n\n if func:\n @wraps(func)\n @attr('skip_if_no_network')\n def newfunc(*args, **kwargs):\n check_and_raise()\n return func(*args, **kwargs)\n return newfunc\n else:\n check_and_raise()",
"def skipIfSingleNode():\n if len(get_host_list()[1]) == 0:\n return unittest.skip('requires multiple nodes')\n return lambda o: o"
]
| [
"0.69378114",
"0.6733271",
"0.6270841",
"0.61562204",
"0.60823655",
"0.60283184",
"0.6022272",
"0.59598815",
"0.5942479",
"0.5767451",
"0.5749508",
"0.57380295",
"0.57183486",
"0.56644875",
"0.5642528",
"0.5562638",
"0.5547674",
"0.5546532",
"0.5536506",
"0.5506728",
"0.5505367",
"0.5502541",
"0.54795516",
"0.5462842",
"0.54490834",
"0.544581",
"0.544006",
"0.54297566",
"0.5419015",
"0.5407258"
]
| 0.68378395 | 1 |
Play Russian roulette. Use 'new' to start a new round. | async def roulette(self, ctx: commands.Context, *args):
if isinstance(ctx.channel, DMChannel):
await ctx.send("Roulette can only be played on servers or in group chats.")
return
group: bool = isinstance(ctx.channel, GroupChannel)
hash_code: int = hash(ctx.channel)
if args.__contains__("new") or (group and not self.roulette_group_dict.__contains__(hash_code)) \
or (not group and not self.roulette_server_dict.__contains__(hash_code)):
# We have to start a new round.
new_round: (str, RussianRoulette) = Games.start_new_round()
await ctx.send(new_round[0])
if group:
self.roulette_group_dict[hash_code] = new_round[1]
else:
self.roulette_server_dict[hash_code] = new_round[1]
return
else:
if group:
roulette: RussianRoulette = self.roulette_group_dict.get(hash_code)
else:
roulette: RussianRoulette = self.roulette_server_dict.get(hash_code)
author: User = ctx.author
message: str = "The trigger is pulled and " + author.mention
if roulette.pull():
message += " is killed!"
else:
message += " lives!"
await ctx.send(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def russian_roulette(self, ctx, amount: int = None):\n\n session = self.manager.get_session(ctx.channel)\n if session is None:\n with self.manager.temp_session(ctx.channel, RussianRouletteSession(ctx)) as inst:\n try:\n await inst.add_member(ctx.author, amount)\n except InvalidGameState as e:\n return await ctx.send(e)\n\n await ctx.send(\n f'Russian Roulette game is starting... Type {ctx.prefix}{ctx.invoked_with} '\n 'to join! You have 15 seconds before it closes.'\n )\n\n try:\n winner = await inst.run()\n except InvalidGameState as e:\n if amount is not None:\n # We can assert that there will only be one racer because there\n # must be at least two players.\n user = one(inst.players)\n await add_money(ctx.session, user.id, amount)\n\n return await ctx.send(e)\n\n if inst.pot:\n await (ctx.session.update.table(Currency)\n .set(Currency.amount + inst.pot)\n .where(Currency.user_id == winner.id)\n )\n extra = f'You win **{inst.pot}**{ctx.bot.emoji_config.money}. Hope that was worth it...'\n else:\n extra = ''\n\n await ctx.send(f'{winner.mention} is the lone survivor. Congratulations... {extra}')\n\n else:\n try:\n await session.add_member(ctx.author, amount)\n except InvalidGameState as e:\n return await ctx.send(e)\n\n await ctx.send(f'Alright {ctx.author.mention}. Good luck.')",
"async def roulette(message: discord.Message, participants: int=6):\n await init_game(message, Roulette, participants)",
"async def roulette(self, ctx):\n choices = [\"This is the end of the world\", \"And I don't know what to put here\"]\n await ctx.send(random.choice(choices))",
"def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz",
"def __init__(self):\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep','GoToPlay'])\n\n self.rate = rospy.Rate(1) \n self.counter = 0",
"async def rps(self, ctx):\r\n\r\n async def play():\r\n await ctx.send('Lets play **Rock, Paper, Scissors**. Choose your weapon:')\r\n choices = ('rock', 'paper', 'scissors')\r\n computer = choices[randint(0, 2)]\r\n player = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n player = player.content.lower()\r\n\r\n beats = {\r\n 'rock': ['paper'],\r\n 'paper': ['scissors'],\r\n 'scissors': ['rock']\r\n }\r\n\r\n if computer and player in choices:\r\n if computer == player:\r\n await ctx.send('**Tie!** You both chose **{}**.'.format(computer.title()))\r\n await gameover()\r\n elif player in beats[computer]:\r\n await ctx.send('**You win!** Viking chose: **{}** and you chose: **{}**.'.format(computer.title(), player.title()))\r\n await gameover()\r\n else:\r\n await ctx.send('**You lose!** Viking chose: **{}** and you chose: **{}**.'.format(computer.title(), player.title()))\r\n await gameover()\r\n else:\r\n await ctx.send('Please choose a weapon.')\r\n await play()\r\n\r\n async def gameover():\r\n await ctx.send('Do you want to play again? (Enter: **Yes** / **No**)')\r\n response = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n response = response.content.lower()\r\n\r\n if response == 'yes':\r\n await play()\r\n elif response == 'no':\r\n await ctx.send('Thanks for playing!')\r\n else:\r\n await ctx.send('Invalid option!')\r\n await gameover()\r\n\r\n await play()",
"def play(self, tround, context):",
"def main(filename=\"./img/roulette.gif\", emblem=\"./img/cat.png\", fontname=\"disco\"):\n # Load resources\n logo = Image.open(emblem).resize((160, 160))\n font = ImageFont.truetype(f\"./img/font/{fontname}.ttf\", 48)\n\n # Run the animation\n colors = [COLOR_GREEN] + ([COLOR_RED, COLOR_BLACK] * 18)\n display_numbers = [str(x) for x in NUMBERS]\n frames, durations, ang = spinner.generate_animation(\n 90,\n 37,\n display_numbers,\n colors,\n logo,\n font,\n 416,\n 448,\n 4,\n emblem_func=render_emblem,\n )\n\n # Save the GIF\n frames[0].save(\n filename,\n format=\"GIF\",\n append_images=frames[1:],\n save_all=True,\n duration=durations,\n loop=0,\n )\n\n # Figure out the winning prize\n width = 360 / 37\n offset = 180 + (width / 2)\n print(NUMBERS[math.floor(((ang + offset) % 360) / width)])",
"def newRound():\r\n pass",
"def round(self):\n #player turn\n if self.started:\n self.started = False #registers the game as started then immediately turns that value false\n if self.initial_action:\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n else: \n if self.apply_policy():\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n\n #dealer turn\n if self.dealer.value < 17:\n card = self.deck.deal()\n self.dealer.value += card.value\n self.dealer.visible_value += card.value\n #allow people to reduce their scores by applying aces\n self.apply_ace()\n #check to see if anyone has bust by making bust people not _playing\n if self.player.value > 21:\n self.player.broke = True\n self.player.playing = False\n if self.dealer.value > 21:\n self.dealer.broke = True",
"def main():\n game = Blackjack()\n game.play()",
"def NewGame():\n\twhile(True):\n\t\tos.system('cls')\n\t\tprint ('\\n')\n\t\tprint ( ' Muy bien, ¡un nuevo juego!')\n\t\tprint ( ' Vamos a comenzar \\n')\n\t\tsleep(1)\n\t\tans = input(' ¿Piedra, Papel o tijera? >> ')\n\t\tcpu = random.choice(outcomes)\n\t\tsleep(1)\n\t\tprint (' CPU: '+cpu)\n\t\tPaperRockScissor(ans,cpu)\n\t\tsleep(1)\n\t\t\n\t\taksAgain = input(' Deseas seguir jugando?: Y/N ')\n\t\t\"\"\" Verificamos que su respuesta no sea un no, en caso de serlo rompremos el ciclo \"\"\"\n\t\tif aksAgain in [\"N\",\"NO\",\"n\",\"no\"]:\n\t\t\tbreak",
"def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0",
"async def rps(self, ctx, your_choice : RPSParser):\r\n author = ctx.message.author\r\n player_choice = your_choice.choice\r\n red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))\r\n cond = {\r\n (RPS.rock, RPS.paper) : False,\r\n (RPS.rock, RPS.scissors) : True,\r\n (RPS.paper, RPS.rock) : True,\r\n (RPS.paper, RPS.scissors) : False,\r\n (RPS.scissors, RPS.rock) : False,\r\n (RPS.scissors, RPS.paper) : True\r\n }\r\n\r\n if red_choice == player_choice:\r\n outcome = None # Tie\r\n else:\r\n outcome = cond[(player_choice, red_choice)]\r\n\r\n if outcome is True:\r\n await self.bot.say(\"{} You win {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n elif outcome is False:\r\n await self.bot.say(\"{} You lose {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n else:\r\n await self.bot.say(\"{} We're square {}!\"\r\n \"\".format(red_choice.value, author.mention))",
"def spinupplayer():\n if __name__ == '__main__':\n _playthread = ImmediatePlayer(PLAYER_SETTINGS, COLOR_SETTINGS)\n PROCESSES.append(_playthread)\n _playthread.start()",
"def __init__(self):\n\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz",
"def rop():\n return",
"def main():\n play_game(progression)",
"def run(self):\n \n rospy.spin()",
"def play_round(self):\n move1 = self.p1.move()\n move2 = self.p2.move()\n print(f\"P1: {move1} P2: {move2}\")\n self.p1.learn(move1, move2)\n self.p2.learn(move2, move1)\n \"\"\"Proneround_score and ptworound_score resets\n to 0 at beginning of every round.\"\"\"\n poneround_score = 0\n ptworound_score = 0\n if self.beats(move1, move2):\n print(\"Player 1 Wins This Round\")\n poneround_score = 1\n self.pone_score += 1\n elif self.beats(move2, move1):\n print(\"Player 2 Wins This Round\")\n ptworound_score = 1\n self.ptwo_score += 1\n else:\n print(\"Tie! No Points.\")\n print(f\"Round Points - P1: {poneround_score} | P2: {ptworound_score}\")",
"def rpsls(name):\n global player_choice, comp_choice, winner, image_num, win_method\n \n # convert name to player_number\n player_number = item_dict.get(name)\n \n # determine if input is valid\n if player_number == None:\n player_choice = 'Invalid input. No ' + name + \" in RPSLS.\"\n comp_choice = ''\n win_method = ''\n winner = ''\n image_num = 5\n return\n\n # compute random guess for comp_number using random.randrange()\n comp_number = random.randrange(0,5)\n\n # compute difference of player_number and comp_number modulo five\n # (could use verb_grid to compute winner, but d = p1 - p2 % 5 is probably faster & easier)\n difference = (player_number - comp_number) % 5\n \n # convert comp_number to name using number_to_name\n comp_name = number_to_name(comp_number)\n\n # use if/elif/else to determine winner\n if (difference == 1) or (difference == 2):\n winner = \"Player wins!\"\n image_num = player_number\n win_method = ' '.join([name, verb_grid[player_number][comp_number], comp_name + '...'])\n elif (difference == 3) or (difference == 4):\n winner = \"Computer wins!\"\n image_num = comp_number\n win_method = ' '.join([comp_name, verb_grid[comp_number][player_number], name + '...'])\n elif difference == 0:\n winner = \"Player and computer tie!\"\n image_num = 6\n win_method = ''\n else:\n winner = \"Error computing winner.\"\n \n # update player and computer choice for display\n player_choice = \"Player chooses \" + name\n comp_choice = \"Computer chooses \" + comp_name",
"def rock():\n typer.echo(\"🤖🤘\")",
"def run(self):\n r = rospy.Rate(100)\n while not rospy.is_shutdown():\n r.sleep()",
"def play(request, **kwargs):\n from .play_game import RPSLS, WIN, LOSE, TIE\n\n # Initialize the game engine\n game = RPSLS(request, **kwargs)\n\n # Play the game\n resp = game.play_game()\n\n # Update the scores\n session = request.session\n if resp['result'] == WIN:\n session['win'] += 1\n elif resp['result'] == LOSE:\n session['lose'] += 1\n else:\n session['tie'] += 1\n\n context = {\n 'result': resp['result'],\n 'player_choice': resp['player_choice'],\n 'computer_choice': resp['computer_choice'],\n 'win': session['win'],\n 'lose': session['lose'],\n 'tie': session['tie'],\n 'project': 'RPSLS'\n }\n\n # Explanation messages for game result\n if resp['result'] != TIE:\n messages = {\n 'ScissorsPaper': 'Scissors cut paper!',\n 'ScissorsSpock': 'Spock smashes scissors!',\n 'ScissorsRock': 'Rock crushes scissors!',\n 'ScissorsLizard': 'Scissors decapitate lizard!',\n 'PaperRock': 'Paper covers rock!',\n 'RockLizard': 'Rock crushes lizard!',\n 'LizardSpock': 'Lizard poisons Spock!',\n 'LizardPaper': 'Lizard eats paper!',\n 'PaperSpock': 'Paper disproves Spock!',\n 'SpockRock': 'Spock vaporizes rock!',\n }\n try:\n context['explanation'] = messages[resp['player_choice']+resp['computer_choice']]\n except KeyError:\n context['explanation'] = messages[resp['computer_choice']+resp['player_choice']]\n\n else:\n context['explanation'] = ''\n\n return context",
"def newPlayer():\r\n pass",
"def play(self, turn):\n # global black_prompt, white_prompt, res, pi, board\n if turn % 2 == 0:\n prompt, requests_add, responses_add, color_to_play = self.bp, self.bp, self.wp, BLACK\n print(\"pure\")\n res = pure_MCTS.UCTAlg(json=prompt).run(time_limit=1)\n else:\n prompt, requests_add, responses_add, color_to_play = self.wp, self.wp, self.bp, WHITE\n print(\"alpha\")\n res = mcts.uctAlg.UCTAlg(predict_model=player, json=prompt, mode='comp').run(time_limit=1)[0]\n print(res)\n self.board.disc_place(color_to_play, res[0], res[1]) # record steps to board\n\n dct = {'x': res[0], 'y': res[1]}\n requests_add[\"responses\"].append(dct)\n responses_add[\"requests\"].append(dct)",
"def play_game():\n pass",
"def play(self):\n print(\"Bientôt ! :)\")",
"def __init__(self):\r\n self.you = yourPlay()\r\n self.opposite = rockyman()",
"def com_turn(self):\r\n circle = copy.deepcopy(self.circle)\r\n# Creates a turtle to use in the computer turns\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n com_take= circle%5\r\n if com_take==0:\r\n com_take= random.choice([\"1\",\"2\",\"3\",\"4\"])#Random number between 1-4 if remainder is zero.\r\n\r\n com_take=str(com_take)\r\n self.update_scoreboard(com_take)\r\n com_take = int(com_take)\r\n self.remove_circle(com_take)\r\n self.circle -= com_take\r\n return self.circle"
]
| [
"0.6880835",
"0.6490289",
"0.5815056",
"0.5750089",
"0.5742491",
"0.5729538",
"0.5697026",
"0.55669904",
"0.55635935",
"0.5501709",
"0.5485667",
"0.5421673",
"0.53926754",
"0.53612477",
"0.53382343",
"0.53361785",
"0.5334666",
"0.53133357",
"0.5294174",
"0.52602094",
"0.5255706",
"0.5254353",
"0.52261215",
"0.52221674",
"0.5212626",
"0.5209613",
"0.52028394",
"0.5201707",
"0.517736",
"0.5169543"
]
| 0.6879734 | 1 |
speed_str(float speed) -> string with , separators and B/s | KiB/s | MiB/s | def speed_str(speed):
if speed > 1024*1024:
return '{:,.4}'.format(speed/1024/1024) + ' MiB/s'
elif speed > 1024:
return '{:,.4}'.format(speed/1024) + ' KiB/s'
else:
return '{:,.4}'.format(speed) + ' B/s' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pretty_speed(speed):\n unit = 'bps'\n kmg = ['', 'K', 'M', 'G']\n i = 0\n while speed >= 1000:\n speed /= 1000\n i += 1\n return \"{:.2f}\".format(speed) + ' ' + kmg[i] + unit",
"def speed_convert(size):\r\n power = 2 ** 10\r\n zero = 0\r\n units = {0: \"\", 1: \"Kb/s\", 2: \"MB/s\", 3: \"Gb/s\", 4: \"Tb/s\"}\r\n while size > power:\r\n size /= power\r\n zero += 1\r\n return f\"{round(size, 2)} {units[zero]}\"",
"def __formatCpuSpeed(self, cpuSpeed):\n return '%.1f GHz' % (cpuSpeed / 10.0)",
"def get_hr_speed(self):\n speed = self.speed\n if speed is None:\n return ''\n speed *= 8\n if speed == 10**12:\n return '1 Tbps'\n if speed >= 10**9:\n return '{} Gbps'.format(round(speed / 10**9))\n return '{} Mbps'.format(round(speed / 10**6))",
"def calc_speed(start, now, byte_counter):\n diff = now - start\n if byte_counter == 0 or diff < 0.001: # One millisecond\n return '%10s' % '---b/s'\n return '%10s' % ('%sb/s'\n % FileDownloader.format_bytes(float(byte_counter)\n / diff))",
"def parse_speed(as_str: str) -> float:\n return float(as_str.rstrip(\"x\"))",
"def perfcounter_to_str(val):\n return f\"{math.floor(val / 60)}m {math.floor(val % 60)}s {math.floor((val % 1) * 1000)}ms\"",
"def speed(self) -> str:\n return self._current_speed",
"def get_speed(val):\n if val in ['', 255]:\n return None\n return speed(val, 'KMH').value('KT')",
"def _validate_speed(self, speed: pint.Quantity | None) -> str:\n # Validated speeds are used as command argument, with empty string being the default for None\n if speed is None:\n return \"\"\n\n # Alert if out of bounds but don't raise exceptions, according to general philosophy.\n # Target flow rate too high\n if speed < ureg.Quantity(\"2 sec/stroke\"):\n speed = ureg.Quantity(\"2 sec/stroke\")\n warnings.warn(\n f\"Desired speed ({speed}) is unachievable!\"\n f\"Set to {self._seconds_per_stroke_to_flowrate(speed)}\"\n f\"Wrong units? A bigger syringe is needed?\"\n )\n\n # Target flow rate too low\n if speed > ureg.Quantity(\"3692 sec/stroke\"):\n speed = ureg.Quantity(\"3692 sec/stroke\")\n warnings.warn(\n f\"Desired speed ({speed}) is unachievable!\"\n f\"Set to {self._seconds_per_stroke_to_flowrate(speed)}\"\n f\"Wrong units? A smaller syringe is needed?\"\n )\n\n return str(round(speed.m_as(\"sec / stroke\")))",
"def bytes2best_str(bytes, decimals=1):\n sizes = (\n (1<<0, 'b'),\n (1<<10, 'Kb'),\n (1<<20, 'Mb'),\n (1<<30, 'Gb'),\n (sys.maxint, 'Gb')\n )\n # find the best index in sizes array for the given bytes value\n for i in range(len(sizes)-1):\n if bytes < sizes[i+1][0]:\n break\n # bytes always must be displayed without decimals\n if i == 0:\n decimals = 0\n # format the string\n f = float(bytes)/float(sizes[i][0])\n format = '%(value).' + str(decimals) + 'f %(magnitude)s'\n s = format % {'value': f, 'magnitude': sizes[i][1]}\n return s",
"def byte_measurement( bytes, detailed = False ):\r\n B = bytes\r\n MB = int( float( B ) / 1024 / 1024 )\r\n B = B - MB * 1024 * 1024\r\n kB = int( float( B ) / 1024 )\r\n B = B - kB * 1024\r\n if detailed:\r\n if MB:\r\n result += str( MB )\r\n if kB or B:\r\n result += \".\"\r\n if kB:\r\n result += str( percent_from_ratio( kB, 1024 ) )\r\n if B:\r\n result += str( percent_from_ratio( B, 1024 ) )\r\n result += \"MB\"\r\n elif kB:\r\n result += str( kB )\r\n if B:\r\n result += \".%i\" % percent_from_ratio( B, 1024 )\r\n result += \"kB\"\r\n elif B:\r\n result += \"%ib\" % B\r\n else:\r\n if MB:\r\n result = \"%iMB\" % MB\r\n elif kB:\r\n result = \"%ikB\" % kB\r\n else:\r\n result = \"%ib\" % B\r\n return result",
"def get_interface_speed(interface_name):\n c = BASE_RESULT.child(dimensions={'interface': interface_name})\n\n command = \"ethtool \" + interface_name\n interface_data = commands.getoutput(command)\n speed = ''\n\n # Filter lines here. We only want the speed.\n lines = interface_data.split('\\n')\n for line in lines:\n if 'Speed' in line:\n _, speed = line.split(':')\n\n # Only leave the digits\n speed = ''.join(c for c in speed if c in string.digits)\n if speed:\n c.value = str_to_num(speed)\n else:\n c.value = 0\n return c",
"def test_str_speed_ms(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n group_address_state='1/2/3',\n value_type=\"speed_ms\")\n sensor.sensor_value.payload = DPTArray((0x00, 0x1b,))\n\n self.assertEqual(sensor.resolve_state(), 0.27)\n self.assertEqual(sensor.unit_of_measurement(), \"m/s\")\n self.assertEqual(sensor.ha_device_class(), None)",
"def speed(self) -> str:\n return self._attributes.get(\"current_speed\")",
"def bytes_filesize_to_readable_str(bytes_filesize: int) -> str:\n if bytes_filesize < 1024:\n return \"{} B\"\n num = float(bytes_filesize)\n for unit in [\"B\", \"KB\", \"MB\", \"GB\"]:\n if abs(num) < 1024.0:\n return \"{:.1f} {}\".format(num, unit)\n num /= 1024.0\n return \"{:.1f} {}\".format(num, \"TB\")",
"def test_str_speed(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"speed\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xC5,\n 0xCD,\n 0x1C,\n 0x6A,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -6563.5517578125)\n self.assertEqual(sensor.unit_of_measurement(), \"m/s\")\n self.assertEqual(sensor.ha_device_class(), None)",
"def human_bytes(num, suffix=\"B\") -> str:\n for unit in [\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\", \"Pi\", \"Ei\", \"Zi\"]:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, \"Yi\", suffix)",
"def __str__(self):\n return \"Car with the maximum speed of \" + str(self.max_speed) + \\\n \" \" + self.speed_unit",
"def bytes_filesize_to_readable_str(bytes_filesize):\n if bytes_filesize < 1024:\n return \"{} B\"\n num = float(bytes_filesize)\n for unit in [\"B\", \"KB\", \"MB\", \"GB\"]:\n if abs(num) < 1024.0:\n return \"{:.1f} {}\".format(num, unit)\n num /= 1024.0\n return \"{:.1f} {}\".format(num, \"TB\")",
"def format_size(size):\n size = float(size)\n for unit in ['bit','Kibit','Mibit','Gibit']:\n if size < 1024.0:\n return \"{size:3.2f}{unit}\".format(size=size, unit=unit)\n size /= 1024.0\n return \"{size:.2f}{unit}\".format(size=size, unit='TiB')",
"def size_str(num):\n if num > 2 ** 30:\n return \"%0.2fGB\" % (num / 2 ** 30)\n elif num > 2 ** 20:\n return \"%0.2fMB\" % (num / 2 ** 20)\n elif num > 2 ** 10:\n return \"%0.2fkB\" % (num / 2 ** 10)\n else:\n return \"%d bytes\" % num",
"def write_speed(self, current_speed):\n try:\n if isinstance(current_speed, numbers.Number):\n if current_speed in [app_screen.SPEED_SLOW, app_screen.SPEED_MEDIUM, app_screen.SPEED_FAST]:\n current_speed_str = str(current_speed)\n self.store.put(SPEED_STORE, speed=current_speed_str)\n except:\n print \"Error: cannot save game speed!\"",
"def timestr(msec):\n sec = float(msec) / 1000\n\n hours = int(sec / 3600)\n sec -= hours * 3600\n\n minutes = int(sec / 60)\n sec -= minutes * 60\n\n return f\"{hours:02d}:{minutes:02d}:{sec:06.3f}\".replace(\".\", \",\")",
"def tranfer_unit(number):\n count = 0\n unit_name = \"\"\n if 2 ** 20 > number > 2 ** 10:\n unit_name = \"Kb\"\n count = 1\n elif 2 ** 30 > number > 2 ** 20:\n unit_name = \"Mb\"\n count = 2\n elif number > 2 ** 30:\n unit_name = \"Gb\"\n count = 3\n else:\n unit_name = \"b\"\n if count != 0:\n unit_number = round(number / ((2 ** 10) ** count), 2)\n else:\n unit_number = round(number, 2)\n unit_str = \"{num}{name}\".format(num=unit_number, name=unit_name)\n return unit_str",
"def human_readable(bytes, units=[' bytes','kB','MB','GB','TB', 'PB', 'EB']):\n return str(bytes) + units[0] if bytes < 1024 else human_readable(bytes>>10, units[1:])",
"def time_string(time_f: float) -> str:\n m, s = divmod(time_f, 60)\n h, m = divmod(m, 60)\n\n if h < 1:\n if m < 1 and s < 1:\n msec = int(s * 1000)\n return '{:=03d}msec'.format(msec)\n\n if m < 1:\n return '{:=02.0f}sec'.format(s)\n\n return '{:=02.0f}min:{:=02.0f}sec'.format(m, s)\n else:\n return '{:=01.0f}h:{:=02.0f}min:{:=02.0f}sec'.format(h, m, s)",
"def hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60.\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)",
"def hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60.\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)",
"def human_readable(size):\n if size < 1024**2:\n hreadable = float(size)/1024.0\n return \"%.0fK\" % hreadable\n elif size < (1024**3):\n hreadable = float(size)/(1024**2)\n return \"%.1fM\" % round(hreadable, 1)\n else:\n hreadable = float(size)/(1024.0**3)\n return \"%.2fG\" % round(hreadable, 2)"
]
| [
"0.7814756",
"0.6862278",
"0.66547114",
"0.6566598",
"0.63913155",
"0.6271223",
"0.6214141",
"0.6205538",
"0.6020402",
"0.5959611",
"0.5913494",
"0.5903802",
"0.588717",
"0.585047",
"0.5820893",
"0.5784452",
"0.5770387",
"0.5766688",
"0.5766226",
"0.5753665",
"0.5750202",
"0.5747055",
"0.57409924",
"0.5739699",
"0.57081133",
"0.56753",
"0.5664929",
"0.5658787",
"0.5658787",
"0.56540614"
]
| 0.8510952 | 0 |
This method actually starts the transfer and writes out the statistics. | def start(self):
import datetime
with self.in_fd as in_file:
with self.out_fd as out_file:
with self.info_fd as info_file:
self.total_bytes = 0
self.start_time = datetime.datetime.now()
while 1:
data = in_file.read(1024*14)
data_len = len(data)
if data_len == 0:
break
out_file.write(data)
self.total_bytes += data_len
self.total_time = (datetime.datetime.now() -
self.start_time).total_seconds()
self.average_speed = self.total_bytes / self.total_time
info_file.write(
'\033[0GTotal bytes: %d ' % self.total_bytes +
'Total time: %.3f ' % self.total_time +
'Average speed: %s\033[K' % speed_str(
self.average_speed)
)
info_file.write('\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transfer_progress(self, stats):",
"def _send(self):\n executor_id = self.status['executor_id']\n job_id = self.status['job_id']\n call_id = self.status['call_id']\n act_id = self.status['activation_id']\n\n if self.status['type'] == '__init__':\n init_key = create_init_key(executor_id, job_id, call_id, act_id)\n self.internal_storage.put_data(init_key, '')\n\n elif self.status['type'] == '__end__':\n status_key = create_status_key(executor_id, job_id, call_id)\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n logger.info(\"Storing execution stats - Size: {}\".format(drs))\n self.internal_storage.put_data(status_key, dmpd_response_status)",
"def upload_statistics(self):\n logger.info('Importing statistics...')\n call_command('import_qc', self.accession, self.rootpath, '--pipeline', self.version)\n logger.info('Stats successfully imported.')",
"def transfer_stats(self):\n tr_sum, tr_max, tr_min, tr_mean, tr_std = self.stats.get_transfer_stats()\n du_sum, du_max, du_min, du_mean, du_std = self.stats.get_duration_stats()\n\n stats_str = \"Summary:\\n\"\n stats_str += \"========\"\n stats_str += \"\"\n stats_str += f\" total steps: {self.stats.nsteps}\"\n stats_str += f\" total data (MB): {(tr_sum / 1024 / 1024)}\"\n stats_str += f\" transfer times(sec): {(du_sum)}\"\n stats_str += f\" throughput (MB/sec): {tr_sum / 1024 / 1024 / du_sum}\"\n\n return stats_str",
"def run(self):\n self.update_link_statistics()\n self.send_packet()",
"def run(self):\n\t\tlogger.info(\"Uploading data... @ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()",
"def run(self):\n # Logging the task\n logger = self.log_setup()\n logger.info(\"Starting the GetRowsAndTransfer task . . .\")\n\n # Processing the tasks\n self.get_rows(logger)\n self.upload_files(logger)\n\n logger.info(\"Process finished successfully . . .\")",
"def _transfer(self):\n copy_func = BIDSCopy(overwrite=self.force_override.get(),\n verify=self.verify.get(),\n file_name_tracker=self.curr_file,\n file_num_tracker=self.transferred_count,\n file_prog_tracker=self.curr_file_progress)\n self.curr_file.set('Mapping destination BIDS structure...')\n dst_folder = BIDSTree(self.dst)\n for src in self.srcs:\n dst_folder.add(src, copier=copy_func.copy_files)\n if self.set_copied:\n self._rename_complete(src)\n self.transferred_count.set(self.file_count)\n self.curr_file.set('Complete!')",
"def run(self):\n self.socket.connect()\n with open('src/inputs/output.file', 'rb') as f:\n self.sent_bytes = f.read()\n self.socket.send(self.sent_bytes)\n self.socket.disconnect()\n self.socket.close()",
"def run(self):\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n\n ID = None\n data = None\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n ID, data = result\n # Data sequence is unimportant, simply write it out and proceed\n self.writePairs(data)\n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()\n self.closeFileHandles()",
"def start(self):\n\t\tself.stream.start_stream()",
"def onTransferUpdate(self, api, transfer):\n logging.info('Transfer update ({} {});'\n ' Progress: {} KB of {} KB, {} KB/s'\n .format(transfer,\n transfer.getFileName(),\n transfer.getTransferredBytes() / 1024,\n transfer.getTotalBytes() / 1024,\n transfer.getSpeed() / 1024))",
"def connectionMade(self):\n self.output = DelayedStartupLineLogger()\n self.output.makeConnection(self.transport)\n self.output.tag = self.name",
"def stat_beg_batch(self, transfer_name, src, dst, transclass=None):\n\n if miscutils.fwdebug_check(3, 'TRANSFERSTATS_DEBUG'):\n miscutils.fwdebug_print(f\"beg {transfer_name} {src} {dst} {transclass}\")\n self.batchvals['transfer_name'] = transfer_name\n self.batchvals['src'] = src\n self.batchvals['dst'] = dst\n self.batchvals['transfer_class'] = transclass\n self.batchvals['start_time'] = datetime.datetime.now()\n\n if miscutils.fwdebug_check(3, 'TRANSFERSTATS_DEBUG'):\n miscutils.fwdebug_print(\"end\")\n return -1",
"def transfer(self):\n pass",
"def run(self):\n # Wait for the 'shot' message ready\n self.wait_for_messages()\n # Send the initial states to the server\n self.send_shape_and_states()\n # Wait for the 'method' message ready\n self.wait_for_messages()\n\n # Send the measurement angles to the server\n for y in range(self.__depth):\n self.send_angle_bulks(y)\n\n # Obtain the measurement outcomes\n result = self.get_classical_output()[::-1]\n self.send_back(\n 'local',\n self.__wrap_shot_message(\n 'setResult',\n {'result': result, 'shot': self.__shots},\n )\n )",
"def run_report(self) -> None:\n t1 = self.t1 or time.time()\n\n dt = t1 - self.t0\n\n if dt and self.max_tasks:\n speed = len(self.statistics) / dt / self.max_tasks\n else:\n speed = 0\n\n LOGGER.info('CRAWLER STATISTICS REPORT')\n\n show = list(self.statistics)\n show.sort(key=lambda stat: str(stat.url))\n\n for stat in show:\n self.log_url_metadata(stat)\n\n LOGGER.info(\n f'Completed parsing {len(self.statistics)} urls in {dt} secs; (max_tasks={self.max_tasks}) ({speed} urls per second per task)', # pylint: disable=C0301 # noqa: E501\n )\n\n LOGGER.info(f'Remaining: {self.queue.qsize()}')\n LOGGER.info(f'Total Statistics: {len(self.statistics)}')\n LOGGER.info(f'Datetime: {time.ctime()} local time')",
"def import_measurements():\n\n print('Receive a transfer...')",
"def run(self):\n self.debug(__name__ + \".run(): self.threadName=\" + str(self.threadName) + \"\\n\")\n self.debug(__name__ + \".run(): self.statusFile=\" + str(self.statusFile) + \"\\n\")\n self.debug(__name__ + \".run(): self.recvData=\" + str(self.recvData) + \"\\n\")\n self.debug(__name__ + \".run(): self.socketConn=\" + str(self.socketConn) + \"\\n\")\n\n status = True\n data = self.getFileData()\n self.mySocketObj.serverSend(self.socketConn, data)\n if self.socketConn: self.socketConn.close()\n # self.updateCounts()\n self.status = status\n if status:\n self.appendMsg(__name__ + \".run(): Completed successfully for \" + str(self.threadName) + \"\\n\")\n else:\n self.appendMsg(__name__ + \".run(): Failed for \" + str(self.threadName) + \"\\n\")\n # Endif",
"def start(self):\r\n self.debug(\"### starting gox streaming API, trading %s%s\" %\r\n (self.curr_base, self.curr_quote))\r\n self.client.start()",
"def _stream(self):\n logger.info('getting meta-data')\n while not self.handle.has_metadata():\n time.sleep(0.1)\n\n #self.handle.rename_file(0, 'test.mp4')\n\n while not self.handle.is_seed():\n stat = self.handle.status()\n\n print 'downloading %.2f%%'%(stat.progress * 100)\n sys.stdout.flush()\n\n time.sleep(1)",
"def run(self):\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n\n ID = None\n data = None\n output_compressed = set()\n output_normal = set()\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n ID, data = result\n for pDict in data:\n if pDict['gzipped']:\n for filename in pDict['files']:\n output_compressed.add(filename)\n else:\n for filename in pDict['files']:\n output_normal.add(filename)\n for filename in pDict['files']:\n self.outputfiles.add(filename)\n \n self.integrateStats(data)\n self.Counter.value += len(data)\n\n # Now concatenate any output files together\n if self.heartbeat is not None:\n self.heartbeat.message(\"Beginning file block merging..\", True)\n\n fcount = 0\n blkavg = 0\n for extension in ('sam', \n 'pp.sam',\n '1.fastq',\n '2.fastq',\n 'pp.1.fastq',\n 'pp.2.fastq',\n 'sh.fastq',\n 'sh.pp.fastq'):\n fc,ba = self.concatenate(output_compressed, extension, do_gzip=True)\n fcount += fc\n blkavg += ba\n fc,ba = self.concatenate(output_normal, extension, do_gzip=False)\n fcount += fc\n blkavg += ba\n\n if self.heartbeat is not None and fcount > 0:\n self.heartbeat.message(\n \"Merged %d blocks (avg) in each of %d output files\" % \n (int(round(blkavg * 1.0 / fcount)), fcount), True)\n \n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()",
"def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()",
"def send_output(self):\n self.__status_handler.io.async_refresh()",
"def execute(self):\n install_path, data_path = get_config_paths()\n datadir = os.path.join(data_path, self._eventid, 'current')\n if not os.path.isdir(datadir):\n raise NotADirectoryError('%s is not a valid directory.' % datadir)\n\n # look for the presence of a NO_TRANSFER file in the datadir.\n notransfer = os.path.join(datadir, NO_TRANSFER)\n if os.path.isfile(notransfer):\n self.logger.info(\n 'Event has a %s file blocking transfer.' % NO_TRANSFER)\n return\n\n products_dir = os.path.join(datadir, 'products')\n if not os.path.isdir(products_dir):\n raise NotADirectoryError('%s does not exist.' % products_dir)\n\n # get the path to the transfer.conf spec file\n configspec = os.path.join(get_data_path(), 'transferspec.conf')\n\n # look for an event specific transfer.conf file\n transfer_conf = os.path.join(datadir, 'transfer.conf')\n if not os.path.isfile(transfer_conf):\n # if not there, use the system one\n transfer_conf = os.path.join(\n install_path, 'config', 'transfer.conf')\n if not os.path.isfile(transfer_conf):\n raise FileNotFoundError('%s does not exist.' % transfer_conf)\n\n # get the config information for transfer\n config = ConfigObj(transfer_conf, configspec=configspec)\n\n # get the output container with all the things in it\n datafile = os.path.join(products_dir, 'shake_result.hdf')\n if not os.path.isfile(datafile):\n raise FileNotFoundError('%s does not exist.' % datafile)\n\n # Open the ShakeMapOutputContainer and extract the data\n container = ShakeMapOutputContainer.load(datafile)\n\n # call the transfer method\n _transfer(config, container, products_dir)\n\n # copy the current folder to a new backup directory\n self._make_backup(data_path)",
"def start_stream(self):\n pass",
"def run(self):\n\n # Create the ports\n self.wan_port.create_on_server(self.server)\n self.cpe_port.create_on_server(self.server)\n\n flows = []\n\n # Create all the downstream flows which are requested\n for i in range(self.number_of_downstream_flows):\n flow_name = \"Downstream_%d\" % (i + 1)\n logging.info('Creating flow \"%s\"', flow_name)\n flows.append(\n self.traffic_profile.create_between(name=flow_name,\n flow_number=i + 1,\n source=self.wan_port,\n destination=self.cpe_port,\n duration=self.traffic_duration)\n )\n\n # Create all the upstream flows which are configured\n for i in range(self.number_of_upstream_flows):\n flow_name = \"Upstream_%d\" % (i + 1)\n logging.info('Creating flow \"%s\"', flow_name)\n flows.append(\n self.traffic_profile.create_between(name=flow_name,\n flow_number=i + 1,\n source=self.cpe_port,\n destination=self.wan_port,\n duration=self.traffic_duration)\n )\n\n # Start the traffic and with until finished\n self.run_traffic(flows)\n\n # Get the results from the flow and return them in a list of dicts\n return [flow.get_results() for flow in flows]",
"def on_begin(self, args, kwargs):\n self.stdout = open(self.stdout_path, \"wb\")\n self.stderr = open(self.stderr_path, \"wb\")",
"def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)",
"def run(self):\r\n self.collect_data()"
]
| [
"0.71215886",
"0.64963603",
"0.6108362",
"0.60545087",
"0.60371166",
"0.59502673",
"0.58914244",
"0.58742326",
"0.5850657",
"0.5825628",
"0.58090675",
"0.57770205",
"0.5729513",
"0.5726948",
"0.57021993",
"0.56705713",
"0.56651455",
"0.5656119",
"0.56470776",
"0.56435704",
"0.5634718",
"0.56096935",
"0.55933475",
"0.5580162",
"0.5578516",
"0.5554489",
"0.55414003",
"0.5532516",
"0.5532329",
"0.55246544"
]
| 0.6586207 | 1 |
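A minimal illustration of the pattern in the transfer-summary code at the start of this excerpt (the info_file.write block above): it averages total bytes over elapsed time and redraws a single status line in place with ANSI escapes (\033[0G returns the cursor to column 0, \033[K clears to end of line). The speed_str helper below is a hypothetical stand-in, since the original helper is not part of this excerpt.

import sys

def speed_str(rate):
    # Hypothetical helper: render a byte rate in human-readable units.
    for unit in ('B/s', 'KiB/s', 'MiB/s', 'GiB/s'):
        if rate < 1024 or unit == 'GiB/s':
            return '%.2f %s' % (rate, unit)
        rate /= 1024.0

def report_progress(total_bytes, total_time, info_file=sys.stdout):
    # Same shape as the document above: compute the average, then overwrite the status line.
    average_speed = total_bytes / total_time
    info_file.write(
        '\033[0GTotal bytes: %d ' % total_bytes +
        'Total time: %.3f ' % total_time +
        'Average speed: %s\033[K' % speed_str(average_speed)
    )
    info_file.write('\n')

report_progress(10 * 1024 * 1024, 2.5)  # example call: 10 MiB in 2.5 s -> 4.00 MiB/s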
Handles server replies | def reply_handler(msg):
print "Server Response: %s, %s" % (msg.typeName, msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass",
"def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])",
"def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)",
"def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client",
"def finished(self, reply):\n pass",
"def handle(self):\n global log_th\n sent = 1\n msg_body = ''\n get_recv = True\n get_data = True\n empty_check = 0\n # Looping session requests\n while 1:\n try:\n # If enabled sleep feauture\n if self.sleep_between != 0:\n time.sleep(self.sleep_between)\n # If no answer feauture\n if self.no_answer != 0:\n time.sleep(1)\n continue\n # Changing receive size if receiving data part\n if sent == 3 or sent == 4:\n data = self.request.recv(self.data_recv_size)\n else:\n data = self.request.recv(self.std_recv_size)\n if sent != 5:\n self.command_w_th_inc.write_commands(\n data=bytes(data).decode().encode('ascii', 'ignore')\n .decode().rstrip(), qid=self.message_id)\n # To many empty line received, closed thread\n if self.func_empty_check(data):\n if empty_check >= 3:\n break\n else:\n empty_check += 1\n continue\n # Logging session requests if steps not equal to data section\n if sent != 5:\n log_th.log_info('{} - {} client executed : \"{}\"'.format(\n self.message_id, self.client_ip, bytes(data).decode().rstrip()))\n # Break the loop\n if self.func_quit(data):\n break\n except Exception as ae:\n log_th.log_warning('{} encounter an error from {} thread : {}'.format(\n self.client_ip, threading.current_thread().name, str(ae)))\n break\n else:\n try:\n # Checking the all steps\n if self.func_rset(data):\n sent = 2\n continue\n if self.func_auth(data):\n continue\n if self.func_auth_plain(data):\n continue\n if self.func_starttls(data):\n continue\n # Starting the sent steps\n # Ehlo/hello\n if sent == 1:\n if self.func_ehlo(data) or self.func_helo(data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n # Mail from, rcpt to, data\n elif sent == 2:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 2:\n get_data = False\n get_recv = False\n elif bytes(data).decode().encode('ascii',\n 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_recv = False\n if self.func_from(data, get_recv):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n if not get_recv:\n if self.func_to(data, get_recv, get_data):\n sent += 1\n get_recv = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # rcpt to and data\n elif sent == 3:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_data = False\n if self.func_to(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # data\n elif sent == 4:\n if self.func_to(data, get_recv, get_data):\n continue\n if self.func_data(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # content writing to file (if enabled) and quit statement\n elif sent == 5:\n data_list = bytes(data).decode().split('\\r\\n')\n for line in data_list:\n if str(line) == '.':\n if self.mail_save_enable != 0:\n out_file = open(self.mail_save_path + '/'\n + self.message_id + '.eml', 'w')\n out_file.write(msg_body)\n out_file.close()\n 
self.func_data_ok()\n sent = 1\n break\n else:\n msg_body += str(line) + '\\r\\n'\n except IndexError:\n if sent == 2:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n elif sent == 3:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))",
"def process_server_response(self, server_response):",
"def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)",
"async def _response_handler(self):",
"def request_handler(self):\n\n size = 1024\n while True:\n # accept message from client\n clientSock, addr = self.socket.accept()\n self.printLine()\n print('connect to {}'.format(addr))\n\n # print client message content\n msg = clientSock.recv(size).decode('utf-8')\n self.printLine()\n print(\"sent message :\")\n print(msg)\n\n self.printLine()\n self._set_fileName(msg)\n\n # check for existance of file in the server (with name of file.txt)\n data, isFileExist = self._send_file_handler()\n\n self.printLine()\n print('data of file :')\n print(data)\n\n # create header for response message\n if isFileExist:\n header = self._generate_headers(200)\n else:\n header = self._generate_headers(404)\n\n response = header.encode() + data.encode()\n\n # send response in http protocol\n clientSock.send(response)\n\n # close the signal\n clientSock.close()",
"def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)",
"def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. 
\"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. \"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"",
"def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply",
"def tunnel_recv_handler(self, payload):\n _log.analyze(self.node.id, \"+ CLIENT\", {'payload': payload})\n if 'msg_uuid' in payload and payload['msg_uuid'] in self.replies and 'cmd' in payload and payload['cmd']=='REPLY':\n kwargs = {}\n if 'key' in payload:\n kwargs['key'] = payload['key']\n if 'value' in payload:\n kwargs['value'] = payload['value']\n if 'response' in payload:\n kwargs['value'] = calvinresponse.CalvinResponse(encoded=payload['response'])\n self.replies.pop(payload['msg_uuid'])(**kwargs)",
"def _response(self, *lines):\n for line in lines:\n self.client.dataReceived(line + b'\\r\\n')\n self.client.dataReceived(\n b'0001 OK [READ-ONLY] ' + self.command + b' completed\\r\\n')",
"def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())",
"def handle_recv(self,stream,msgs):\n pass",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))",
"def handler(self):\r\n\t\tif (self.state != self.INIT):\r\n\t\t\tself.sendRtspRequest(self.TEARDOWN)\r\n\t\t\treply = self.recvRtspReply()\r\n\t\t\t\r\n\t\t\treplyEle = self.parseRtspReply(reply)\r\n\t\t\ttotalSendPacketCount = int(replyEle[3][1])\r\n\r\n\t\t\tif (reply.split('\\n')[0] == \"RTSP/1.0 200 OK\"):\r\n\t\t\t\tself.networkStat.computeLoss(totalSendPacketCount, self.networkStat.receivedPacketCount)\r\n\t\t\t\tself.networkStat.computeADR()\r\n\t\t\t\tself.networkStat.exportLogFile(self.sessionId, self.rtspSeq)\r\n\t\t\t\r\n\t\t\tif os.path.exists(self.cacheFile):\r\n\t\t\t\tos.remove(self.cacheFile)\r\n\t\ttry:\r\n\t\t\tself.rtpSocket_client.close()\r\n\t\t\tself.rtspSocket_client.close()\r\n\t\texcept:\r\n\t\t\tNone\r\n\t\tself.master.destroy()\r\n\t\tsys.exit()",
"def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)",
"def recieve_responses(self):\r\n logging.debug('starting Riva ASR service response reciever thread')\r\n \r\n for response in self.responses: # this is blocking\r\n if not response.results:\r\n continue\r\n\r\n result = response.results[0]\r\n\r\n if not result.alternatives:\r\n continue\r\n\r\n text = result.alternatives[0].transcript\r\n text = text.strip()\r\n \r\n if len(text) == 0:\r\n continue\r\n \r\n self.responses_queue.put({\r\n 'text' : text,\r\n 'end' : result.is_final\r\n })\r\n\r\n logging.debug('exiting Riva ASR service response reciever thread')",
"def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif 
request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n 
pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n 
#Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))",
"def process(self):\n\n try:\n self._read_buffer += self._socket.recv(4096)\n except socket.error as exc:\n if exc.errno not in [errno.EAGAIN,\n errno.EWOULDBLOCK,\n errno.WSAEWOULDBLOCK]:\n raise\n response, self._read_buffer = Message.decode(self._read_buffer)\n # Check if terminating RESPONSE_VALUE with body 00 01 00 00\n if (response.type == Message.SERVERDATA_RESPONSE_VALUE and\n response.body.encode(\"ascii\") == \"\\x00\\x01\\x00\\x00\"):\n response = Message(self._response[0].id,\n self._response[0].type,\n \"\".join([r.body for r in self._response]))\n self._active_requests[response.id].response = response\n self._response = []\n self._active_requests[response.id]\n elif response.type == Message.SERVERDATA_RESPONSE_VALUE:\n self._response.append(response)\n elif response.type == Message.SERVERDATA_AUTH_RESPONSE:\n self._active_requests[self._response[0].id].response = response\n # Clear empty SERVERDATA_RESPONSE_VALUE sent before\n # SERVERDATA_AUTH_RESPONSE\n self._response = []\n self._active_requests[response.id]",
"def process_hub_reply(self, hub_reply):\n\n # Typical response from hub is \"OK\" if there are no user or\n # automated librian requests. Almost all responses are just \"OK\"\n # therefore the default process_hub_reply is \"pass\"\n # TODO Respond to hub repies if they are other than 'OK'\n # for example, push \"send 10 frames\" request onto deque\n # and then add \"do requested extra frames\" to detectors loop\n # so that images get sent even though there is no routine reason\n pass",
"def _handle_reply(self, fullname, body):\n handled = False\n thing = self._reddit.get_thing_from_fullname(fullname)\n if thing:\n logger.id(logger.info, self,\n 'Processing {color_thing} ...',\n color_thing=reddit.display_id(thing),\n )\n # only handle specific types of things\n if isinstance(thing, RateLimitHandler.VALID_THINGS):\n logger.id(logger.info, self,\n 'Replying to {color_thing} ...',\n color_thing=reddit.display_id(thing),\n )\n\n # Note: we may be rate-limited again\n success = self._reddit.do_reply(\n thing, body, self._killed,\n )\n\n if success or success is None:\n logger.id(logger.debug, self,\n 'Removing \\'{color_thing}\\' from'\n ' reddit ratelimit queue ...',\n color_thing=thing,\n )\n # reply either succeeded or a reply is not possible\n # (eg. 403 Forbidden)\n # remove the element from the queue database\n with self.rate_limit_queue:\n self.rate_limit_queue.delete(thing, body=body)\n\n if success:\n # try to add the thing to the reply history\n # (but only if we can find instagram users\n # in the body)\n ig_users = replies.Formatter.ig_users_in(\n body\n )\n if ig_users:\n try:\n with self.reply_history:\n self.reply_history.insert(\n thing, ig_users,\n )\n\n except database.UniqueConstraintFailed:\n display = reddit.display_id(\n thing\n )\n logger.id(logger.warn, self,\n 'Duplicate instagram user'\n ' posted in'\n ' {color_submission}!'\n ' (users={color_users})',\n color_submission=display,\n color_users=ig_users,\n exc_info=True,\n )\n handled = True\n\n return handled",
"def handle(self):\n for request in self._each_msg():\n r_len, r_type = struct.unpack_from('> I B', request)\n\n if r_type == self.SSH2_AGENTC_REQUEST_IDENTITIES:\n response = self._merge_identities(request)\n elif r_type == self.SSH2_AGENTC_SIGN_REQUEST:\n # Extract key blob from request\n key_blob_len = struct.unpack_from('> I', request, 5)[0]\n key_blob = request[9:9 + key_blob_len]\n hex_blob = ''.join('{:02x}'.format(b) for b in key_blob)\n\n agent = self._identity_map[hex_blob]\n\n if agent:\n if agent == self.server.alternate_agent:\n key_digest = self._key_digest(key_blob)\n LOG.info(\"identity %s used by %s: %s\", key_digest,\n self.username, self.process_info)\n\n response = agent.forward_request(request)\n else:\n response = \\\n self.server.default_agent.forward_request(request)\n else:\n response = self.server.default_agent.forward_request(request)\n\n self.request.sendall(response)",
"def _dispatch_to_client_request(self):\n # Listen for client connection\n self._to_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._to_client_request], [], [self._to_client_request], 0.1)\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n self._to_client_connections.append(client_conn)\n print(\"Sending replies to [\" + client_addr[0] + \", \" + str(client_addr[1]) + ']')",
"def handle(self, message):"
]
| [
"0.76629096",
"0.66553223",
"0.6645519",
"0.662176",
"0.6588144",
"0.65254015",
"0.64312863",
"0.639899",
"0.6365997",
"0.6332262",
"0.633033",
"0.6302304",
"0.6296347",
"0.6267659",
"0.6252938",
"0.6237211",
"0.61984926",
"0.61887336",
"0.61629516",
"0.61628944",
"0.61119646",
"0.60743576",
"0.607183",
"0.6068312",
"0.6064956",
"0.60497075",
"0.6037183",
"0.602331",
"0.5985866",
"0.59816694"
]
| 0.7671313 | 1 |
Create a Contract object defining what will be purchased, at which exchange and in which currency.

symbol - The ticker symbol for the contract
sec_type - The security type for the contract ('STK' is 'stock')
exch - The exchange to carry out the contract on
prim_exch - The primary exchange to carry out the contract on
curr - The currency in which to purchase the contract | def create_contract(symbol, sec_type, exch, prim_exch, curr):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = sec_type
contract.m_exchange = exch
contract.m_primaryExch = prim_exch
contract.m_currency = curr
return contract | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n # order types \"limit\" and \"market\" immediatley parsed \"EXCHANGE LIMIT\" and \"EXCHANGE MARKET\"\n # note: same order types exist for margin orders without the EXCHANGE prefix\n orderTypes = self.safe_value(self.options, 'orderTypes', {})\n orderType = type.upper()\n if market['spot']:\n # although they claim that type needs to be 'exchange limit' or 'exchange market'\n # in fact that's not the case for swap markets\n orderType = self.safe_string_upper(orderTypes, type, type)\n stopPrice = self.safe_string_2(params, 'stopPrice', 'triggerPrice')\n timeInForce = self.safe_string(params, 'timeInForce')\n postOnlyParam = self.safe_value(params, 'postOnly', False)\n reduceOnly = self.safe_value(params, 'reduceOnly', False)\n clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')\n params = self.omit(params, ['triggerPrice', 'stopPrice', 'timeInForce', 'postOnly', 'reduceOnly', 'price_aux_limit'])\n amountString = self.amount_to_precision(symbol, amount)\n amountString = amountString if (side == 'buy') else Precise.string_neg(amountString)\n request = {\n # 'gid': 0123456789, # int32, optional group id for the order\n # 'cid': 0123456789, # int32 client order id\n 'type': orderType,\n 'symbol': market['id'],\n # 'price': self.number_to_string(price),\n 'amount': amountString,\n # 'flags': 0, # int32, https://docs.bitfinex.com/v2/docs/flag-values\n # 'lev': 10, # leverage for a derivative orders, the value should be between 1 and 100 inclusive, optional, 10 by default\n # 'price_trailing': self.number_to_string(priceTrailing),\n # 'price_aux_limit': self.number_to_string(stopPrice),\n # 'price_oco_stop': self.number_to_string(ocoStopPrice),\n # 'tif': '2020-01-01 10:45:23', # datetime for automatic order cancellation\n # 'meta': {\n # 'aff_code': 'AFF_CODE_HERE'\n # },\n }\n stopLimit = ((orderType == 'EXCHANGE STOP LIMIT') or ((orderType == 'EXCHANGE LIMIT') and (stopPrice is not None)))\n exchangeStop = (orderType == 'EXCHANGE STOP')\n exchangeMarket = (orderType == 'EXCHANGE MARKET')\n stopMarket = (exchangeStop or (exchangeMarket and (stopPrice is not None)))\n ioc = ((orderType == 'EXCHANGE IOC') or (timeInForce == 'IOC'))\n fok = ((orderType == 'EXCHANGE FOK') or (timeInForce == 'FOK'))\n postOnly = (postOnlyParam or (timeInForce == 'PO'))\n if (ioc or fok) and (price is None):\n raise InvalidOrder(self.id + ' createOrder() requires a price argument with IOC and FOK orders')\n if (ioc or fok) and exchangeMarket:\n raise InvalidOrder(self.id + ' createOrder() does not allow market IOC and FOK orders')\n if (orderType != 'MARKET') and (not exchangeMarket) and (not exchangeStop):\n request['price'] = self.price_to_precision(symbol, price)\n if stopLimit or stopMarket:\n # request['price'] is taken for stop orders\n request['price'] = self.price_to_precision(symbol, stopPrice)\n if stopMarket:\n request['type'] = 'EXCHANGE STOP'\n elif stopLimit:\n request['type'] = 'EXCHANGE STOP LIMIT'\n request['price_aux_limit'] = self.price_to_precision(symbol, price)\n if ioc:\n request['type'] = 'EXCHANGE IOC'\n elif fok:\n request['type'] = 'EXCHANGE FOK'\n # flag values may be summed to combine flags\n flags = 0\n if postOnly:\n flags = self.sum(flags, 4096)\n if reduceOnly:\n flags = self.sum(flags, 1024)\n if flags != 0:\n request['flags'] = flags\n if clientOrderId is not None:\n request['cid'] = clientOrderId\n 
params = self.omit(params, ['cid', 'clientOrderId'])\n response = await self.privatePostAuthWOrderSubmit(self.extend(request, params))\n #\n # [\n # 1653325121, # Timestamp in milliseconds\n # \"on-req\", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')\n # null, # unique ID of the message\n # null,\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325121798, # Millisecond timestamp of creation\n # 1653325121798, # Millisecond timestamp of update\n # -10, # Amount(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Type of the order: LIMIT, EXCHANGE LIMIT, MARKET, EXCHANGE MARKET, STOP, EXCHANGE STOP, STOP LIMIT, EXCHANGE STOP LIMIT, TRAILING STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC.\n # null, # Previous order type(stop-limit orders are converted to limit orders so for them previous type is always STOP)\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # 4096, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price(Stop Price for stop-limit orders, Limit Price for limit orders)\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ]\n # ],\n # null, # CODE(work in progress)\n # \"SUCCESS\", # Status of the request\n # \"Submitting 1 orders.\" # Message\n # ]\n #\n status = self.safe_string(response, 6)\n if status != 'SUCCESS':\n errorCode = response[5]\n errorText = response[7]\n raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')\n orders = self.safe_value(response, 4, [])\n order = self.safe_value(orders, 0)\n return self.parse_order(order, market)",
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n orderType = self.capitalize(type)\n reduceOnly = self.safe_value(params, 'reduceOnly')\n if reduceOnly is not None:\n if (market['type'] != 'swap') and (market['type'] != 'future'):\n raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + market['type'] + ' orders, reduceOnly orders are supported for swap and future markets only')\n brokerId = self.safe_string(self.options, 'brokerId', 'CCXT')\n qty = self.parse_to_int(self.amount_to_precision(symbol, amount))\n request = {\n 'symbol': market['id'],\n 'side': self.capitalize(side),\n 'orderQty': qty, # lot size multiplied by the number of contracts\n 'ordType': orderType,\n 'text': brokerId,\n }\n if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):\n stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')\n if stopPrice is None:\n raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')\n else:\n request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))\n params = self.omit(params, ['stopPx', 'stopPrice'])\n if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):\n request['price'] = float(self.price_to_precision(symbol, price))\n clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')\n if clientOrderId is not None:\n request['clOrdID'] = clientOrderId\n params = self.omit(params, ['clOrdID', 'clientOrderId'])\n response = await self.privatePostOrder(self.extend(request, params))\n return self.parse_order(response, market)",
"def create_order(self, asset, amount, is_buy, style):\n exchange_symbol = self.get_symbol(asset)\n if isinstance(style, ExchangeLimitOrder) \\\n or isinstance(style, ExchangeStopLimitOrder):\n price = style.get_limit_price(is_buy)\n order_type = 'limit'\n\n elif isinstance(style, ExchangeStopOrder):\n price = style.get_stop_price(is_buy)\n order_type = 'stop'\n\n else:\n raise InvalidOrderStyle(exchange=self.name,\n style=style.__class__.__name__)\n\n req = dict(\n symbol=exchange_symbol,\n amount=str(float(abs(amount))),\n price=\"{:.20f}\".format(float(price)),\n side='buy' if is_buy else 'sell',\n type='exchange ' + order_type, # TODO: support margin trades\n exchange=self.name,\n is_hidden=False,\n is_postonly=False,\n use_all_available=0,\n ocoorder=False,\n buy_price_oco=0,\n sell_price_oco=0\n )\n\n date = pd.Timestamp.utcnow()\n try:\n self.ask_request()\n response = self._request('order/new', req)\n order_status = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_status:\n raise ExchangeRequestError(\n error='unable to create Bitfinex order {}'.format(\n order_status['message'])\n )\n\n order_id = str(order_status['id'])\n order = Order(\n dt=date,\n asset=asset,\n amount=amount,\n stop=style.get_stop_price(is_buy),\n limit=style.get_limit_price(is_buy),\n id=order_id\n )\n\n return order",
"def create_stock_from_symbol(symbol, book=None):\n from .commodity import Commodity\n\n share = get_latest_quote(symbol)\n\n stock = Commodity(\n mnemonic=symbol,\n fullname=share.name,\n fraction=10000,\n namespace=share.exchange,\n quote_flag=1,\n quote_source=\"yahoo\",\n quote_tz=share.timezone,\n )\n\n if book:\n book.add(stock)\n book.flush()\n\n return stock",
"def create_order(self, symbol, tradeType, price, amount):\r\n param = {\r\n 'symbol': self.__transfer_symbol(symbol),\r\n 'tradeType': tradeType, #BUY/SELL\r\n 'price': price,\r\n 'amount': amount,\r\n 'appid': self.apiKey,\r\n 'nonce': int(time.time() * 1000),\r\n 'timestamp': int(time.time())\r\n }\r\n return self.__sign_POST('/api/v1/order/create', param, self.timeout)",
"def create_get_price_ticker_request(self, symbol: Optional[str] = None) -> Request:",
"async def create_order(self, symbol: str, side: str, price: str, amount: str, urgency: int = 0) -> dict:\n base, quote = symbol.lower().split('_')\n spendable = quote if side == 'buy' else base\n params = {\n 'pair': symbol,\n 'type': side,\n 'price': price,\n spendable: amount\n }\n return await self._safe_call(urgency, self._request_private, 'trade', params)",
"def solidity_create_contract_with_zero_price(\n self,\n source_code,\n owner,\n name=None,\n contract_name=None,\n libraries=None,\n balance=0,\n address=None,\n args=(),\n gas=None,\n compile_args=None,\n):\n if compile_args is None:\n compile_args = dict()\n\n if libraries is None:\n deps = {}\n else:\n deps = dict(libraries)\n\n contract_names = [contract_name]\n while contract_names:\n contract_name_i = contract_names.pop()\n try:\n global cached_compile\n cache_key = (source_code, contract_name_i)\n if cache_key in cached_compile:\n compile_results = cached_compile[cache_key]\n else:\n compile_results = self._compile(\n source_code, contract_name_i, libraries=deps, crytic_compile_args=compile_args\n )\n cached_compile[cache_key] = compile_results\n md = SolidityMetadata(*compile_results)\n if contract_name_i == contract_name:\n constructor_types = md.get_constructor_arguments()\n\n if constructor_types != \"()\":\n if args is None:\n args = self.make_symbolic_arguments(constructor_types)\n\n constructor_data = ABI.serialize(constructor_types, *args)\n else:\n constructor_data = b\"\"\n # Balance could be symbolic, lets ask the solver\n # Option 1: balance can not be 0 and the function is marked as not payable\n maybe_balance = balance == 0\n self._publish(\n \"will_solve\", None, self.constraints, maybe_balance, \"can_be_true\"\n )\n must_have_balance = SelectedSolver.instance().can_be_true(\n self.constraints, maybe_balance\n )\n self._publish(\n \"did_solve\",\n None,\n self.constraints,\n maybe_balance,\n \"can_be_true\",\n must_have_balance,\n )\n if not must_have_balance:\n # balance always != 0\n if not md.constructor_abi[\"payable\"]:\n raise EthereumError(\n f\"Can't create solidity contract with balance ({balance}) \"\n f\"different than 0 because the contract's constructor is not payable.\"\n )\n\n for state in self.ready_states:\n world = state.platform\n\n expr = Operators.UGE(world.get_balance(owner.address), balance)\n self._publish(\"will_solve\", None, self.constraints, expr, \"can_be_true\")\n sufficient = SelectedSolver.instance().can_be_true(self.constraints, expr,)\n self._publish(\n \"did_solve\", None, self.constraints, expr, \"can_be_true\", sufficient\n )\n\n if not sufficient:\n raise EthereumError(\n f\"Can't create solidity contract with balance ({balance}) \"\n f\"because the owner account ({owner}) has insufficient balance.\"\n )\n\n contract_account = create_contract(\n self,\n owner=owner,\n balance=balance,\n address=address,\n init=md._init_bytecode + constructor_data,\n name=name,\n gas=gas,\n )\n else:\n contract_account = create_contract(\n self, owner=owner, init=md._init_bytecode, balance=0, gas=gas\n )\n if contract_account is None:\n return None\n\n self.metadata[int(contract_account)] = md\n\n deps[contract_name_i] = int(contract_account)\n except DependencyError as e:\n contract_names.append(contract_name_i)\n for lib_name in e.lib_names:\n if lib_name not in deps:\n contract_names.append(lib_name)\n except EthereumError as e:\n logger.info(f\"Failed to build contract {contract_name_i} {str(e)}\")\n self.kill()\n raise\n\n # If the contract was created successfully in at least 1 state return account\n for state in self.ready_states:\n if state.platform.get_code(int(contract_account)):\n return contract_account\n\n logger.info(\"Failed to compile contract %r\", contract_names)",
"def create(symbol, number_of_shares, purchase_price):\n stock = Stock(symbol, number_of_shares, purchase_price)\n database.session.add(stock)\n database.session.commit()",
"def get_quote(pair: CurrencyPair, amount: int) -> QuoteData:\n storage_quote = create_quote(\n currency_pair=CurrencyPairs.from_pair(pair),\n rate=get_rate(currency_pair=pair).rate,\n amount=amount,\n expires_at=datetime.now() + timedelta(minutes=10),\n )\n return QuoteData(\n quote_id=QuoteId(uuid.UUID(storage_quote.id)),\n rate=Rate(storage_quote.currency_pair.value, storage_quote.rate),\n expires_at=storage_quote.expires_at,\n amount=storage_quote.amount,\n )",
"def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n self.load_markets()\n method = 'privatePostOrderAddOrder'\n market = self.market(symbol)\n request = {\n 'Amount': amount,\n 'Pair': market['id'],\n }\n if type == 'market':\n method += 'MarketPrice' + self.capitalize(side)\n else:\n request['Price'] = price\n amountString = self.number_to_string(amount)\n priceString = self.number_to_string(price)\n request['Total'] = self.parse_number(Precise.string_mul(amountString, priceString))\n request['IsBid'] = (side == 'buy')\n response = getattr(self, method)(self.extend(request, params))\n return self.parse_order(response, market)",
"def buy(symbol: str,\n quantity: Any,\n order_type: str = \"market\",\n price: Any = None,\n exchange: str = CRYPTO_EXCHANGE,\n api_key: str = CRYPTO_API_KEY,\n api_secret: str = CRYPTO_API_SECRET,\n exchange_password: Any = CRYPTO_API_PASSWORD,\n exchange_uid: Any = CRYPTO_API_UID,\n test_mode: bool = False) -> Any:\n try:\n if test_mode == True:\n url = CRYPTO_URL_TEST\n else:\n url = CRYPTO_URL_LIVE\n payload = {\n 'symbol': symbol.upper(),\n 'quantity': quantity,\n 'order_type': order_type,\n 'limitPrice': price\n }\n response = requests.post('{}/buy/{}'.format(url, exchange),\n headers=crypto_get_headers(\n api_key, api_secret, exchange_password,\n exchange_uid),\n json=payload)\n if response:\n return response.json()\n if response.status_code == 400:\n logger.error('Oops! An error Occurred ⚠️')\n raise BadRequest(response.text)\n if response.status_code == 401:\n logger.error('Oops! An error Occurred ⚠️')\n raise InvalidCredentials(response.text)\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception",
"def add_trade(self, trade_type: str, trade_id: int, trade_time: int, asset: str, ref_asset: str, qty: float,\n price: float, fee: float, fee_asset: str, is_buyer: bool, symbol: Optional[str] = None,\n auto_commit: bool = True):\n row = (trade_id, trade_time, asset, ref_asset, qty, price, fee, fee_asset, int(is_buyer))\n if trade_type == 'spot':\n table = tables.SPOT_TRADE_TABLE\n elif trade_type == 'cross_margin':\n table = tables.CROSS_MARGIN_TRADE_TABLE\n elif trade_type == 'isolated_margin':\n table = tables.ISOLATED_MARGIN_TRADE_TABLE\n if symbol is None:\n raise ValueError(\"trade_type was isolated margin but symbol was not provided\")\n row = (trade_id, trade_time, symbol, asset, ref_asset, qty, price, fee, fee_asset, int(is_buyer))\n else:\n msg = f\"trade type should be one of ('spot', 'cross_margin', 'isolated_margin') but {trade_type} was\" \\\n f\" received\"\n raise ValueError(msg)\n self.add_row(table, row, auto_commit)",
"def create_get_order_book_ticker_request(self, symbol: Optional[str] = None) -> Request:",
"def __init__(\n self, ticker, init_action=None, init_quantity=0,\n init_price=0, init_commission=0,\n bid=None, ask=None\n ):\n \n self.ticker = ticker\n self.realised_pnl = 0\n self.market_value = 0\n self.cost_basis = 0\n self.unrealised_pnl = 0\n self.total_pnl = 0\n\n self.buys = 0\n self.sells = 0\n self.net = 0\n\n self.avg_bot = 0\n self.avg_sld = 0\n self.avg_price = 0\n\n self.total_bot = 0\n self.total_sld = 0\n self.total_commission = 0\n self.net_total = 0\n self.net_incl_comm = 0\n\n self.transact_shares(init_action, init_quantity, init_price, init_commission, bid, ask)",
"def test_17_transaction_create_sell_cash(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=100000,\n unit_price=1.17,\n user=user\n )\n\n self.assertTrue(isinstance(sell_cash_eur, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling EUR in cash\")\n print(\"Transaction sell_cash method is returning a valid EUR transaction: {}\".format(\n sell_cash_eur))\n\n \"\"\"Is transaction avoiding short sell cash objects?\"\"\"\n short_sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=500000,\n unit_price=1.10,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_cash_eur, Transaction),\n msg=\"Transaction is NOT avoiding short selling EUR in cash\")\n print(\"Transaction sell_cash method is avoiding a short sell EUR transaction: {}\".format(\n short_sell_cash_eur))",
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n 'amount': self.amount_to_precision(symbol, amount),\n 'side': side,\n 'type': type,\n }\n if type == 'limit':\n request['price'] = self.price_to_precision(symbol, price)\n response = await self.privatePostUserSpotOrder(self.extend(request, params))\n data = self.safe_value(response, 'data')\n return self.parse_order(data, market)",
"def create_order_book(currency_pair, **kwargs):\n client = OrderBookClient(currency_pair)\n return create_order_book_from_client(client, **kwargs)",
"def buy(self,\n currency_pair,\n rate,\n amount):\n pass",
"def __init__(self, ticker, shares, enter_price):\n self.ticker = ticker\n self.entered = enter_price\n self.current_price = enter_price\n self.shares = shares",
"def initialize(context):\n # Rebalance every day, 1 hour after market open.\n set_slippage(slippage.FixedSlippage(spread=0.00))\n set_commission(commission.PerShare(cost=0.0, min_trade_cost=0.0))\n context.lookback = 60\n context.leverage = 0.02\n context.day = 1\n #context.ETFs = []\n context.market = [symbol('SPY')]\n context.bo = 1.25\n context.so = 1.25\n context.bc = 0.75\n context.sc = 0.5\n context.stocks = []\n context.initialized = False\n context.holding_book_shares = None\n context.order_hist = {}\n \n context.xlb = symbol('XLB') #sid(19654) #Materials 101\n context.xly = symbol('XLY') #sid(19662) #Consumer Discretionary 102\n context.xlf = symbol('XLF') #sid(19656) #Financials 103\n context.xlre = symbol('IYR') #sid() #Real estate 104\n context.xlp = symbol('XLP') #sid(19659) #Consumer Staples 205\n context.xlv = symbol('XLV') #sid(19661) #Health Care 206\n context.xlu = symbol('XLU') #sid(19660) #Utilities 207\n context.xtl = symbol('IYZ') #sid() #Communication Services 308\n context.xle = symbol('XLE') #sid(19655) #Energy 309\n context.xli = symbol('XLI') #sid(19657) #Industrials 310\n context.xlk = symbol('XLK') #sid(19658) #Technology 311\n \n context.ETF_lookup = {context.xlb:101, 101:context.xlb,\n context.xly:102, 102:context.xly,\n context.xlf:103, 103:context.xlf,\n context.xlre:104, 104:context.xlre,\n context.xlp:205, 205: context.xlp,\n context.xlv:206, 206: context.xlv,\n context.xlu:207, 207:context.xlu,\n context.xtl:308, 308:context.xtl,\n context.xle:309, 309:context.xle,\n context.xli:310, 310:context.xli,\n context.xlk:311, 311:context.xlk}\n\n context.ETFs = [context.xlb,\n context.xly,\n context.xlf,\n context.xlre,\n context.xlp,\n context.xlv,\n context.xlu,\n context.xtl,\n context.xle,\n context.xli,\n context.xlk\n ]",
"def getCloseStrikePrice(ib, qualityContracts, aStockSymbol, price, startDate, right, exchange = 'SMART' ):\n # print(qualityContracts.symbol)\n # [ticker] = ib.reqTickers(qualityContracts)\n\n chains = ib.reqSecDefOptParams(qualityContracts.symbol, '', qualityContracts.secType, qualityContracts.conId)\n chain = next(c for c in chains if c.tradingClass == aStockSymbol and c.exchange == exchange)\n\n # if price >= then $40 Get to the next round at +/- 5\n # if price < $40 get to round at the +/-2\n if price >= 40 :\n strikePlus = (5 * round(price / 5)) + 5\n strikeMinus = (5 * round(price / 5)) - 5\n # print('strikePlus', strikePlus)\n # print('strikeMinus', strikeMinus)\n else:\n strikePlus = (2 * round(price / 2)) + 3\n strikeMinus = (2 * round(price / 2)) - 3\n # print('strikePlus', strikePlus)\n # print('strikeMinus', strikeMinus)\n\n # get strikes at strikePlus\n strikes = [strike for strike in chain.strikes\n if strike >= strikeMinus and strike <= strikePlus ]\n # print('strikes: ', strikes)\n\n # get experation date in proper format\n dateFromISOFromat = dateUtils.getDateFromISO8601(startDate)\n nextFridayDateFormat = dateUtils.nextFriday(dateFromISOFromat)\n expiration = dateUtils.nextFridayOrgFormat(nextFridayDateFormat) # = sorted(exp for exp in chain.expirations)\n # print('expiration: ', expiration)\n\n contracts = [Option(qualityContracts.symbol, expiration, strike, right, exchange)\n for strike in strikes]\n ib.qualifyContracts(*contracts)\n # print('contracts: ', contracts, '\\n\\n\\n')\n\n # Todo: should this be ib.reqSecDefOptParams instead of ib.reqContractDetails???\n optionContractsDetails = [ib.reqContractDetails(cd)\n for cd in contracts]\n\n return strikes, contracts\n\n # # # Todo - need to update to return close price when market closed else return last\n # removing last as we were getting errors if it did not exist.\n # # return tickers[0].close + tickers[1].close",
"async def crypto(self, ctx, ticker: str):\n ticker = ticker.upper()\n api_endpoint = \"https://min-api.cryptocompare.com/data/pricemultifull\"\\\n f\"?tsyms=USD&fsyms={ticker}\"\n api_json = await self.bot.aiojson(api_endpoint)\n if \"Message\" in api_json:\n await ctx.send(f\"Error from API: `{api_json['Message']}`\")\n return\n\n raw_data = api_json[\"RAW\"][ticker][\"USD\"]\n stylized_data = api_json[\"DISPLAY\"][ticker][\"USD\"]\n\n change_color = self.get_change_color(raw_data[\"CHANGEPCTDAY\"], 10)\n\n data_timestamp = datetime.datetime.utcfromtimestamp(\n raw_data[\"LASTUPDATE\"])\n\n coin_name = await self.get_crypto_name(ticker)\n\n embed = discord.Embed(color=change_color, timestamp=data_timestamp)\n\n embed.set_author(name=f\"Price info for {coin_name} from {stylized_data['MARKET']}\")\n embed.set_footer(text=\"Price info supplied by CryptoCompare. \" + self.legal_notice)\n\n embed.add_field(name=\"Current Price\", value=stylized_data[\"PRICE\"])\n embed.add_field(name=\"Opening Price\", value=stylized_data[\"OPENDAY\"])\n\n embed.add_field(name=\"Change\", value=f\"{stylized_data['CHANGEDAY']} \"\\\n f\"({stylized_data['CHANGEPCTDAY']}%)\")\n embed.add_field(name=\"Volume\", value=stylized_data[\"VOLUMEDAY\"])\n\n embed.add_field(name=\"High\", value=stylized_data[\"HIGHDAY\"])\n embed.add_field(name=\"Low\", value=stylized_data[\"LOWDAY\"])\n\n await ctx.send(embed=embed)",
"def __init__(self, symbol, order_type, quantity, direction):\n self.type = 'ORDER'\n self.symbol = symbol\n self.order_type = order_type\n self.quantity = quantity\n self.direction = direction",
"async def buy(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n price = self.iex.price(symbol)\r\n cost = quantity * price\r\n if company.balance < cost:\r\n await ctx.send(f\"{company.name}\\nBalance: {company.balance} USD\\nPurchase cost: {cost} USD\")\r\n raise StonksError()\r\n\r\n value = price * quantity\r\n self.iex.buy(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``-{value} {company.name} ⯮ {quantity} {symbol} @ {price}``\")",
"def create_contract(self, owner, balance=0, address=None, init=None, name=None, gas=None):\n if not self.count_ready_states():\n raise NoAliveStates\n\n nonce = self.get_nonce(owner)\n expected_address = evm.EVMWorld.calculate_new_address(int(owner), nonce=nonce)\n\n if address is None:\n address = expected_address\n elif address != expected_address:\n raise EthereumError(\n \"Address was expected to be %x but was given %x\" % (expected_address, address)\n )\n\n if name is None:\n name = self._get_uniq_name(\"contract\")\n if name in self._accounts:\n raise EthereumError(\"Name already used\")\n\n # setting price to zero when calling transaction\n self._transaction(\"CREATE\", owner, balance, address, data=init, gas=gas, price=0)\n if self.count_ready_states():\n self._accounts[name] = EVMContract(\n address=address, manticore=self, default_caller=owner, name=name\n )\n return self.accounts[name]",
"def _consume_cryptocom_entry(self, csv_row: Dict[str, Any]) -> None:\n row_type = csv_row['Transaction Kind']\n timestamp = deserialize_timestamp_from_date(\n date=csv_row['Timestamp (UTC)'],\n formatstr='%Y-%m-%d %H:%M:%S',\n location='crypto.com',\n )\n description = csv_row['Transaction Description']\n notes = f'{description}\\nSource: crypto.com (CSV import)'\n\n # No fees info until (Nov 2020) on crypto.com\n # fees are not displayed in the export data\n fee = Fee(ZERO)\n fee_currency = A_USD # whatever (used only if there is no fee)\n\n if row_type in (\n 'crypto_purchase',\n 'crypto_exchange',\n 'referral_gift',\n 'referral_bonus',\n 'crypto_earn_interest_paid',\n 'referral_card_cashback',\n 'card_cashback_reverted',\n 'reimbursement',\n ):\n # variable mapping to raw data\n currency = csv_row['Currency']\n to_currency = csv_row['To Currency']\n native_currency = csv_row['Native Currency']\n amount = csv_row['Amount']\n to_amount = csv_row['To Amount']\n native_amount = csv_row['Native Amount']\n\n trade_type = TradeType.BUY if to_currency != native_currency else TradeType.SELL\n\n if row_type == 'crypto_exchange':\n # trades crypto to crypto\n base_asset = Asset(to_currency)\n quote_asset = Asset(currency)\n if quote_asset is None:\n raise DeserializationError('Got a trade entry with an empty quote asset')\n base_amount_bought = deserialize_asset_amount(to_amount)\n quote_amount_sold = deserialize_asset_amount(amount)\n else:\n base_asset = Asset(currency)\n quote_asset = Asset(native_currency)\n base_amount_bought = deserialize_asset_amount(amount)\n quote_amount_sold = deserialize_asset_amount(native_amount)\n\n rate = Price(abs(quote_amount_sold / base_amount_bought))\n pair = TradePair(f'{base_asset.identifier}_{quote_asset.identifier}')\n trade = Trade(\n timestamp=timestamp,\n location=Location.CRYPTOCOM,\n pair=pair,\n trade_type=trade_type,\n amount=base_amount_bought,\n rate=rate,\n fee=fee,\n fee_currency=fee_currency,\n link='',\n notes=notes,\n )\n self.db.add_trades([trade])\n\n elif row_type == 'crypto_withdrawal' or row_type == 'crypto_deposit':\n if row_type == 'crypto_withdrawal':\n category = AssetMovementCategory.WITHDRAWAL\n amount = deserialize_asset_amount_force_positive(csv_row['Amount'])\n else:\n category = AssetMovementCategory.DEPOSIT\n amount = deserialize_asset_amount(csv_row['Amount'])\n\n asset = Asset(csv_row['Currency'])\n asset_movement = AssetMovement(\n location=Location.CRYPTOCOM,\n category=category,\n address=None,\n transaction_id=None,\n timestamp=timestamp,\n asset=asset,\n amount=amount,\n fee=fee,\n fee_asset=asset,\n link='',\n )\n self.db.add_asset_movements([asset_movement])\n\n elif row_type in (\n 'crypto_earn_program_created',\n 'lockup_lock',\n 'lockup_unlock',\n 'dynamic_coin_swap_bonus_exchange_deposit',\n 'crypto_wallet_swap_debited',\n 'crypto_wallet_swap_credited',\n 'lockup_swap_debited',\n 'lockup_swap_credited',\n 'lockup_swap_rebate',\n 'dynamic_coin_swap_bonus_exchange_deposit',\n # we don't handle cryto.com exchange yet\n 'crypto_to_exchange_transfer',\n 'exchange_to_crypto_transfer',\n # supercharger actions\n 'supercharger_deposit',\n 'supercharger_withdrawal',\n # already handled using _import_cryptocom_double_entries\n 'dynamic_coin_swap_debited',\n 'dynamic_coin_swap_credited',\n 'dust_conversion_debited',\n 'dust_conversion_credited',\n ):\n # those types are ignored because it doesn't affect the wallet balance\n # or are not handled here\n return\n else:\n raise UnsupportedCryptocomEntry(\n f'Unknown entrype type 
\"{row_type}\" encountered during '\n f'cryptocom data import. Ignoring entry',\n )",
"def __init__(self, target_currency, api_key):\n self.api_key = api_key\n self.rate = None\n self.target_currency = target_currency\n self.exchange = Fixerio(symbols=[self.target_currency], access_key=self.api_key)",
"def tx_create(self,\n keypair,\n amount=1,\n deposit=CONTRACT_DEFAULT_DEPOSIT,\n init_state=\"()\",\n gas=CONTRACT_DEFAULT_GAS,\n gas_price=CONTRACT_DEFAULT_GAS_PRICE,\n fee=DEFAULT_FEE,\n vm_version=CONTRACT_DEFAULT_VM_VERSION,\n tx_ttl=DEFAULT_TX_TTL):\n try:\n call_data = self.encode_calldata(\"init\", init_state)\n\n # get the transaction builder\n txb = TxBuilder(self.client, keypair)\n # create spend_tx\n tx, sg, tx_hash, contract_id = txb.tx_contract_create(self.bytecode, call_data, amount, deposit, gas, gas_price, vm_version, fee, tx_ttl)\n # post the transaction to the chain\n txb.post_transaction(tx, tx_hash)\n if self.client.blocking_mode:\n txb.wait_tx(tx_hash)\n # store the contract address in the instance variabl\n self.address = contract_id\n return tx\n except OpenAPIClientException as e:\n raise ContractError(e)",
"def test_get_concise_contract():\n contract_concise = ContractHandler.get_concise_contract(\"DataTokenTemplate\")\n assert isinstance(contract_concise, ConciseContract)"
]
| [
"0.605647",
"0.6021374",
"0.576684",
"0.56790835",
"0.5601576",
"0.5555213",
"0.55236983",
"0.55217147",
"0.54633653",
"0.54632473",
"0.5438681",
"0.5346382",
"0.53407604",
"0.53043514",
"0.52908975",
"0.5279396",
"0.52601576",
"0.52305746",
"0.5226109",
"0.5208201",
"0.5194693",
"0.51920646",
"0.51793635",
"0.51563287",
"0.5153646",
"0.5143867",
"0.5128599",
"0.50948757",
"0.50894225",
"0.50816315"
]
| 0.87328726 | 1 |
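Usage sketch (added for illustration; not part of the dataset record above): assuming the create_contract helper from that record is in scope and the IbPy package supplying its Contract class is installed, a call could look like the following. The 'GOOG', 'SMART' and 'USD' values are arbitrary example inputs, not values taken from the record.

# Illustrative only: assumes create_contract() from the record above is defined
# and that IbPy, which provides the Contract class it instantiates, is installed.
goog_contract = create_contract(
    symbol="GOOG",      # ticker symbol
    sec_type="STK",     # security type: 'STK' is stock
    exch="SMART",       # exchange to carry out the contract on
    prim_exch="SMART",  # primary exchange
    curr="USD",         # currency in which to purchase the contract
)
print(goog_contract.m_symbol, goog_contract.m_secType, goog_contract.m_currency)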
Create an Order object (Market/Limit) to go long/short.

order_type - 'MKT', 'LMT' for Market or Limit orders
quantity - Integral number of assets to order
action - 'BUY' or 'SELL' | def create_order(order_type, quantity, action):
order = Order()
order.m_orderType = order_type
order.m_totalQuantity = quantity
order.m_action = action
return order | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_order(order_type, quantity, action, price = None):\n order = Order()\n order.m_orderType = order_type\n order.m_totalQuantity = quantity\n order.m_action = action\n order.m_account = ConfigMgr.get_ib_config()['account_code']\n if order_type == 'LMT':\n order.m_lmtPrice = price\n elif order_type == 'STP':\n order.m_auxPrice = price\n return order",
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n # order types \"limit\" and \"market\" immediatley parsed \"EXCHANGE LIMIT\" and \"EXCHANGE MARKET\"\n # note: same order types exist for margin orders without the EXCHANGE prefix\n orderTypes = self.safe_value(self.options, 'orderTypes', {})\n orderType = type.upper()\n if market['spot']:\n # although they claim that type needs to be 'exchange limit' or 'exchange market'\n # in fact that's not the case for swap markets\n orderType = self.safe_string_upper(orderTypes, type, type)\n stopPrice = self.safe_string_2(params, 'stopPrice', 'triggerPrice')\n timeInForce = self.safe_string(params, 'timeInForce')\n postOnlyParam = self.safe_value(params, 'postOnly', False)\n reduceOnly = self.safe_value(params, 'reduceOnly', False)\n clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')\n params = self.omit(params, ['triggerPrice', 'stopPrice', 'timeInForce', 'postOnly', 'reduceOnly', 'price_aux_limit'])\n amountString = self.amount_to_precision(symbol, amount)\n amountString = amountString if (side == 'buy') else Precise.string_neg(amountString)\n request = {\n # 'gid': 0123456789, # int32, optional group id for the order\n # 'cid': 0123456789, # int32 client order id\n 'type': orderType,\n 'symbol': market['id'],\n # 'price': self.number_to_string(price),\n 'amount': amountString,\n # 'flags': 0, # int32, https://docs.bitfinex.com/v2/docs/flag-values\n # 'lev': 10, # leverage for a derivative orders, the value should be between 1 and 100 inclusive, optional, 10 by default\n # 'price_trailing': self.number_to_string(priceTrailing),\n # 'price_aux_limit': self.number_to_string(stopPrice),\n # 'price_oco_stop': self.number_to_string(ocoStopPrice),\n # 'tif': '2020-01-01 10:45:23', # datetime for automatic order cancellation\n # 'meta': {\n # 'aff_code': 'AFF_CODE_HERE'\n # },\n }\n stopLimit = ((orderType == 'EXCHANGE STOP LIMIT') or ((orderType == 'EXCHANGE LIMIT') and (stopPrice is not None)))\n exchangeStop = (orderType == 'EXCHANGE STOP')\n exchangeMarket = (orderType == 'EXCHANGE MARKET')\n stopMarket = (exchangeStop or (exchangeMarket and (stopPrice is not None)))\n ioc = ((orderType == 'EXCHANGE IOC') or (timeInForce == 'IOC'))\n fok = ((orderType == 'EXCHANGE FOK') or (timeInForce == 'FOK'))\n postOnly = (postOnlyParam or (timeInForce == 'PO'))\n if (ioc or fok) and (price is None):\n raise InvalidOrder(self.id + ' createOrder() requires a price argument with IOC and FOK orders')\n if (ioc or fok) and exchangeMarket:\n raise InvalidOrder(self.id + ' createOrder() does not allow market IOC and FOK orders')\n if (orderType != 'MARKET') and (not exchangeMarket) and (not exchangeStop):\n request['price'] = self.price_to_precision(symbol, price)\n if stopLimit or stopMarket:\n # request['price'] is taken for stop orders\n request['price'] = self.price_to_precision(symbol, stopPrice)\n if stopMarket:\n request['type'] = 'EXCHANGE STOP'\n elif stopLimit:\n request['type'] = 'EXCHANGE STOP LIMIT'\n request['price_aux_limit'] = self.price_to_precision(symbol, price)\n if ioc:\n request['type'] = 'EXCHANGE IOC'\n elif fok:\n request['type'] = 'EXCHANGE FOK'\n # flag values may be summed to combine flags\n flags = 0\n if postOnly:\n flags = self.sum(flags, 4096)\n if reduceOnly:\n flags = self.sum(flags, 1024)\n if flags != 0:\n request['flags'] = flags\n if clientOrderId is not None:\n request['cid'] = clientOrderId\n 
params = self.omit(params, ['cid', 'clientOrderId'])\n response = await self.privatePostAuthWOrderSubmit(self.extend(request, params))\n #\n # [\n # 1653325121, # Timestamp in milliseconds\n # \"on-req\", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')\n # null, # unique ID of the message\n # null,\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325121798, # Millisecond timestamp of creation\n # 1653325121798, # Millisecond timestamp of update\n # -10, # Amount(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Type of the order: LIMIT, EXCHANGE LIMIT, MARKET, EXCHANGE MARKET, STOP, EXCHANGE STOP, STOP LIMIT, EXCHANGE STOP LIMIT, TRAILING STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC.\n # null, # Previous order type(stop-limit orders are converted to limit orders so for them previous type is always STOP)\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # 4096, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price(Stop Price for stop-limit orders, Limit Price for limit orders)\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ]\n # ],\n # null, # CODE(work in progress)\n # \"SUCCESS\", # Status of the request\n # \"Submitting 1 orders.\" # Message\n # ]\n #\n status = self.safe_string(response, 6)\n if status != 'SUCCESS':\n errorCode = response[5]\n errorText = response[7]\n raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')\n orders = self.safe_value(response, 4, [])\n order = self.safe_value(orders, 0)\n return self.parse_order(order, market)",
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n orderType = self.capitalize(type)\n reduceOnly = self.safe_value(params, 'reduceOnly')\n if reduceOnly is not None:\n if (market['type'] != 'swap') and (market['type'] != 'future'):\n raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + market['type'] + ' orders, reduceOnly orders are supported for swap and future markets only')\n brokerId = self.safe_string(self.options, 'brokerId', 'CCXT')\n qty = self.parse_to_int(self.amount_to_precision(symbol, amount))\n request = {\n 'symbol': market['id'],\n 'side': self.capitalize(side),\n 'orderQty': qty, # lot size multiplied by the number of contracts\n 'ordType': orderType,\n 'text': brokerId,\n }\n if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):\n stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')\n if stopPrice is None:\n raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')\n else:\n request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))\n params = self.omit(params, ['stopPx', 'stopPrice'])\n if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):\n request['price'] = float(self.price_to_precision(symbol, price))\n clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')\n if clientOrderId is not None:\n request['clOrdID'] = clientOrderId\n params = self.omit(params, ['clOrdID', 'clientOrderId'])\n response = await self.privatePostOrder(self.extend(request, params))\n return self.parse_order(response, market)",
"def _create_order(self, order_status):\n if order_status['is_cancelled']:\n status = ORDER_STATUS.CANCELLED\n elif not order_status['is_live']:\n log.info('found executed order {}'.format(order_status))\n status = ORDER_STATUS.FILLED\n else:\n status = ORDER_STATUS.OPEN\n\n amount = float(order_status['original_amount'])\n filled = float(order_status['executed_amount'])\n\n if order_status['side'] == 'sell':\n amount = -amount\n filled = -filled\n\n price = float(order_status['price'])\n order_type = order_status['type']\n\n stop_price = None\n limit_price = None\n\n # TODO: is this comprehensive enough?\n if order_type.endswith('limit'):\n limit_price = price\n elif order_type.endswith('stop'):\n stop_price = price\n\n executed_price = float(order_status['avg_execution_price'])\n\n # TODO: bitfinex does not specify comission. I could calculate it but not sure if it's worth it.\n commission = None\n\n date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))\n date = pytz.utc.localize(date)\n order = Order(\n dt=date,\n asset=self.assets[order_status['symbol']],\n amount=amount,\n stop=stop_price,\n limit=limit_price,\n filled=filled,\n id=str(order_status['id']),\n commission=commission\n )\n order.status = status\n\n return order, executed_price",
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n 'amount': self.amount_to_precision(symbol, amount),\n 'side': side,\n 'type': type,\n }\n if type == 'limit':\n request['price'] = self.price_to_precision(symbol, price)\n response = await self.privatePostUserSpotOrder(self.extend(request, params))\n data = self.safe_value(response, 'data')\n return self.parse_order(data, market)",
"def order(\n self,\n order_class: str,\n symbol: str,\n order_type: str,\n duration: str,\n quantity: Optional[int],\n side: Optional[str],\n limit_price: float = None,\n stop_price: float = None,\n tag: str = None,\n account_id: str = None,\n option_symbol: str = None,\n option_symbol_0: str = None,\n side_0: str = None,\n quantity_0: int = None,\n option_symbol_1: str = None,\n side_1: str = None,\n quantity_1: int = None,\n option_symbol_2: str = None,\n side_2: str = None,\n quantity_2: int = None,\n option_symbol_3: str = None,\n side_3: str = None,\n quantity_3: int = None,\n ) -> OrderDetails:\n if account_id is None:\n account_id = self.default_account_id\n url = f\"/v1/accounts/{account_id}/orders\"\n params = {\n \"class\": order_class,\n \"symbol\": symbol,\n \"option_symbol\": option_symbol,\n \"side\": side,\n \"quantity\": quantity,\n \"type\": order_type,\n \"duration\": duration,\n \"price\": limit_price,\n \"stop\": stop_price,\n \"tag\": tag,\n \"option_symbol[0]\": option_symbol_0,\n \"side[0]\": side_0,\n \"quantity[0]\": quantity_0,\n \"option_symbol[1]\": option_symbol_1,\n \"side[1]\": side_1,\n \"quantity[1]\": quantity_1,\n \"option_symbol[2]\": option_symbol_2,\n \"side[2]\": side_2,\n \"quantity[2]\": quantity_2,\n \"option_symbol[3]\": option_symbol_3,\n \"side[3]\": side_3,\n \"quantity[3]\": quantity_3,\n }\n params = {k: v for k, v in params.items() if v is not None}\n data = self.post(url, params)\n res = OrderAPIResponse(**data)\n if res.errors:\n raise TradierOrderError(res.errors.error_list)\n return res.order",
"def place_order(self, tradetype, market, amount, rate,\n ordertype, timeInEffect, \n conditionType=None, target=None):\n\n if tradetype in ('BUY', 'buy'):\n method = \"tradebuy\"\n elif tradetype in ('SELL', 'sell'):\n method = \"tradesell\"\n\n if not conditionType:\n conditionType = \"CONDITION_NONE\"\n if not target:\n target = \"0\"\n options = {\"marketname\": market, \n \"ordertype\": ordertype, \n \"quantity\": str(amount),\n \"rate\": str(rate),\n \"timeineffect\": str(timeInEffect),\n \"conditiontype\": conditionType,\n \"target\": target}\n\n return self.__call__('market', method, options)",
"def create_order(self, asset, amount, is_buy, style):\n exchange_symbol = self.get_symbol(asset)\n if isinstance(style, ExchangeLimitOrder) \\\n or isinstance(style, ExchangeStopLimitOrder):\n price = style.get_limit_price(is_buy)\n order_type = 'limit'\n\n elif isinstance(style, ExchangeStopOrder):\n price = style.get_stop_price(is_buy)\n order_type = 'stop'\n\n else:\n raise InvalidOrderStyle(exchange=self.name,\n style=style.__class__.__name__)\n\n req = dict(\n symbol=exchange_symbol,\n amount=str(float(abs(amount))),\n price=\"{:.20f}\".format(float(price)),\n side='buy' if is_buy else 'sell',\n type='exchange ' + order_type, # TODO: support margin trades\n exchange=self.name,\n is_hidden=False,\n is_postonly=False,\n use_all_available=0,\n ocoorder=False,\n buy_price_oco=0,\n sell_price_oco=0\n )\n\n date = pd.Timestamp.utcnow()\n try:\n self.ask_request()\n response = self._request('order/new', req)\n order_status = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_status:\n raise ExchangeRequestError(\n error='unable to create Bitfinex order {}'.format(\n order_status['message'])\n )\n\n order_id = str(order_status['id'])\n order = Order(\n dt=date,\n asset=asset,\n amount=amount,\n stop=style.get_stop_price(is_buy),\n limit=style.get_limit_price(is_buy),\n id=order_id\n )\n\n return order",
"def order(self, id, long, qty, limit=0, stop=0, post_only=False, reduce_only=False, trailing_stop=0, activationPrice=0, when=True):\n self.__init_client()\n\n # if self.get_margin()['excessMargin'] <= 0 or qty <= 0:\n # return\n\n if not when:\n return\n\n side = \"BUY\" if long else \"SELL\"\n ord_qty = qty\n\n order = self.get_open_order(id)\n ord_id = id + ord_suffix() #if order is None else order[\"clientOrderId\"]\n\n if order is None:\n self.__new_order(ord_id, side, ord_qty, limit, stop, post_only, reduce_only, trailing_stop, activationPrice)\n else:\n self.__new_order(ord_id, side, ord_qty, limit, stop, post_only, reduce_only, trailing_stop, activationPrice)\n #self.__amend_order(ord_id, side, ord_qty, limit, stop, post_only)\n return",
"def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n self.load_markets()\n method = 'privatePostOrderAddOrder'\n market = self.market(symbol)\n request = {\n 'Amount': amount,\n 'Pair': market['id'],\n }\n if type == 'market':\n method += 'MarketPrice' + self.capitalize(side)\n else:\n request['Price'] = price\n amountString = self.number_to_string(amount)\n priceString = self.number_to_string(price)\n request['Total'] = self.parse_number(Precise.string_mul(amountString, priceString))\n request['IsBid'] = (side == 'buy')\n response = getattr(self, method)(self.extend(request, params))\n return self.parse_order(response, market)",
"def create_order(self, symbol, tradeType, price, amount):\r\n param = {\r\n 'symbol': self.__transfer_symbol(symbol),\r\n 'tradeType': tradeType, #BUY/SELL\r\n 'price': price,\r\n 'amount': amount,\r\n 'appid': self.apiKey,\r\n 'nonce': int(time.time() * 1000),\r\n 'timestamp': int(time.time())\r\n }\r\n return self.__sign_POST('/api/v1/order/create', param, self.timeout)",
"def new_order(self, symbol: Symbol, side: OrderSide, order_type: OrderType, quantity: str,\n price: Optional[str] = None,\n receive_window: Optional[int] = None):\n api_params = {\n 'symbol': symbol.value,\n 'side': side.value,\n 'type': order_type.value,\n 'quantity': quantity,\n 'timestamp': get_current_time_milliseconds()\n }\n\n if price is not None:\n api_params['price'] = price\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.post(path='/order', json_data=api_params)",
"def __init__(self, symbol, order_type, quantity, direction):\n self.type = 'ORDER'\n self.symbol = symbol\n self.order_type = order_type\n self.quantity = quantity\n self.direction = direction",
"def create_order(self, rate, amount, order_type, pair):\n nonce = nounce()\n payload = {'rate': rate,\n 'amount': amount,\n 'order_type': order_type,\n 'pair': pair\n }\n url = 'https://coincheck.com/api/exchange/orders'\n body = 'rate={rate}&amount={amount}&order_type={order_type}&pair={pair}'.format(**payload)\n message = nonce + url + body\n signature = hmac.new(self.secret_key.encode('utf-8'), message.encode('utf-8'), hashlib.sha256).hexdigest()\n headers = {\n 'ACCESS-KEY': self.access_key,\n 'ACCESS-NONCE': nonce,\n 'ACCESS-SIGNATURE': signature\n }\n r = requests.post(url, headers=headers, data=body, timeout=self.timeout)\n return json.loads(r.text)",
"def _new_order_parameters(self, create_order_action):\n parameters = {}\n parameters['client_order_id'] = str(id(create_order_action))\n parameters['amount'] = str(create_order_action.amount)\n parameters['symbol'] = \"btcusd\"\n parameters['side'] = 'buy' if create_order_action.side == \\\n exchanges.Side.BID else 'sell'\n # The only supported type is a limit order.\n parameters['type'] = 'exchange limit'\n # A market order needs to be carried out as a limit order.\n if create_order_action.type == exchanges.Order.Type.MARKET:\n parameters['options'] = [\"immediate-or-cancel\"]\n # TODO: there is an opportunity to provide extra safety.\n temp_max_price = \"1000000\" # $1 million\n temp_min_price = \"0\"\n if create_order_action.side == exchanges.Side.BID:\n parameters['price'] = temp_max_price\n else:\n parameters['price'] = temp_min_price\n else:\n parameters['price'] = str(create_order_action.price)\n return parameters",
"def create_order(self, walletId, side, currency, amount, price, instrument):\n return",
"async def _create_order(self,\n trade_type: TradeType,\n order_id: str,\n trading_pair: str,\n amount: Decimal,\n price: Decimal):\n\n amount = self.quantize_order_amount(trading_pair, amount)\n price = self.quantize_order_price(trading_pair, price)\n base, quote = trading_pair.split(\"-\")\n api_params = {\"base\": base,\n \"quote\": quote,\n \"trade_type\": \"buy\" if trade_type is TradeType.BUY else \"sell\",\n \"amount\": str(amount),\n \"secret\": self._terra_wallet_seeds,\n # \"maxPrice\": str(price),\n }\n self.start_tracking_order(order_id, None, trading_pair, trade_type, price, amount)\n try:\n order_result = await self._api_request(\"post\", \"terra/trade\", api_params)\n hash = order_result[\"txHash\"]\n txSuccess = order_result[\"txSuccess\"]\n tracked_order = self._in_flight_orders.get(order_id)\n if tracked_order is not None:\n self.logger().info(f\"Created {trade_type.name} order {order_id} txHash: {hash} \"\n f\"for {amount} {trading_pair}.\")\n tracked_order.update_exchange_order_id(hash)\n if txSuccess:\n tracked_order.fee_asset = order_result[\"fee\"][\"token\"]\n tracked_order.executed_amount_base = amount\n tracked_order.executed_amount_quote = amount * price\n tracked_order.fee_paid = order_result[\"fee\"][\"amount\"]\n event_tag = MarketEvent.BuyOrderCreated if trade_type is TradeType.BUY else MarketEvent.SellOrderCreated\n event_class = BuyOrderCreatedEvent if trade_type is TradeType.BUY else SellOrderCreatedEvent\n self.trigger_event(event_tag, event_class(self.current_timestamp, OrderType.LIMIT, trading_pair, amount,\n price, order_id, hash))\n self.trigger_event(MarketEvent.OrderFilled,\n OrderFilledEvent(\n self.current_timestamp,\n tracked_order.client_order_id,\n tracked_order.trading_pair,\n tracked_order.trade_type,\n tracked_order.order_type,\n price,\n amount,\n TradeFee(0.0, [(tracked_order.fee_asset, tracked_order.fee_paid)]),\n hash\n ))\n\n event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \\\n else MarketEvent.SellOrderCompleted\n event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \\\n else SellOrderCompletedEvent\n self.trigger_event(event_tag,\n event_class(self.current_timestamp,\n tracked_order.client_order_id,\n tracked_order.base_asset,\n tracked_order.quote_asset,\n tracked_order.fee_asset,\n tracked_order.executed_amount_base,\n tracked_order.executed_amount_quote,\n tracked_order.fee_paid,\n tracked_order.order_type))\n self.stop_tracking_order(tracked_order.client_order_id)\n else:\n self.trigger_event(MarketEvent.OrderFailure,\n MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.stop_tracking_order(order_id)\n self.logger().network(\n f\"Error submitting {trade_type.name} order to Terra for \"\n f\"{amount} {trading_pair} \"\n f\"{price}.\",\n exc_info=True,\n app_warning_msg=str(e)\n )\n self.trigger_event(MarketEvent.OrderFailure,\n MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))",
"def new_order(self, signal, type):\n # self.client = bitmex.bitmex(test=True, api_key=self.strategy.api_key.key, api_secret=self.strategy.api_key.secret)\n if not self.strategy.live_trade:\n self.logger.info('Notice: Trading on testnet.')\n if self.scrape_only:\n return\n self.update_position()\n self.logger.info('New Order {} {}'.format(signal, type))\n self.logger.info(\"Current Position: {}\".format(self.p))\n self.logger.info(\"Canceling all orders\")\n self.client.Order.Order_cancelAll(symbol=self.strategy.symbol).result()\n self.trigers = []\n\n if type == 'entry' and signal == 'LONG' and self.p == 0:\n\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = self.strategy.contract_size\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing LONG entry Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n if self.strategy.stop_loss:\n triger = {\n \"side\": -1,\n \"price\": order['price'] - self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss trigger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit trigger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'SHORT' and self.p == 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = self.strategy.contract_size * -1\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing Short entry Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n if self.strategy.stop_loss:\n triger = {\n \"side\": 1,\n \"price\": order['price'] + self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss trigger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take profit trigger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'LONG' and self.p < 0:\n\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n p = self.p * -1\n oq = p + self.strategy.contract_size\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing LONG entry and Short Exit Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}-{}_{}\".format(self.strategy.id, 
signal, type, \"SHORT\",\n \"exit\"))\n if self.strategy.stop_loss:\n triger = {\n \"side\": -1,\n \"price\": order['price'] - self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss triger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit triger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'SHORT' and self.p > 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p + self.strategy.contract_size)\n ot = self.strategy.order_type\n\n try:\n self.logger.info(\"Placing Short entry and Long Exit Order of {}\".format(oq))\n order = self.execute_order(oq, ot,\n text=\"{} {}_{}-{}_{}\".format(self.strategy.id, signal, type, \"LONG\", \"exit\"))\n if self.strategy.stop_loss:\n triger = {\n \"side\": 1,\n \"price\": order['price'] + self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss triger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit triger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'exit' and signal == 'LONG' and self.p > 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p)\n try:\n self.logger.info(\"Placing Long Exit Order of {}\".format(oq))\n self.execute_order(oq, self.strategy.order_type, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'exit' and signal == 'SHORT' and self.p < 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p)\n try:\n self.logger.info(\"Placing Shot Exit Order of {}\".format(oq))\n self.execute_order(oq, self.strategy.order_type, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))",
"def place_order(self, **kwargs):\r\n create_options = self._generate_create_dict(**kwargs)\r\n return self.client['Product_Order'].placeOrder(create_options)",
"async def new_limit_order(side):\n symbol = App.config[\"symbol\"]\n now_ts = now_timestamp()\n\n #\n # Find limit price (from signal, last kline and adjustment parameters)\n #\n last_kline = App.analyzer.get_last_kline(symbol)\n last_close_price = to_decimal(last_kline[4]) # Close price of kline has index 4 in the list\n if not last_close_price:\n log.error(f\"Cannot determine last close price in order to create a market buy order.\")\n return None\n\n price_adjustment = App.config[\"trader\"][\"limit_price_adjustment\"]\n if side == SIDE_BUY:\n price = last_close_price * Decimal(1.0 - price_adjustment) # Adjust price slightly lower\n elif side == SIDE_SELL:\n price = last_close_price * Decimal(1.0 + price_adjustment) # Adjust price slightly higher\n\n price_str = round_str(price, 2)\n price = Decimal(price_str) # We will use the adjusted price for computing quantity\n\n #\n # Find quantity\n #\n if side == SIDE_BUY:\n # Find how much quantity we can buy for all available USD using the computed price\n quantity = App.quote_quantity # USD\n percentage_used_for_trade = App.config[\"trader\"][\"percentage_used_for_trade\"]\n quantity = (quantity * percentage_used_for_trade) / Decimal(100.0) # Available for trade\n quantity = quantity / price # BTC to buy\n # Alternatively, we can pass quoteOrderQty in USDT (how much I want to spend)\n elif side == SIDE_SELL:\n # All available BTCs\n quantity = App.base_quantity # BTC\n\n quantity_str = round_down_str(quantity, 6)\n\n #\n # Execute order\n #\n order_spec = dict(\n symbol=symbol,\n side=side,\n type=ORDER_TYPE_LIMIT, # Alternatively, ORDER_TYPE_LIMIT_MAKER\n timeInForce=TIME_IN_FORCE_GTC,\n quantity=quantity_str,\n price=price_str,\n )\n\n if App.config[\"trader\"][\"no_trades_only_data_processing\"]:\n print(f\"NOT executed order spec: {order_spec}\")\n else:\n order = execute_order(order_spec)\n\n #\n # Store/log order object in our records (only after confirmation of success)\n #\n App.order = order\n App.order_time = now_ts\n\n return order",
"def create_order(self, oid, price, context=None, expires=None):\n expires = absdatetime(expires, default=self.EXP_ORDER)\n orders = self.request(\n 'post',\n safeformat('objects/{:int}/orders/', oid),\n json.dumps({\n 'price': price,\n 'order_expires': expires.isoformat(),\n 'context': context\n }))\n orderid = self._extract_id_from_batch_response(orders)\n return {\n 'id': orderid,\n 'expires': expires,\n 'context': context,\n 'object': {\n 'id': oid,\n 'price': price\n },\n 'urls': {\n 'redirect': urljoin(self.usr_frontend, '/orders/%d/add/' % orderid),\n 'popup': urljoin(self.usr_frontend, '/popup/orders/%d/add/' % orderid)\n }\n }",
"async def create_order(self, symbol: str, side: str, price: str, amount: str, urgency: int = 0) -> dict:\n base, quote = symbol.lower().split('_')\n spendable = quote if side == 'buy' else base\n params = {\n 'pair': symbol,\n 'type': side,\n 'price': price,\n spendable: amount\n }\n return await self._safe_call(urgency, self._request_private, 'trade', params)",
"def map_to_order(self, raw_order: HitbtcRawOrderModel) -> HitbtcOrderModel:\n\n id_ = raw_order[\"id\"]\n client_order_id = raw_order[\"clientOrderId\"]\n symbol = raw_order[\"symbol\"]\n side = raw_order[\"side\"]\n status = raw_order[\"status\"]\n type_ = raw_order[\"type\"]\n time_in_force = raw_order[\"timeInForce\"]\n quantity = Decimal(raw_order[\"quantity\"])\n price = Decimal(raw_order[\"price\"])\n cum_quantity = Decimal(raw_order[\"cumQuantity\"])\n created_at = raw_order[\"createdAt\"]\n updated_at = raw_order[\"updatedAt\"]\n post_only = raw_order[\"postOnly\"]\n raw_avg_price = raw_order.get(\"avgPrice\")\n avg_price = Decimal(\n raw_avg_price) if raw_avg_price is not None else raw_avg_price\n raw_stop_price = raw_order.get(\"stopPrice\")\n stop_price = Decimal(\n raw_stop_price) if raw_stop_price is not None else raw_stop_price\n expire_time = raw_order.get(\"expireTime\")\n raw_trades_report = raw_order.get(\"tradesReport\")\n trades_report = (self.map_to_symbol_trades(\n raw_trades_report) if raw_trades_report is not None else raw_trades_report)\n\n res = HitbtcOrderModel(\n id=id_,\n client_order_id=client_order_id,\n symbol=symbol,\n side=side,\n status=status,\n type=type_,\n time_in_force=time_in_force,\n quantity=quantity,\n price=price,\n cum_quantity=cum_quantity,\n created_at=created_at,\n updated_at=updated_at,\n post_only=post_only,\n avg_price=avg_price,\n stop_price=stop_price,\n expire_time=expire_time,\n trades_report=trades_report)\n\n return res",
"def create_aws_order(lengow_order, order_id):\n if (\n lengow_order[\"marketplace\"] in not_allowed_marketplaces or\n lengow_order[\"order_status\"][\"lengow\"] != \"processing\"):\n return\n\n # Data that will be sent to AWS CreateFulfillmentOrder method\n aws_order_data = {}\n\n aws_order_data[\"SellerFulfillmentOrderId\"] = order_id\n aws_order_data[\"DisplayableOrderId\"] = order_id\n\n # The date of the fulfillment order. Displays as the order date in customer-facing materials\n # such as the outbound shipment packing slip.\n aws_order_data[\"DisplayableOrderDateTime\"] = \"{date}T{time}\".format(\n date=lengow_order[\"order_purchase_date\"], time=lengow_order[\"order_purchase_heure\"])\n\n # Order-specific text that appears in customer-facing materials such as the outbound shipment\n # packing slip.\n aws_order_data[\"DisplayableOrderComment\"] = order_comment\n\n aws_order_data[\"ShippingSpeedCategory\"] = (\n \"Expedited\" if float(lengow_order[\"order_shipping\"]) > 0 else \"Standard\")\n\n # The destination address for the fulfillment order.\n address = lengow_order[\"delivery_address\"]\n country_iso = address[\"delivery_country_iso\"]\n aws_order_data[\"DestinationAddress\"] = {\n \"Name\": \"{firstname} {lastname}\".format(\n firstname=address[\"delivery_firstname\"].title(),\n lastname=address[\"delivery_lastname\"]).title(),\n \"Line1\": address[\"delivery_address\"].title(),\n \"Line2\": address[\"delivery_address_2\"].title(),\n \"Line3\": address[\"delivery_address_complement\"].title(),\n \"City\": address[\"delivery_city\"].title(),\n \"StateOrProvinceCode\": address[\"delivery_zipcode\"],\n \"PostalCode\": address[\"delivery_zipcode\"],\n \"CountryCode\": country_iso,\n \"PhoneNumber\": address[\"delivery_phone_mobile\"]\n }\n # As specified in AWS documentation, don\"t include city if country is JP.\n if country_iso == \"JP\":\n aws_order_data[\"DestinationAddress\"].pop(\"City\")\n\n aws_order_data[\"Items\"] = []\n for product in lengow_order[\"cart\"][\"products\"]:\n item = {\n \"SellerSKU\": product[\"sku\"],\n \"SellerFulfillmentOrderItemId\": product[\"sku\"],\n \"Quantity\": product[\"quantity\"],\n \"PerUnitDeclaredValue\": {\n \"Value\": product[\"price_unit\"], \"CurrencyCode\": lengow_order[\"order_currency\"]\n }\n }\n aws_order_data[\"Items\"].append(item)\n\n # aws_order_data[\"NotificationEmailList\"] = [address[\"delivery_email\"]]\n\n mws_shipments = mws.OutboundShipments(\n access_key=MWS_ACCESS_KEY, secret_key=MWS_SECRET_KEY,\n account_id=MWS_MERCHANT_ID, region=\"FR\")\n\n data = dict(Action=\"CreateFulfillmentOrder\")\n data.update(enumerate_data(aws_order_data))\n return mws_shipments.make_request(data, \"POST\")",
"def modify_order(\n self,\n order_id,\n order_type: str = None,\n duration: str = None,\n limit_price: float = None,\n stop_price: float = None,\n account_id=None,\n ) -> OrderDetails:\n if account_id is None:\n account_id = self.default_account_id\n url = f\"/v1/accounts/{account_id}/orders/{order_id}\"\n params = {\n \"type\": order_type,\n \"duration\": duration,\n \"price\": limit_price,\n \"stop\": stop_price,\n }\n params = {k: v for k, v in params.items() if v is not None}\n data = self.put(url, params)\n res = OrderAPIResponse(**data)\n return res.order",
"def create_order(self, openid, **kwargs):\n data = kwargs\n data['openid'] = openid\n\n self._check_create_order(**data)\n\n order_data = {\n 'appid': self.appid,\n 'mch_id': self.mch_id,\n 'nonce_str': uuid.uuid4().hex,\n 'body': kwargs.get('title'),\n 'out_trade_no': kwargs.get('order_uid'),\n 'total_fee': int(kwargs.get('total') * 100),\n 'notify_url': kwargs.get('notify_url'),\n 'spbill_create_ip': kwargs.get('ip'),\n 'trade_type': kwargs.get('trade_type')\n }\n fields = ('detail', 'time_expire', 'time_start', 'openid',\n 'product_id')\n for field in fields:\n if field in kwargs:\n order_data[field] = kwargs[field]\n\n return self.send_data(self.UNIFIED_ORDER_URL, **order_data)",
"def place_instruction(order_type, selection_id, side, handicap=None, limit_order=None, limit_on_close_order=None,\n market_on_close_order=None, customer_order_ref=None):\n\n args = locals()\n return {\n to_camel_case(k): v for k, v in args.items() if v is not None\n }",
"def genMarketOrder(self, orderDict):\n try:\n # validate market order\n result = self.validateMarketOrder(orderDict)\n if result == 1:\n # pay for order\n result = self.payForMarketOrder(orderDict)\n if result == 1:\n # valid, process\n id = self.getNextID(self.marketOrders)\n d = {'id':id}\n for key, value in orderDict.iteritems():\n d[key] = value\n d['round'] = self.currentRound\n myMarketOrder = order.MarketOrder(d)\n self.marketOrders[id] = myMarketOrder\n return result\n except:\n return 'galaxy->genMarketOrder error'",
"def create_order(order):\n response = requests.post(\n settings.SHOPIFY_ORDERS_URL,\n auth=(settings.SHOPIFY_API_KEY, settings.SHOPIFY_PASSWORD),\n json={\"order\": order},\n )\n if response.status_code != 201:\n raise ShopifyResponseException(\n f\"The Shopify API returned an invalid response:\\n{response.text}\"\n )",
"def create_po(self):\n pricelist_id = 1\n partner_id = self.ref('base.res_partner_1')\n order = self.order_obj.create({\n 'partner_id': partner_id,\n 'location_id': self.ref('stock.stock_location_stock'),\n 'pricelist_id': pricelist_id})\n return order"
]
| [
"0.8001797",
"0.721019",
"0.7111651",
"0.6973866",
"0.6950393",
"0.69217306",
"0.6838977",
"0.68315655",
"0.6824298",
"0.6806495",
"0.67919177",
"0.67190963",
"0.6627814",
"0.6626678",
"0.659759",
"0.653215",
"0.6509903",
"0.64288145",
"0.6380781",
"0.63743025",
"0.6343502",
"0.6337219",
"0.62978",
"0.628784",
"0.6234044",
"0.62216043",
"0.62172407",
"0.62000155",
"0.61974436",
"0.61896336"
]
| 0.75655824 | 1 |
Returns the url to access a particular blogauthor instance. | def get_absolute_url(self):
return reverse('blogs-by-author', args=[str(self.id)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\t\treturn reverse('author-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n return reverse('author-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('authors', args=[str(self.id)])",
"def detail_url(author_id):\n return reverse('author-books', args=[author_id])",
"def get_authors_page(author_name):\n return 'http://techcrunch.com/author/' + slugify(author_name)",
"def scopus_author_link(self):\n return self._json['coredata'].get('link', [])[1].get('@href')",
"def post_url(self, post):\n post_id = post.get('id')\n if not post_id:\n return None\n author_id = post.get('from', {}).get('id')\n if author_id:\n return 'https://facebook.com/%s/posts/%s' % (author_id, post_id)\n else:\n return self.object_url(post_id)",
"def get_absolute_url(self):\n return reverse('blog', args=[str(self.id)])",
"def coauthor_link(self):\n return self._json['coredata'].get('link', [])[3].get('@href')",
"def get_author(self):\n return self.author",
"def get_author(self):\n return self.author",
"def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name",
"def author(self) -> str:\n return pulumi.get(self, \"author\")",
"def get_absolute_url(self):\n return reverse('blog-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return urls.reverse('blog:article', args=[self.category.slug, self.slug])",
"def author(self) -> str:\n return self._author",
"def author(self) -> str:\n return self._author",
"def author(self):\r\n return self.user",
"def getAuthorUri(self, authorName: str):\n matches = self.suggestAuthors(authorName)\n if matches is not None and isinstance(matches, list) and len(matches) > 0 and \"uri\" in matches[0]:\n return matches[0][\"uri\"]\n return None",
"def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])",
"def author(self):\n return self._data.get('author', None)",
"def author(self):\n return self._author",
"def author(self):\n return self._author",
"def author(self):\n return self._author",
"def url(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"url\" # pylint: disable=unsubscriptable-object\n )",
"def get_absolute_url(self):\n return reverse('blogpost-detail', args=[str(self.id)])"
]
| [
"0.72703695",
"0.72703695",
"0.72703695",
"0.72703695",
"0.7250402",
"0.7206668",
"0.70338786",
"0.6936608",
"0.6820081",
"0.67791575",
"0.647343",
"0.6456801",
"0.6372989",
"0.6371024",
"0.6371024",
"0.6346951",
"0.6312395",
"0.62948346",
"0.62751096",
"0.62518233",
"0.62518233",
"0.6206816",
"0.6193303",
"0.6171172",
"0.6143403",
"0.61250705",
"0.61250705",
"0.61250705",
"0.6086873",
"0.602464"
]
| 0.8021804 | 0 |
Sample frames every X seconds, resize the frame and add it to an numpy array | def sample_frames(frame_dir, fps, visualize_sample_rate):
visualize_every_x_frames = visualize_sample_rate * int(fps)
sampled_frames = np.empty((0, 3, IMG_DIM, IMG_DIM), dtype=np.float32) # B, C, H, W
i = 0
for file in sorted(os.listdir(frame_dir)):
if i % visualize_every_x_frames == 0:
img = skimage.img_as_float(skimage.io.imread(os.path.join(frame_dir, file))).astype(np.float32)
img = skimage.transform.resize(img, (IMG_DIM, IMG_DIM)) # H, W, C
img = img.swapaxes(1, 2).swapaxes(0, 1) # C, H, W
sampled_frames = np.append(sampled_frames, np.array([img]), axis=0)
i += 1
logger.debug("total number of frames: {}".format(i))
return sampled_frames | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_into_frames(filename_raw, thr_var_per_event=5e-4, downsampling_factor=2, disable_display=False,\n filename_output_video=None):\n\n assert downsampling_factor == int(downsampling_factor), \"Error: downsampling_factor must be an integer\"\n assert downsampling_factor >= 0, \"Error: downsampling_factor must be >= 0\"\n\n mv_adaptive_rate_iterator = AdaptiveRateEventsIterator(input_path=filename_raw,\n thr_var_per_event=thr_var_per_event,\n downsampling_factor=downsampling_factor)\n\n height, width = mv_adaptive_rate_iterator.get_size()\n\n if filename_output_video == None:\n video_process = None\n else:\n assert not os.path.exists(filename_output_video)\n video_process = FFmpegWriter(filename_output_video)\n\n if video_process or not disable_display:\n img_bgr = np.zeros((height, width, 3), dtype=np.uint8)\n\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n\n for events in mv_adaptive_rate_iterator:\n assert events.size > 0\n start_ts = events[0][\"t\"]\n end_ts = events[-1][\"t\"]\n print(\"frame: {} -> {} delta_t: {} fps: {} nb_ev: {}\".format(start_ts, end_ts,\n end_ts - start_ts,\n 1e6 / (end_ts - start_ts),\n events.size))\n if video_process or not disable_display:\n img = events_to_diff_image(events, sensor_size=(height, width))\n img_bgr[...] = 0\n img_bgr[img < 0, 0] = 255\n img_bgr[img > 0, 1] = 255\n\n chunk_start_ts = events[0][\"t\"]\n chunk_end_ts = events[-1][\"t\"]\n delta_t_frame = chunk_end_ts - chunk_start_ts + 1\n frame_txt = \"ts: {} -> {} delta_t: {} fps: {} (nb_ev): {}\".format(chunk_start_ts, chunk_end_ts,\n delta_t_frame,\n int(1.e6/delta_t_frame),\n events.size)\n img_bgr[20:45, ...] = 0\n cv2.putText(img_bgr,\n frame_txt,\n (int(0.05 * width), 40),\n cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 200, 100))\n\n if video_process:\n video_process.writeFrame(img_bgr.astype(np.uint8)[..., ::-1])\n if not disable_display:\n cv2.imshow(\"img\", img_bgr)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if video_process:\n video_process.close()\n if not disable_display:\n cv2.destroyAllWindows()",
"def animate_with_numpy_frame_sequence(self, numpy_frame_sequence, frames_per_second=15):\n\n sleep_time = 1/frames_per_second\n for animation_frame in numpy_frame_sequence:\n tic = time.time()\n self.set_image_from_numpy_array(animation_frame)\n self.update()\n toc = time.time()\n frame_generation_time = toc-tic\n if frame_generation_time < sleep_time:\n new_sleep_time = sleep_time - frame_generation_time\n time.sleep(new_sleep_time)\n else:\n pass",
"def micsample(listentime):\n frames, sampling_rate = record_audio(listentime)\n samples = np.hstack([np.frombuffer(i, np.int16) for i in frames])\n times = np.arange(samples.size) / sampling_rate\n return samples, times",
"def _subsample_frames(self, video_clip_frames):\n subsampled_frames = []\n current_ix = 0\n step_size = len(video_clip_frames) / float(config.RGB_N_FRAMES)\n for _ in range(config.RGB_N_FRAMES):\n frame = video_clip_frames[int(current_ix)]\n subsampled_frames.append(frame)\n current_ix += step_size\n\n return np.array(subsampled_frames)",
"def rec_one_shot(self, sec, file_name=None):\n self.__open_noncallback_stream()\n frames = []\n for i in range(int(self.RATE / self.CHUNK * sec)):\n data = self.stream.read(self.CHUNK)\n data = np.fromstring(data, dtype=np.int16)\n frames.append(data)\n self.stream.stop_stream()\n if file_name is not None:\n with wave.open(file_name, 'wb') as wav_file:\n wav_file.setnchannels(self.CHANNELS)\n wav_file.setsampwidth(self.recorder.get_sample_size(self.FORMAT))\n wav_file.setframerate(self.RATE)\n wav_file.writeframes(b''.join(frames))\n frame = np.concatenate(frames, 0)\n self.stop_streaming()\n return frame",
"def make_frame(t):\r\n while world['t'] < hours_per_second*t:\r\n update(world)\r\n return world_to_npimage(world)",
"def get_sample():\n global counter\n counter = counter + 1 \n \n # capture frames from the camera;\n \n frame=picamera.array.PiRGBArray(camera)\n camera.capture(frame, 'bgr', use_video_port=True)\n\t# grab the raw NumPy array representing the image, then initialize the timestamp\n\t# and occupied/unoccupied text\n image = frame.array\n preprocessed_image_buffer=preprocess(image)\n\t# show the frame\n cv2.imshow(\"Frame0\", image)\n key = cv2.waitKey(1) & 0xFF\n return preprocessed_image_buffer",
"def make_frames(signal, sampling_rate, frame_size=0.025, frame_overlap=0.015):\n frame_length = int(round(frame_size * sampling_rate)) #seconds to samples\n frame_step = int(round((frame_size - frame_overlap) * sampling_rate)) #seconds to samples\n #signal_length = len(emphasized_signal)\n\n nf = abs(len(signal) - frame_length)/float(frame_step)\n num_frames = 0\n if int(nf) < 1:\n num_frames = 1 # Make sure that we have at least 1 frame\n else:\n num_frames = int(np.ceil(nf))\n\n padding = np.zeros((num_frames * frame_step) + frame_length - len(signal)) #padding to be added at the end of the signal\n# padded_signal = np.concatenate((signal, padding), axis = None)\n padded_signal = np.zeros((len(padding)+len(signal)))\n np.put(padded_signal, list(range(len(signal))), signal) #put original signal in the front\n np.put(padded_signal, list(range(len(signal), len(padded_signal))), padding) #put padding at the back after signal\n\n indices = np.tile(np.array(range(0, frame_length)), (num_frames, 1)) + np.tile(np.array(range(0, num_frames * frame_step, frame_step)), (frame_length, 1)).T\n frames = padded_signal[indices.astype(np.int32, copy=False)]\n\n #Windowing\n frames = frames * hamming(frame_length)\n return frames",
"def sample_generator(self, data, index):\r\n out = []\r\n frames = data[\"video\"]\r\n for speed_idx, speed in enumerate(self.speed_set):\r\n # generate all the samples according to the speed set\r\n num_input_frames, h, w, c = frames.shape\r\n frame_idx = random.randint(0, num_input_frames-1)\r\n selected_frame = frames[frame_idx] # H, W, C\r\n\r\n # standardize the frame size\r\n if self.cfg.PRETRAIN.FRAME_SIZE_STANDARDIZE_ENABLE: \r\n selected_frame = self.frame_size_standardize(selected_frame)\r\n \r\n # generate the sample index \r\n h, w, c = selected_frame.shape\r\n speed_x, speed_y = speed\r\n start_x, end_x = self.get_crop_params(speed_x/(self.num_speeds//2), w)\r\n start_y, end_y = self.get_crop_params(speed_y/(self.num_speeds//2), h)\r\n intermediate_x = (torch.linspace(start_x, end_x, self.num_frames).long()).clamp_(0, w-self.crop_size)\r\n intermediate_y = (torch.linspace(start_y, end_y, self.num_frames).long()).clamp_(0, h-self.crop_size)\r\n \r\n frames_out = torch.empty(\r\n self.num_frames, self.crop_size, self.crop_size, c, device=frames.device, dtype=frames.dtype\r\n )\r\n\r\n for t in range(self.num_frames):\r\n frames_out[t] = selected_frame[\r\n intermediate_y[t]:intermediate_y[t]+self.crop_size, intermediate_x[t]:intermediate_x[t]+self.crop_size, :\r\n ]\r\n\r\n # performs augmentation on the generated image sequence\r\n if self.transform is not None:\r\n frames_out = self.transform(frames_out)\r\n \r\n # applies static mask\r\n if self.static_mask_enable:\r\n frames_out = self.static_mask(frames_out)\r\n out.append(frames_out)\r\n out = torch.stack(out)\r\n data[\"video\"] = out\r\n return data",
"def data_play(Y, visualizer, frame_rate=30):\r\n \r\n\r\n for y in Y:\r\n visualizer.modify(y[None, :])\r\n time.sleep(1./float(frame_rate))",
"def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t",
"def add_sample(self, time, value):\n\t\tif self.buf_full:\n\t\t\tself.buf.pop(0)\n\t\t\n\t\tself.buf.append((time, value))",
"def query_frame(self):\n x, y = N.ogrid[0:self._resolution[1], 0:self._resolution[0]]\n x0, y0 = int(self._resolution[1] / 2), int(self._resolution[0] / 2)\n r = N.hypot(x - x0, y - y0)\n w0 = 75.0\n self.frame = N.array(N.exp(-r ** 2 / w0 ** 2) * 60000, dtype=N.uint16)\n self.frame += N.random.uniform(low=0, high=5535, size=self._resolution[::-1])",
"def replay_sequence(images_path):\n sequence = [(parse_time(f), parse_line(f)) for f in sorted(glob.glob(os.path.join(images_path, '????-*.jpg'))) if 'edges' in f]\n start_time = datetime.now()\n for frame_time, line in sequence:\n frame_time_str = frame_time.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n data_sample = (frame_time_str, line)\n frame_draw_time = start_time + (frame_time - sequence[0][0])\n sleep_until(frame_draw_time)\n print(repr(data_sample))\n yield frame_time, line",
"def test_frame_times_framesync():\n my_file_struct = FileStruct(os.path.join(\"fixtures\", \"chirp.mp3\"))\n pcp = PCP(my_file_struct, FeatureTypes.framesync, sr=11025)\n times = pcp.frame_times\n assert(isinstance(times, np.ndarray))",
"def frame_generator(frame_duration_ms, audio, sample_rate):\n n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n offset = 0\n timestamp = 0.0\n duration = (float(n) / sample_rate) / 2.0\n while offset + n < len(audio):\n yield Frame(audio[offset:offset + n], timestamp, duration)\n timestamp += duration\n offset += n",
"def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))",
"def run(self):\n\n last_mean = 0\n st = time.time()\n sframe = 0\n while True:\n if time.time()-1 > st:\n st = time.time()\n #print 'fps', self.frame_counter - sframe\n sframe = self.frame_counter\n\n self.frame_counter += 1\n frame = next(self.frame_generator)\n\n xMax = frame.shape[1]\n yMax = frame.shape[0]\n\n capture_area = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n mean, stddev = cv2.meanStdDev(capture_area)\n mean = mean[0][0]\n stddev = stddev[0][0]\n\n if abs(mean-last_mean) > ACTIVATE_MEAN_DIFF:\n self.wakeup()\n\n last_mean = mean\n\n faces = []\n if abs(self.frame_counter - self.last_face_frame) < 20 or self.frame_counter % 5 == 0:\n faces = faceCascade.detectMultiScale(\n capture_area,\n scaleFactor=1.1,\n minNeighbors=MIN_NEIGHBOURS,\n minSize=(30, 30)\n )\n\n if len(faces) == 1:\n self.last_face_frame = self.frame_counter\n face = faces[0]\n x, y, w, h = face\n\n x1 = x\n x2 = x+w\n y1 = y\n y2 = y+h\n\n # expand_area\n width_plus = int(w/4.0)\n height_plus = int(h/4.0)\n x1 -= width_plus\n x2 += width_plus\n y1 -= height_plus\n y2 += height_plus\n\n y_max, x_max = frame.shape[:2]\n\n x1 = max(0, x1)\n y1 = max(0, y1)\n x2 = min(x_max, x2)\n y2 = min(y_max, y2)\n\n colour_face = frame[y1:y2, x1:x2]\n colour_face = np.copy(colour_face)\n\n face_obj = Face(face, colour_face, self.frame_counter)\n self.capture_face(face_obj)\n\n #st = time.time()\n bm = get_best_match(colour_face)\n match_person = bm\n if match_person is not None:\n self.found_people[match_person] += 1\n\n\n #et = time.time()\n #print et-st\n #result = self.pool.apply_async(get_best_match, (colour_face,))\n #self.pool_results.append(result)\n\n if len(self.pool_results) > 0:\n print(len(self.pool_results))\n res = self.pool_results[0]\n try:\n match_person = res.get()\n print('match here', match_person)\n except TimeoutError:\n pass\n else:\n self.pool_results.popleft()\n if match_person is not None:\n self.found_people[match_person] += 1\n\n # do flush if we have enough frames\n if len(self.capture_buffer) >= FRAMES_COUNT_TO_SAVE:\n self.flush_capture_buffer()\n\n # clear buffer if we never got enough frames\n if len(self.capture_buffer) > 0:\n if self.frame_counter - self.capture_buffer[-1].frame_counter > MAX_FRAMES_BETWEEN_CAPTURES:\n self.flush_capture_buffer()\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), DRAWING_COLOR, 15)\n\n # Display the resulting frame\n frame = cv2.flip(frame, flipCode=1)\n\n if self.draw_wanted_start_frame > self.frame_counter - TEXT_DISPLAY_TIME:\n cv2.putText(frame, \"Thanks!\", (150,250), cv2.FONT_HERSHEY_DUPLEX, 8.0, DRAWING_COLOR, 14)\n if self.thank_person is not None:\n cv2.putText(frame, self.thank_person, (150,450), cv2.FONT_HERSHEY_DUPLEX, 6.0, DRAWING_COLOR, 12)\n\n # When the screen goes off, we hang on waitKey, so don't do it if we haven't done a wakeup recently\n # Also no point in updating the screen if it is off.\n if self.last_wakeup + 40 > time.time():\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Display the resulting frame\n cv2.imshow('Video', frame)",
"def audio_resample(self, data):\n\n data = np.asarray(data)\n if data.ndim <= 1:\n logging.log_first_n(logging.INFO,\n 'Converting %s sound from shape %s to 2-D' %\n (self._name, data.shape), 5)\n data = np.reshape(data, (-1, 1))\n if data.shape[1] > data.shape[0]:\n logging.log_first_n(logging.INFO,\n 'Transposing %s sound from shape %s' %\n (self._name, data.shape), 5)\n data = np.transpose(data)\n\n # Get half window size in seconds.\n half_window_size = 0.5 * self._window / self._fs_out\n\n # Concatenate and update buffer.\n if self._buff is not None:\n data = np.concatenate((self._buff, data), axis=0)\n tau = self._buff.shape[0]\n else:\n tau = 0\n self._buff = data[-int(self._fs_in * half_window_size):, :]\n\n # Get i/o data dimensions.\n frames_in = data.shape[0]\n frames_out = int(round((frames_in - tau) / self._fs_in * self._fs_out))\n\n # Resample data via moving average.\n data_out = np.zeros((frames_out, data.shape[1]))\n if self._fs_out < self._fs_in or self._window > 1:\n for i in range(frames_out):\n t = float(i) / self._fs_out # center of window in seconds\n t1 = int(max(0, round(self._fs_in * (t - half_window_size)) + tau))\n t2 = int(min(frames_in,\n round(self._fs_in * (t + half_window_size)) + tau))\n data_out[i, :] = np.mean(data[t1:t2, :], axis=0)\n\n else:\n\n data_out = data\n\n return data_out",
"def frame_ndarray(a, frame_size, hop_size):\n n = a.shape[0]\n nframes = 1 + (n - frame_size) // hop_size\n other_dim = a.shape[1:]\n if(nframes < 0):\n nframes = 0\n b = np.zeros([nframes, frame_size] + list(other_dim), dtype=a.dtype)\n\n for i in range(nframes):\n b[i] = a[i * hop_size: i * hop_size + frame_size]\n return b",
"def frame_generator(wav_data, timestamp_offset):\n n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n offset = 0\n timestamp = timestamp_offset\n duration = (float(n) / sample_rate) / 2.0\n while offset + n < len(wav_data):\n yield Frame(wav_data[offset:offset + n], timestamp, duration)\n timestamp += duration\n offset += n",
"def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()",
"def samples_per_frame(self, value):\n self._samples_per_frame = value",
"def stream_timelapse(path):\n parts = path.split('.')[0].split('_')\n count = int(parts[2])\n filename = parts[0] + '_{0:03d}_{1:03d}.jpg'\n for i in range(count):\n frame = open(filename.format(i, count), 'rb').read()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\nContent-Length: ' + \\\n str(len(frame)).encode() + b'\\r\\n\\r\\n' + frame + b'\\r\\n'\n time.sleep(0.5)",
"def populate_buffer(self, num_transitions):\n while len(self.replay_buffer) < self.buffer_sample_size:\n self.play(num_transitions)",
"def resample(self):\n pass",
"def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]",
"def make_frame(t):\r\n mlab.view(360*t/duration, 90) # camera angle\r\n return mlab.screenshot(antialiased=True) # return a RGB image\r",
"def temporal_sampling(\n num_frames, start_idx, end_idx, num_samples, start_frame=0\n):\n index = torch.linspace(start_idx, end_idx, num_samples)\n index = torch.clamp(index, 0, num_frames - 1).long()\n return start_frame + index",
"def stream_frames(video_capture):"
]
| [
"0.6244921",
"0.61682856",
"0.6129182",
"0.6064074",
"0.6050894",
"0.6037316",
"0.6022225",
"0.59974605",
"0.597372",
"0.5961255",
"0.5939521",
"0.5893111",
"0.58840185",
"0.58180964",
"0.57428",
"0.57267004",
"0.57258826",
"0.57146883",
"0.5705632",
"0.5685429",
"0.56762886",
"0.56733954",
"0.56509554",
"0.5613936",
"0.56025153",
"0.5592551",
"0.55575097",
"0.5531955",
"0.5527599",
"0.55245644"
]
| 0.6889909 | 0 |
Upload the extracted frames and the preview image to S3 | def load_data_to_s3(frame_dir, preview_file_name, s3_bucket, frame_prefix, upload_frames, video_preview_prefix,
working_dir):
if upload_frames:
count = 0
frames_s3_prefix = frame_prefix + frame_dir.split('/')[-1]
start = time.time()
for frame in os.listdir(frame_dir):
# this will upload the frame in vid_a/vid_a_000001.jpg to s3://bucket/frame-prefix/vid_a/vid_a_000001.jpg
frame_local_path = os.path.join(frame_dir, frame)
frame_s3_key = "{}/{}".format(frames_s3_prefix, frame)
s3.Bucket(s3_bucket).upload_file(frame_local_path, frame_s3_key)
count += 1
if count % REPORT_STATUS == 0:
logger.info("uploaded {} frames. ".format(count))
logger.info("took {:10.4f} seconds to upload {} frames".format(time.time() - start, REPORT_STATUS))
start = time.time()
logger.info("uploaded {} frames to s3://{}/{}".format(count, s3_bucket, frames_s3_prefix))
if preview_file_name is not None:
preview_file_s3_key = video_preview_prefix + preview_file_name
s3.Bucket(s3_bucket).upload_file(os.path.join(working_dir, preview_file_name), preview_file_s3_key)
logger.info("uploaded preview to s3://{}/{}".format(s3_bucket, preview_file_s3_key)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def project_uploader():\n if not current_app.config['S3_KEY']:\n return ''\n if len(request.files) == 0:\n return 'No files selected'\n img = request.files['file']\n if not img or img.filename == '':\n return 'No filename'\n ext = img.filename.split('.')[-1].lower()\n if ext not in ACCEPTED_TYPES:\n return 'Invalid format (allowed: %s)' % ','.join(ACCEPTED_TYPES)\n # generate a simpler filename\n keepcharacters = ('.', '_')\n safe_filename = img.filename.replace(' ', '_')\n safe_filename = \"\".join(\n c for c in safe_filename\n if c.isalnum() or c in keepcharacters).rstrip()\n if not safe_filename:\n safe_filename = \"\".join(random_password(8), '.', ext)\n # use random subfolder inside user id folder\n filename = '/'.join([\n str(current_user.id),\n random_password(24),\n safe_filename\n ])\n # with tempfile.TemporaryDirectory() as tmpdir:\n # img.save(path.join(tmpdir, filename))\n if 'S3_FOLDER' in current_app.config:\n s3_filepath = '/'.join([current_app.config['S3_FOLDER'], filename])\n else:\n s3_filepath = filename\n # print('Uploading to %s' % s3_filepath)\n if 'S3_ENDPOINT' in current_app.config:\n s3_obj = boto3.client(\n service_name='s3',\n endpoint_url=current_app.config['S3_ENDPOINT'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to endpoint %s' % current_app.config['S3_ENDPOINT'])\n else:\n s3_obj = boto3.client(\n service_name='s3',\n region_name=current_app.config['S3_REGION'],\n aws_access_key_id=current_app.config['S3_KEY'],\n aws_secret_access_key=current_app.config['S3_SECRET'],\n )\n #print('Uploading to region %s' % current_app.config['S3_REGION'])\n # Commence upload\n s3_obj.upload_fileobj(img,\n current_app.config['S3_BUCKET'],\n s3_filepath,\n ExtraArgs={'ContentType': img.content_type,\n 'ACL': 'public-read'}\n )\n return escape('/'.join([current_app.config['S3_HTTPS'], s3_filepath]))",
"def __upload(self, filename):\n # Save to local path\n save_img = self.__frame.copy()\n\n # Initialize the bucket for after usage\n image_blob = None\n\n # Make the Google Cloud Storage client\n # and set the storage path\n if self.__yaml[\"bucket\"] is not None:\n client = storage.Client()\n bucket = client.get_bucket(self.__yaml[\"bucket\"])\n image_blob = bucket.blob(filename)\n\n # Upload and save the image\n try:\n if self.__yaml[\"output_path\"] is not None:\n # Save image in local\n LOGGER.info(f\"Saved {filename} in local folder\", )\n path = os.path.sep.join((self.__yaml[\"output_path\"], filename))\n cv2.imwrite(path, save_img)\n\n # Upload to Google Cloud Storage\n # if the user set the \"bucket\" option\n if self.__yaml[\"bucket\"] is not None:\n image_blob.upload_from_filename(os.path.sep.join((self.__yaml[\"output_path\"],\n filename)),\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n elif self.__yaml[\"bucket\"] is not None:\n # Convert numpy array to bytes\n temp_file = Image.fromarray(cv2.cvtColor(save_img, cv2.COLOR_BGR2RGB))\n temp_file_bytes = io.BytesIO()\n temp_file.save(temp_file_bytes,\n format=\"JPEG\")\n\n # Read the bytes from beginning\n temp_file_bytes.seek(0)\n image_blob.upload_from_file(temp_file_bytes,\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n except Exception as error:\n # If errors occur, just print the error messages\n # and don't exit the program\n LOGGER.warning(error)",
"def _upload_to_s3(filename):\n if not app.config.get('UPLOAD_SCREENSHOTS_TO_S3', False):\n return\n\n import boto\n from boto.s3.key import Key\n conn = boto.connect_s3()\n b = conn.get_bucket(app.config['S3_BUCKET'])\n k = Key(b)\n k.key = '{}/{}'.format(\n app.config.get('S3_FILES_PREFIX', 'sleepypuppy'),\n filename\n )\n k.set_contents_from_filename(\n \"{}/{}\".format(\n app.config['UPLOAD_FOLDER'],\n filename\n )\n )\n os.remove(\n \"{}/{}\".format(\n app.config['UPLOAD_FOLDER'],\n filename\n )\n )",
"def image_upload(request, filename, format=None):\n file_obj = request.data['file']\n #filename = request.data['filename']\n #filename = request.FILES['filename'].name\n\n timestamp = datetime.now()\n extension = filename[-4:]\n newfilename = timestamp.strftime(\"%Y%m%d%H%M%S\") + extension\n savedir = 'img/' + newfilename\n contentType = filename[-3:]\n \n path = default_storage.save(savedir, ContentFile(file_obj.read()))\n success = True\n error = \"\"\n\n try:\n s3 = boto3.client('s3')\n #s3.upload_file(path, 'demo-poppag-s3-bucket-2020', savedir, ExtraArgs={'ContentType': \"image/\" + contentType, 'ACL': \"public-read\"})\n s3.upload_file(path, S3BUCKET, savedir, ExtraArgs={'ContentType': \"image/\" + contentType, 'ACL': \"public-read\"})\n\n print(\"Upload Successful\")\n success = True\n except FileNotFoundError:\n print(\"The file was not found\")\n error = \"This file was not found\"\n success = False\n except NoCredentialsError:\n error = \"Credentials not available\"\n success = False\n\n #path = default_storage.save('tmp/somename.png', ContentFile(request.FILES[\"filename\"]))\n #tmp_file = os.path.join(settings.MEDIA_ROOT, path)\n\n uploaded = \"https://demo-poppag-s3-bucket-2020.s3.us-west-1.amazonaws.com/\" + savedir\n\n return JsonResponse({ \"filename\" : uploaded, \"success\" : success, error : error})",
"def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)",
"def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)",
"def _upload(self, errors):\n if self.backup_bucket is None:\n return\n\n try:\n with open(\"%s/%s.tar.gz\"%(self.backup_path, self.name), 'r+') as f:\n s3upload.upload_to_s3(f,\n self.backup_bucket,\n \"%s/%s.tar.gz\"%(self.backup_id, self.name))\n\n # Cleaning up resources, since the upload was successful\n run(\"rm -f %s/%s.tar.gz\"%(self.backup_path, self.name))\n except Exception as e:\n logging.exception(e)\n errors.put(Exception(\"Error uploading %s server backup to S3\" % self.name))\n traceback.print_exc()",
"def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. \\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False",
"def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")",
"def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()",
"def upload(file_path, aws_path, access_key, secret_key) -> None:\n # bucket = \"dev-com-courtlistener-storage\"\n bucket = \"seals.free.law\"\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n )\n transfer = S3Transfer(client)\n if \".png\" in file_path:\n content_type = \"image/png\"\n else:\n content_type = \"image/svg+xml\"\n transfer.upload_file(\n file_path,\n bucket,\n aws_path,\n extra_args={\"ContentType\": content_type, \"ACL\": \"public-read\"},\n )\n print(f\"http://{bucket}.s3-us-west-2.amazonaws.com/{aws_path}\")",
"def upload(jsonfiles):\n # clear S3 Bucket\n bucket = S3Bucket()\n bucket.clear()\n for jsonfile in jsonfiles:\n filename = os.path.basename(jsonfile)\n key = build_key(filename)\n logging.info(\"%s %s\", filename, key)\n # store json in S3 object\n bucket.store(key, jsonfile)",
"def upload_to_s3(file_from_machine, bucket, file_to_s3):\n s3.upload_file(file_from_machine, bucket, file_to_s3)\n print(file_to_s3, \" : is upoaded to s3\")",
"def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise",
"def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))",
"def upload_images_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith(('.png', '.jpg', '.jpeg')):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)\n print(f,\"put\")",
"def _s3_origin(self):\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n s3_origin = pipeline_builder.add_stage('Amazon S3', type='origin')\n s3_origin.set_attributes(bucket=self.environments['aws'].s3_bucket_name,\n common_prefix='origin_data',\n prefix_pattern=f\"{DATASETS[self.dataset]['file_pattern']}\",\n data_format='DELIMITED',\n header_line='WITH_HEADER',\n delimiter_format_type='CUSTOM',\n delimiter_character=DATASETS[self.dataset]['delimiter'],\n number_of_threads=self.number_of_threads,\n max_batch_size_in_records=self.batch_size)\n return s3_origin, pipeline_builder",
"def upload_file_to_s3(self, file_data):\r\n\r\n file_key = file_data.name + datetime.now(UTC).strftime(\r\n xqueue_interface.dateformat\r\n )\r\n\r\n file_data.seek(0)\r\n s3_public_url = upload_to_s3(\r\n file_data, file_key, self.s3_interface\r\n )\r\n\r\n return s3_public_url",
"def upload_files_s3(files, bucket):\n \n print('************************************')\n print('Uploading files to s3 bucket...')\n print('************************************')\n \n for i in range(len(files)):\n upload_file_s3(files[i], bucket)\n \n print('************************************')\n print('Upload complete')\n print('************************************')",
"def store_to_s3():\n\n try:\n # establish aws/s3 connection\n s3 = boto3.client('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY\n )\n logger.info(\"S3 connection established!\")\n except Exception as e:\n logger.error('Fail to connect to aws s3. Please check your credentials!')\n logger.error(e)\n else:\n try:\n # upload local file to S3 bucket\n logger.info(\"Uploading {} to {} bucket as {}\".format(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename))\n s3.upload_file(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename)\n logger.info('File successfully uploaded to S3 bucket!')\n except FileNotFoundError:\n logger.error('File not found, pleas check the file path.')\n except Exception as e:\n logger.error(e)",
"def upload_example(request, object_id):\n \n example = get_object_or_404(Example, id=object_id)\n\n #\n # Create an options dictionary and pass it to uploadify_s3.UploadifyS3()\n # to set Uploadify options. See http://www.uploadify.com/documentation/.\n #\n # These options override any set in your project settings file.\n #\n # Here we specify the name of our JavaScript onComplete event handler.\n # See /media/js/uploadify_event_handlers.js.\n #\n\n options={\n 'onComplete' : 'uploadifyOnComplete',\n }\n\n #\n # The key_pattern set here will be sent to S3 as the 'key' form field\n # below. You can use it to set the key (e.g. name) of your uploaded objects. \n #\n \n key_pattern = 'example-%s/${filename}' % object_id\n \n #\n # Create a post_data dictionary and pass it to uploadify_s3.UploadifyS3()\n # to set any desired S3 POST variables.\n #\n # See:\n # http://docs.amazonwebservices.com/AmazonS3/latest/index.html?UsingHTTPPOST.html\n #\n # 'key' is the only required field that is not automatically set by DUS3. It\n # may be set here in the view or by setting the AWS_DEFAULT_KEY_PATTERN in\n # your project settings.\n #\n # Note: Some reports indicate that Flash/Uploadify has problems with HTTP \n # responses with an empty body. To avoid this, set a success_action_status\n # of 201, which forces S3 to return an XML document.\n #\n \n post_data={\n 'key': key_pattern,\n 'success_action_status': \"201\",\n }\n\n #\n # S3 uses conditions to validate the upload data. DUS3 automatically constructs\n # and includes conditions for most of the elements that will be sent to S3, but you \n # need to pass in conditions for:\n # - 'key', whose value changes at upload time. Note that the condition's value\n # must correspond to the key pattern set above.\n # - any extra elements set at upload time\n #\n # See the DUS3 README for more information on the conditions mapping:\n # https://github.com/sbc/django-uploadify-s3\n #\n\n conditions={\n 'key': {'op': 'starts-with', 'value': 'example-%s/' % object_id},\n }\n\n #\n # Initialize UploadifyS3 and call get_options_json() to get the Uploadify\n # JSON payload. \n #\n \n uploadify_options = uploadify_s3.UploadifyS3(\n uploadify_options=options,\n post_data=post_data, \n conditions=conditions\n ).get_options_json()\n\n #\n # Pass the Uploadify JSON payload to the file_upload template as extra_context.\n # \n \n return direct_to_template(request, 'examples/example_file_upload.html', extra_context={ 'example': example, 'uploadify_options': uploadify_options })",
"def push_file_to_s3(logger, app_config, s3_object_info, start_timing):\n import boto3\n s3_resource = boto3.resource('s3')\n logging.getLogger('boto3').addHandler(logger)\n s3_object = get_config_item(app_config, 's3_info.object_base') + \\\n '/' + s3_object_info['camera_name'] + '/' + \\\n s3_object_info['date_string'] + '/' + \\\n s3_object_info['hour_string'] + '/' + \\\n s3_object_info['img_type'] + '/' + \\\n s3_object_info['just_file']\n\n utc_ts = int(time.time() * 1000)\n\n object_metadata = {'camera': s3_object_info['camera_name'],\n 'camera_timestamp': str(utc_ts)}\n s3_resource.Object(get_config_item(app_config, 's3_info.bucket_name'),\n s3_object).put(Body=open(s3_object_info['file_name'], 'rb'),\n Metadata=object_metadata)\n totaltime = time.time() - start_timing\n logger.info(\"S3 Object: {} written to s3 in {} seconds.\".format(s3_object, totaltime))\n return utc_ts",
"def upload_bucket_samples():\n if not Config.region:\n logger.error(\"You must specify a region in order to scan a bucket target\")\n raise SystemExit(\n \"Target region not specified. Use -r or --region to specify the target region.\"\n )\n # Connect to S3 in our target region\n s_3 = boto3.resource(\"s3\", region_name=Config.region)\n # Connect to our target bucket\n bucket = s_3.Bucket(Config.target_dir)\n # Retrieve a list of all objects in the bucket\n summaries = bucket.objects.all()\n # Inform the user as this may take a minute\n logger.info(\"Assembling volume from target bucket (%s) for submission\", Config.target_dir)\n # Loop through our list of files, downloading each to memory then upload them to the Sandbox\n for item in summaries:\n # Grab the file name from the path\n filename = os.path.basename(item.key)\n # Teensy bit of witch-doctor magic to download the file\n # straight into the payload used for our upload to the Sandbox\n response = Samples.upload_sample(file_name=filename,\n file_data=io.BytesIO(\n bucket.Object(key=item.key).get()[\"Body\"].read()\n )\n )\n # Retrieve our uploaded file SHA256 identifier\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the upload payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we recognize the file when we're done\n Analyzer.files.append([filename, item.key, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)",
"def upload_image():\n s3client = __initiate_s3client()\n # Generate random UUIDs as image ids\n image_id = str(uuid.uuid4())\n # Generate pre-signed POST url\n url_info = s3client.generate_presigned_post(\n Bucket=env.get('bucket'),\n Key=image_id\n )\n return Response(status_code=201,\n headers={'Content-Type': 'application/json'},\n body={'status': 'success',\n 'upload_url': url_info,\n 'id': image_id})",
"def process_images():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n image = unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key'])\n s3.download_file(input_bucket_name, image, image)\n process_json(image)\n upload_image(image)\n cleanup_files(image)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()",
"def save_image_to_s3(filename, file):\n \n s3_path = f\"s3://shopifyimagerepository/{filename}\"\n \n s3_client.put_object(Body=file,\n Bucket=\"shopifyimagerepository\",\n Key=filename,\n ACL=\"public-read\")",
"def upload_example_done(request, object_id):\n\n example = get_object_or_404(Example, id=object_id)\n\n #\n # Grab the post data sent by our OnComplete handler and parse it. Set the fields \n # on our example object as appropriate and save.\n #\n\n if request.method == 'POST':\n post_response = request.POST['s3_response']\n location_rexp = '<Location>(.*)</Location>'\n example.file_url = unquote_plus(re.search(location_rexp, post_response).group(1))\n example.file_name = request.POST['file_obj[name]']\n example.file_size = request.POST['file_obj[size]']\n example.file_upload_speed = request.POST['upload_data[speed]']\n example.file_uploaded = datetime.now()\n example.save()\n \n print example.file_url\n print example.file_name\n print example.file_uploaded\n \n return HttpResponse((reverse('examples_example_detail', args=[example.id])))",
"def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)",
"def resize_and_upload(input, output, width, height):\n image = PIL.Image.open(input)\n if image.mode == \"CMYK\":\n image = image.convert(\"RGB\")\n if height:\n if height > image.size[1]:\n width = int(image.size[0] * (width / height))\n height = image.size[1]\n elif width > image.size[0]:\n height = int(image.size[1] * (height / width))\n width = image.size[0]\n # crop\n new_image = PIL.ImageOps.fit(\n image,\n (height, width),\n PIL.Image.ANTIALIAS\n )\n else:\n # if a bigger size of the original is asked, use original image\n if width > image.size[0]:\n new_image = image\n else:\n wpercent = (width / float(image.size[0]))\n height = int((float(image.size[1]) * float(wpercent)))\n new_image = image.resize((width, height), PIL.Image.ANTIALIAS)\n\n out_img = io.BytesIO()\n new_image.save(out_img, 'PNG')\n out_img.seek(0)\n uploaded = s3.Bucket(settings.AWS_STORAGE_BUCKET_NAME).put_object(\n ACL='public-read',\n Key=os.path.join(output, uuid.uuid4().hex + '.png'),\n Body=out_img,\n ContentDisposition='inline',\n ContentType='image/png',\n )\n if hasattr(settings, 'AWS_S3_CUSTOM_DOMAIN') and settings.AWS_S3_CUSTOM_DOMAIN:\n host = 'https://{}'.format(settings.AWS_S3_CUSTOM_DOMAIN)\n else:\n host = 'https://s3.{}.amazonaws.com/{}'.format(\n settings.AWS_S3_REGION_NAME,\n settings.AWS_STORAGE_BUCKET_NAME\n )\n return {\n 'href': '{}/{}'.format(\n host,\n uploaded.key,\n ),\n 'width': new_image.size[0],\n 'height': new_image.size[1],\n }",
"def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)"
]
| [
"0.6675343",
"0.6497543",
"0.6431096",
"0.6395432",
"0.63769287",
"0.6231634",
"0.621618",
"0.6210985",
"0.6208489",
"0.6164117",
"0.61071897",
"0.60737467",
"0.6061896",
"0.5999967",
"0.5952431",
"0.5931581",
"0.5929783",
"0.5909609",
"0.5908395",
"0.5890025",
"0.588783",
"0.58622026",
"0.5831534",
"0.5805657",
"0.5804053",
"0.576385",
"0.5745807",
"0.5740085",
"0.5727939",
"0.572234"
]
| 0.68508595 | 0 |
Get network adapter from environment or first loopback. | def get_network_adapter() -> network.NetworkAdapter:
if (ip := os.getenv('ref_ip')) is not None: # noqa: SIM112
return network.get_adapter_containing_ip(ip)
# get next available loopback adapter
return next(adapter for adapter in network.get_adapters() if adapter.is_loopback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['network_id'])",
"def remote_getLoopback(self):\r\n if not self._loopback:\r\n self._loopback = Loopback(self)\r\n\r\n return self._loopback",
"def get_net_adapter_type(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetNetAdapterType', self.handle)",
"def getLoopback(self):\r\n if not self._loopback:\r\n protocol = Protocol(self)\r\n self._loopback = LoopbackConnection(protocol)\r\n self.callRemote('getLoopback').chainDeferred(protocol)\r\n\r\n return self._loopback",
"def _find_adapter(self):\n required_interfaces = [GATT_MANAGER_IFACE, LE_ADVERTISING_MANAGER_IFACE]\n object_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, '/'), DBUS_OM_IFACE)\n objects = object_manager.GetManagedObjects()\n\n for object_path, properties in objects.items():\n missing_interfaces = [i for i in required_interfaces if i not in properties.keys()]\n if missing_interfaces:\n continue\n return object_path.rsplit('/', 1)[1]\n\n return None",
"def get_net_adapter(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetNetAdapter', self.handle, nIndex))",
"def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None",
"def get_net_adapter(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetNetAdapter', self.handle, nIndex))",
"def _get_adapter_config(self):\n proxy = self.core.get_proxy('/')\n try:\n config = proxy.get('/adapters/' + self.adapter_name)\n return config\n except KeyError:\n return None",
"def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None",
"def guess_network(self):\n # decide what sort of network we are going to use\n # return the actual type\n # right now we just use the first host only network and that's it\n host_only = list(HostOnlyNetwork.find_networks())\n if host_only:\n return host_only[0]\n else:\n return NewHostOnlyNetwork()",
"def cosmo_find_external_net(self):\n nets = self.list_networks()['networks']\n ls = [net for net in nets if net.get('router:external')]\n if len(ls) == 1:\n return ls[0]\n if len(ls) != 1:\n raise RuntimeError(\n \"Expected exactly one external network but found {0}\".format(\n len(ls)))",
"def _tenant_network(self):\n port = self._connection.network.ports.find_by_device_owner('network:router_interface')\n if port:\n return self._connection.network.networks.get(port.network_id)\n else:\n raise errors.ImproperlyConfiguredError('Could not find tenancy network')",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def get_adapter_name(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetAdapterName', self.handle)",
"def get_default_ip():\r\n if CONFIG.BIND_INTERFACE is None:\r\n default_gw = netifaces.gateways()['default']\r\n if netifaces.AF_INET in default_gw:\r\n preferred_interface = default_gw[netifaces.AF_INET][1]\r\n else:\r\n interfaces = netifaces.interfaces()\r\n preferred_interface = next((i for i in interfaces if i != 'lo'), interfaces[0])\r\n else:\r\n preferred_interface = CONFIG.BIND_INTERFACE\r\n return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']",
"def getlan():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('10.255.255.255', 1))\n lan = str(s.getsockname()[0])\n s.close()\n except socket.error:\n s.close()\n sys.exit('>> Unable to find LAN IP')\n\n return lan",
"def get_adapter(self):\n\t\timportlib.import_module('app.adapters.{0}'.format(self.builder.name))\n\n\t\tclasses = inspect.getmembers(\n\t\t\tsys.modules['app.adapters.{0}'.format(self.builder.name)],\n\t\t\tinspect.isclass\n\t\t)\n\n\t\tadapter = next(\n\t\t\tcls_ for cls_ in classes \\\n\t\t\tif hasattr(cls_[1], 'tech') \\\n\t\t\t and cls_[1].tech == self.builder.__class__.tech \\\n\t\t\t and hasattr(cls_[1], 'ctx') \\\n\t\t\t and cls_[1].ctx == self.builder.__class__.ctx\n\t\t)[1]\n\n\t\treturn adapter(self.builder())",
"def get_bound_adapter_name(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetBoundAdapterName', self.handle)",
"def network_interface(self): \n return self._network_interface",
"def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"",
"def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()",
"def get_adapter(self, command: str) -> BaseAdapter:\n return self.routes.get(command, self.adapter)",
"def get_netiface():\n ip = mu.get_ip()\n for interface in netifaces.interfaces():\n addrs = netifaces.ifaddresses(interface)\n if netifaces.AF_INET in addrs.keys():\n i_addr = addrs[netifaces.AF_INET][0]['addr']\n if i_addr == ip:\n return interface\n\n # Return None if no interface found\n return None",
"def get_adapter(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Adapter\"\r\n adapter_c = getattr(netius.adapters, name_f)\r\n adapter = adapter_c(*args, **kwargs)\r\n return adapter",
"def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip",
"def guest_dev(self):\n if self.netns is None:\n raise ex.excError(\"could not determine netns\")\n with open(\"/proc/net/dev\", \"r\") as filep:\n local_devs = [line.split(\":\", 1)[0] for line in filep.readlines() if \":\" in line]\n\n cmd = [rcEnv.syspaths.nsenter, \"--net=\"+self.netns, \"ip\" , \"link\"]\n out, err, ret = justcall(cmd)\n used = []\n for line in out.splitlines():\n if \": eth\" not in line:\n continue\n idx = line.split()[1].replace(\":\", \"\").replace(\"eth\", \"\")\n if \"@\" in idx:\n # strip \"@if<n>\" suffix\n idx = idx[:idx.index(\"@\")]\n try:\n used.append(int(idx))\n except ValueError:\n # user named interface. ex: eth-metier\n continue\n idx = 0\n nspid = self.get_nspid()\n while True:\n guest_dev = \"eth%d\" % idx\n local_dev = \"v%spl%s\" % (guest_dev, nspid)\n if idx not in used and local_dev not in local_devs:\n return guest_dev\n idx += 1",
"def get_default_device():\n return MXNET_DEFAULT_DEVICE",
"def _guess_lan_address():\n blacklist = [\"127.0.0.1\", \"0.0.0.0\", \"255.255.255.255\"]\n for interface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(interface)\n for option in addresses.get(netifaces.AF_INET, []):\n if \"broadcast\" in option and \"addr\" in option and not option[\"addr\"] in blacklist:\n if __debug__: dprint(\"interface \", interface, \" address \", option[\"addr\"])\n return option[\"addr\"]\n #Exception for virtual machines/containers\n for interface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(interface)\n for option in addresses.get(netifaces.AF_INET, []):\n if \"addr\" in option and not option[\"addr\"] in blacklist:\n if __debug__: dprint(\"interface \", interface, \" address \", option[\"addr\"])\n return option[\"addr\"]\n dprint(\"Unable to find our public interface!\", level=\"error\")\n return None",
"def get_external_ip():\n try:\n r = requests.get(\n METADATA_NETWORK_INTERFACE_URL,\n headers={'Metadata-Flavor': 'Google'},\n timeout=2)\n return r.text\n except requests.RequestException:\n logging.info('Metadata server could not be reached, assuming local.')\n return 'localhost'"
]
| [
"0.64374787",
"0.6360085",
"0.6208538",
"0.6162118",
"0.6123978",
"0.61113894",
"0.6101043",
"0.60912037",
"0.6047662",
"0.58597225",
"0.58495176",
"0.5846594",
"0.5832499",
"0.58117396",
"0.5698909",
"0.56513643",
"0.56490064",
"0.564142",
"0.56289345",
"0.56115305",
"0.5590554",
"0.5590368",
"0.5583641",
"0.55627215",
"0.54893947",
"0.5477926",
"0.5476668",
"0.5431131",
"0.54299986",
"0.54259807"
]
| 0.8552797 | 0 |
Get location from environment or default. | def get_location() -> location.SdcLocation:
return location.SdcLocation(fac=os.getenv('ref_fac', default='r_fac'), # noqa: SIM112
poc=os.getenv('ref_poc', default='r_poc'), # noqa: SIM112
bed=os.getenv('ref_bed', default='r_bed')) # noqa: SIM112 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;",
"def get_location(self):\n return self._overridden_location or self.get_default_location()",
"def _config_location(cls):\n local = cls._find_local()\n if local is not None:\n return local, ConfigLocations.local\n global_path = cls._find_global()\n if global_path is not None:\n return global_path, ConfigLocations.config\n env = cls._find_env()\n if env is not None:\n return env, ConfigLocations.env\n raise FileNotFoundError(\"Can't find config files\")",
"def get_default_environment(cwd=None):\n\n # NOTE(dittrich): I know this code has multiple return points\n # but it is simpler and easier to understand this way.\n #\n # Highest priority is inhereted environment variable.\n environment = os.getenv('D2_ENVIRONMENT', None)\n if environment is not None:\n return environment\n #\n # Next is saved file in current working directory.\n if cwd is None:\n cwd = os.getcwd()\n local_default = get_saved_default_environment(cwd=cwd)\n if local_default not in ['', None]:\n return local_default\n #\n # Lowest priority is the directory path basename.\n return os.path.basename(cwd)",
"def _locate_config_dir():\n if CONFIG_DIR_ENV in os.environ:\n config_dir = os.environ[CONFIG_DIR_ENV]\n else:\n config_dir = os.path.join(os.environ[\"HOME\"], CONFIG_HOME_DIR)\n return config_dir",
"def get_current_location(location_type):\n assert location_type in available_location_types()\n file_path = os.path.join('/', 'nail', 'etc', location_type)\n with open(file_path) as f:\n return f.read().strip()",
"def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")",
"def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")",
"def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")",
"def get_default_tfff1_loc():\n if platform.system() == 'Windows': # Windows\n return join_path(os.getenv('APPDATA'), \"Tfff1\")\n else: # *nix\n return join_path(os.getenv('HOME'), \"Tfff1\")",
"def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP",
"def get_current_location():\n global current_location\n return current_location",
"def location(self):\n location = self.args.get('location')\n if not location:\n raise JalBotError('Missing required argument -l|-location')\n return location",
"def get(self, key, default=None):\n value = os.environ.get(key)\n\n if value:\n self.logging.info(\"Got %s from environment.\" % key)\n self.logging.debug(value)\n return_val = value\n elif key in self._config.keys():\n self.logging.info(\"Got %s from config file.\" % key)\n self.logging.debug(value)\n return_val = self._config[key]\n else:\n return_val = default\n return return_val",
"def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")",
"def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")",
"def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")",
"def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")",
"def __get_location(self) -> str:\n\t\treturn os.getenv('SQLITE_DRIVER_LOCATION', 'db.sqlite')",
"def location():\n return _locate_or_create()",
"def GetLocation(args):\n return args.location or properties.VALUES.eventarc.location.GetOrFail()",
"def get_location(self) -> Union[str, None]:\n return self._get_location()",
"def getVaultPath():\n\n global args, vaultPathDefault\n\n if args.vault_location:\n return args.vault_location;\n return vaultPathDefault;",
"def location_uri(self) -> Optional[str]:\n return pulumi.get(self, \"location_uri\")",
"def location(self):\n if self.scoping:\n return self.scoping.location\n else:\n return None",
"def get_default_mc_loc():\n if platform.system() == 'Windows': # Windows\n return join_path(os.getenv('APPDATA'), '.minecraft') # C:\\Users\\<user name>\\AppData\\Roaming\n elif platform.system() == 'Darwin': # MacOS\n return join_path(os.getenv('HOME'), 'Library', 'Application Support', 'minecraft') # /home/<user name>/Library/Application Support/minecraft\n else: # Linux\n return join_path(os.getenv('HOME'), '.minecraft') # /home/<user name>/.minecraft",
"def get_environment_var(env_name, default_value):\n if env_name in os.environ:\n return os.environ[env_name]\n else:\n return default_value",
"def getDefaultFileLocation(self):\n\n label_env = os.getenv('DISPASS_LABELFILE')\n std_env = os.getenv('XDG_DATA_HOME') or os.getenv('APPDATA')\n home_file = '~/.dispass/labels'\n\n if label_env:\n return label_env\n if not exists(home_file) and std_env:\n return std_env + '/dispass/labels'\n else:\n return home_file",
"def location(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"location\")",
"def location(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"location\")"
]
| [
"0.7177347",
"0.69822437",
"0.6899748",
"0.6846995",
"0.67594796",
"0.67408764",
"0.6567466",
"0.6567466",
"0.6567466",
"0.6550206",
"0.6428071",
"0.6418596",
"0.64051044",
"0.6391328",
"0.6383778",
"0.6383778",
"0.6383778",
"0.6383778",
"0.6373471",
"0.63651264",
"0.6365045",
"0.63570946",
"0.6356287",
"0.63519424",
"0.6348294",
"0.6343885",
"0.63032925",
"0.6287938",
"0.6261341",
"0.6261341"
]
| 0.70311564 | 1 |
Get ssl context from environment or None. | def get_ssl_context() -> ssl.SSLContext | None:
if (ca_folder := os.getenv('ref_ca')) is None: # noqa: SIM112
return None
return mk_ssl_context_from_folder(ca_folder,
private_key='user_private_key_encrypted.pem',
certificate='user_certificate_root_signed.pem',
ca_public_key='root_certificate.pem',
cyphers_file=None,
ssl_passwd=os.getenv('ref_ssl_passwd')) # noqa: SIM112 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_ssl_context(self):\n context = ssl.SSLContext(self.TLS_VERSION)\n context.load_cert_chain(self.ssl_cert, self.ssl_key)\n return context",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv23_METHOD)\n ctx.use_certificate_file(Settings.BaseDir+'/server.pem')\n ctx.use_privatekey_file(Settings.BaseDir+'/privkey.pem')\n return ctx",
"def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv3_METHOD)\n ctx.use_certificate_file(config.client.certificate)\n ctx.use_privatekey_file(config.client.private_key)\n return ctx",
"def context():\n return Context(SSLv23_METHOD)",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv3_METHOD)\n ctx.use_certificate_file(config.server.certificate)\n ctx.use_privatekey_file(config.server.private_key)\n ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n self.verify_certificate)\n # Since we have self-signed certs we have to explicitly\n # tell the server to trust them.\n ctx.load_verify_locations(config.server.root_ca)\n return ctx",
"def ssl(self) -> Optional[pulumi.Input['SslConfigurationArgs']]:\n return pulumi.get(self, \"ssl\")",
"def _get_noverify_context(self):\n from twisted.internet.ssl import ClientContextFactory\n context_factory = ClientContextFactory()\n if self.ssl_method is not None:\n context_factory.method = self.ssl_method\n return context_factory.getContext()",
"def _create_ssl_context(cfg):\n ctx = ssl.SSLContext(cfg.ssl_version)\n ctx.load_cert_chain(cfg.certfile, cfg.keyfile)\n ctx.verify_mode = cfg.cert_reqs\n if cfg.ca_certs:\n ctx.load_verify_locations(cfg.ca_certs)\n if cfg.ciphers:\n ctx.set_ciphers(cfg.ciphers)\n return ctx",
"def context_get():\n global __context\n if __context is None:\n __context = Context()\n return __context",
"def _default_ssl_context() -> ssl.SSLContext:\n ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n ssl_context.check_hostname = True\n ssl_context.load_default_certs()\n return ssl_context",
"def ssl_options_to_context(ssl_options):\n ...",
"def ssl_context(self) -> SSLContext | None:\n if (\n self.security_protocol != KafkaSecurityProtocol.SSL\n or self.cluster_ca_path is None\n or self.client_cert_path is None\n or self.client_key_path is None\n ):\n return None\n\n client_cert_path = Path(self.client_cert_path)\n\n if self.client_ca_path is not None:\n # Need to contatenate the client cert and CA certificates. This is\n # typical for Strimzi-based Kafka clusters.\n if self.cert_temp_dir is None:\n raise RuntimeError(\n \"KAFKIT_KAFKA_CERT_TEMP_DIR must be set when \"\n \"a client CA certificate is provided.\"\n )\n client_ca = Path(self.client_ca_path).read_text()\n client_cert = Path(self.client_cert_path).read_text()\n sep = \"\" if client_ca.endswith(\"\\n\") else \"\\n\"\n new_client_cert = sep.join([client_cert, client_ca])\n new_client_cert_path = Path(self.cert_temp_dir) / \"client.crt\"\n new_client_cert_path.write_text(new_client_cert)\n client_cert_path = Path(new_client_cert_path)\n\n return create_ssl_context(\n cluster_ca_path=Path(self.cluster_ca_path),\n client_cert_path=client_cert_path,\n client_key_path=Path(self.client_key_path),\n )",
"def test_get_context(self):\n context = Context(SSLv23_METHOD)\n connection = Connection(context, None)\n assert connection.get_context() is context",
"def __call__(self):\n ssl_mode, external_ca = ssl_utils.get_ssl_mode()\n\n ctxt = {\n 'ssl_mode': ssl_mode,\n }\n\n if ssl_mode == 'off':\n close_port(config('ssl_port'))\n ssl_utils.reconfigure_client_ssl()\n return ctxt\n\n ssl_key = convert_from_base64(config('ssl_key'))\n ssl_cert = convert_from_base64(config('ssl_cert'))\n ssl_ca = convert_from_base64(config('ssl_ca'))\n ssl_port = config('ssl_port')\n\n # If external managed certs then we need all the fields.\n if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and\n not all((ssl_key, ssl_cert))):\n log('If ssl_key or ssl_cert are specified both are required.',\n level=ERROR)\n sys.exit(1)\n\n if not external_ca:\n ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()\n\n ctxt.update(self.enable_ssl(\n ssl_key, ssl_cert, ssl_port, ssl_ca,\n ssl_only=(ssl_mode == \"only\"), ssl_client=False\n ))\n\n ssl_utils.reconfigure_client_ssl(True)\n open_port(ssl_port)\n\n return ctxt",
"async def _create_context(self) -> ssl.SSLContext:\n context = utils.server_context_modern()\n\n await self.cloud.run_executor(\n context.load_cert_chain,\n self._acme.path_fullchain,\n self._acme.path_private_key,\n )\n\n return context",
"def get_ssl_certificate():",
"def test_getContext(self):\n contextFactory = crypto.SSLVerifyingContextFactory(self.url)\n self.assertIsInstance(contextFactory.getContext(),\n OpenSSL.SSL.Context)",
"def get_ssl_certificate() :",
"def getContext(self, hostname=None, port=None):\n ctx = super(SSLVerifyingContextFactory, self).getContext()\n store = ctx.get_cert_store()\n verifyOptions = OpenSSL.SSL.VERIFY_PEER\n ctx.set_verify(verifyOptions, self.verifyHostname)\n return ctx",
"def get_context(agent=None):\n global _context\n if _context is None:\n assert agent is not None\n _context = Context(agent)\n\n return _context",
"def __get_http2_ssl_context(self):\n # Get the basic context from the standard library.\n if self.client_side == False:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.ctx = ssl._create_unverified_context()\n else:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.server_cert)\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.ctx = ssl._create_unverified_context()\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n self.ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n self.ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n\n\n if self.client_side == False:\n self.ctx.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n else:\n self.ctx.load_cert_chain(certfile=self.client_certs, keyfile=self.client_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n pass\n\n\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n self.ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n self.ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError as e:\n print(\"TLS Error: NotImplementedError=%s\" % (e))\n pass\n\n #self.ctx = ctx\n\n return True",
"def get_context():\n context = {}\n cfg = load_service_config(\"lighttpd\")\n ip = \"127.0.0.1\"\n enable_caching = False\n try:\n mconfig = load_service_mconfig_as_json('lighttpd')\n enable_caching = mconfig.enable_caching\n except LoadConfigError:\n logging.info(\"Using default values for service 'lighttpd'\")\n\n if enable_caching:\n ip = get_ip_from_if(cfg['interface'])\n\n context['interface_ip'] = ip\n context['store_root'] = cfg['store_root']\n\n return context",
"def sslmode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sslmode\")",
"def get_ssl_socket(self):\n _print(f\"THREADING IDENT: {threading.get_ident()}\")\n return Server.t_ssl_sockets.get(threading.get_ident())",
"def _get_ssl_context_options(ssl_context: SSL.Context) -> int:\n # the OpenSSL.SSL.Context wrapper doesn't expose get_options, so we have to\n # use the low-level interface\n return SSL._lib.SSL_CTX_get_options(ssl_context._context)",
"def context(self) -> Optional[EnvelopeContext]:\n return self._context",
"def ssl_cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_cert\")",
"def ssl_cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_cert\")",
"def fetch_x509_context(self) -> X509Context:"
]
| [
"0.7178645",
"0.7067759",
"0.7032663",
"0.6906664",
"0.667105",
"0.661573",
"0.6535112",
"0.65186465",
"0.6505062",
"0.6488944",
"0.63673794",
"0.6362219",
"0.635696",
"0.63292336",
"0.6304141",
"0.6238989",
"0.6150658",
"0.6108097",
"0.60013795",
"0.59824854",
"0.5979148",
"0.5964068",
"0.592192",
"0.591389",
"0.58483964",
"0.5847599",
"0.58086956",
"0.57983434",
"0.57983434",
"0.5764743"
]
| 0.7796807 | 0 |
Renders the provided Rezume using the template/rezume.mst template file. | def render(rezume: Rezume):
base_dir = Path(__file__).absolute().parent
template = base_dir / "template" / "rezume.mst"
assert template is not None
with template.open("r") as tf:
data = rezume.dump_data()
html = chevron.render(tf, {"rezume": data})
return html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recog():\n\n return render_template('recog.html')",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def render(self):\n pass",
"def get(self):\n return render_template ('nome do html')",
"def render(self):",
"def main():\r\n return render_template(\"UI.html\")",
"def render(self):\r\n super().render()",
"def _render(self) -> None:\n pass",
"def machinelearn2():\n return render_template('frontml.html')",
"def regression_page():\n return render_template(\"regr-matmortality.html\")",
"def render(self, mode='human'):",
"def get(self):\n self.render_front()",
"def render(self):\n raise NotImplementedError",
"def render(self, mode='human'):\n\n pass",
"def index(self):\n return render(\"/derived/rock/index.mako\")",
"def navebarre_graphe(request):\r\n return render(request, 'menu/navebarre_graphe.html')",
"def render(self, mode='human'):\n pass # no use in this situation",
"def render(self):\n self.increase_view_count()\n return render_to_string(self.template.template_file, {'advert':self})",
"def render(self):\n raise NotImplementedError()",
"def aboutus():\n return render_template(\"aboutUs.html\")",
"def navebarre_soluce(request):\r\n return render(request, 'menu/navebarre_soluce.html')",
"def get(self):\n self.render('view.html')",
"def render(self, request):\n return self.leaf.render(request)",
"def capteur_info_relever1():\n return render_template(\n \"relever_capt.html\",\n liste = get_capteurs())",
"def render(self):\n raise RenderNotImplemented('Render function is not implemented.')",
"def about_us():\r\n return render_template(\"about_us.html\")"
]
| [
"0.6526516",
"0.63922876",
"0.63922876",
"0.63922876",
"0.63922876",
"0.63922876",
"0.63922876",
"0.6361408",
"0.6308955",
"0.62740874",
"0.6142634",
"0.6116638",
"0.6111507",
"0.60967636",
"0.60754156",
"0.60511714",
"0.5973882",
"0.5929334",
"0.5927219",
"0.59258807",
"0.5902134",
"0.5869767",
"0.58564717",
"0.5827724",
"0.5807618",
"0.5785457",
"0.5778441",
"0.5642895",
"0.56074536",
"0.5599216"
]
| 0.82971025 | 0 |
Returns a new object with the same month, day, year as the calling object (self). | def copy(self):
dnew = Date(self.month, self.day, self.year)
return dnew | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, new_month, new_day, new_year):\n self.month = new_month\n self.day = new_day\n self.year = new_year",
"def __init__(self, month, day, year):",
"def __init__(self, year, month, day):",
"def __init__(self, month, day, year):\n self.month = month\n self.day = day\n self.year = year",
"def copy(self):\n new_date = Date(self.month, self.day, self.year)\n return new_date",
"def copy(self):\n new_date = Date(self.month, self.day, self.year)\n \n return new_date",
"def __init__(self):\n self.date = str(date.today())\n today_date = str(date.today())\n today_date = today_date.split(\"-\")\n self.curr_year = int(today_date[0])\n self.curr_month = int(today_date[1])\n self.curr_date = int(today_date[2])",
"def date(self, year: Number, month: Number, day: Number) -> Date:\n return Date(year, month, day) # type: ignore",
"def __init__(self, *args):\n this = _libsbml.new_Date(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, init_month, init_day, init_year):\n # add the necessary assignment statements below\n self.month = init_month\n self.day = init_day\n self.year = init_year",
"def date(self):\n return Date(self.year, self.month, self.day)",
"def clone(self):\n return _libsbml.Date_clone(self)",
"def __init__(self, month, key):\n self.__parent__ = month\n self.__name__ = key\n self.blog = month.blog\n self.month = month\n self.year = month.__parent__\n self.number = int(key)\n self.date = datetime.date(self.year.number, self.month.number, self.number)",
"def replace(self, year=None, month=None, day=None):\n if year is None:\n year = self._year\n if month is None:\n month = self._month\n if day is None:\n day = self._day\n return date(year, month, day)",
"def __init__(self, x, y, date):\n super().__init__(x, y)\n self.date = date",
"def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):",
"def __init__(self, year, month=None, day=None):\n self._year = year\n self._month = month\n self._day = day\n\n # Is it a valid date?\n y = year\n m = month\n d = day\n if month is None:\n m = 1\n d = 1\n elif day is None:\n d = 1\n self._date = datetime.date(y, m, d)",
"def fromdate(cls, date):\n return cls(date.year, date.month, date.day)",
"def combine(cls, date_obj, time_obj):\n return cls(date_obj.year, date_obj.month, date_obj.day,\n time_obj.hour, time_obj.minute, time_obj.second,\n time_obj.nanosecond)",
"def test_define_a_second_constructor(self):\n a = Date(2012, 12, 21)\n self.assertEqual(a.year, 2012)\n self.assertEqual(a.month, 12)\n self.assertEqual(a.day, 21)\n\n t = time.localtime()\n b = Date.today()\n self.assertEqual(b.year, t.tm_year)\n self.assertEqual(b.month, t.tm_mon)\n self.assertEqual(b.day, t.tm_mday)",
"def Date(year, month, day):\r\n return datetime.datetime(year, month, day, 0, 0)",
"def replace(self, year, month, day):\n return self",
"def get_date(self):\n return datetime.date(\n int(self.kwargs['year']),\n int(self.kwargs['month']),\n int(self.kwargs['day'])\n )",
"def date(self):\n return date(self._year, self._month, self._day)",
"def __init__(self, d, m, y):\n\n self.set_calendar(d, m, y)",
"def from_date(cls, date: dt.date) -> Date:\n return cls(date.year, date.month, date.day)",
"def __parent__(self):\n year = Year(self.blog, \"%04d\" % self.date.year)\n month = Month(year, \"%02d\" % self.date.month)\n day = Day(month, \"%02d\" % self.date.day)\n return day",
"def todate(self):\n return date(self.year, self.month, self.day)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.creation_date = datetime.now()",
"def fromdate(cls, date_: date):\n if date_ is None:\n return None\n else:\n return cls(date_.year, date_.month, date_.day)"
]
| [
"0.7616714",
"0.7335453",
"0.73295027",
"0.7061737",
"0.7014032",
"0.6962942",
"0.67879266",
"0.6735777",
"0.6664058",
"0.65950024",
"0.65334845",
"0.64315635",
"0.62427527",
"0.6209779",
"0.6198609",
"0.6188022",
"0.61878073",
"0.6174517",
"0.6170295",
"0.6150374",
"0.60712725",
"0.60400474",
"0.60267824",
"0.5958148",
"0.58286446",
"0.5819634",
"0.58032984",
"0.57646227",
"0.57530576",
"0.56937987"
]
| 0.75362664 | 1 |
Decides if self and d2 represent the same calendar date, whether or not they are in the same place in memory. | def equals(self, d2):
if self.year == d2.year and self.month == d2.month and self.day == d2.day:
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n if self.day == other.day and self.month == other.month and self.year == other.year:\n return True\n else:\n return False",
"def __eq__(self, other):\n if self.month == other.month:\n if self.day == other.day:\n if self.year ==other.year:\n return True\n return False",
"def __eq__(self, other):\n\n same_ae = True\n\n if (self.date_start != other.date_start) or \\\n (self.date_end != other.date_end) or \\\n (self.type_event != other.type_event):\n\n same_ae = False\n\n return same_ae",
"def __eq__(self, Other):\n return self.date == Other.date and self.time_of_day == Other.time_of_day",
"def test_date1_equal_date2(self):\n date1 = datetime.date(2014, 11, 29)\n date2 = datetime.date(2014, 11, 29)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def __eq__(self, other):\n if not isinstance(other, DayTrend):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, NoSQLDrDateSyncIndicators):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if type(self) is not type(other):\n return NotImplemented\n \n if self.day != other.day:\n return False\n \n if self.month != other.month:\n return False\n \n if self.name != other.name:\n return False\n \n if self.color_code != other.color_code:\n return False\n \n return True",
"def __eq__(self, other):\n if not isinstance(other, Todays):\n return False\n\n return self.to_dict() == other.to_dict()",
"def isBefore(self, d2):\n if self.year < d2.year:\n return True\n elif self.year == d2.year and self.month < d2.month:\n return True\n elif self.year == d2.year and self.month == d2.month and self.day < d2.day:\n return True\n else:\n return False",
"def compare_dates(dt1, dt2):\n return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day",
"def __eq__(self, other):\n if self is other:\n return True\n elif type(self) != type(other):\n return False\n else:\n return self._start_time == other._start_time \\\n and self._end_time == other._end_time \\\n and self._events == other._events",
"def __eq__(self, other):\n if not isinstance(other, DeterminantDisease):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, DefaultDashManifestPeriod):\n return False\n\n return self.__dict__ == other.__dict__",
"def __cmp__(self, other):\n if not isinstance(other, date):\n types = (type(other), date)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return self._cmp(self._days, other._days)",
"def __eq__(self, other):\n return self.start == other.start and self.end == other.end",
"def __eq__(self, other):\n if not isinstance(other, CreateCalendarRequest):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n if not isinstance(other, DayTrendInput):\n return False\n\n return self.__dict__ == other.__dict__",
"def __ne__(self, Other):\n return self.date != Other.date or self.time_of_day != Other.time_of_day",
"def __eq__(self, other):\n return (self.start == other.start and self.end == other.end)",
"def __eq__(self, other):\n if not isinstance(other, V1ClockOffset):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.times == other.times",
"def __eq__(self, other):\n if not isinstance(other, DataPoint):\n return False\n\n return self.to_dict() == other.to_dict()",
"def same(series1, series2):\n # pylint: disable=protected-access\n return bool(\n series1._data == series2._data and\n Collection.same(series1._collection, series2._collection)\n )",
"def __eq__(self, other):\n return (isinstance(other, BaseDataObject) and\n self.defined and\n other.defined and\n (self.identifier == other.identifier))",
"def __eq__(self, other):\n if not isinstance(other, RecurrencePatternDto):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, TpoDataDTOsSharedPatientDTO):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, V1RollingUpdateDaemonSet):\n return False\n\n return self.__dict__ == other.__dict__",
"def __ne__(self, other):\n if not isinstance(other, Todays):\n return True\n\n return self.to_dict() != other.to_dict()",
"def __eq__(self, other):\n return self.semitone_interval == other.semitone_interval"
]
| [
"0.7327474",
"0.70843506",
"0.69978386",
"0.69955295",
"0.67568344",
"0.6674572",
"0.6666818",
"0.66469276",
"0.6643512",
"0.66216856",
"0.6603491",
"0.65605384",
"0.65052336",
"0.6463389",
"0.6428842",
"0.63874286",
"0.6381778",
"0.63668907",
"0.6357204",
"0.6326482",
"0.6297406",
"0.6295491",
"0.6291131",
"0.62843347",
"0.6266728",
"0.62459147",
"0.62323916",
"0.621876",
"0.61994725",
"0.6197017"
]
| 0.7742764 | 0 |
changes the calling object so that it represents N calendar days BEFORE the date it originally represented | def subNDays(self, N):
print self
for i in range(N):
self.yesterday()
print self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_n_days_ahead(self, startdate, n, fmt=None):\n return startdate + datetime.timedelta(days=n)",
"def _prev_month(self):\r\n self._canvas.place_forget()\r\n\r\n self._date = self._date - self.timedelta(days=1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstuct calendar\r",
"def _prev_month(self):\n self._canvas.place_forget()\n\n self._date = self._date - self.timedelta(days=1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstuct calendar",
"def visitBefore(self, date):\n raise NotImplementedError()",
"def advance(self):\n try:\n self.day += 1\n except TypeError:\n try:\n self.month += 1\n self.day = 1\n except TypeError:\n self.year += 1\n self.month = 1\n self.day = 1",
"def from_fixed(cls, date):\n count = date - cls.CORRELATION + 1\n number = amod(count, 13)\n name = amod(count, 20)\n return AztecTonalpohualliDate(number, name)",
"def hydrate_date(days):\n return Date.from_ordinal(unix_epoch_date_ordinal + days)",
"def add_n_days(self, n):\n print(self)\n while n > 0:\n self.tomorrow()\n print(self)\n n -= 1",
"def __simulate_one_day__(self):\n self.compute()\n self.days.append(next(self.index))",
"def __sub__(self, other):\n if not isinstance(other, datetime.timedelta):\n raise TypeError(\n \"Unsupported operand type(s) for +: {.__name__} and {.__name__}.\".format(type(self), type(other)))\n delta_years, delta_months = 0, 0\n delta_days = other.days\n total_passed_days_this_year = sum(\n NepaliDate.calendar_data[self.year][:self.month - 1]\n ) + self.day\n from_year, from_month, from_day = self.year, self.month, self.day\n if delta_days >= total_passed_days_this_year:\n delta_days -= total_passed_days_this_year\n from_year = self.year - 1\n from_month = 12\n if from_year < MIN_DATE['year']:\n raise OverflowError(\"Resulting date out of range.\")\n from_day = NepaliDate.calendar_data[from_year][11]\n if delta_days >= NepaliDate.total_days(from_year):\n for year in range(from_year, MIN_DATE['year'] - 1, -1):\n total_days = NepaliDate.total_days(year)\n if delta_days > total_days:\n delta_days -= total_days\n delta_years += 1\n else:\n break\n from_year -= delta_years\n if from_year < MIN_DATE['year']:\n raise OverflowError(\"Resulting date out of range.\")\n if from_year == self.year:\n total_passed_days_this_month = from_day\n if delta_days >= total_passed_days_this_month:\n delta_days -= total_passed_days_this_month\n from_month -= 1\n from_day = NepaliDate.calendar_data[from_year][from_month - 1]\n for month_days in NepaliDate.calendar_data[from_year][from_month - 1::-1]:\n if delta_days >= month_days:\n delta_days -= month_days\n from_month -= 1\n from_day = NepaliDate.calendar_data[from_year][from_month - 1]\n else:\n break\n from_day -= delta_days\n self.year, self.month, self.day = from_year, from_month, from_day\n return self",
"def __init__(self, *args):\n this = _libsbml.new_Date(*args)\n try: self.this.append(this)\n except: self.this = this",
"def _increment_date_data(klass, series, date_data):\n\n # delta is the timedelta in between events\n delta = timedelta(days=7 * series.every)\n date_data['start_date'] = date_data['start_date'] + delta\n date_data['end_date'] = date_data['end_date'] + delta",
"def advance(self):\n\n max_days = Calendar.months[self.__months - 1]\n if self.__months == 2 and Calendar.leapyear(self.__years):\n max_days += 1\n if self.__days == max_days:\n self.__days = 1\n if self.__months == 12:\n self.__months = 1\n self.__years += 1\n else:\n self.__months += 1\n else:\n self.__days += 1",
"def from_fixed(cls, date):\n count = mod(date - cls.CORRELATION, 365)\n day = mod(count, 20) + 1\n month = quotient(count, 20) + 1\n return AztecXihuitlDate(month, day)",
"def yesterday(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n firstMonth = 1\n firstDay = 1\n\n if currentDay == firstDay and self.month == firstMonth:\n self.year -= 1\n self.month = 12\n self.day = 31\n elif currentDay == firstDay:\n self.month -= 1\n self.day = DIM[self.month]\n else:\n self.day -= 1",
"def datefixer(ds):\n\n\n\t# ========== create the new dates ==========\n\tyear = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , int(np.floor(tm)), int(tm%1*30+1)) for tm in ds.time]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates",
"def is_before(self,other_date):",
"def prepend(self, *args):\n return _libsbml.ListWrapperDate_prepend(self, *args)",
"def new_day(self):\n self.previous_days.append(self.energy_debt)\n self.energy_debt = defaultdict(lambda: 0.0)\n\n #TODO: add the settelement mechanism here",
"def n_business_days(self, n=-2):\n\n business_days = 0\n calendar_days = 0 \n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(self.time_stamp + timedelta(calendar_days)):\n business_days = business_days + 1\n return self.time_stamp + timedelta(calendar_days)\n return date",
"def copy(self):\n dnew = Date(self.month, self.day, self.year)\n return dnew",
"def date(self, new_date):\n self._date.date = new_date",
"def setDay(self, *args):\n return _libsbml.Date_setDay(self, *args)",
"def _normalize(self):\n if self.event_type == 'event_allday':\n from_date = self.from_date\n if from_date is not None:\n time_since_daystart = from_date.hour() * 3600 \\\n + from_date.minute() * 60 + from_date.second()\n if time_since_daystart:\n timeTime = from_date.timeTime()\n self.from_date = DateTime(timeTime - time_since_daystart)\n to_date = self.to_date\n if to_date is not None:\n time_since_daystart = to_date.hour() * 3600 \\\n + to_date.minute() * 60 + to_date.second()\n # 86399 sec = 24h - 1sec\n if time_since_daystart != 86399:\n timeTime = to_date.timeTime()\n self.to_date = DateTime(\n timeTime - time_since_daystart + 86399)\n if self.to_date.lessThan(self.from_date):\n to_date = self.to_date\n self.to_date = self.from_date\n self.from_date = to_date\n\n if self.event_type == 'event_allday':\n self._normalize()",
"def __init__(self, dateStart, dateEnd): \n #TODO: Raise an exception if dateEnd<dateStart.\n super(dateGenerator,self).__init__()\n d = dateEnd - dateStart\n self._startDate = dateStart\n self._dateDiffSeconds = d.days * 86400 + d.seconds",
"def date(self):",
"def test_before():\n date = datetime.datetime\n\n #july forth is this week\n assert CALENDAR.nth_trading_day_before(3, date(2006, 7, 7)) == date(2006, 7, 3)\n assert CALENDAR.nth_trading_day_before(0, date(2006, 7, 7)) == date(2006, 7, 7)\n assert CALENDAR.nth_trading_day_before(0, date(2006, 7, 8)) == date(2006, 7, 7)",
"def clone(self):\n return _libsbml.Date_clone(self)",
"def __parent__(self):\n year = Year(self.blog, \"%04d\" % self.date.year)\n month = Month(year, \"%02d\" % self.date.month)\n day = Day(month, \"%02d\" % self.date.day)\n return day",
"def _date(self, _date):\n\n self.__date = _date"
]
| [
"0.59353423",
"0.58411884",
"0.5824707",
"0.5729099",
"0.56658214",
"0.5655242",
"0.5619501",
"0.55904967",
"0.5576773",
"0.55663043",
"0.55275434",
"0.55259895",
"0.5522124",
"0.5513618",
"0.5505",
"0.5504524",
"0.5501555",
"0.54578924",
"0.54559875",
"0.5449029",
"0.5436721",
"0.54350483",
"0.5432713",
"0.54298943",
"0.54093003",
"0.53918296",
"0.53902596",
"0.5375791",
"0.5374038",
"0.536135"
]
| 0.60222596 | 0 |
returns True if the calling object is a calendar date BEFORE the argument named d2 (an object of type Date); returns False if self and d2 represent the same day or if self is AFTER d2 | def isBefore(self, d2):
if self.year < d2.year:
return True
elif self.year == d2.year and self.month < d2.month:
return True
elif self.year == d2.year and self.month == d2.month and self.day < d2.day:
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_before(self,other_date):",
"def is_before(self, other):\n if self.year > other.year:\n return False\n if self.year == other.year:\n if self.month > other.month:\n return False\n if self.year == other.year:\n if self.month == other.month:\n if self.day >= other.day:\n return False\n return True",
"def __lt__(self, Other):\n if self.date < Other.date:\n return True\n elif self.date == Other.date:\n return self.time_of_day < Other.time_of_day\n else:\n return False",
"def __cmp__(self, other):\n if not isinstance(other, date):\n types = (type(other), date)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return self._cmp(self._days, other._days)",
"def __lt__(self, other):\n return self.date < other.date",
"def __lt__(self, other):\n # TODO: Support comparison with other NullableDates.\n return self._as_date() < other",
"def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False",
"def is_before(self, other):\n if self.year < other.year:\n return True\n elif self.year == other.year:\n if self.month < other.month:\n return True\n elif self.month == other.month:\n if self.day < other.day:\n return True\n return False",
"def date_occurs_before(gedcom_date_first : str, gedcom_date_second : str) -> bool:\n date_first = gedcom_date_to_datetime(gedcom_date_first)\n date_second = gedcom_date_to_datetime(gedcom_date_second)\n\n return date_first < date_second",
"def isAfter(self, d2):\n if self.equals(d2):\n return False\n elif self.isBefore(d2):\n return False\n else:\n return True",
"def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False",
"def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False",
"def equals(self, d2):\n if self.year == d2.year and self.month == d2.month and self.day == d2.day:\n return True\n else:\n return False",
"def test_date2_lower_date1(self):\n date1 = datetime.date(2019, 5, 2)\n date2 = datetime.date(2019, 5, 1)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def test_date1_equal_date2(self):\n date1 = datetime.date(2014, 11, 29)\n date2 = datetime.date(2014, 11, 29)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def one_day(self):\n return self.end.date() == self.date",
"def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))",
"def _filter_by_date(self, date: datetime.datetime) -> bool:\n if (self._date_from and date < self._date_from) or (self._date_to and date > self._date_to):\n return False\n return True",
"def compare_dates(dt1, dt2):\n return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day",
"def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False",
"def dateIsAfter(year1, month1, day1, year2, month2, day2):\n if year1 > year2:\n return True\n if year1 == year2:\n if month1 > month2:\n return True\n if month1 == month2:\n return day1 > day2\n return False",
"def __ne__(self, Other):\n return self.date != Other.date or self.time_of_day != Other.time_of_day",
"def earlier_date(date1, date2):\r\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))",
"def date_occurs_before_cond(gedcom_date_first : str, gedcom_date_second : str, cond : str) -> bool:\n\n return cond == 'NA' or date_occurs_before(gedcom_date_first, gedcom_date_second)",
"def __eq__(self, Other):\n return self.date == Other.date and self.time_of_day == Other.time_of_day",
"def __eq__(self, other):\n\n same_ae = True\n\n if (self.date_start != other.date_start) or \\\n (self.date_end != other.date_end) or \\\n (self.type_event != other.type_event):\n\n same_ae = False\n\n return same_ae",
"def intersects(self, other: \"DateRange\") -> bool:\n return (\n self.end_date - other.start_date\n >= timedelta(0)\n >= self.start_date - other.end_date\n )",
"def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))",
"def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))",
"def __lt__(self, other: Schedule) -> bool:\n return self.next_time.__lt__(other.next_time)"
]
| [
"0.76194257",
"0.71506417",
"0.7093044",
"0.6992385",
"0.6915931",
"0.68639606",
"0.6809329",
"0.6788419",
"0.6785996",
"0.67289376",
"0.66954887",
"0.66954887",
"0.6686702",
"0.6642271",
"0.6528611",
"0.6379765",
"0.6233878",
"0.62327737",
"0.6227459",
"0.6219062",
"0.6192563",
"0.619005",
"0.61432594",
"0.61205214",
"0.6071039",
"0.60468084",
"0.6012302",
"0.6003131",
"0.6003131",
"0.5927065"
]
| 0.8014479 | 0 |
returns True if the calling object is a calendar date AFTER the argument named d2 (an object of type Date); returns False if self and d2 represent the same day or if self is BEFORE d2 | def isAfter(self, d2):
if self.equals(d2):
return False
elif self.isBefore(d2):
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isBefore(self, d2):\n if self.year < d2.year:\n return True\n elif self.year == d2.year and self.month < d2.month:\n return True\n elif self.year == d2.year and self.month == d2.month and self.day < d2.day:\n return True\n else:\n return False",
"def is_before(self,other_date):",
"def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False",
"def __cmp__(self, other):\n if not isinstance(other, date):\n types = (type(other), date)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return self._cmp(self._days, other._days)",
"def dateIsAfter(year1, month1, day1, year2, month2, day2):\n if year1 > year2:\n return True\n if year1 == year2:\n if month1 > month2:\n return True\n if month1 == month2:\n return day1 > day2\n return False",
"def is_before(self, other):\n if self.year > other.year:\n return False\n if self.year == other.year:\n if self.month > other.month:\n return False\n if self.year == other.year:\n if self.month == other.month:\n if self.day >= other.day:\n return False\n return True",
"def equals(self, d2):\n if self.year == d2.year and self.month == d2.month and self.day == d2.day:\n return True\n else:\n return False",
"def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False",
"def test_date2_lower_date1(self):\n date1 = datetime.date(2019, 5, 2)\n date2 = datetime.date(2019, 5, 1)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def is_after(self, other):\n if self.is_before(other) or self == other:\n return False\n else:\n return True",
"def __lt__(self, Other):\n if self.date < Other.date:\n return True\n elif self.date == Other.date:\n return self.time_of_day < Other.time_of_day\n else:\n return False",
"def is_after(self, other):\n if self.is_before(other) == False:\n if self != other:\n return True\n return False",
"def test_date1_equal_date2(self):\n date1 = datetime.date(2014, 11, 29)\n date2 = datetime.date(2014, 11, 29)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def date_occurs_before(gedcom_date_first : str, gedcom_date_second : str) -> bool:\n date_first = gedcom_date_to_datetime(gedcom_date_first)\n date_second = gedcom_date_to_datetime(gedcom_date_second)\n\n return date_first < date_second",
"def __lt__(self, other):\n return self.date < other.date",
"def one_day(self):\n return self.end.date() == self.date",
"def is_before(self, other):\n if self.year < other.year:\n return True\n elif self.year == other.year:\n if self.month < other.month:\n return True\n elif self.month == other.month:\n if self.day < other.day:\n return True\n return False",
"def __lt__(self, other):\n # TODO: Support comparison with other NullableDates.\n return self._as_date() < other",
"def __ne__(self, Other):\n return self.date != Other.date or self.time_of_day != Other.time_of_day",
"def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False",
"def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False",
"def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))",
"def _filter_by_date(self, date: datetime.datetime) -> bool:\n if (self._date_from and date < self._date_from) or (self._date_to and date > self._date_to):\n return False\n return True",
"def is_outdated(self):\n today = datetime.datetime.today()\n day = datetime.datetime.combine(self.date, self.start_time)\n return day <= today",
"def __gt__(self, other):\n if type(self) is not type(other):\n return NotImplemented\n \n # Month has priority over day.\n self_month = self.month\n other_month = other.month\n \n if self_month > other_month:\n return True\n \n if self_month < other_month:\n return False\n \n self_day = self.day\n other_day = other.day\n \n if self_day > other_day:\n return True\n \n if self_day < other_day:\n return False\n \n # And lastly the name\n self_name = self.name\n other_name = other.name\n \n if self_name > other_name:\n return True\n \n if self_name < other_name:\n return False\n \n if self.color_code > other.color_code:\n return True\n \n return False",
"def EndAfterStart(obj):\n if obj.end_date is None: return\n if obj.end_date <= obj.start_date:\n raise interface.Invalid(\n _(\"End Date must be after Start Date\"), \n \"start_date\", \n \"end_date\")",
"def test_date1_date_higher_date2_month_days(self):\n dates1 = (\n datetime.date(1999, 1, 29),\n datetime.date(2005, 1, 30),\n datetime.date(2012, 1, 31),\n datetime.date(1999, 3, 31),\n datetime.date(1999, 5, 31),\n datetime.date(1999, 8, 31),\n )\n\n dates2 = (\n datetime.date(1999, 2, 28),\n dates1[1] + datetime.timedelta(31),\n dates1[2] + datetime.timedelta(31),\n datetime.date(1999, 4, 30),\n datetime.date(1999, 6, 30),\n datetime.date(1999, 10, 1),\n )\n for date1, date2 in zip(dates1, dates2):\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def earlier_date(date1, date2):\r\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))",
"def is_after(self, dt: datetime) -> bool:\n return self.target_time >= make_tz_aware(dt)",
"def compare_dates(dt1, dt2):\n return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day"
]
| [
"0.75077987",
"0.71372366",
"0.7086298",
"0.6838413",
"0.68156666",
"0.67405784",
"0.65129244",
"0.65059084",
"0.64875185",
"0.64625776",
"0.6457431",
"0.6406961",
"0.6401933",
"0.6385087",
"0.63760567",
"0.6274783",
"0.6255542",
"0.62544763",
"0.62399304",
"0.6124017",
"0.6124017",
"0.6077731",
"0.60401136",
"0.6034442",
"0.59410805",
"0.5928567",
"0.5921111",
"0.5893762",
"0.58849114",
"0.5855222"
]
| 0.75769514 | 0 |
returns an integer representing the NUMBER OF DAYS between self and d2 | def diff(self, d2):
copyD1 = self.copy()
copyD2 = d2.copy()
count = 0
if copyD1.equals(copyD2):
return count
elif copyD1.isBefore(copyD2):
while copyD1.isBefore(copyD2):
count += 1
copyD1.tomorrow()
return -count
else:
while copyD1.isAfter(copyD2):
count += 1
copyD1.yesterday()
return count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def days_between(self, other):\n new_self = self.copy()\n new_other = other.copy()\n count=0\n if self.is_before(other):\n while(True):\n if new_self == new_other:\n break\n count-=1\n new_self.advance_one()\n elif self.is_after(other):\n while(True):\n if new_self==new_other:\n break\n count+=1\n new_other.advance_one()\n\n return count",
"def compute_real_days(self):\n if (self.end_date > date.today()):\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, date.today())\n else:\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, self.end_date)",
"def get_number_days(self):\r\n return 1",
"def compute_days(start: date, end: date) -> int:\n delta = end - start\n return delta.days + 1",
"def _get_number_of_days(self, date_from, date_to, employee_id):\n\t\tfrom_dt = fields.Datetime.from_string (date_from)\n\t\tto_dt = fields.Datetime.from_string (date_to)\n\t\tif employee_id:\n\t\t\temployee = self.env['hr.employee'].browse (employee_id)\n\n\t\t\t# Testing 16/11/19\n\t\t\tshift = employee.resource_calendar_ids\n\t\t\treturn employee.get_work_days_count (from_dt, to_dt, shift)\n\n\t\ttime_delta = to_dt - from_dt\n\t\treturn math.ceil (time_delta.days + float (time_delta.seconds) / 86400)",
"def days_between(self, d1, d2):\n self.is_not_used()\n try:\n d1 = self.format_date(d1)\n d2 = self.format_date(d2)\n d1 = datetime.strptime(d1, '%Y-%m-%d')\n d2 = datetime.strptime(d2, '%Y-%m-%d')\n return abs((d2 - d1).days)\n except Exception as e:\n self.error(str(e))",
"def _get_number_of_days(self, date_from, date_to):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_days = timedelta.days + float(timedelta.seconds) / 86400\n return diff_days",
"def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n if year1 <= year2 and month1 <= month2 and day1 <= day2:\n nOfDays = 0\n while nextDay < (year2, month2, day2):\n \n nOfDays += 1\n \n return nOfDays",
"def get_number_days(self):\r\n raise NotImplementedError",
"def size(self):\n\t\treturn (self.dates[1] - self.dates[0]).days",
"def numOfDays():\n\n print(\"Podaj rok, miesiac oraz dzien pierwszej daty: \")\n inputs = [input() for i in range(3)]\n\n print(\"Podaj rok, miesiac oraz dzien drugiej daty: \")\n inputs1 = [input() for i in range(3)]\n\n d0 = date(inputs[0], inputs[1], inputs[2])\n d1 = date(inputs1[0], inputs1[1], inputs1[2])\n delta = abs(d1 - d0)\n \n print(delta.days)\n return abs(delta.days)",
"def days(self):\n return int(self.hours / 24)",
"def nday(bdate1,bdate2, n):\n b1=date(bdate1[2],bdate1[1],bdate1[0])\n b2=date(bdate2[2],bdate2[1],bdate2[0])\n diff= abs(b1-b2)\n nday=max(b1,b2)+diff*(n-1)\n return nday",
"def DAYS(end_date, start_date):\n return (_make_datetime(end_date) - _make_datetime(start_date)).days",
"def compute_total_days(start, end):\n # Use the datetime module to subtract the dates (+1 if inclusive)\n return (end - start).days + 1",
"def elapsed_days(self) -> int:\n return (datetime.today() - self.release_datetime).days",
"def days(t1, t2, date_format):\n\n start = datetime.strptime(t1, date_format)\n stop = datetime.strptime(t2, date_format)\n delta = stop - start\n return delta.days",
"def difference_in_days(y1,m1,d1, y2,m2,d2):\n date1 = date(y1,m1,d1)\n date2 = date(y2,m2,d2)\n return abs(date2 - date1)",
"def distance_from(self, other) -> int:\n assert isinstance(other, CustomDate), \"You must pass a valid CustomDate object\"\n return (self.minutes() - other.minutes()) + (self - other) * 24 * 60",
"def get_num_of_days(start, end):\n\n start = clean_date(start)\n end = clean_date(end)\n # print(date(start[0], start[1], start[2]))\n\n start_date = date(start[0], start[1], start[2])\n end_date = date(end[0], end[1], end[2])\n\n delta = end_date - start_date # as timedelta\n \n return delta.days",
"def DAYS(\n end_date: func_xltypes.XlDateTime,\n start_date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n days = end_date - start_date\n return days",
"def hindu_day_count(cls, date):\n return date - cls.EPOCH",
"def days(self):\n ends_at = created_at = datetime.datetime.now().replace(tzinfo=utc)\n if self.created_at:\n created_at = self.created_at\n if self.ends_at:\n ends_at = self.ends_at\n return (ends_at - created_at).days",
"def days_between(date_1, date_2):\n date_1 = datetime.strptime(date_1, \"%d/%m/%Y\")\n date_2 = datetime.strptime(date_2, \"%d/%m/%Y\")\n days_between.time_between = abs((date_2 - date_1).days)",
"def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n\n days = 0\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\n year1, month1, day1 = nextDay(year1, month1, day1)\n days += 1\n return days",
"def days_registered(self):\n days_registered = (datetime.utcnow() - self.date_joined).days\n if not days_registered:\n return 1\n return days_registered",
"def calculate_days(time):\n return int(time / 86400)",
"def diff_dates(date1, date2):\n\n return abs(date2 - date1).days",
"def diff(self, other):\n self_copy = self.copy()\n other_copy = other.copy()\n count = 0\n if self_copy.is_before(other_copy):\n while self_copy != other_copy:\n self_copy.tomorrow()\n count += 1\n return -1 * count\n elif self_copy.is_after(other_copy):\n while self_copy != other_copy:\n other_copy.tomorrow()\n count += 1\n return count\n else:\n return count",
"def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n\n assert not dateIsBefore(year2, month2, day2, year1, month1, day1)\n days = 0\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\n year1, month1, day1 = nextDay(year1, month1, day1)\n days += 1\n return days"
]
| [
"0.7421999",
"0.70217705",
"0.7015891",
"0.70060354",
"0.6984977",
"0.6973993",
"0.69367176",
"0.69147235",
"0.6771477",
"0.6736787",
"0.6724909",
"0.67015916",
"0.6631658",
"0.6615054",
"0.65624464",
"0.64617056",
"0.64391494",
"0.64064854",
"0.63979155",
"0.6366386",
"0.6330748",
"0.63271546",
"0.62469625",
"0.6244629",
"0.6227546",
"0.61595494",
"0.6153234",
"0.6138424",
"0.61119103",
"0.6110258"
]
| 0.7299645 | 1 |
If the passed language is a variant, return its parent | def get_parent_language(lang: str) -> str:
is_language_variant = "-" in lang
if is_language_variant:
return lang[: lang.index("-")] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getParent():",
"def get_version(self, language):\n\n if isinstance(language, basestring):\n language = Language.get(language)\n\n for version in self.versions:\n if version.language == language:\n return version\n\n return None",
"def get_language(self, language):\n found_lang = None\n for lang in self.catalog['languages']:\n if lang['identifier'] == language['identifier']:\n found_lang = lang\n break\n if not found_lang:\n self.catalog['languages'].append(language)\n else:\n language = found_lang\n if 'resources' not in language:\n language['resources'] = []\n return language",
"def wordclass_parent(self):\n if self.wordclass is None:\n return None\n for ancestor in self.ancestors_descending():\n if ancestor.wordclass is not None:\n return ancestor",
"def parent(self):\n return self.get_parent().specific",
"def wordclass_parent_minus_one(self):\n if self.wordclass is None:\n return None\n for i, ancestor in enumerate(self.ancestors_descending()):\n if self.ancestors_descending()[i + 1].wordclass is not None:\n return ancestor",
"def parent(self) -> Optional[Heirarchical]:\n return None",
"def get_parent(self): # real signature unknown; restored from __doc__\n pass",
"def get_parent_technique_of():\n global parent_technique_of\n\n if not parent_technique_of:\n parent_technique_of = rsh.parent_technique_of(get_srcs())\n \n return parent_technique_of",
"def parent(self):\n return getattr(self, \"parent_%s\" % self.discriminator)",
"def parent(v=None):\n if v is None or isinstance(v, Widget):\n return v\n else:\n raise ValueError('parent must be a widget or None')",
"def parent(self, v):\n # method here",
"def findTypeParent(element, tag):\n \n p = element\n while True:\n p = p.getparent()\n if p.tag == tag:\n return p\n \n # Not found\n return None",
"def getParent(obj):",
"def find_parent(self):\n pass",
"def get_parent_object_of_type(self, meta_type):\n if hasattr(self.context, \"meta_type\") and self.context.meta_type == meta_type:\n return self.context\n obj = Acquisition.aq_inner(self.context)\n while not isinstance(obj, PloneSite):\n obj = Acquisition.aq_parent(obj)\n if hasattr(obj, \"meta_type\") and obj.meta_type == meta_type:\n return obj\n return None",
"def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None",
"def variant(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"variant\")",
"def get_parent(self, collation):\n if self.is_first_collation(collation):\n return None\n return self.get_collation(collation.header.parent_collation_hash)",
"def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language",
"def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent",
"def get_parent(self) :\n return self.parent",
"def test_language_parent_fallback(self): \n SiteWording.objects.filter(identifier='test_1', language__code='en-us').delete()\n \n activate('en-us')\n self.assertEqual(get_wording_text('test_1'), 'en')",
"def wordclass_parent_plus_one(self):\n if self.wordclass is None:\n return None\n for i, ancestor in enumerate(self.ancestors_descending()):\n if ancestor.wordclass is not None:\n try:\n return self.ancestors_descending()[i + 1]\n except IndexError:\n return None",
"def parent(self, node):\r\n return self.find_node(node).parent.content",
"def get_for_language(self, language):\r\n assert isinstance(language, str)\r\n\r\n language = language.strip().lower()\r\n if language in self.__languages:\r\n code = self.__languages[language]\r\n return code\r\n return None",
"def getLanguage(self):\n return self.getOrDefault(self.language)",
"def get_parent(self):\n return self.parent",
"def get_parent(self):\n return self.parent",
"def get_parent(self):\n return self.parent"
]
| [
"0.60797113",
"0.6018734",
"0.5930238",
"0.57886934",
"0.5780646",
"0.57438123",
"0.57289916",
"0.5696669",
"0.5694845",
"0.5691957",
"0.56833094",
"0.5668092",
"0.5651179",
"0.56136966",
"0.55905885",
"0.5588487",
"0.5560915",
"0.5552444",
"0.5551547",
"0.5506665",
"0.5490639",
"0.5484428",
"0.54705775",
"0.5457617",
"0.5396253",
"0.5384684",
"0.53629726",
"0.535792",
"0.535792",
"0.535792"
]
| 0.7379791 | 0 |
Return all message translations that are required on boot. | def get_messages_for_boot():
messages = get_all_translations(frappe.local.lang)
messages.update(get_dict_from_hooks("boot", None))
return messages | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def _extract_18n_messages():\n BabelCLI().run(['', 'extract', '-F', 'babel.cfg', '-k', '_t', '--no-location', '--sort-output',\n '--omit-header', '-o', os.path.join(I18N_PATH, 'messages.pot'), 'aliquis'])",
"def translations(cls) -> list:\n if cls._translations is None:\n msg = (\n \"translation location not defined, the file location\"\n \"required as a list of strings\"\n )\n run_log.error(msg)\n raise ConfigurationNotFullyPopulated(msg)\n return cls._translations",
"def getMessages(self):\n messages = []\n if capi.pivot_languages:\n is_translated = False\n context_lang = self.context.language\n for lang in capi.pivot_languages:\n if lang == context_lang:\n is_translated = True\n break\n trans = translation.get_field_translations(self.context, lang)\n if trans:\n is_translated = True\n break\n if not is_translated:\n messages.append(\n {\"level\": \"warning\", \"header\": _(\"Pivot Translation\"), \n \"text\": u\"This document has no pivot translation.\"}\n )\n return messages",
"def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"",
"def load_messages(language):\n\tfrappe.clear_cache()\n\tset_default_language(get_language_code(language))\n\tfrappe.db.commit()\n\tsend_translations(get_messages_for_boot())\n\treturn frappe.local.lang",
"def i18ninit():\n click.echo('-> Initializing i18n message files...')\n _extract_18n_messages()\n langs = app.config['BABEL_LANGUAGES']\n for lang in langs:\n _write_message_files(lang, command='init')\n click.echo('-> i18n message files initialized.')\n click.echo('You should now edit translations in following files:')\n for lang in langs:\n click.echo(os.path.join(I18N_PATH, lang, 'LC_MESSAGES', 'messages.po'))",
"def getAvailableTranslations(self):\n\n supported = set()\n for project in self.__projects:\n supported.update(project.getTranslations().keys())\n\n return supported",
"def translations(self):\r\n return Translations(self)",
"def translations(self):\r\n return Translations(self)",
"def _initalize_mapping():\n linter = lint.PyLinter()\n linter.load_defaults()\n linter.load_default_plugins()\n\n mapping = {\n message.msgid: message.symbol\n for message in linter.msgs_store.messages\n }\n\n return mapping",
"def test_supported_translations_retrieval(self):\n\t\t\n\t\thelpers.find_supported_translations()\n\t\tself.assertTrue(helpers.get_supported_translations() != 0)",
"def test_get_translation_resources(self):\n pass",
"def __availableTranslationsLoaded(self):\n origLanguage = self.__plugin.getPreferences(\"OriginalLanguage\")\n transLanguage = self.__plugin.getPreferences(\"TranslationLanguage\")\n \n self.__updateLanguages()\n \n origIndex = self.origLanguageComboBox.findData(origLanguage)\n self.origLanguageComboBox.setCurrentIndex(origIndex)\n self.on_origLanguageComboBox_currentIndexChanged(origIndex)\n self.transLanguageComboBox.setCurrentIndex(\n self.transLanguageComboBox.findData(transLanguage))",
"def localize(self, msg):\n return self.translations.get(msg, msg)",
"def localize(self, msg):\n return self.translations.get(msg, msg)",
"def get_translation(self):",
"def TitleTranslations(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('title_translations', default)\n return [HEP.TitleTranslationObject(i) for i in tmp]",
"def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _",
"def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp",
"def get_all_translations(lang: str) -> dict[str, str]:\n\tif not lang:\n\t\treturn {}\n\n\tdef _merge_translations():\n\t\tall_translations = get_translations_from_apps(lang).copy()\n\t\ttry:\n\t\t\t# get user specific translation data\n\t\t\tuser_translations = get_user_translations(lang)\n\t\t\tall_translations.update(user_translations)\n\t\texcept Exception:\n\t\t\tpass\n\n\t\treturn all_translations\n\n\ttry:\n\t\treturn frappe.cache.hget(MERGED_TRANSLATION_KEY, lang, generator=_merge_translations)\n\texcept Exception:\n\t\t# People mistakenly call translation function on global variables\n\t\t# where locals are not initalized, translations dont make much sense there\n\t\treturn {}",
"def gettext(self, message):\n if self._translations.has_key(message):\n return self._translations[message]\n return super(Translations, self).gettext(message)",
"def test_default_translations(self):\n\t\t\n\t\tself.assertTrue(data.get_default_translation('Catholicism', 3) == 'DRA')\n\t\tself.assertTrue(data.get_default_translation('Christianity', 3) == 'ESV')",
"def translated_locales(self):\r\n return sorted(set(self.locales) - set([self.source_locale]))",
"def lang_init():\n _locale, _encoding = locale.getdefaultlocale() # Default system values\n path = os.path.join(os.path.dirname(sys.argv[0]), 'localization/lang')\n if os.path.exists(path):\n lang = gettext.translation('UnrulyPuzzlePython', path, [_locale],\n fallback=True)\n else:\n lang = gettext.translation('UnrulyPuzzlePython', path,\n fallback=True)\n return lang.gettext",
"def bot_locales(self) -> pulumi.Output[Optional[Sequence['outputs.BotLocale']]]:\n return pulumi.get(self, \"bot_locales\")",
"def getLocales(self):\n pass",
"def ugettext(self):\n return self._translations.gettext",
"def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext",
"def prepare_translations():\n output_fn = '/home/jelle/Desktop/django.csv'\n local('po2csv apps/dasa/locale/id/LC_MESSAGES/django.po %(output_fn)s' % locals())\n print 'output written to %(output_fn)s' % locals()"
]
| [
"0.66692555",
"0.6611699",
"0.65892076",
"0.6535671",
"0.6471713",
"0.636463",
"0.6295026",
"0.6211213",
"0.6134869",
"0.6134869",
"0.6019158",
"0.5999323",
"0.59798187",
"0.59468955",
"0.594278",
"0.594278",
"0.59133863",
"0.5900633",
"0.5893161",
"0.5886416",
"0.5883705",
"0.5846922",
"0.5810157",
"0.5792819",
"0.57695115",
"0.5696963",
"0.56834334",
"0.56776357",
"0.56549287",
"0.56416976"
]
| 0.7995621 | 0 |
Load and return the entire translations dictionary for a language from apps + user translations. | def get_all_translations(lang: str) -> dict[str, str]:
if not lang:
return {}
def _merge_translations():
all_translations = get_translations_from_apps(lang).copy()
try:
# get user specific translation data
user_translations = get_user_translations(lang)
all_translations.update(user_translations)
except Exception:
pass
return all_translations
try:
return frappe.cache.hget(MERGED_TRANSLATION_KEY, lang, generator=_merge_translations)
except Exception:
# People mistakenly call translation function on global variables
		# where locals are not initialized; translations don't make much sense there
return {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_lang(lang, apps=None):\n\n\tif lang=='en':\n\t\treturn {}\n\n\tout = frappe.cache().hget(\"lang_full_dict\", lang, shared=True)\n\n\tif not out:\n\t\tout = {}\n\t\tfor app in (apps or frappe.get_all_apps(True)):\n\t\t\tpath = os.path.join(frappe.get_pymodule_path(app), \"translations\", lang + \".json\")\n\t\t\tout.update(get_translation_dict_from_file(path, lang, app) or {})\n\n\t\tif '-' in lang:\n\t\t\tparent = lang.split('-')[0]\n\t\t\tparent_out = load_lang(parent)\n\t\t\tparent_out.update(out)\n\t\t\tout = parent_out\n\n\t\tfrappe.cache().hset(\"lang_full_dict\", lang, out, shared=True)\n\n\treturn out or {}",
"def get_translations_from_apps(lang, apps=None):\n\n\tif lang == \"en\":\n\t\treturn {}\n\n\ttranslations = {}\n\tfor app in apps or frappe.get_installed_apps(_ensure_on_bench=True):\n\t\tpath = frappe.get_app_path(app, \"translations\", lang + \".csv\")\n\t\ttranslations.update(get_translation_dict_from_file(path, lang, app) or {})\n\tif \"-\" in lang:\n\t\tparent = lang.split(\"-\", 1)[0]\n\t\tparent_translations = get_translations_from_apps(parent)\n\t\tparent_translations.update(translations)\n\t\treturn parent_translations\n\n\treturn translations",
"def update_translations(lang, untranslated_file, translated_file, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\n\tdef restore_newlines(s):\n\t\treturn (\n\t\t\ts.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t.replace(\"| | |\", \"\\n\")\n\t\t)\n\n\ttranslation_dict = {}\n\tfor key, value in zip(\n\t\tfrappe.get_file_items(untranslated_file, ignore_empty_lines=False),\n\t\tfrappe.get_file_items(translated_file, ignore_empty_lines=False),\n\t):\n\n\t\t# undo hack in get_untranslated\n\t\ttranslation_dict[restore_newlines(key)] = restore_newlines(value)\n\n\tfull_dict.update(translation_dict)\n\tapps = frappe.get_all_apps(True)\n\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tfor app_name in apps:\n\t\twrite_translations_file(app_name, lang, full_dict)",
"def get_untranslated(lang, untranslated_file=None, get_all=False, app=None, write=True):\n\tclear_cache()\n\n\tmessages = []\n\tuntranslated = defaultdict(lambda: defaultdict(dict))\n\tif app:\n\t\tmessages = get_messages_for_app(app)\n\telse:\n\t\tfor app in frappe.get_all_apps(True):\n\t\t\tmessages.extend(get_messages_for_app(app))\n\n\tmessages = messages\n\n\tdef escape_newlines(s):\n\t\treturn (s.replace(\"\\\\\\n\", \"|||||\")\n\t\t\t\t.replace(\"\\\\n\", \"||||\")\n\t\t\t\t.replace(\"\\n\", \"|||\"))\n\n\tfull_dict = load_lang(lang, [app])\n\tcomparison_dict = reduce(lambda a,b: a.update(b) or a, list(full_dict.values()), {})\n\tif get_all:\n\t\tprint(str(len(messages)) + \" messages\")\n\t\tfor m in messages:\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\tif write:\n\t\t\twrite_json_file(untranslated_file, untranslated)\n\t\telse:\n\t\t\treturn untranslated\n\n\telse:\n\t\tmessages_count = 0\n\t\tuntranslated_count = 0\n\n\t\tfor m in messages:\n\t\t\tmessages_count += 1\n\t\t\tif m[0] and m[0] not in full_dict:\n\t\t\t\tuntranslated_count += 1\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\t\telif m[0] and m[1] not in full_dict[m[0]]:\n\t\t\t\tuntranslated_count += 1\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\tif untranslated:\n\t\t\tprint(str(untranslated_count) + \" missing translations of \" + str(messages_count))\n\t\t\t\n\t\t\tif write:\n\t\t\t\twrite_json_file(untranslated_file, untranslated)\n\t\t\telse:\n\t\t\t\treturn untranslated\n\n\t\telse:\n\t\t\tprint(\"all translated!\")",
"def get_translation_dict_from_file(path, lang, app):\n\tjson_content = {}\n\tif os.path.exists(path):\n\t\twith open(path, 'r') as f:\n\t\t\tjson_content = json.loads(f.read())\n\n\treturn json_content",
"def get_untranslated(lang, untranslated_file, get_all=False, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tapps = frappe.get_all_apps(True)\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tmessages = []\n\tuntranslated = []\n\tfor app_name in apps:\n\t\tmessages.extend(get_messages_for_app(app_name))\n\n\tmessages = deduplicate_messages(messages)\n\n\tdef escape_newlines(s):\n\t\treturn s.replace(\"\\\\\\n\", \"|||||\").replace(\"\\\\n\", \"||||\").replace(\"\\n\", \"|||\")\n\n\tif get_all:\n\t\tprint(str(len(messages)) + \" messages\")\n\t\twith open(untranslated_file, \"wb\") as f:\n\t\t\tfor m in messages:\n\t\t\t\t# replace \\n with ||| so that internal linebreaks don't get split\n\t\t\t\tf.write((escape_newlines(m[1]) + os.linesep).encode(\"utf-8\"))\n\telse:\n\t\tfull_dict = get_all_translations(lang)\n\n\t\tfor m in messages:\n\t\t\tif not full_dict.get(m[1]):\n\t\t\t\tuntranslated.append(m[1])\n\n\t\tif untranslated:\n\t\t\tprint(str(len(untranslated)) + \" missing translations of \" + str(len(messages)))\n\t\t\twith open(untranslated_file, \"wb\") as f:\n\t\t\t\tfor m in untranslated:\n\t\t\t\t\t# replace \\n with ||| so that internal linebreaks don't get split\n\t\t\t\t\tf.write((escape_newlines(m) + os.linesep).encode(\"utf-8\"))\n\t\telse:\n\t\t\tprint(\"all translated!\")",
"def update_translations(lang, translated_data, app, is_file=True):\n\tclear_cache()\n\tfull_dict = load_lang(lang, [app])\n\n\tif full_dict:\n\t\tdef restore_newlines(s):\n\t\t\treturn (s.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t\t\t.replace(\"| | |\", \"\\n\"))\n\n\t\ttranslation_dict = defaultdict(dict)\n\t\tfor k in full_dict:\n\t\t\tfor m in full_dict[k]:\n\t\t\t\ttranslation_dict[k][m] = full_dict[restore_newlines(k)][restore_newlines(m)]\n\n\t\tif is_file:\n\t\t\tnew_translations = frappe._dict(frappe.get_file_json(translated_data))\n\t\telse:\n\t\t\tnew_translations = translated_data\n\n\t\tfor k in new_translations:\n\t\t\tfor m in new_translations[k]:\n\t\t\t\tif new_translations[k][m] != \"\":\n\t\t\t\t\ttranslation_dict[k][restore_newlines(m)] = restore_newlines(new_translations[k][m])\n\n\t\twrite_translations_file(app, lang, translation_dict)",
"def import_translations(lang, path):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\tfull_dict.update(get_translation_dict_from_file(path, lang, \"import\"))\n\n\tfor app in frappe.get_all_apps(True):\n\t\twrite_translations_file(app, lang, full_dict)",
"def get_reduced_dict(lang):\n\tif not lang:\n\t\treturn {}\n\t# found in local, return!\n\tif getattr(frappe.local, 'lang_reduced_dict', None) and frappe.local.lang_reduced_dict.get(lang, None):\n\t\treturn frappe.local.lang_reduced_dict\n\n\tfrappe.local.lang_reduced_dict = reduce(lambda a,b: a.update(b) or a, list(load_lang(lang).values()), {})\n\n\ttry:\n\t\t# get user specific translation data\n\t\tuser_translations = get_user_translations(lang)\n\texcept Exception:\n\t\tuser_translations = None\n\n\tif user_translations:\n\t\tfrappe.local.lang_reduced_dict.update(user_translations)\n\n\treturn frappe.local.lang_reduced_dict",
"def get_full_dict(lang):\n\tif not lang:\n\t\treturn {}\n\t# found in local, return!\n\tif getattr(frappe.local, 'lang_full_dict', None) and frappe.local.lang_full_dict.get(lang, None):\n\t\treturn frappe.local.lang_full_dict\n\n\tfrappe.local.lang_full_dict = load_lang(lang)\n\n\treturn frappe.local.lang_full_dict",
"def load_messages(language):\n\tfrappe.clear_cache()\n\tset_default_language(get_language_code(language))\n\tfrappe.db.commit()\n\tsend_translations(get_messages_for_boot())\n\treturn frappe.local.lang",
"def get_translation_dict_from_file(path, lang, app, throw=False) -> dict[str, str]:\n\ttranslation_map = {}\n\tif os.path.exists(path):\n\t\tcsv_content = read_csv_file(path)\n\n\t\tfor item in csv_content:\n\t\t\tif len(item) == 3 and item[2]:\n\t\t\t\tkey = item[0] + \":\" + item[2]\n\t\t\t\ttranslation_map[key] = strip(item[1])\n\t\t\telif len(item) in [2, 3]:\n\t\t\t\ttranslation_map[item[0]] = strip(item[1])\n\t\t\telif item:\n\t\t\t\tmsg = \"Bad translation in '{app}' for language '{lang}': {values}\".format(\n\t\t\t\t\tapp=app, lang=lang, values=cstr(item)\n\t\t\t\t)\n\t\t\t\tfrappe.log_error(message=msg, title=\"Error in translation file\")\n\t\t\t\tif throw:\n\t\t\t\t\tfrappe.throw(msg, title=\"Error in translation file\")\n\n\treturn translation_map",
"def load_language(self, file: str):\n from App import App\n\n try:\n qm_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'translate', '{0}.qm'.format(file))\n App.instance().load_language(qm_file)\n\n app_doc_data = AppDocData.instance()\n configs = [Config('app', 'language', file)]\n app_doc_data.save_app_configs(configs)\n finally:\n self.retranslateUi(self)",
"def _read_translations(self):\n print('Reading original translations')\n self.translations_map = {}\n n_translations = 0\n with open(os.path.join(self.src_dir, 'translations.txt'),\n 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self.translations_map.setdefault(\n row['trans_id'], {})[row['lang']] = row['translation']\n n_translations += 1\n print('\\ttotal original translations: %s' % n_translations)",
"def __loadTranslator(self):\n if self.__ui is not None:\n loc = self.__ui.getLocale()\n if loc and loc != \"C\":\n locale_dir = os.path.join(\n os.path.dirname(__file__), \"ProjectDjango\", \"i18n\")\n translation = \"django_{0}\".format(loc)\n translator = QTranslator(None)\n loaded = translator.load(translation, locale_dir)\n if loaded:\n self.__translator = translator\n e5App().installTranslator(self.__translator)\n else:\n print(\"Warning: translation file '{0}' could not be\"\n \" loaded.\".format(translation))\n print(\"Using default.\")",
"def loadLanguage(request, lang):\n request.clock.start('loadLanguage')\n from MoinMoin import caching\n # farm notice: for persistent servers, only the first wiki requesting some language\n # gets its cache updated - a bit strange and redundant, but no problem.\n cache = caching.CacheEntry(request, arena='i18n', key=lang)\n import MoinMoin.request\n langfilename = os.path.join(MoinMoin.request.prefix + \"/i18n\", '%s.py' % filename(lang))\n needsupdate = cache.needsUpdate(langfilename)\n if debug: request.log(\"i18n: langfilename %s needsupdate %d\" % (langfilename, needsupdate))\n if not needsupdate:\n try:\n (uc_texts, uc_unformatted) = pickle.loads(cache.content())\n except (IOError, ValueError, pickle.UnpicklingError): # bad pickle data, no pickle\n if debug: request.log(\"i18n: pickle %s load failed\" % lang)\n needsupdate = 1\n\n if needsupdate: \n from MoinMoin.util import pysupport\n lang_module = \"MoinMoin.i18n.%s\" % filename(lang)\n try:\n # Language module without text dict will raise AttributeError\n texts = pysupport.importName(lang_module, \"text\")\n except ImportError:\n if debug: request.log(\"i18n: import of module %s failed.\" % lang_module)\n request.clock.stop('loadLanguage')\n return None, None\n meta = pysupport.importName(lang_module, \"meta\") \n encoding = meta['encoding']\n\n # convert to unicode\n if debug: request.log(\"i18n: processing unformatted texts of lang %s\" % lang)\n uc_unformatted = {}\n for key, text in texts.items():\n ukey = key.decode(encoding)\n utext = text.decode(encoding)\n uc_unformatted[ukey] = utext\n\n if meta.get('wikimarkup', False):\n if debug: request.log(\"i18n: processing formatted texts of lang %s\" % lang)\n # use the wiki parser now to replace some wiki markup with html\n uc_texts = {}\n for key, text in uc_unformatted.items():\n try:\n uc_texts[key] = formatMarkup(request, text)\n except: # infinite recursion or crash\n if debug:\n request.log(\"i18n: crashes in language %s on string: %s\" % (lang, text))\n uc_texts[key] = \"%s*\" % text\n else:\n uc_texts = uc_unformatted\n if debug: request.log(\"i18n: dumping lang %s\" % lang)\n cache.update(pickle.dumps((uc_texts, uc_unformatted), PICKLE_PROTOCOL))\n request.clock.stop('loadLanguage')\n return uc_texts, uc_unformatted",
"def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"",
"def __availableTranslationsLoaded(self):\n origLanguage = self.__plugin.getPreferences(\"OriginalLanguage\")\n transLanguage = self.__plugin.getPreferences(\"TranslationLanguage\")\n \n self.__updateLanguages()\n \n origIndex = self.origLanguageComboBox.findData(origLanguage)\n self.origLanguageComboBox.setCurrentIndex(origIndex)\n self.on_origLanguageComboBox_currentIndexChanged(origIndex)\n self.transLanguageComboBox.setCurrentIndex(\n self.transLanguageComboBox.findData(transLanguage))",
"def translate(lang):\n\n\tlangfilename = os.path.join(\"data\", \"translations\", lang + \".json\")\n\tif os.path.exists(langfilename):\n\t\twith open(langfilename, 'r') as langfile:\n\t\t\ttranslations = json.loads(langfile.read())\n\telse:\n\t\ttranslations = {}\n\n\twith open(os.path.join(\"data\", \"translations\", \"message_list.json\"), \"r\") as message_list_file:\n\t\tmessages = json.loads(message_list_file.read())\n\n\tcnt = 0\n\tfor m in messages:\n\t\tcnt += 1\n\t\t#if cnt > 15: break\n\t\tif not translations.get(m):\n\t\t\tprint 'translating: ' + m\n\t\t\tresponse = requests.get(\"\"\"https://www.googleapis.com/language/translate/v2\"\"\",\n\t\t\t\tparams = {\n\t\t\t\t\t\"key\": conf.google_api_key,\n\t\t\t\t\t\"source\": \"en\",\n\t\t\t\t\t\"target\": lang,\n\t\t\t\t\t\"q\": m\n\t\t\t\t}, verify=False)\n\n\t\t\tt = response.json[\"data\"][\"translations\"][0][\"translatedText\"] or m\n\t\t\ttranslations[m] = t.encode('utf-8')\n\n\t\t\twith open(langfilename, 'w') as langfile:\n\t\t\t\tlangfile.write(json.dumps(translations, indent=1, sort_keys=True))",
"def make_dict_from_messages(messages, full_dict=None, load_user_translation=True):\n\tout = {}\n\tif full_dict is None:\n\t\tif load_user_translation:\n\t\t\tfull_dict = get_all_translations(frappe.local.lang)\n\t\telse:\n\t\t\tfull_dict = get_translations_from_apps(frappe.local.lang)\n\n\tfor m in messages:\n\t\tif m[1] in full_dict:\n\t\t\tout[m[1]] = full_dict[m[1]]\n\t\t# check if msg with context as key exist eg. msg:context\n\t\tif len(m) > 2 and m[2]:\n\t\t\tkey = m[1] + \":\" + m[2]\n\t\t\tif full_dict.get(key):\n\t\t\t\tout[key] = full_dict[key]\n\n\treturn out",
"def load_word_pairs(languages: ty.Tuple[str, str]) -> ty.Dict[str, ty.List[str]]:\n logg = logging.getLogger(f\"c.{__name__}.load_word_pairs\")\n # logg.setLevel(\"DEBUG\")\n logg.debug(\"Start load_word_pairs\")\n\n lang0 = pycountry.languages.get(name=languages[0])\n lang_alpha2_tag0 = lang0.alpha_2\n lang1 = pycountry.languages.get(name=languages[1])\n lang_alpha2_tag1 = lang1.alpha_2\n\n word_pairs_folder = get_package_folders(\"word_pairs\")\n lang_pairs_folder = word_pairs_folder / f\"{lang_alpha2_tag0}_{lang_alpha2_tag1}\"\n\n word_pairs_name_template = f\"{lang_alpha2_tag0}_{lang_alpha2_tag1}_{{}}.json\"\n\n all_word_pairs: ty.Dict[str, ty.List[str]] = {}\n\n for letter in ascii_lowercase:\n word_pairs_name = word_pairs_name_template.format(letter)\n word_pairs_path = lang_pairs_folder / word_pairs_name\n logg.debug(f\"word_pairs_path: {word_pairs_path}\")\n\n word_pairs_letter = json.loads(word_pairs_path.read_text(encoding=\"utf-8\"))\n\n for word0 in word_pairs_letter:\n\n # filter entries with more than one word\n if \" \" in word0:\n continue\n\n # add the whole list to the known dict\n all_word_pairs[word0] = word_pairs_letter[word0]\n\n logg.info(f\"len(all_word_pairs): {len(all_word_pairs)}\")\n\n return all_word_pairs",
"def load_language(self, file):\n from Models.Configuration import Configuration\n\n try:\n qm_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'translate', '{0}.qm'.format(file))\n QtWidgets.qApp.load_language(qm_file)\n\n app_doc_data = AppDocData.instance()\n configs = [Configuration('app', 'language', file)]\n app_doc_data.save_app_configs(configs)\n\n for action in self.menuLanguage.actions():\n if action.text().lower() == file.lower():\n continue\n action.setChecked(False)\n finally:\n self.retranslateUi(self)\n self.propertyTableWidget.retranslateUi()",
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def get_language_list_gui():\n _ = get_gettext()\n language = {}\n language['connect'] = _(\"Connect\")\n language['ip'] = _(\"IP\")\n language['netmask'] = _(\"Netmask\")\n language['gateway'] = _('Gateway')\n language['dns'] = _('DNS')\n language['use_static_ip'] = _('Use Static IPs')\n language['use_static_dns'] = _('Use Static DNS')\n language['use_encryption'] = _('Use Encryption')\n language['advanced_settings'] = _('Advanced Settings')\n language['wired_network'] = _('Wired Network')\n language['wired_network_instructions'] = _('To connect to a wired network,'\n ' you must create a network profile. To create a network profile, type a'\n ' name that describes this network, and press Add.')\n language['automatic_connect'] = _('Automatically connect to this network')\n language['secured'] = _('Secured')\n language['unsecured'] = _('Unsecured')\n language['channel'] = _('Channel')\n language['preferences'] = _('Preferences')\n language['wpa_supplicant_driver'] = _('WPA Supplicant Driver')\n language['wireless_interface'] = _('Wireless Interface')\n language['wired_interface'] = _('Wired Interface')\n language['hidden_network'] = _('Hidden Network')\n language['hidden_network_essid'] = _('Hidden Network ESSID')\n language['connected_to_wireless'] = _('Connected to $A at $B (IP: $C)')\n language['connected_to_wired'] = _('Connected to wired network (IP: $A)')\n language['not_connected'] = _('Not connected')\n language['no_wireless_networks_found'] = _('No wireless networks found.')\n language['killswitch_enabled'] = _('Wireless Kill Switch Enabled')\n language['key'] = _('Key')\n language['username'] = _('Username')\n language['password'] = _('Password')\n language['anonymous_identity'] = _('Anonymous Identity')\n language['identity'] = _('Identity')\n language['authentication'] = _('Authentication')\n language['path_to_pac_file'] = _('Path to PAC File')\n language['select_a_network'] = _('Choose from the networks below:')\n language['connecting'] = _('Connecting...')\n language['wired_always_on'] = _('Always show wired interface')\n language['auto_reconnect'] = _('Automatically reconnect on connection loss')\n language['create_adhoc_network'] = _('Create an Ad-Hoc Network')\n language['essid'] = _('ESSID')\n language['use_wep_encryption'] = _('Use Encryption (WEP only)')\n language['before_script'] = _('Run script before connect')\n language['after_script'] = _('Run script after connect')\n language['disconnect_script'] = _('Run disconnect script')\n language['script_settings'] = _('Scripts')\n language['use_ics'] = _('Activate Internet Connection Sharing')\n language['madwifi_for_adhoc'] = _('Check if using madwifi/atheros drivers')\n language['default_wired'] = _('Use as default profile (overwrites any previous default)')\n language['use_debug_mode'] = _('Enable debug mode')\n language['use_global_dns'] = _('Use global DNS servers')\n language['use_default_profile'] = _('Use default profile on wired autoconnect')\n language['show_wired_list'] = _('Prompt for profile on wired autoconnect')\n language['use_last_used_profile'] = _('Use last used profile on wired autoconnect')\n language['choose_wired_profile'] = _('Select or create a wired profile to connect with')\n language['wired_network_found'] = _('Wired connection detected')\n language['stop_showing_chooser'] = _('Stop Showing Autoconnect pop-up temporarily')\n language['display_type_dialog'] = _('Use dBm to measure signal strength')\n language['scripts'] = _('Scripts')\n language['invalid_address'] = _('Invalid address in $A 
entry.')\n language['global_settings'] = _('Use these settings for all networks sharing this essid')\n language['encrypt_info_missing'] = _('Required encryption information is missing.')\n language['enable_encryption'] = _('This network requires encryption to be enabled.')\n language['wicd_auto_config'] = _('Automatic (recommended)')\n language[\"gen_settings\"] = _(\"General Settings\")\n language[\"ext_programs\"] = _(\"External Programs\")\n language[\"dhcp_client\"] = _(\"DHCP Client\")\n language[\"wired_detect\"] = _(\"Wired Link Detection\")\n language[\"route_flush\"] = _(\"Route Table Flushing\")\n language[\"backend\"] = _(\"Backend\")\n language[\"backend_alert\"] = _(\"Changes to your backend won't occur until the daemon is restarted.\")\n language['0'] = _('0')\n language['1'] = _('1')\n language['2'] = _('2')\n language['3'] = _('3')\n language['4'] = _('4')\n language['5'] = _('5')\n language['6'] = _('6')\n language['7'] = _('7')\n language['8'] = _('8')\n language['9'] = _('9')\n language['interface_down'] = _('Putting interface down...')\n language['resetting_ip_address'] = _('Resetting IP address...')\n language['interface_up'] = _('Putting interface up...')\n language['setting_encryption_info'] = _('Setting encryption info')\n language['removing_old_connection'] = _('Removing old connection...')\n language['generating_psk'] = _('Generating PSK...')\n language['generating_wpa_config'] = _('Generating WPA configuration file...')\n language['flushing_routing_table'] = _('Flushing the routing table...')\n language['configuring_interface'] = _('Configuring wireless interface...')\n language['validating_authentication'] = _('Validating authentication...')\n language['setting_broadcast_address'] = _('Setting broadcast address...')\n language['setting_static_dns'] = _('Setting static DNS servers...')\n language['setting_static_ip'] = _('Setting static IP addresses...')\n language['running_dhcp'] = _('Obtaining IP address...')\n language['dhcp_failed'] = _('Connection Failed: Unable to Get IP Address')\n language['aborted'] = _('Connection Cancelled')\n language['bad_pass'] = _('Connection Failed: Bad password')\n language['done'] = _('Done connecting...')\n return language",
"def get_dict(fortype, name=None):\n\tfortype = fortype.lower()\n\tcache = frappe.cache()\n\tasset_key = fortype + \":\" + (name or \"-\")\n\ttranslation_assets = cache.hget(\"translation_assets\", frappe.local.lang, shared=True) or {}\n\n\tif not asset_key in translation_assets:\n\t\tif fortype==\"doctype\":\n\t\t\tmessages = get_messages_from_doctype(name)\n\t\telif fortype==\"page\":\n\t\t\tmessages = get_messages_from_page(name)\n\t\telif fortype==\"report\":\n\t\t\tmessages = get_messages_from_report(name)\n\t\telif fortype==\"include\":\n\t\t\tmessages = get_messages_from_include_files()\n\t\telif fortype==\"jsfile\":\n\t\t\tmessages = get_messages_from_file(name)\n\t\telif fortype==\"template\":\n\t\t\tmessages = get_all_messages_from_template_files()\n\t\telif fortype==\"boot\":\n\t\t\tmessages = get_messages_from_include_files()\n\t\t\tmessages += get_all_messages_from_js_files()\n\t\t\tmessages += frappe.db.sql(\"select concat('Print Format: ', name), name from `tabPrint Format`\")\n\t\t\tmessages += frappe.db.sql(\"select concat('DocType: ', name), name from tabDocType\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Role: ', name), name from tabRole\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Module: ', name), name from `tabModule Def`\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Page: ', name), name from `tabPage`\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Report: ', name), name from `tabReport`\")\n\t\t\tmessages += \"null\"\n\n\t\tmessage_dict = make_dict_from_messages(messages)\n\t\tmessage_dict.update(get_dict_from_hooks(fortype, name))\n\n\t\ttry:\n\t\t\t# get user specific translation data\n\t\t\tuser_translations = get_user_translations(frappe.local.lang)\n\t\texcept Exception:\n\t\t\tuser_translations = None\n\n\t\tif user_translations:\n\t\t\tmessage_dict.update(user_translations)\n\n\t\ttranslation_assets[asset_key] = message_dict\n\n\t\tcache.hset(\"translation_assets\", frappe.local.lang, translation_assets, shared=True)\n\n\treturn translation_assets[asset_key]",
"def get_localization(self, language: str) -> Localization:\n ...",
"def _translation(basename, props_dir, languages, key_language=None):\n props_dir = os.path.abspath(props_dir)\n if os.path.isfile(props_dir):\n props_dir = os.path.dirname(props_dir)\n trans = None\n use_key_as_lang = False\n for lang in languages:\n while True:\n trans = _try_file \\\n (props_dir, basename + \"_\" + lang + \".properties\", lang, trans)\n # Use identity mapping instead (or in addition to) file?\n if lang == key_language:\n use_key_as_lang = True\n # We need no more fallbacks after identity mapping\n break;\n lang_up = lang.rsplit(\"_\", 1)[0]\n if lang_up == lang:\n break\n lang = lang_up\n # Finally try properties file without language specification\n trans = _try_file(props_dir, basename + \".properties\", None, trans)\n if trans:\n trans._add_fallback_unchecked(BaseTranslations()) # last resort\n else:\n if use_key_as_lang:\n trans = BaseTranslations(key_language)\n else:\n trans = BaseTranslations()\n return trans",
"def get_pers_trans(lang: Lang) -> dict:\n return read_json(f'languages/{lang}/persons')",
"def __init__(self, language=None):\n self.language = language\n self.translations = {}",
"def get_translation_from_cache ( self, text, src_lang, target_lang ):\n return self.app_cache.get_translation_from_cache ( text, src_lang, target_lang )"
]
| [
"0.74525684",
"0.69477826",
"0.66838795",
"0.6574711",
"0.65717417",
"0.6553139",
"0.6467629",
"0.6460375",
"0.6457447",
"0.6336055",
"0.63262165",
"0.62614495",
"0.6216522",
"0.61567247",
"0.6142031",
"0.61204106",
"0.60881054",
"0.6085042",
"0.60762614",
"0.5987815",
"0.5964085",
"0.59592104",
"0.5910912",
"0.5886689",
"0.58141893",
"0.5788462",
"0.5712479",
"0.5710332",
"0.570151",
"0.56907755"
]
| 0.7437248 | 1 |
	Combine all translations from `.csv` files in all `apps`. For derivative languages (es-GT), take translations from the base language (es) and then update translations from the child (es-GT)
if lang == "en":
return {}
translations = {}
for app in apps or frappe.get_installed_apps(_ensure_on_bench=True):
path = frappe.get_app_path(app, "translations", lang + ".csv")
translations.update(get_translation_dict_from_file(path, lang, app) or {})
if "-" in lang:
parent = lang.split("-", 1)[0]
parent_translations = get_translations_from_apps(parent)
parent_translations.update(translations)
return parent_translations
return translations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_translations(lang, untranslated_file, translated_file, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\n\tdef restore_newlines(s):\n\t\treturn (\n\t\t\ts.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t.replace(\"| | |\", \"\\n\")\n\t\t)\n\n\ttranslation_dict = {}\n\tfor key, value in zip(\n\t\tfrappe.get_file_items(untranslated_file, ignore_empty_lines=False),\n\t\tfrappe.get_file_items(translated_file, ignore_empty_lines=False),\n\t):\n\n\t\t# undo hack in get_untranslated\n\t\ttranslation_dict[restore_newlines(key)] = restore_newlines(value)\n\n\tfull_dict.update(translation_dict)\n\tapps = frappe.get_all_apps(True)\n\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tfor app_name in apps:\n\t\twrite_translations_file(app_name, lang, full_dict)",
"def migrate_translations(source_app, target_app):\n\tclear_cache()\n\tstrings_in_source_app = [m[1] for m in frappe.translate.get_messages_for_app(source_app)]\n\tstrings_in_target_app = [m[1] for m in frappe.translate.get_messages_for_app(target_app)]\n\n\tstrings_in_target_app_but_not_in_source_app = list(\n\t\tset(strings_in_target_app) - set(strings_in_source_app)\n\t)\n\n\tlanguages = frappe.translate.get_all_languages()\n\n\tsource_app_translations_dir = frappe.get_app_path(source_app, \"translations\")\n\ttarget_app_translations_dir = frappe.get_app_path(target_app, \"translations\")\n\n\tif not os.path.exists(target_app_translations_dir):\n\t\tos.makedirs(target_app_translations_dir)\n\n\tfor lang in languages:\n\t\tsource_csv = os.path.join(source_app_translations_dir, lang + \".csv\")\n\n\t\tif not os.path.exists(source_csv):\n\t\t\tcontinue\n\n\t\ttarget_csv = os.path.join(target_app_translations_dir, lang + \".csv\")\n\t\ttemp_csv = os.path.join(source_app_translations_dir, \"_temp.csv\")\n\n\t\twith open(source_csv) as s, open(target_csv, \"a+\") as t, open(temp_csv, \"a+\") as temp:\n\t\t\tsource_reader = reader(s, lineterminator=\"\\n\")\n\t\t\ttarget_writer = writer(t, lineterminator=\"\\n\")\n\t\t\ttemp_writer = writer(temp, lineterminator=\"\\n\")\n\n\t\t\tfor row in source_reader:\n\t\t\t\tif row[0] in strings_in_target_app_but_not_in_source_app:\n\t\t\t\t\ttarget_writer.writerow(row)\n\t\t\t\telse:\n\t\t\t\t\ttemp_writer.writerow(row)\n\n\t\tif not os.path.getsize(target_csv):\n\t\t\tos.remove(target_csv)\n\t\tos.remove(source_csv)\n\t\tos.rename(temp_csv, source_csv)",
"def translate_elements(filename = \"en_csv.csv\", filepath = '.\\\\', language = 'en'):\n \n output_str = \"\"\n\n file_exists = False\n\n my_file = Path(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n if my_file.is_file():\n file_exists = True\n file_dict = read_file_into_dict(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n \n with open(filepath + \"\\\\\" + filename, 'r') as f:\n lines = f.readlines()\n\n first_line = lines.pop(0)[:-1].replace('\"', '').split(',')\n \n app_index = first_line.index('app')\n field_name_index = first_line.index('field_name')\n language_id_index = first_line.index('language_id')\n localized_text_index = first_line.index('localized_text')\n \n total = len(lines)\n \n translate_client = translate.Client()\n index = 0\n \n for line in lines:\n data = line.replace('\"', '').split(',')\n \n for i in range(0, len(data)):\n data[i] = data[i].replace('\"', '\\'')\n \n key = data[field_name_index]+data[app_index]+language[:2]\n output_str+='insert into localized_text'\n output_str+=' values(\\''\n output_str+=data[field_name_index]\n output_str+='\\',\\''\n output_str+=data[app_index]\n output_str+='\\',\\''\n output_str+=language[:2]\n output_str+='\\',\\''\n\n if file_exists and key in file_dict.keys():\n output_str+= file_dict[key]\n elif language == 'en':\n output_str+= data[localized_text_index] \n else:\n print('Translation')\n translation = translate_client.translate(data[localized_text_index], source_language = 'en', target_language = language)\n output_str+=translation['translatedText'].replace(\"\\n\", \"\")\n output_str+='\\');\\n'\n index+=1\n print(str(index) + \" out of \" + str(total))\n \n\n with open(filepath + \"\\\\\" + language[:2] + \"_text.txt\", 'w', encoding = 'utf-8') as f:\n f.write(output_str)\n \n return output_str",
"def update_translations(lang, translated_data, app, is_file=True):\n\tclear_cache()\n\tfull_dict = load_lang(lang, [app])\n\n\tif full_dict:\n\t\tdef restore_newlines(s):\n\t\t\treturn (s.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t\t\t.replace(\"| | |\", \"\\n\"))\n\n\t\ttranslation_dict = defaultdict(dict)\n\t\tfor k in full_dict:\n\t\t\tfor m in full_dict[k]:\n\t\t\t\ttranslation_dict[k][m] = full_dict[restore_newlines(k)][restore_newlines(m)]\n\n\t\tif is_file:\n\t\t\tnew_translations = frappe._dict(frappe.get_file_json(translated_data))\n\t\telse:\n\t\t\tnew_translations = translated_data\n\n\t\tfor k in new_translations:\n\t\t\tfor m in new_translations[k]:\n\t\t\t\tif new_translations[k][m] != \"\":\n\t\t\t\t\ttranslation_dict[k][restore_newlines(m)] = restore_newlines(new_translations[k][m])\n\n\t\twrite_translations_file(app, lang, translation_dict)",
"def write_translations_file(app, lang, full_dict=None, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_app_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_csv_file(\n\t\tos.path.join(tpath, lang + \".csv\"), app_messages, full_dict or get_all_translations(lang)\n\t)",
"def merge_files(locale, fail_if_missing=True):\r\n for target, sources in CONFIGURATION.generate_merge.items():\r\n merge(locale, target, sources, fail_if_missing)",
"def get_untranslated(lang, untranslated_file, get_all=False, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tapps = frappe.get_all_apps(True)\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tmessages = []\n\tuntranslated = []\n\tfor app_name in apps:\n\t\tmessages.extend(get_messages_for_app(app_name))\n\n\tmessages = deduplicate_messages(messages)\n\n\tdef escape_newlines(s):\n\t\treturn s.replace(\"\\\\\\n\", \"|||||\").replace(\"\\\\n\", \"||||\").replace(\"\\n\", \"|||\")\n\n\tif get_all:\n\t\tprint(str(len(messages)) + \" messages\")\n\t\twith open(untranslated_file, \"wb\") as f:\n\t\t\tfor m in messages:\n\t\t\t\t# replace \\n with ||| so that internal linebreaks don't get split\n\t\t\t\tf.write((escape_newlines(m[1]) + os.linesep).encode(\"utf-8\"))\n\telse:\n\t\tfull_dict = get_all_translations(lang)\n\n\t\tfor m in messages:\n\t\t\tif not full_dict.get(m[1]):\n\t\t\t\tuntranslated.append(m[1])\n\n\t\tif untranslated:\n\t\t\tprint(str(len(untranslated)) + \" missing translations of \" + str(len(messages)))\n\t\t\twith open(untranslated_file, \"wb\") as f:\n\t\t\t\tfor m in untranslated:\n\t\t\t\t\t# replace \\n with ||| so that internal linebreaks don't get split\n\t\t\t\t\tf.write((escape_newlines(m) + os.linesep).encode(\"utf-8\"))\n\t\telse:\n\t\t\tprint(\"all translated!\")",
"def _apply_patch_odoo(self):\n paths = [os.path.join('openerp', 'tools', 'translate.py'),\n os.path.join('odoo', 'tools', 'translate.py')]\n for path in paths:\n s_file = os.path.join(self._server_path, path)\n if not os.path.isfile(s_file):\n continue\n cmd = [\"sed\", \"-i\", \"-e\",\n r\"s/translation'] = src/translation'] = ''/g\",\n s_file]\n print \" \".join(cmd)\n subprocess.call(cmd)",
"def import_translations(lang, path):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\tfull_dict.update(get_translation_dict_from_file(path, lang, \"import\"))\n\n\tfor app in frappe.get_all_apps(True):\n\t\twrite_translations_file(app, lang, full_dict)",
"def prepare_translations():\n output_fn = '/home/jelle/Desktop/django.csv'\n local('po2csv apps/dasa/locale/id/LC_MESSAGES/django.po %(output_fn)s' % locals())\n print 'output written to %(output_fn)s' % locals()",
"def pkg_app_translator(self, translation_dict, rows_list):\n for ls in rows_list:\n # transform the applist str to a list for each row\n app_list = ls[1].split(',')\n for i in range(len(app_list)):\n # translation of app_list on each row\n # if the pkg is in the dict, then translate; otw, stick with\n # the pkg\n # get rid of the space, [, and ] in applist\n processed_key = app_list[i].strip().strip('[').strip(']')\n app_list[i] = translation_dict.get(processed_key,\n processed_key)\n ls[1] = ', '.join(app_list)\n\n # also translate the first row\n for i in range(len(rows_list[0])):\n rows_list[0][i] = translation_dict.get(rows_list[0][i], rows_list[0][i])\n\n return rows_list",
"def pkg_app_translator(self, translation_dict, rows_list):\n for list in rows_list:\n # transform the applist str to a list for each row\n app_list = list[1].split(',')\n for i in range(len(app_list)):\n # translation of app_list on each row\n # if the pkg is in the dict, then translate; otw, stick with\n # the pkg\n # get rid of the space, [, and ] in applist\n processed_key = app_list[i].strip().strip('[').strip(']')\n app_list[i] = translation_dict.get(processed_key,\n processed_key)\n list[1] = ', '.join(app_list)\n\n # also translate the first row\n for i in range(len(rows_list[0])):\n rows_list[0][i] = translation_dict.get(rows_list[0][i], rows_list[0][i])\n\n return rows_list",
"def _read_translations(self):\n print('Reading original translations')\n self.translations_map = {}\n n_translations = 0\n with open(os.path.join(self.src_dir, 'translations.txt'),\n 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self.translations_map.setdefault(\n row['trans_id'], {})[row['lang']] = row['translation']\n n_translations += 1\n print('\\ttotal original translations: %s' % n_translations)",
"def convert_translations(self, dest_dir):\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n total_translation_rows = 0\n with open(os.path.join(dest_dir, 'translations.txt'),\n 'w+b') as out_file:\n writer = csv.DictWriter(\n out_file, fieldnames=NEW_TRANSLATIONS_FIELDS)\n writer.writeheader()\n for filename in sorted(os.listdir(self.src_dir)):\n if not (filename.endswith('.txt') and\n os.path.isfile(os.path.join(self.src_dir, filename))):\n print('Skipping %s' % filename)\n continue\n table_name = filename[:-len('.txt')]\n if table_name == 'translations':\n continue\n total_translation_rows += self._translate_table(\n dest_dir, table_name, writer)\n print('Total translation rows: %s' % total_translation_rows)",
"def aggregate_translations(wildcards):\n checkpoint_output = checkpoints.align.get(**wildcards).output.translations\n return expand(build_dir + \"/{build_name}/{segment}/nextalign/masked.gene.{gene}.fasta\",\n build_name=wildcards.build_name,\n segment=wildcards.segment,\n gene=GENES[wildcards.segment])",
"def get_untranslated(lang, untranslated_file=None, get_all=False, app=None, write=True):\n\tclear_cache()\n\n\tmessages = []\n\tuntranslated = defaultdict(lambda: defaultdict(dict))\n\tif app:\n\t\tmessages = get_messages_for_app(app)\n\telse:\n\t\tfor app in frappe.get_all_apps(True):\n\t\t\tmessages.extend(get_messages_for_app(app))\n\n\tmessages = messages\n\n\tdef escape_newlines(s):\n\t\treturn (s.replace(\"\\\\\\n\", \"|||||\")\n\t\t\t\t.replace(\"\\\\n\", \"||||\")\n\t\t\t\t.replace(\"\\n\", \"|||\"))\n\n\tfull_dict = load_lang(lang, [app])\n\tcomparison_dict = reduce(lambda a,b: a.update(b) or a, list(full_dict.values()), {})\n\tif get_all:\n\t\tprint(str(len(messages)) + \" messages\")\n\t\tfor m in messages:\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\tif write:\n\t\t\twrite_json_file(untranslated_file, untranslated)\n\t\telse:\n\t\t\treturn untranslated\n\n\telse:\n\t\tmessages_count = 0\n\t\tuntranslated_count = 0\n\n\t\tfor m in messages:\n\t\t\tmessages_count += 1\n\t\t\tif m[0] and m[0] not in full_dict:\n\t\t\t\tuntranslated_count += 1\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\t\telif m[0] and m[1] not in full_dict[m[0]]:\n\t\t\t\tuntranslated_count += 1\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\tif untranslated:\n\t\t\tprint(str(untranslated_count) + \" missing translations of \" + str(messages_count))\n\t\t\t\n\t\t\tif write:\n\t\t\t\twrite_json_file(untranslated_file, untranslated)\n\t\t\telse:\n\t\t\t\treturn untranslated\n\n\t\telse:\n\t\t\tprint(\"all translated!\")",
"def handler_unbabel_translations():\n jobs = []\n for item in db.stories.find({}):\n for lang in [l[0] for l in UNBABEL_API_LANGUAGES if l[0] != 'en']:\n uid = item.get('unbabel_uid_{}'.format(lang), None)\n if uid:\n jobs.append(get_unbabel_translation.s(uid, lang))\n job = group(jobs)\n job.apply_async()\n return job",
"def fixturize(app=\"All\"):\n\n if app == \"All\":\n local('python manage.py dumpdata resources > resources/fixtures/resources.json')\n local('python manage.py dumpdata military > military/fixtures/military.json')\n local('python manage.py dumpdata arenas > arenas/fixtures/arena.json')\n local('python manage.py dumpdata sciences > sciences/fixtures/technologies.json')\n local('python manage.py dumpdata auth.Group > fixtures/groups.json')\n elif app == \"resource\":\n local('python manage.py dumpdata resources > resources/fixtures/resources.json')\n elif app == \"military\":\n local('python manage.py dumpdata military > military/fixtures/military.json')\n elif app == \"arena\":\n local('python manage.py dumpdata arenas > arenas/fixtures/arena.json')\n elif app == \"sciences\":\n local('python manage.py dumpdata sciences > sciences/fixtures/technologies.json')\n elif app == \"groups\":\n local('python manage.py dumpdata auth.Group > fixtures/groups.json')",
"def _build_localization(self, package):\n for lang in package:\n localization = package[lang]\n language = localization['language']\n del localization['language']\n language = self.get_language(language) # gets the existing language container or creates a new one\n language.update(localization)",
"def install_translations(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n if where == 'local':\n # if we are local, we also generate new po files\n with cd('apps/dasa/'):\n run('../../bin/django makemessages -l id')\n run('../../bin/django makemessages -l en')\n run('../../bin/django compilemessages')\n with cd('project'):\n# run('../bin/django makemessages -l id')\n run('../bin/django makemessages -l en')\n run('../bin/django compilemessages')\n else: # otherwise, we just compile\n run('git pull')\n with cd('apps/dasa/'):\n run('../../bin/django compilemessages')\n with cd('project'):\n run('../bin/django compilemessages')\n restart(where)",
"def clean_translated_locales():\r\n for locale in CONFIGURATION.translated_locales:\r\n clean_locale(locale)",
"def translate_phrases(translator, phrases, language):\n for phrase in phrases:\n translator.type_phrase_to_translate(phrase)\n sleep(0.5)\n translated_phrase = translator.read_translated_phrase()\n add_translation_to_file(language, translated_phrase)",
"def update_templates():\n logging.info(\"Copying english po files to %s\" % POT_PATH)\n\n # post them to exposed URL\n ensure_dir(POT_PATH)\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"django.po\"), os.path.join(POT_PATH, \"kalite.pot\"))\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"djangojs.po\"), os.path.join(POT_PATH, \"kalitejs.pot\"))",
"def backup_po_to_db_by_language_and_domains(language, domains=['django', 'djangojs', 'angular']):\n from rosetta.models import TranslationBackup\n\n available_langs = dict(settings.LANGUAGES)\n if language not in available_langs:\n logger.debug('Language %s is not available for backup. Add language to settings.LANGUAGES' % language)\n return\n\n for path in settings.LOCALE_PATHS:\n po_pattern = os.path.join(path, language, \"LC_MESSAGES\", \"*.po\")\n\n for pofile in glob(po_pattern):\n logger.debug(\"Backuping %s\" % pofile)\n\n domain = os.path.splitext(os.path.basename(pofile))[0]\n if domain in domains:\n with codecs.open(pofile, 'r', 'utf-8') as pofile_opened:\n content = pofile_opened.read()\n\n backup = TranslationBackup(\n language=language,\n locale_path=path,\n domain=domain,\n content=content,\n )\n backup.save()",
"def write_translations_file(app, lang, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_pymodule_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_json_file(os.path.join(tpath, lang + \".json\"), app_messages)",
"def main():\n actual_dir = os.getcwd()\n i18n_dir = os.path.join(actual_dir, 'i18n') # Directory of I18n app.\n i18n_dirname = os.path.basename(i18n_dir)\n models_file = os.path.join(i18n_dir, 'models.py')\n data_dir = os.path.join(i18n_dir, 'data') # CSV files.\n data_license = os.path.join(data_dir, 'LICENSE_CC')\n project_dir = os.path.dirname(i18n_dir)\n settings_file = os.path.join(project_dir, 'settings.py')\n\n show_license(data_license)\n i18n_model = setup_environ(project_dir, i18n_dirname, settings_file)\n models = get_data_models(models_file)\n new_models = sort_models(data_dir, models)\n for model in new_models:\n load_data(model, i18n_model, i18n_dirname)",
"def translate_files(input_file, output_file, translate_dict, delete_symbols):\n\n for line in input_file:\n result = translate(line, translate_dict, delete_symbols)\n output_file.write(result)",
"def clean_configuration_directory():\r\n for locale in CONFIGURATION.translated_locales:\r\n clean_conf_folder(locale)",
"def main(strict=True, verbosity=1):\r\n for locale in CONFIGURATION.translated_locales:\r\n merge_files(locale, fail_if_missing=strict)\r\n # Dummy text is not required. Don't raise exception if files are missing.\r\n for locale in CONFIGURATION.dummy_locales:\r\n merge_files(locale, fail_if_missing=False)\r\n\r\n compile_cmd = 'django-admin.py compilemessages -v{}'.format(verbosity)\r\n if verbosity:\r\n stderr = None\r\n else:\r\n stderr = DEVNULL\r\n execute(compile_cmd, working_directory=BASE_DIR, stderr=stderr)",
"def i18nupdate():\n click.echo('-> Updating i18n message files...')\n _extract_18n_messages()\n langs = app.config['BABEL_LANGUAGES']\n for lang in langs:\n _write_message_files(lang)\n click.echo('-> i18n message files updated.\\n')\n click.echo('You should now edit translations in following files:')\n for lang in langs:\n click.echo(os.path.join(I18N_PATH, lang, 'LC_MESSAGES', 'messages.po'))"
]
| [
"0.6750786",
"0.65281385",
"0.6090613",
"0.59707904",
"0.5891968",
"0.5812921",
"0.58110124",
"0.57808274",
"0.57644635",
"0.5687877",
"0.5668619",
"0.56059396",
"0.5555772",
"0.55203915",
"0.54778147",
"0.5460354",
"0.5453668",
"0.54250836",
"0.5405557",
"0.53435117",
"0.53187776",
"0.5309723",
"0.52445865",
"0.5224172",
"0.5202879",
"0.51972234",
"0.51869565",
"0.51436085",
"0.51337147",
"0.5104225"
]
| 0.6750639 | 1 |
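Several of the translation helpers listed above share one convention worth calling out: before untranslated strings are written out one-per-line, literal newlines are swapped for `|||`-style markers (`escape_newlines`) and swapped back on import (`restore_newlines`). A minimal, self-contained sketch of that round trip, using the same marker choices as the helpers shown above (a standalone illustration, not code taken from this dataset):

```python
def escape_newlines(s: str) -> str:
    # Protect escaped and literal newlines so each message stays on a single line.
    return s.replace("\\\n", "|||||").replace("\\n", "||||").replace("\n", "|||")


def restore_newlines(s: str) -> str:
    # Undo the substitution, longest marker first so "|||||" is never split apart.
    return s.replace("|||||", "\\\n").replace("||||", "\\n").replace("|||", "\n")


if __name__ == "__main__":
    msg = "First line\nSecond line with a literal \\n sequence"
    assert restore_newlines(escape_newlines(msg)) == msg
```

Restoring the longest marker first matters: if `|||` were replaced before `||||`, a five-pipe marker would be misread as a newline plus leftover pipes.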
Returns all messages (list) for a specified `app` | def get_messages_for_app(app, deduplicate=True):
messages = []
modules = [frappe.unscrub(m) for m in frappe.local.app_modules[app]]
# doctypes
if modules:
if isinstance(modules, str):
modules = [modules]
filtered_doctypes = (
frappe.qb.from_("DocType").where(Field("module").isin(modules)).select("name").run(pluck=True)
)
for name in filtered_doctypes:
messages.extend(get_messages_from_doctype(name))
# pages
filtered_pages = (
frappe.qb.from_("Page").where(Field("module").isin(modules)).select("name", "title").run()
)
for name, title in filtered_pages:
messages.append((None, title or name))
messages.extend(get_messages_from_page(name))
# reports
report = DocType("Report")
doctype = DocType("DocType")
names = (
frappe.qb.from_(doctype)
.from_(report)
.where((report.ref_doctype == doctype.name) & doctype.module.isin(modules))
.select(report.name)
.run(pluck=True)
)
for name in names:
messages.append((None, name))
messages.extend(get_messages_from_report(name))
for i in messages:
if not isinstance(i, tuple):
raise Exception
# workflow based on app.hooks.fixtures
messages.extend(get_messages_from_workflow(app_name=app))
# custom fields based on app.hooks.fixtures
messages.extend(get_messages_from_custom_fields(app_name=app))
# app_include_files
messages.extend(get_all_messages_from_js_files(app))
# server_messages
messages.extend(get_server_messages(app))
# messages from navbar settings
messages.extend(get_messages_from_navbar())
if deduplicate:
messages = deduplicate_messages(messages)
return messages | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_messages_for_app(app):\n\tmessages = []\n\tmodules = \", \".join(['\"{}\"'.format(m.title().replace(\"_\", \" \")) \\\n\t\tfor m in frappe.local.app_modules[app]])\n\n\t# doctypes\n\tif modules:\n\t\tfor name in frappe.db.sql_list(\"\"\"select name from tabDocType\n\t\t\twhere module in ({})\"\"\".format(modules)):\n\t\t\tmessages.extend(get_messages_from_doctype(name))\n\n\t\t# pages\n\t\tfor name, title in frappe.db.sql(\"\"\"select name, title from tabPage\n\t\t\twhere module in ({})\"\"\".format(modules)):\n\t\t\tmessages.append(('Page: ' + title or name, title or name))\n\t\t\tmessages.extend(get_messages_from_page(name))\n\n\n\t\t# reports\n\t\tfor name in frappe.db.sql_list(\"\"\"select tabReport.name from tabDocType, tabReport\n\t\t\twhere tabReport.ref_doctype = tabDocType.name\n\t\t\t\tand tabDocType.module in ({})\"\"\".format(modules)):\n\t\t\tmessages.append(('Report: ' + name, name))\n\t\t\tmessages.extend(get_messages_from_report(name))\n\t\t\tfor i in messages:\n\t\t\t\tif not isinstance(i, tuple):\n\t\t\t\t\traise Exception\n\n\t# workflow based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_workflow(app_name=app))\n\n\t# custom fields based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_custom_fields(app_name=app))\n\n\t# app_include_files\n\tmessages.extend(get_all_messages_from_js_files(app))\n\tmessages.extend(get_messages_from_include_files(app))\n\n\t# server_messages\n\tmessages.extend(get_server_messages(app))\n\treturn messages",
"def list_messages(self):",
"def get_all_messages_from_template_files(app_name=None):\n\tmessages = []\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.exists(frappe.get_app_path(app, \"templates\")):\n\t\t\tfor basepath, dummy, files in os.walk(frappe.get_app_path(app, \"templates\")):\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages",
"def get_all_messages_from_js_files(app_name=None):\n\tmessages = []\n\tfor app in [app_name] if app_name else frappe.get_installed_apps(_ensure_on_bench=True):\n\t\tif os.path.exists(frappe.get_app_path(app, \"public\")):\n\t\t\tfor basepath, folders, files in os.walk(frappe.get_app_path(app, \"public\")):\n\t\t\t\tif \"frappe/public/js/lib\" in basepath:\n\t\t\t\t\tcontinue\n\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages",
"def get_all_messages_from_js_files(app_name=None):\n\tmessages = []\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.exists(frappe.get_app_path(app, \"public\")):\n\t\t\tfor basepath, dummy, files in os.walk(frappe.get_app_path(app, \"public\")):\n\t\t\t\tif \"frappe/public/js/lib\" in basepath:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif os.path.isfile(frappe.get_app_path(app, \"public/build.json\")):\n\t\t\t\t\twith open(frappe.get_app_path(app, \"public/build.json\"), 'r') as f:\n\t\t\t\t\t\tbuilt_files = json.loads(f.read())\n\t\t\t\t\t\tbuilt_files = reduce(lambda a,b: a.extend(b) or a, list(built_files.values()), [])\n\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname not in built_files and (fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\")):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages",
"def get_all_msgs(self):\n data = self.database.select(self.tname)\n msgs = []\n for item in data:\n msgs.append((item[0], self.data_to_msg(item)))\n return msgs",
"def get_messages_from_include_files(app_name=None):\n\tmessages = []\n\tfor file in (frappe.get_hooks(\"app_include_js\", app_name=app_name) or []) + (frappe.get_hooks(\"web_include_js\", app_name=app_name) or []):\n\t\tmessages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))\n\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.isfile(frappe.get_app_path(app, \"public/build.json\")):\n\t\t\twith open(frappe.get_app_path(app, \"public/build.json\"), 'r') as f:\n\n\t\t\t\tfor f in json.loads(f.read()):\n\t\t\t\t\tif not f.startswith(\"concat:\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, \"assets/\" + f)))\n\n\treturn messages",
"def get_messages(self):\n res = self.conn.cursor().execute(\"SELECT * FROM messages\")\n return res.fetchall()",
"def get_app_message(self):\n return self.messages[\"app\"].get()",
"async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()",
"def get_user_messages(user_id):\n pass \n # user_message_list = []\n\n # for message in sent messages:",
"def all_messages(self):\n request = {'token': self.token, 'include_received': True, 'include_read': True, 'include_sent': True}\n return Session.send_request('messages', request, Session.FULL_RESPONSE_OR_NONE)",
"def _messages_list(self, queue):\n\n return queue.messages()",
"def get_all_messages(**kwargs):\n request = kwargs.pop('request')\n area = get_location_for_user(request.user)\n if not area == Location.tree.root_nodes()[0]:\n return Message.objects.exclude(connection__identity__in=getattr(settings, 'MODEM_NUMBERS', ['256777773260', '256752145316', '256711957281', '256790403038', '256701205129'])).\\\n exclude(connection__backend__name=\"yo8200\").filter(direction='I', connection__contact__reporting_location__in=area.get_descendants(include_self=True).all()).order_by('-date')\n\n return Message.objects.exclude(connection__identity__in=getattr(settings, 'MODEM_NUMBERS', ['256777773260', '256752145316', '256711957281', '256790403038', '256701205129'])).\\\n exclude(connection__backend__name=\"yo8200\").filter(direction='I').order_by('-date')",
"def list(self):\n try:\n response = self.service.users().messages().list(userId=self.user_id,\n q=self.query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = self.service.users().messages().list(userId=self.user_id, q=self.query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print('An error occurred: %s' % error)",
"def get_message_list(self):\n \n result = requests.get(\n url = root_url + '/{}'.format(\"message\"),\n headers = { 'Authorization': api_key },\n )\n\n message_list = result.json()\n\n self.message_list = message_list",
"def get_all_messages():\n with open(\"data/messages.txt\", \"r\") as chat_list:\n messages = chat_list.readlines()\n return messages",
"def get_messages(request):\n import urllib.parse\n if request.user.is_authenticated():\n msgs = BroadcastMessage.objects.current().for_auth_users()\n else:\n msgs = BroadcastMessage.objects.current().for_unauth_users()\n\n # exclude by those seen\n excluded_session = decode_excluded(request.session.get(\"excluded_broadcasts\", \"\"))\n excluded_cookie = decode_excluded(request.COOKIES.get(\"excluded_broadcasts\", \"\"))\n excluded = excluded_session | excluded_cookie\n msgs = msgs.exclude(pk__in=list(excluded))\n\n # filter them by the HTTP_REFERER\n url_parts = urllib.parse.urlparse(request.META.get('HTTP_REFERER', '/'))\n path = url_parts.path\n valid_messages = [msg for msg in msgs if re.match(msg.url_target, path)]\n msg_list = []\n for msg in valid_messages:\n msg_list.append(msg.msg_info())\n if msg.show_frequency == BroadcastMessage.SHOW_ONCE:\n excluded_cookie.add(msg.pk)\n elif msg.show_frequency == BroadcastMessage.SHOW_ONCE_SESSION:\n excluded_session.add(msg.pk)\n request.session['excluded_broadcasts'] = encode_excluded(excluded_session)\n response = HttpResponse(json.dumps(msg_list),\n content_type=\"application/json\")\n response.set_cookie('excluded_broadcasts', encode_excluded(excluded_cookie))\n return response",
"def getAllMessages(self):\n return self.db.getAllMessages()",
"def get_messages(self):\r\n return self.messages",
"def get_all(self):\n request = get_current_request()\n messages = []\n for queue in self.queues:\n for peeked in request.session.peek_flash(queue):\n messages.append({'message': peeked, 'queue': queue,})\n request.session.pop_flash(queue)\n return messages",
"def get_messages_from_include_files(app_name=None):\n\tfrom frappe.utils.jinja_globals import bundled_asset\n\n\tmessages = []\n\tapp_include_js = frappe.get_hooks(\"app_include_js\", app_name=app_name) or []\n\tweb_include_js = frappe.get_hooks(\"web_include_js\", app_name=app_name) or []\n\tinclude_js = app_include_js + web_include_js\n\n\tfor js_path in include_js:\n\t\tfile_path = bundled_asset(js_path)\n\t\trelative_path = os.path.join(frappe.local.sites_path, file_path.lstrip(\"/\"))\n\t\tmessages_from_file = get_messages_from_file(relative_path)\n\t\tmessages.extend(messages_from_file)\n\n\treturn messages",
"def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200",
"def get_all_message(): \n return \"<br>\".join(messages)",
"def messages(self):\n return Session.send_request('messages', {'token': self.token}, Session.FULL_RESPONSE_OR_NONE)",
"def get_messages():\n dynamodb = boto3.client('dynamodb')\n messages = []\n _messages = []\n paginator = dynamodb.get_paginator('scan')\n for page in paginator.paginate(TableName=os.environ.get('MESSAGE_TABLE_NAME')):\n _messages.extend(page['Items'])\n\n if not _messages:\n return _messages\n\n for message in _messages:\n m = {\n message['timestamp']['N']: message['data']['S']\n }\n messages.append(m)\n\n # sort list of dict by timestamp\n messages = list(map(dict, sorted(list(i.items()) for i in messages)))\n\n _messages = []\n for message in messages:\n _, v = list(message.items())[0]\n _messages.append(v)\n\n return _messages",
"def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])",
"def get_messages(email: str) -> list:\n\n params = {\"action\": \"getMessages\", \"login\": email, \"domain\": domain}\n\n response = requests.get(endpoint, data=params)\n email_data = json.load(response)\n\n for email_message in email_data:\n for field, value in email_message.items():\n print(f\"{field}: {value}\")\n print()",
"def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])"
]
| [
"0.7564479",
"0.6858341",
"0.6817746",
"0.6686708",
"0.6553618",
"0.63832015",
"0.6328123",
"0.62921464",
"0.62920743",
"0.62573105",
"0.61743414",
"0.6165361",
"0.61263746",
"0.6086476",
"0.60778743",
"0.60688084",
"0.6063915",
"0.6063191",
"0.60603523",
"0.6006632",
"0.6006579",
"0.6003663",
"0.5954419",
"0.5900541",
"0.587599",
"0.5870837",
"0.58531386",
"0.5848935",
"0.58328164",
"0.5820711"
]
| 0.7323146 | 1 |
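The `get_messages_for_app` row above gathers `(context, message)` tuples from DocTypes, pages, reports, workflows, custom fields, bundled JS and server code, then passes the combined list through `deduplicate_messages`, whose body is not shown in this row. A plausible order-preserving sketch of such a helper (an assumption for illustration only, not frappe's actual implementation):

```python
def deduplicate_messages(messages):
    # Keep only the first occurrence of each (context, message) tuple, preserving order.
    seen = set()
    unique = []
    for entry in messages:
        key = tuple(entry)
        if key not in seen:
            seen.add(key)
            unique.append(entry)
    return unique


# Duplicates collected from a DocType and a JS file collapse into a single entry.
msgs = [("DocType: Task", "Status"), ("task.js", "Save"), ("DocType: Task", "Status")]
assert deduplicate_messages(msgs) == [("DocType: Task", "Status"), ("task.js", "Save")]
```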
Return all labels from Navbar Items, as specified in Navbar Settings. | def get_messages_from_navbar():
labels = frappe.get_all("Navbar Item", filters={"item_label": ("is", "set")}, pluck="item_label")
return [("Navbar:", label, "Label of a Navbar Item") for label in labels] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def items(cls) -> t.List[t.Tuple[t.Any, t.Union[str, NameTitle]]]:\n return list(cls.__labels__.items())",
"def get_labels(self) -> List[str]:\n return self.labels",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"labels\")",
"def labels_all(self):\n return self._labels_all",
"def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])",
"def get_labels(self):\n return []",
"def get_labels(self):\n return get_labels(self.api_key)",
"def labels(self):\n return self._labels",
"def labels(self) -> Dict[str, str]:\n return self.attrs.get(\"Labels\", {})",
"def get_labels(self):\n return self.labels",
"def get_labels(self) -> Set[str]:",
"def get_labels():\n return if_found(dao.get_labels())",
"def get_cora_label_names():\n # type: () -> List[str]\n return _label_names",
"def get_labels(self) -> List[str]:\n raise NotImplementedError()",
"def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]",
"def get_labels(self):\n return self.labels[1:]",
"def get_all_labels(self):\n labels = self.wls_board.get_labels\n return labels",
"def navbar_list(cls):\n return cls.objects.filter(status=0).filter(is_nav=True)[:10]",
"def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")"
]
| [
"0.63265485",
"0.6256548",
"0.61688757",
"0.61688757",
"0.61688757",
"0.61688757",
"0.61688757",
"0.61688757",
"0.61688757",
"0.61688757",
"0.61010146",
"0.6079814",
"0.60744786",
"0.6046383",
"0.600911",
"0.59898907",
"0.59807444",
"0.59694654",
"0.5955326",
"0.59483004",
"0.59366393",
"0.58624005",
"0.5824125",
"0.5806255",
"0.57716125",
"0.57667655",
"0.5764619",
"0.5750878",
"0.5750878",
"0.5750878"
]
| 0.7627104 | 0 |
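The navbar row above yields `("Navbar:", label, "Label of a Navbar Item")` triples: a context marker, the translatable text, and a hint for translators. The sketch below shows how a caller could flatten triples of that shape into CSV rows for a translation file; `navbar_messages_to_csv` and its column layout are assumptions for illustration, not frappe's actual CSV format:

```python
import csv
import io


def navbar_messages_to_csv(messages):
    # messages: iterable of (context, source_text, translator_comment) triples.
    buf = io.StringIO()
    writer = csv.writer(buf)
    for context, source_text, comment in messages:
        # One row per message: source text, empty translation slot, then the hints.
        writer.writerow([source_text, "", context, comment])
    return buf.getvalue()


rows = [
    ("Navbar:", "My Settings", "Label of a Navbar Item"),
    ("Navbar:", "Log out", "Label of a Navbar Item"),
]
print(navbar_messages_to_csv(rows))
```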
Extract all translatable messages for a doctype. Includes labels, Python code, JavaScript code, and HTML templates | def get_messages_from_doctype(name):
messages = []
meta = frappe.get_meta(name)
messages = [meta.name, meta.module]
if meta.description:
messages.append(meta.description)
# translations of field labels, description and options
for d in meta.get("fields"):
messages.extend([d.label, d.description])
if d.fieldtype == "Select" and d.options:
options = d.options.split("\n")
if not "icon" in options[0]:
messages.extend(options)
if d.fieldtype == "HTML" and d.options:
messages.append(d.options)
# translations of roles
messages.extend(d.role for d in meta.get("permissions") if d.role)
messages = [message for message in messages if message]
messages = [("DocType: " + name, message) for message in messages if is_translatable(message)]
# extract from js, py files
if not meta.custom:
doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
messages.extend(get_messages_from_file(doctype_file_path + ".js"))
messages.extend(get_messages_from_file(doctype_file_path + "_list.js"))
messages.extend(get_messages_from_file(doctype_file_path + "_list.html"))
messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js"))
messages.extend(get_messages_from_file(doctype_file_path + "_dashboard.html"))
# workflow based on doctype
messages.extend(get_messages_from_workflow(doctype=name))
return messages | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_messages_from_doctype(name):\n\tmessages = []\n\tmeta = frappe.get_meta(name)\n\n\tmessages = [meta.name, meta.module]\n\n\tif meta.description:\n\t\tmessages.append(meta.description)\n\n\t# translations of field labels, description and options\n\tfor d in meta.get(\"fields\"):\n\t\tmessages.extend([d.label, d.description])\n\n\t\tif d.fieldtype=='Select' and d.options:\n\t\t\toptions = d.options.split('\\n')\n\t\t\tif not \"icon\" in options[0]:\n\t\t\t\tmessages.extend(options)\n\n\t# translations of roles\n\tfor d in meta.get(\"permissions\"):\n\t\tif d.role:\n\t\t\tmessages.append(d.role)\n\n\tmessages = [message for message in messages if message]\n\tmessages = [('DocType: ' + name, message) for message in messages if is_translatable(message)]\n\n\t# extract from js, py files\n\tif not meta.custom:\n\t\tdoctype_file_path = frappe.get_module_path(meta.module, \"doctype\", meta.name, meta.name)\n\t\tmessages.extend(get_messages_from_file(doctype_file_path + \".js\"))\n\t\tmessages.extend(get_messages_from_file(doctype_file_path + \"_list.js\"))\n\t\tmessages.extend(get_messages_from_file(doctype_file_path + \"_list.html\"))\n\t\tmessages.extend(get_messages_from_file(doctype_file_path + \"_calendar.js\"))\n\t\tmessages.extend(get_messages_from_file(doctype_file_path + \"_dashboard.html\"))\n\n\t# workflow based on doctype\n\tmessages.extend(get_messages_from_workflow(doctype=name))\n\n\treturn messages",
"def _extract_18n_messages():\n BabelCLI().run(['', 'extract', '-F', 'babel.cfg', '-k', '_t', '--no-location', '--sort-output',\n '--omit-header', '-o', os.path.join(I18N_PATH, 'messages.pot'), 'aliquis'])",
"def extract_messages_from_code(code):\n\tfrom jinja2 import TemplateError\n\n\ttry:\n\t\tcode = frappe.as_unicode(render_include(code))\n\n\t# Exception will occur when it encounters John Resig's microtemplating code\n\texcept (TemplateError, ImportError, InvalidIncludePath, OSError) as e:\n\t\tif isinstance(e, InvalidIncludePath):\n\t\t\tfrappe.clear_last_message()\n\n\tmessages = []\n\n\tfor m in TRANSLATE_PATTERN.finditer(code):\n\t\tmessage = m.group(\"message\")\n\t\tcontext = m.group(\"py_context\") or m.group(\"js_context\")\n\t\tpos = m.start()\n\n\t\tif is_translatable(message):\n\t\t\tmessages.append([pos, message, context])\n\n\treturn add_line_number(messages, code)",
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def get_massage():\n # Javascript code in ths page generates HTML markup\n # that isn't parsed correctly by BeautifulSoup.\n # To avoid this problem, all document.write fragments are removed\n my_massage = copy(BeautifulSoup.MARKUP_MASSAGE)\n my_massage.append((re.compile(u\"document.write(.+);\"), lambda match: \"\"))\n my_massage.append((re.compile(u'alt=\".+\">'), lambda match: \">\"))\n return my_massage",
"def main(verbosity=1):\r\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\r\n LOCALE_DIR.parent.makedirs_p()\r\n source_msgs_dir = CONFIGURATION.source_messages_dir\r\n remove_file(source_msgs_dir.joinpath('django.po'))\r\n\r\n # Extract strings from mako templates.\r\n verbosity_map = {\r\n 0: \"-q\",\r\n 1: \"\",\r\n 2: \"-v\",\r\n }\r\n babel_verbosity = verbosity_map.get(verbosity, \"\")\r\n\r\n babel_mako_cmd = 'pybabel {verbosity} extract -F {config} -c \"Translators:\" . -o {output}'\r\n babel_mako_cmd = babel_mako_cmd.format(\r\n verbosity=babel_verbosity,\r\n config=base(LOCALE_DIR, 'babel_mako.cfg'),\r\n output=base(CONFIGURATION.source_messages_dir, 'mako.po'),\r\n )\r\n if verbosity:\r\n stderr = None\r\n else:\r\n stderr = DEVNULL\r\n\r\n execute(babel_mako_cmd, working_directory=BASE_DIR, stderr=stderr)\r\n\r\n makemessages = \"django-admin.py makemessages -l en -v{}\".format(verbosity)\r\n ignores = \" \".join('--ignore=\"{}/*\"'.format(d) for d in CONFIGURATION.ignore_dirs)\r\n if ignores:\r\n makemessages += \" \" + ignores\r\n\r\n # Extract strings from django source files, including .py files.\r\n make_django_cmd = makemessages + ' --extension html'\r\n execute(make_django_cmd, working_directory=BASE_DIR, stderr=stderr)\r\n\r\n # Extract strings from Javascript source files.\r\n make_djangojs_cmd = makemessages + ' -d djangojs --extension js'\r\n execute(make_djangojs_cmd, working_directory=BASE_DIR, stderr=stderr)\r\n\r\n # makemessages creates 'django.po'. This filename is hardcoded.\r\n # Rename it to django-partial.po to enable merging into django.po later.\r\n os.rename(\r\n source_msgs_dir.joinpath('django.po'),\r\n source_msgs_dir.joinpath('django-partial.po')\r\n )\r\n\r\n # makemessages creates 'djangojs.po'. This filename is hardcoded.\r\n # Rename it to djangojs-partial.po to enable merging into djangojs.po later.\r\n os.rename(\r\n source_msgs_dir.joinpath('djangojs.po'),\r\n source_msgs_dir.joinpath('djangojs-partial.po')\r\n )\r\n\r\n files_to_clean = set()\r\n\r\n # Extract strings from third-party applications.\r\n for app_name in CONFIGURATION.third_party:\r\n # Import the app to find out where it is. Then use pybabel to extract\r\n # from that directory.\r\n app_module = importlib.import_module(app_name)\r\n app_dir = path(app_module.__file__).dirname().dirname()\r\n output_file = source_msgs_dir / (app_name + \".po\")\r\n files_to_clean.add(output_file)\r\n\r\n babel_cmd = 'pybabel {verbosity} extract -F {config} -c \"Translators:\" {app} -o {output}'\r\n babel_cmd = babel_cmd.format(\r\n verbosity=babel_verbosity,\r\n config=LOCALE_DIR / 'babel_third_party.cfg',\r\n app=app_name,\r\n output=output_file,\r\n )\r\n execute(babel_cmd, working_directory=app_dir, stderr=stderr)\r\n\r\n # Segment the generated files.\r\n segmented_files = segment_pofiles(\"en\")\r\n files_to_clean.update(segmented_files)\r\n\r\n # Finish each file.\r\n for filename in files_to_clean:\r\n LOG.info('Cleaning %s' % filename)\r\n po = pofile(source_msgs_dir.joinpath(filename))\r\n # replace default headers with edX headers\r\n fix_header(po)\r\n # replace default metadata with edX metadata\r\n fix_metadata(po)\r\n # remove key strings which belong in messages.po\r\n strip_key_strings(po)\r\n po.save()",
"def get_messages_for_boot():\n\tmessages = get_all_translations(frappe.local.lang)\n\tmessages.update(get_dict_from_hooks(\"boot\", None))\n\n\treturn messages",
"def messages(request):\n ctx = {}\n messages = get_messages(request)\n if messages:\n ctx['mesgs'] = messages\n return ctx",
"def extract_messages_from_code(code, is_py=False):\n\ttry:\n\t\tcode = frappe.as_unicode(render_include(code))\n\texcept (TemplateError, ImportError, InvalidIncludePath, IOError):\n\t\t# Exception will occur when it encounters John Resig's microtemplating code\n\t\tpass\n\n\tmessages = []\n\tmessages += [(m.start(), m.groups()[0]) for m in re.compile('_\\(\"([^\"]*)\"').finditer(code)]\n\tmessages += [(m.start(), m.groups()[0]) for m in re.compile(\"_\\('([^']*)'\").finditer(code)]\n\tif is_py:\n\t\tmessages += [(m.start(), m.groups()[0]) for m in re.compile('_\\(\"{3}([^\"]*)\"{3}.*\\)').finditer(code)]\n\n\tmessages = [(pos, message) for pos, message in messages if is_translatable(message)]\n\treturn pos_to_line_no(messages, code)",
"def get_messages_for_app(app, deduplicate=True):\n\tmessages = []\n\tmodules = [frappe.unscrub(m) for m in frappe.local.app_modules[app]]\n\n\t# doctypes\n\tif modules:\n\t\tif isinstance(modules, str):\n\t\t\tmodules = [modules]\n\t\tfiltered_doctypes = (\n\t\t\tfrappe.qb.from_(\"DocType\").where(Field(\"module\").isin(modules)).select(\"name\").run(pluck=True)\n\t\t)\n\t\tfor name in filtered_doctypes:\n\t\t\tmessages.extend(get_messages_from_doctype(name))\n\n\t\t# pages\n\t\tfiltered_pages = (\n\t\t\tfrappe.qb.from_(\"Page\").where(Field(\"module\").isin(modules)).select(\"name\", \"title\").run()\n\t\t)\n\t\tfor name, title in filtered_pages:\n\t\t\tmessages.append((None, title or name))\n\t\t\tmessages.extend(get_messages_from_page(name))\n\n\t\t# reports\n\t\treport = DocType(\"Report\")\n\t\tdoctype = DocType(\"DocType\")\n\t\tnames = (\n\t\t\tfrappe.qb.from_(doctype)\n\t\t\t.from_(report)\n\t\t\t.where((report.ref_doctype == doctype.name) & doctype.module.isin(modules))\n\t\t\t.select(report.name)\n\t\t\t.run(pluck=True)\n\t\t)\n\t\tfor name in names:\n\t\t\tmessages.append((None, name))\n\t\t\tmessages.extend(get_messages_from_report(name))\n\t\t\tfor i in messages:\n\t\t\t\tif not isinstance(i, tuple):\n\t\t\t\t\traise Exception\n\n\t# workflow based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_workflow(app_name=app))\n\n\t# custom fields based on app.hooks.fixtures\n\tmessages.extend(get_messages_from_custom_fields(app_name=app))\n\n\t# app_include_files\n\tmessages.extend(get_all_messages_from_js_files(app))\n\n\t# server_messages\n\tmessages.extend(get_server_messages(app))\n\n\t# messages from navbar settings\n\tmessages.extend(get_messages_from_navbar())\n\n\tif deduplicate:\n\t\tmessages = deduplicate_messages(messages)\n\n\treturn messages",
"def generate_strings():\n\n # used by error pages and in the sidebar for why to create a subverbify\n for category, strings in funny_translatable_strings.iteritems():\n for string in strings:\n print \"# TRANSLATORS: Do not translate literally. Come up with a funny/relevant phrase (see the English version for ideas.) Accepts markdown formatting.\"\n print \"print _('\" + string + \"')\"\n\n # these are used in v1.lib.pages.trafficpages\n INTERVALS = (\"hour\", \"day\", \"month\")\n TYPES = (\"uniques\", \"pageviews\", \"traffic\", \"impressions\", \"clicks\")\n for interval in INTERVALS:\n for type in TYPES:\n print \"print _('%s by %s')\" % (type, interval)",
"async def funslate(self,ctx,lang=\"ja\"):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n await self.translater(ctx,lang,wordsDict.generate())",
"def getMessages(self):\n messages = []\n if capi.pivot_languages:\n is_translated = False\n context_lang = self.context.language\n for lang in capi.pivot_languages:\n if lang == context_lang:\n is_translated = True\n break\n trans = translation.get_field_translations(self.context, lang)\n if trans:\n is_translated = True\n break\n if not is_translated:\n messages.append(\n {\"level\": \"warning\", \"header\": _(\"Pivot Translation\"), \n \"text\": u\"This document has no pivot translation.\"}\n )\n return messages",
"def propeller_messages(context, *args, **kwargs):\n # Force Django 1.8+ style, so dicts and not Context\n # TODO: This may be due to a bug in Django 1.8/1.9+\n if Context and isinstance(context, Context):\n context = context.flatten()\n context.update({'message_constants': message_constants})\n return render_template_file('propeller/messages.html', context=context)",
"def lang_postprocessing(variables):\n return variables",
"def alert_ru(subj, text):\n pos_skip = text.find(\"Ещё результаты\")\n if pos_skip >= 0:\n text = text[:pos_skip]\n\n lines = []\n for line in text.split('\\n'):\n if not any([line.startswith(i) for i in DROP_RU]): # pylint: disable=use-a-generator\n lines.append(make_markdown(line))\n\n return [\n MARKUP,\n clear_markdown(subj),\n '',\n clear_trash(Parser.drop_newlines('\\n'.join(handle_lines(lines)))),\n ]",
"def update_templates():\n logging.info(\"Copying english po files to %s\" % POT_PATH)\n\n # post them to exposed URL\n ensure_dir(POT_PATH)\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"django.po\"), os.path.join(POT_PATH, \"kalite.pot\"))\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"djangojs.po\"), os.path.join(POT_PATH, \"kalitejs.pot\"))",
"def parse_text(self):\n self.text={}\n for i, lang in enumerate(LANGS):\n text=file(self.src).read()\n self.text[lang]=\"\"\n extracted, finish = \"\", 0\n start_string, stop_string = r\"<!--%s-->\" % lang, r\"<!--/%s-->\" % lang\n # Iterates to check multiple blocks of text within the file!\n # Pay attention to infinite loops!\n # AttributeError exception raised when no more blocks to extract exist\n while True:\n try:\n start=re.compile(start_string, re.IGNORECASE).search(text).span()[1]\n finish=re.compile(stop_string, re.IGNORECASE).search(text).span()[0]\n extracted+=text[start:finish]\n text=text[finish+1:]\n except AttributeError:\n break\n self.text[lang]+=extracted",
"def _po_package_contents(self, package):\n po_files = glob(os.path.join(package['source_dir'], '*.po'))\n for po_file in po_files:\n language = os.path.splitext(os.path.basename(po_file))[0]\n lang_dir = os.path.join(package['build_dir'], language)\n msg_dir = os.path.join(lang_dir, 'LC_MESSAGES')\n mo_file = os.path.join(msg_dir, package['name'] + '.mo')\n yield {'language': language,\n 'lang_dir': lang_dir,\n 'msg_dir': msg_dir,\n 'mo_file': mo_file,\n 'po_file': po_file}",
"def _lint(self, html):\n path = ka_root.join('templates', 'd.html')\n self.set_file_contents(path, html)\n return i18n_lint.lint_non_literal_i18n_do_not_translate([path])",
"def main(strict=True, verbosity=1):\r\n for locale in CONFIGURATION.translated_locales:\r\n merge_files(locale, fail_if_missing=strict)\r\n # Dummy text is not required. Don't raise exception if files are missing.\r\n for locale in CONFIGURATION.dummy_locales:\r\n merge_files(locale, fail_if_missing=False)\r\n\r\n compile_cmd = 'django-admin.py compilemessages -v{}'.format(verbosity)\r\n if verbosity:\r\n stderr = None\r\n else:\r\n stderr = DEVNULL\r\n execute(compile_cmd, working_directory=BASE_DIR, stderr=stderr)",
"def clean_locale(locale):\r\n dirname = CONFIGURATION.get_messages_dir(locale)\r\n for filename in ('django-partial.po', 'djangojs-partial.po', 'mako.po'):\r\n clean_file(dirname.joinpath(filename))",
"def get_all_messages_from_template_files(app_name=None):\n\tmessages = []\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.exists(frappe.get_app_path(app, \"templates\")):\n\t\t\tfor basepath, dummy, files in os.walk(frappe.get_app_path(app, \"templates\")):\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages",
"def load2TexAS(data):\n # State global variable\n global cache_stanza, cache_spacy, cache_udpipe, cache_trankit\n\n # Collect the data\n string = data['text']\n lang = data['lang']\n packages = data['packages']\n\n hash_string = hashlib.sha1(string.encode()).hexdigest()\n\n final_HTML = \"\"\n message_HTML = \"<div class=\\'message\\'>\"\n isMessage = False\n header_input = []\n log_row = [datetime.now().strftime('%Y-%m-%d %H:%M:%S'), lang]\n\n if \"stanza\" in packages:\n # Initialize the TexAS document\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n\n ## If cache is full, reload the cache.\n if cache.count(cache_stanza) > 100:\n cache.write(cache_stanza, \"stanza\")\n cache_stanza = cache.load(\"stanza\")\n \n ## Check text whether is already in cache\n if hash_string in cache_stanza[lang].keys():\n tokens, end_pos, lemma, pos, nlpWordsList, hasCompoundWords, cache_stanza = cache.read(\"stanza\", cache_stanza, lang, string) #The output cache_stanza has 'count' been updated.\n else:\n tokens, end_pos, lemma, pos, nlpWordsList, hasCompoundWords, cache_stanza = cache.add(\"stanza\", cache_stanza, lang, string, get_services_stanza)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"stanza\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"stanza\" + \"-\" + lang)\n mydoc.setSentenceList(end_pos)\n\n if hasCompoundWords:\n mydoc.addTokenView( \"WORDS\", nlpWordsList )\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n if hasCompoundWords:\n myTabView.showView(\"WORDS\")\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"Stanza\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>Stanza</div> <br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"stanza\")\n \n else:\n log_row.append(\"\")\n\n if \"spacy\" in packages:\n # SpaCy does not support Arabic and Russian\n if lang == 'ara' or lang == 'rus':\n message_HTML += \"SpaCy does not support Arabic or Russian. 
<br>\"\n isMessage = True\n\n else:\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n \n ## If cache is full, reload the cache.\n if cache.count(cache_spacy) > 100:\n cache.write(cache_spacy, \"spacy\")\n cache_spacy = cache.load(\"spacy\")\n \n ## Check text whether is already in cache\n if hash_string in cache_spacy[lang].keys():\n tokens, end_pos, lemma, pos, cache_spacy = cache.read(\"spacy\", cache_spacy, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_spacy = cache.add(\"spacy\", cache_spacy, lang, string, get_services_spacy)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"spacy\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"spacy\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"SpaCy\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>\" + \"SpaCy\" + \"</div><br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"spacy\")\n \n else:\n log_row.append(\"\")\n\n if \"udpipe\" in packages: \n ## If cache is full, reload the cache.\n if cache.count(cache_udpipe) > 100:\n cache.write(cache_udpipe, \"udpipe\")\n cache_udpipe = cache.load(\"udpipe\")\n \n ## Check text whether is already in cache\n if hash_string in cache_udpipe[lang].keys():\n tokens, end_pos, lemma, pos, cache_udpipe = cache.read(\"udpipe\", cache_udpipe, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_udpipe = cache.add(\"udpipe\", cache_udpipe, lang, string, get_services_udpipe)\n \n string_udpipe = \" \".join(tokens)\n\n # Initialize the TexAS document\n mydoc = tx.Document(string_udpipe)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n\n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"udpipe\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"udpipe\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"UDpipe\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>UDpipe</div> <br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"udpipe\")\n \n else:\n log_row.append(\"\")\n \n if \"trankit\" in packages:\n # trankit temporarily only support english\n if lang == 'eng':\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n \n ## If cache is full, reload the cache.\n if cache.count(cache_trankit) > 100:\n cache.write(cache_trankit, \"trankit\")\n cache_trankit = cache.load(\"trankit\")\n \n ## Check text whether is already in cache\n if hash_string in cache_trankit[lang].keys():\n tokens, end_pos, lemma, pos, cache_trankit = cache.read(\"trankit\", cache_trankit, lang, string)\n else:\n tokens, 
end_pos, lemma, pos, cache_trankit = cache.add(\"trankit\", cache_trankit, lang, string, get_services_trankit)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"spacy\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"spacy\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"Trankit\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>\" + \"Trankit\" + \"</div><br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"trankit\")\n\n else:\n message_HTML += \"Trankit temporarily only supports English. <br>\"\n isMessage = True \n \n else:\n log_row.append(\"\")\n\n message_HTML += \"</div>\"\n if isMessage:\n return message_HTML + get_header_table(header_input) + \"<br><br>\" + final_HTML\n\n writeLog(log_row)\n return get_header_table(header_input) + \"<br><br>\" + final_HTML",
"def extract_messages_from_javascript_code(code: str) -> list[tuple[int, str, str | None]]:\n\n\tmessages = []\n\n\tfor message in extract_javascript(\n\t\tcode,\n\t\tkeywords=[\"__\"],\n\t\toptions={},\n\t):\n\t\tlineno, _func, args = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = None\n\n\t\tif isinstance(args, tuple) and len(args) == 3 and isinstance(args[2], str):\n\t\t\tcontext = args[2]\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages",
"def _lint(self, html):\n path = ka_root.join('templates', 'd.html')\n self.set_file_contents(path, html)\n return i18n_lint.lint_no_wrong_i18n_markup_in_jinja2([path])",
"def get_translation(self):",
"def unwrap(xml_file, missing_message=\"NO TRANSLATION AVAILABLE\"):\n tree = ET.parse(xml_file) \n\n # Find and check the source langs, ref langs and translators\n src_langs, ref_langs, translators = set(), set(), set()\n\n for src_doc in tree.getroot().findall(\".//src\"):\n src_langs.add(src_doc.get(\"lang\"))\n\n for ref_doc in tree.getroot().findall(\".//ref\"):\n ref_langs.add(ref_doc.get(\"lang\"))\n translator = ref_doc.get(\"translator\")\n if translator: translators.add(translator)\n \n if len(src_langs) > 1:\n raise RuntimeError(\"Multiple source languages found\")\n\n if len(src_langs) == 0:\n raise RuntimeError(\"No source languages found\")\n\n src_lang = src_langs.pop()\n src = []\n\n if len(ref_langs) > 1:\n raise RuntimeError(\"Multiple reference languages found -- this case is not currently handled\")\n\n\n if len(ref_langs) > 0:\n if len(translators) == 0:\n LOG.info(\"No translator identifiers found -- reading first translation for each document\")\n translators.add(DEFAULT_TRANSLATOR)\n ref_lang = ref_langs.pop()\n ref = {translator : [] for translator in translators}\n else:\n LOG.info(\"No references found\")\n ref_lang = None\n ref = {}\n\n\n # Extract text\n src_sent_count,doc_count = 0,0\n for doc in tree.getroot().findall(\".//doc\"):\n doc_count += 1\n src_sents = {int(seg.get(\"id\")): seg.text for seg in doc.findall(\".//src//seg\")}\n if ref_lang: \n ref_docs = doc.findall(\".//ref\")\n trans_to_ref = {}\n\n # If no translator identifiers, we just read one reference (if any) \n # If there are translator identifiers, we add a reference for each translator\n\n def get_ref_sents(ref_doc):\n return {int(seg.get(\"id\")): seg.text for seg in ref_doc.findall(f\".//seg\")}\n\n if len(translators) == 1 and DEFAULT_TRANSLATOR in translators:\n if len(ref_docs):\n trans_to_ref[DEFAULT_TRANSLATOR] = get_ref_sents(ref_docs[0])\n else:\n trans_to_ref[DEFAULT_TRANSLATOR] = {}\n else:\n trans_to_ref = {ref_doc.get(\"translator\"): get_ref_sents(ref_doc) for ref_doc in ref_docs}\n\n for seg_id in sorted(src_sents.keys()):\n src.append(src_sents[seg_id])\n src_sent_count += 1\n if ref_lang:\n for translator in translators:\n ref[translator].append(trans_to_ref.get(translator, {translator: {}}).get(seg_id, missing_message))\n\n LOG.info(f\"Extracted {doc_count} document(s) containing {src_sent_count} sentences in {src_lang}\")\n\n\n return src_lang,src,ref_lang,ref",
"def map2mw_Des(d,k1,entry):\n if k1 in map2mw_special_Des:\n return map2mw_special_Des[k1]\n regexes = [\n u'<ab>dés.</ab> de {%(.*?)%}',\n u'<ab>dés.</ab> {%(.*?)%}',\n u'<ab>dés.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'",
"def t(message):\n\n tpl = string.Template(message)\n return tpl.substitute(country=settings.COUNTRY_NAME, language=settings.LANGUAGE_NAME)"
]
| [
"0.6551545",
"0.6259681",
"0.57060575",
"0.55480427",
"0.5310939",
"0.5262401",
"0.51958627",
"0.5157017",
"0.51079404",
"0.51038665",
"0.50712764",
"0.49744385",
"0.4971144",
"0.49695867",
"0.49429208",
"0.49290147",
"0.4922138",
"0.49156842",
"0.4910096",
"0.49036318",
"0.48854375",
"0.48780268",
"0.4860889",
"0.48571682",
"0.48535404",
"0.48465842",
"0.48207268",
"0.48190853",
"0.4811944",
"0.48057175"
]
| 0.6539834 | 1 |
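`get_messages_from_doctype` above finishes by scanning the doctype's companion `.js`/`.html` files with `get_messages_from_file`, which is not reproduced in this row. A simplified sketch of such a scanner, built on a reduced regex for `_("...")` / `__("...")` calls (frappe's real `TRANSLATE_PATTERN` also handles contexts and multi-line strings, so treat this as an approximation):

```python
import re
from pathlib import Path

# Reduced pattern: _("...") or __('...') with a single-line string literal.
SIMPLE_TRANSLATE_RE = re.compile(r"""__?\(\s*(['"])(?P<message>.+?)\1""")


def get_messages_from_file(path):
    # Return (path, message) tuples for every translatable string literal found.
    p = Path(path)
    if not p.exists():
        return []
    code = p.read_text(encoding="utf-8", errors="ignore")
    return [(str(p), m.group("message")) for m in SIMPLE_TRANSLATE_RE.finditer(code)]


# e.g. get_messages_from_file("task_list.js") -> [("task_list.js", "Mark as Done"), ...]
```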
Returns messages from JS files included at boot time (such as desk.min.js) for desk and web | def get_messages_from_include_files(app_name=None):
from frappe.utils.jinja_globals import bundled_asset
messages = []
app_include_js = frappe.get_hooks("app_include_js", app_name=app_name) or []
web_include_js = frappe.get_hooks("web_include_js", app_name=app_name) or []
include_js = app_include_js + web_include_js
for js_path in include_js:
file_path = bundled_asset(js_path)
relative_path = os.path.join(frappe.local.sites_path, file_path.lstrip("/"))
messages_from_file = get_messages_from_file(relative_path)
messages.extend(messages_from_file)
return messages | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def js():\n with lcd(BASEDIR):\n js_ext = (\n 'submodules/jquery-cookie/src/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'submodules/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-inlineform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp submodules/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js submodules/jquery-cookie/src/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))",
"def process_js():\n source_paths = [\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/admin.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/app.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/footnotes.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/table_of_contents.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/text_resize.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/toastr.js'),\n ]\n dest_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.js')\n min_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.min.js')\n\n process_js_files(source_paths, dest_path, min_path)",
"def get_messages_from_include_files(app_name=None):\n\tmessages = []\n\tfor file in (frappe.get_hooks(\"app_include_js\", app_name=app_name) or []) + (frappe.get_hooks(\"web_include_js\", app_name=app_name) or []):\n\t\tmessages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))\n\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.isfile(frappe.get_app_path(app, \"public/build.json\")):\n\t\t\twith open(frappe.get_app_path(app, \"public/build.json\"), 'r') as f:\n\n\t\t\t\tfor f in json.loads(f.read()):\n\t\t\t\t\tif not f.startswith(\"concat:\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, \"assets/\" + f)))\n\n\treturn messages",
"def get_default_javascript():\n return [\"_static/require.js\"]",
"def get_all_messages_from_js_files(app_name=None):\n\tmessages = []\n\tfor app in [app_name] if app_name else frappe.get_installed_apps(_ensure_on_bench=True):\n\t\tif os.path.exists(frappe.get_app_path(app, \"public\")):\n\t\t\tfor basepath, folders, files in os.walk(frappe.get_app_path(app, \"public\")):\n\t\t\t\tif \"frappe/public/js/lib\" in basepath:\n\t\t\t\t\tcontinue\n\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages",
"def read_javascript_includes():\n\tif \"CFG_PREFIX\" in globals(): \n\t\tjs_filepath = os.path.join(CFG_PREFIX,\"var/www/js/jquery/jquery-lib.html\")\n\t\tif os.path.exists(js_filepath):\n\t\t\tf = open(js_filepath,\"r\")\n\t\t\tjs_text = f.read()\n\t\t\tf.close()\n\t\t\treturn js_text\n\t\telse: \t\n\t\t\twarning(\"no javascipt file included %s\" %js_filepath)\n\t\t\treturn None\n\telse: \t\n\t\twarning(\"CFG_PREFIX not set. no javascript includes\")\n\t\treturn None",
"def get_messages_for_boot():\n\tmessages = get_all_translations(frappe.local.lang)\n\tmessages.update(get_dict_from_hooks(\"boot\", None))\n\n\treturn messages",
"def _get_bulma_js() -> List[str]:\n return list(get_js_files())",
"def get_gizmo_js():\n return (\n \"tethys_gizmos/js/gizmo_utilities.js\",\n \"tethys_gizmos/js/cesium_map_view.js\",\n \"tethys_gizmos/js/DrawHelper.min.js\",\n )",
"def get_all_messages_from_js_files(app_name=None):\n\tmessages = []\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.exists(frappe.get_app_path(app, \"public\")):\n\t\t\tfor basepath, dummy, files in os.walk(frappe.get_app_path(app, \"public\")):\n\t\t\t\tif \"frappe/public/js/lib\" in basepath:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif os.path.isfile(frappe.get_app_path(app, \"public/build.json\")):\n\t\t\t\t\twith open(frappe.get_app_path(app, \"public/build.json\"), 'r') as f:\n\t\t\t\t\t\tbuilt_files = json.loads(f.read())\n\t\t\t\t\t\tbuilt_files = reduce(lambda a,b: a.extend(b) or a, list(built_files.values()), [])\n\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname not in built_files and (fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\")):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages",
"def get_lang_js(fortype, name):\n\treturn \"\\n\\n$.extend(frappe._messages, %s)\" % json.dumps(get_dict(fortype, name))",
"def load_jinja_fns(self):\n\n\t\t# Function for jinja, to remove duplicates from flashed messages\n\t\tdef remove_duplicates(msgs):\n\t\t\tuniq_msgs = []\n\t\t\tfor msg in msgs:\n\t\t\t\tif msg not in uniq_msgs:\n\t\t\t\t\tuniq_msgs.append(msg)\n\n\t\t\treturn uniq_msgs\n\n\t\tcurrent_app.jinja_env.globals.update(remove_duplicates=remove_duplicates)",
"def get_lang_js(fortype: str, name: str) -> str:\n\treturn f\"\\n\\n$.extend(frappe._messages, {json.dumps(get_dict(fortype, name))})\"",
"def loadjs(*args):\n return render(settings, 'JS_FILES', 'staticloader/load_js.html', *args)",
"def get_settings_message(d):\n if d['lang'] == 'us':\n msg = \"Running US \"\n elif d['lang'] == 'english':\n msg = \"Running canada english\"\n else:\n msg = \"Running canada french\" \n if d['env']:\n msg += \" on production \"\n else:\n msg += \" on development \"\n if d['upload']:\n msg += \"with upload \"\n else:\n msg += \"without upload \"\n if d['file']:\n msg += \"for {0} catalog\".format(d['file'])\n return msg",
"def builder_inited(app):\n if app.config.offline_skin_js_path is not None:\n app.add_javascript(path.basename(app.config.offline_skin_js_path))\n else:\n app.add_javascript(ONLINE_SKIN_JS)\n if app.config.offline_wavedrom_js_path is not None:\n app.add_javascript(path.basename(app.config.offline_wavedrom_js_path))\n else:\n app.add_javascript(ONLINE_WAVEDROM_JS)",
"def vendor_bundle(self) -> str:\n\n if self.minimize:\n js_url = f\"https://cdn.jsdelivr.net/gh/salesforce/cloudsplaining@{__version__}/cloudsplaining/output/dist/js/chunk-vendors.js\"\n bundle = f'<script type=\"text/javascript\" src=\"{js_url}\"></script>'\n return bundle\n else:\n vendor_bundle_path = get_vendor_bundle_path()\n with open(vendor_bundle_path, \"r\", encoding=\"utf-8\") as f:\n bundle_content = f.read()\n # bundle_content = vendor_bundle_path.read_text(encoding=\"utf-8\")\n bundle = f'<script type=\"text/javascript\">\\n{bundle_content}\\n</script>'\n return bundle",
"def load_extensions_js(context, extension_manager_key):\n for manager in get_extension_managers():\n if manager.key == extension_manager_key:\n return ''.join([\n _render_js_bundle(context, extension, 'default')\n for extension in manager.get_enabled_extensions()\n if 'default' in extension.js_bundles\n ])\n\n return ''",
"def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)",
"def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]",
"def app_bundle(self) -> str:\n if self.minimize:\n js_url = f\"https://cdn.jsdelivr.net/gh/salesforce/cloudsplaining@{__version__}/cloudsplaining/output/dist/js/index.js\"\n bundle = f'<script type=\"text/javascript\" src=\"{js_url}\"></script>'\n return bundle\n else:\n with open(app_bundle_path, \"r\", encoding=\"utf-8\") as f:\n bundle_content = f.read()\n # bundle_content = app_bundle_path.read_text(encoding=\"utf-8\")\n bundle = f'<script type=\"text/javascript\">\\n{bundle_content}\\n</script>'\n return bundle",
"def toastr_messages_js(subdomain: t.Optional[str] = None) -> Response:\n return current_app.response_class(\n render_template('toastr_messages.js.jinja2'), mimetype='application/javascript'\n )",
"def build_body(self) -> str:\n # Always include default.js\n files = [os.path.join(self.directory, \"default.js\")]\n\n # Find increasingly less specific files based on the request path.\n paths = self.path.replace(\"/\", \"\").split(\".\")\n while paths:\n files.append(os.path.join(self.directory, \".\".join(paths)))\n paths = paths[1:]\n\n # Combine the files found, if they exist.\n body = \"// dotjs is working! //\\n\"\n for filename in files:\n if os.path.exists(filename):\n with open(filename) as fp:\n body += fp.read() + \"\\n\"\n\n return body",
"def get_client_js(self, components, url):\n out = \"\\n\\n\"\n if len(components) > 0:\n out += \"Depender.loaded.combine(['\"\n out += \"','\".join([ \"/\".join(c) for c in components ]) + \"']);\\n\\n\"\n out += \"Depender.setOptions({\\n\"\n out += \"\tbuilder: '\" + url + \"'\\n\"\n out += \"});\"\n return out;",
"def i18ninit():\n click.echo('-> Initializing i18n message files...')\n _extract_18n_messages()\n langs = app.config['BABEL_LANGUAGES']\n for lang in langs:\n _write_message_files(lang, command='init')\n click.echo('-> i18n message files initialized.')\n click.echo('You should now edit translations in following files:')\n for lang in langs:\n click.echo(os.path.join(I18N_PATH, lang, 'LC_MESSAGES', 'messages.po'))",
"def is_min(filename):\r\n return re.search(\"min.js$\", filename)",
"def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }",
"def preReadyInitalizatize(self):\n self.pbug(\"Pre Ready Initalization\")\n self.xj(\"window.onerror = miteErrorCallback\")\n for funcs in self.j_funcs:\n if \"bind\" in funcs:\n for bind in funcs[\"bind\"]:\n self.pbug(\"Binding {0} function to #{1}\".format(bind, funcs[\"name\"]))\n script = \"$('#{0}').attr('{1}', '{2}()');\".format(bind, funcs[\"event\"], funcs[\"name\"])\n self.xj(script)\n self.pbug(\"Mite Load Finished\")",
"def get_imports() -> str:\n extension = \"\"\n for js_ in JS_FILES.values():\n extension += f'<script src=\"{js_}\"></script>'\n for css in CSS_FILES.values():\n extension += f'<link rel=\"stylesheet\" href=\"{css}\" is=\"custom-style\">'\n\n return extension",
"def generate_js_dir():\n\n return pkg_resources.resource_filename('linkedin.mobster.har.visualization.js', None)"
]
| [
"0.6217054",
"0.60809547",
"0.60367644",
"0.58267856",
"0.5802923",
"0.5796915",
"0.5757939",
"0.57492536",
"0.5695297",
"0.56889427",
"0.5657771",
"0.5492464",
"0.5477332",
"0.5373451",
"0.52452934",
"0.52360976",
"0.5204163",
"0.51525414",
"0.51245177",
"0.5084141",
"0.5050128",
"0.50357085",
"0.50205374",
"0.50195754",
"0.50104624",
"0.49935907",
"0.49924505",
"0.49769476",
"0.4935263",
"0.49242622"
]
| 0.63474244 | 0 |
Returns a list of translatable strings from a code file | def get_messages_from_file(path: str) -> list[tuple[str, str, str | None, int]]:
frappe.flags.setdefault("scanned_files", set())
# TODO: Find better alternative
# To avoid duplicate scan
if path in frappe.flags.scanned_files:
return []
frappe.flags.scanned_files.add(path)
bench_path = get_bench_path()
if not os.path.exists(path):
return []
with open(path) as sourcefile:
try:
file_contents = sourcefile.read()
except Exception:
print(f"Could not scan file for translation: {path}")
return []
messages = []
if path.lower().endswith(".py"):
messages += extract_messages_from_python_code(file_contents)
else:
messages += extract_messages_from_code(file_contents)
if path.lower().endswith(".js"):
# For JS also use JS parser to extract strings possibly missed out
# by regex based extractor.
messages += extract_messages_from_javascript_code(file_contents)
return [
(os.path.relpath(path, bench_path), message, context, line)
for (line, message, context) in messages
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_strings(src_file):\n res = []\n try:\n res = open(src_file,'r').readlines()\n res = [x.strip() for x in res]\n except:\n res = []\n return res",
"def load_codes(self, codes_file):\n with open(codes_file, \"r\") as cfile:\n codes = set([ line.strip().lower() for line in cfile ])\n if \"\" in codes: codes.remove(\"\")\n return(codes)",
"def extract_messages_from_code(code):\n\tfrom jinja2 import TemplateError\n\n\ttry:\n\t\tcode = frappe.as_unicode(render_include(code))\n\n\t# Exception will occur when it encounters John Resig's microtemplating code\n\texcept (TemplateError, ImportError, InvalidIncludePath, OSError) as e:\n\t\tif isinstance(e, InvalidIncludePath):\n\t\t\tfrappe.clear_last_message()\n\n\tmessages = []\n\n\tfor m in TRANSLATE_PATTERN.finditer(code):\n\t\tmessage = m.group(\"message\")\n\t\tcontext = m.group(\"py_context\") or m.group(\"js_context\")\n\t\tpos = m.start()\n\n\t\tif is_translatable(message):\n\t\t\tmessages.append([pos, message, context])\n\n\treturn add_line_number(messages, code)",
"def get_messages_from_file(path):\n\tapps_path = get_bench_dir()\n\tif os.path.exists(path):\n\t\twith open(path, 'r') as sourcefile:\n\t\t\treturn [(os.path.relpath(path, apps_path),\n\t\t\t\t\tmessage) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(\".py\"))]\n\telse:\n\t\treturn []",
"def _process_trans(self):\n\t\tt_word = list()\n\t\t# with open(self.trans_file_path, 'r', encoding='utf-8') as in_f:\n\t\twith open(self.trans_file_path, 'r') as in_f:\n\t\t\tf = iter(in_f)\n\t\t\tfor line in f:\n\t\t\t\tword = line.lower().split();\n\t\t\t\tif len(word) == 0 or len(word) == 1:\n\t\t\t\t\tcontinue\n\t\t\t\t'''\n\t\t\t\tif w_0[len(w_0)-1] == '+': # ignore the label starting with #pat+ || #doc+\n\t\t\t\t\tcontinue;\n\t\t\t\t'''\n\t\t\t\tfor i in range(len(word)):\n\t\t\t\t\tif i != 0: # exclude the channel\n\t\t\t\t\t\tw = word[i]\n\t\t\t\t\t\tif w == \"<name>\":\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tt_word.append([w, 0, 0])\n\t\treturn t_word",
"def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs",
"def get_list(file_name):\n with open(file_name, \"r\", encoding=\"latin-1\") as file:\n text = file.read()\n text = text.lower() # Make everything lowercase\n text = text.split(\"\\n\")\n return text",
"def load(filename: str) -> list:\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. Terminating program.\".format(e, filename))",
"def get_instructions(file_input: TextIO) -> List[str]:\n instructions = []\n for instruction in file_input:\n instruction = instruction.strip()\n instructions.append(instruction)\n\n return instructions",
"def wrapped_getlines(filename, globals):\n lines = orig(filename, globals)\n source = self.format_source(\"\".join(lines))\n\n if sys.version_info < (3,):\n source = self.try_to_encode(source)\n\n return source.splitlines(True)",
"def read_code(filename):\n f = open('files/%s.code' % filename)\n string = f.read()\n tokens = scan(string)\n ret = parse_code(tokens)\n return ret",
"def read_asm_file_to_code(file_path):\r\n with open(file_path, 'r') as file:\r\n asm_code = []\r\n for line in file:\r\n asm_code.append(line)\r\n\r\n return asm_code",
"def translationText(language, listOfWords):\n txt = open(language+\".txt\", mode=\"r\").readlines()\n translatedWords = []\n for word in listOfWords:\n for line in txt:\n if line.split()[0] == word:\n translatedWords.append(line.split()[1])\n return translatedWords",
"def extract_messages_from_python_code(code: str) -> list[tuple[int, str, str | None]]:\n\tfrom babel.messages.extract import extract_python\n\n\tmessages = []\n\n\tfor message in extract_python(\n\t\tio.BytesIO(code.encode()),\n\t\tkeywords=[\"_\"],\n\t\tcomment_tags=(),\n\t\toptions={},\n\t):\n\t\tlineno, _func, args, _comments = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = args[1] if len(args) == 2 else None\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages",
"def _get_code_files(self):\n for dirpath, dirnames, filenames in os.walk(self.CodesDirectory):\n for f in filenames:\n rel_name = path.join(dirpath, f)\n if f.endswith('.py'):\n yield (rel_name, 'Python')\n elif f.endswith('.pyx'):\n yield (rel_name, 'PyRex')\n elif f.endswith('.c'):\n yield (rel_name, 'C')\n else:\n pass",
"def extract_messages_from_code(code, is_py=False):\n\ttry:\n\t\tcode = frappe.as_unicode(render_include(code))\n\texcept (TemplateError, ImportError, InvalidIncludePath, IOError):\n\t\t# Exception will occur when it encounters John Resig's microtemplating code\n\t\tpass\n\n\tmessages = []\n\tmessages += [(m.start(), m.groups()[0]) for m in re.compile('_\\(\"([^\"]*)\"').finditer(code)]\n\tmessages += [(m.start(), m.groups()[0]) for m in re.compile(\"_\\('([^']*)'\").finditer(code)]\n\tif is_py:\n\t\tmessages += [(m.start(), m.groups()[0]) for m in re.compile('_\\(\"{3}([^\"]*)\"{3}.*\\)').finditer(code)]\n\n\tmessages = [(pos, message) for pos, message in messages if is_translatable(message)]\n\treturn pos_to_line_no(messages, code)",
"def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]",
"def getlistfromtext(self,filename):\n l=[]\n\n if self.encoding:\n f = codecs.open(filename,\"r\",encoding=self.encoding)\n for line in f:\n l.append(line.rstrip())\n f.close()\n\n else:\n f = open(filename,\"r\")\n for line in f:\n l.append(line.rstrip())\n f.close()\n return l",
"def read_next_code_chunk(self) -> List[str]:\n with open(self._filepath) as f:\n for line in f:\n yield [line.strip()]",
"def file_to_list(filename, dir=\"../resources\"):\n os.chdir(dir)\n vocabulary = []\n f = open(filename, \"r\")\n lines = f.readlines()\n for line in lines:\n vocabulary.append(line.replace(\"\\n\", \"\"))\n return vocabulary",
"def _tokenize(self, fileName):\n debug.show(\"Tokenizing...\")\n try:\n f = open(fileName, 'r')\n code = f.read()\n except IOError:\n sys.exit('Error: Bad input file')\n f.close()\n pattern = '/?[a-zA-Z][a-zA-Z0-9_]*|[-]?[0-9]+|[}{]|%.*|[^\\t\\n ]'\n return re.findall(pattern, code)",
"def read_codes(self, filename=\"static/codes.txt\"):\n with open(filename, \"r\") as f:\n contents = f.read().splitlines()\n code = contents[0]\n \n return code",
"def file_to_list_of_parsed(nameoffile):\n a = Grammar()\n b = a.syntax()\n file1 = open(nameoffile,'r')\n parsed = []\n for line in file1:\n parsed.append(b.parseString(line))\n return parsed",
"def translator(filename: str, outfile):\r\n progname = filename[:-3]\r\n vm_code = parser(filename)\r\n for line in vm_code:\r\n out_line = trans_line(line, progname)\r\n outfile.write(out_line) # write out_line to file\r",
"def translate(self, filepath):\n pass",
"def verify_file(self, filename_or_string):\n po = polib.pofile(filename_or_string)\n return [\n self.lint_poentry(entry) for entry in po.translated_entries()\n ]",
"def _get_wordlist(file_name):\n ifile = codecs.open(file_name, 'r', encoding='utf-8')\n for _ in range(int(ifile.__next__())):\n yield (ifile.__next__().strip() for _ in range(int(ifile.__next__())))",
"def load(filename):\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. Terminating program.\".format(e, filename))\n # sys.exit(1)",
"def list():\n\n return cache.codeTableList()",
"def find_langs(args):\n infile = args.source\n langs = {}\n for line in infile:\n name_and_files = line.split()\n name = name_and_files[0]\n if name not in langs:\n langs[name] = []\n langs[name] += read_files.filter_files(name_and_files[1:])\n langs[args.unknown] += read_files.filter_files(args.classify)\n return langs"
]
| [
"0.69848895",
"0.6175604",
"0.6124402",
"0.610023",
"0.60719025",
"0.60711503",
"0.604522",
"0.6015361",
"0.59906834",
"0.59792006",
"0.59200686",
"0.58881736",
"0.5861332",
"0.5845954",
"0.58428144",
"0.58312666",
"0.5825503",
"0.57980675",
"0.57935786",
"0.578261",
"0.5745024",
"0.5730421",
"0.57301754",
"0.5711279",
"0.56964743",
"0.56584203",
"0.56122893",
"0.5607335",
"0.5602877",
"0.56013745"
]
| 0.6680396 | 1 |
Extracts translatable strings from a code file | def extract_messages_from_code(code):
from jinja2 import TemplateError
try:
code = frappe.as_unicode(render_include(code))
# Exception will occur when it encounters John Resig's microtemplating code
except (TemplateError, ImportError, InvalidIncludePath, OSError) as e:
if isinstance(e, InvalidIncludePath):
frappe.clear_last_message()
messages = []
for m in TRANSLATE_PATTERN.finditer(code):
message = m.group("message")
context = m.group("py_context") or m.group("js_context")
pos = m.start()
if is_translatable(message):
messages.append([pos, message, context])
return add_line_number(messages, code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_18n_messages():\n BabelCLI().run(['', 'extract', '-F', 'babel.cfg', '-k', '_t', '--no-location', '--sort-output',\n '--omit-header', '-o', os.path.join(I18N_PATH, 'messages.pot'), 'aliquis'])",
"def extract_messages_from_code(code, is_py=False):\n\ttry:\n\t\tcode = frappe.as_unicode(render_include(code))\n\texcept (TemplateError, ImportError, InvalidIncludePath, IOError):\n\t\t# Exception will occur when it encounters John Resig's microtemplating code\n\t\tpass\n\n\tmessages = []\n\tmessages += [(m.start(), m.groups()[0]) for m in re.compile('_\\(\"([^\"]*)\"').finditer(code)]\n\tmessages += [(m.start(), m.groups()[0]) for m in re.compile(\"_\\('([^']*)'\").finditer(code)]\n\tif is_py:\n\t\tmessages += [(m.start(), m.groups()[0]) for m in re.compile('_\\(\"{3}([^\"]*)\"{3}.*\\)').finditer(code)]\n\n\tmessages = [(pos, message) for pos, message in messages if is_translatable(message)]\n\treturn pos_to_line_no(messages, code)",
"def extract_strings():\n\n ap = renpy.arguments.ArgumentParser(description=\"Extracts translated strings.\")\n ap.add_argument(\"language\", help=\"The language to extract translated strings from.\")\n ap.add_argument(\"destination\", help=\"The json file to store the translated strings into.\")\n ap.add_argument(\"--merge\", help=\"If given, the current contents of the file are preserved, and new contents are merged into the file.\", action=\"store_true\")\n ap.add_argument(\"--force\", help=\"If given, noting happens if the language does not exist.\", action=\"store_true\")\n\n args = ap.parse_args()\n\n language = args.language\n\n if language == 'None':\n language = None\n\n extract_strings_core(language, args.destination, args.merge, args.force)\n\n return False",
"def translate(self, filepath):\n pass",
"def get_messages_from_file(path: str) -> list[tuple[str, str, str | None, int]]:\n\tfrappe.flags.setdefault(\"scanned_files\", set())\n\t# TODO: Find better alternative\n\t# To avoid duplicate scan\n\tif path in frappe.flags.scanned_files:\n\t\treturn []\n\n\tfrappe.flags.scanned_files.add(path)\n\n\tbench_path = get_bench_path()\n\tif not os.path.exists(path):\n\t\treturn []\n\n\twith open(path) as sourcefile:\n\t\ttry:\n\t\t\tfile_contents = sourcefile.read()\n\t\texcept Exception:\n\t\t\tprint(f\"Could not scan file for translation: {path}\")\n\t\t\treturn []\n\n\t\tmessages = []\n\n\t\tif path.lower().endswith(\".py\"):\n\t\t\tmessages += extract_messages_from_python_code(file_contents)\n\t\telse:\n\t\t\tmessages += extract_messages_from_code(file_contents)\n\n\t\tif path.lower().endswith(\".js\"):\n\t\t\t# For JS also use JS parser to extract strings possibly missed out\n\t\t\t# by regex based extractor.\n\t\t\tmessages += extract_messages_from_javascript_code(file_contents)\n\n\t\treturn [\n\t\t\t(os.path.relpath(path, bench_path), message, context, line)\n\t\t\tfor (line, message, context) in messages\n\t\t]",
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def read_po(self, inputfile):\n is_index = False\n lines = inputfile.readlines()\n index = ''\n value = ''\n for line in lines:\n if line.startswith('#'):\n continue\n elif line.startswith('msgid'):\n is_index = True\n self.translations[index] = value\n index = ''\n value = ''\n elif line.startswith('msgstr'):\n is_index = False\n\n v = re.match('.*\"(.*)\".*', line)\n if v:\n if is_index:\n index += ''.join(v.groups())\n else:\n value += ''.join(v.groups())",
"def extract_messages_from_python_code(code: str) -> list[tuple[int, str, str | None]]:\n\tfrom babel.messages.extract import extract_python\n\n\tmessages = []\n\n\tfor message in extract_python(\n\t\tio.BytesIO(code.encode()),\n\t\tkeywords=[\"_\"],\n\t\tcomment_tags=(),\n\t\toptions={},\n\t):\n\t\tlineno, _func, args, _comments = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = args[1] if len(args) == 2 else None\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages",
"def _process_trans(self):\n\t\tt_word = list()\n\t\t# with open(self.trans_file_path, 'r', encoding='utf-8') as in_f:\n\t\twith open(self.trans_file_path, 'r') as in_f:\n\t\t\tf = iter(in_f)\n\t\t\tfor line in f:\n\t\t\t\tword = line.lower().split();\n\t\t\t\tif len(word) == 0 or len(word) == 1:\n\t\t\t\t\tcontinue\n\t\t\t\t'''\n\t\t\t\tif w_0[len(w_0)-1] == '+': # ignore the label starting with #pat+ || #doc+\n\t\t\t\t\tcontinue;\n\t\t\t\t'''\n\t\t\t\tfor i in range(len(word)):\n\t\t\t\t\tif i != 0: # exclude the channel\n\t\t\t\t\t\tw = word[i]\n\t\t\t\t\t\tif w == \"<name>\":\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tt_word.append([w, 0, 0])\n\t\treturn t_word",
"def translationText(language, listOfWords):\n txt = open(language+\".txt\", mode=\"r\").readlines()\n translatedWords = []\n for word in listOfWords:\n for line in txt:\n if line.split()[0] == word:\n translatedWords.append(line.split()[1])\n return translatedWords",
"def get_translation(self):",
"def buildTranslation(lang,suppressAlert=False):\n global string_cache\n fName = os.path.join(langPath, lang + \".trn\")\n if verifyLangCode(lang) and not lang == \"en_US\":\n data = open(fName, \"rb\").read() + \"\\x00\"\n trnPattern = re.compile(r\"^o\\d+[ ]|^t\\d+[ ]\", re.M|re.S)\n grps = re.finditer(trnPattern, data)\n oStart = -1\n oEnd = -1\n tStart = -1\n tEnd = -1\n org = None\n for grp in grps:\n g = grp.group()\n if g.startswith(\"o\"):\n oStart = grp.end()\n tEnd = grp.start() -1\n elif g.startswith(\"t\"):\n oEnd = grp.start() -1\n tStart = grp.end()\n if oStart > -1 and oEnd > -1 and tStart > tEnd:\n org = data[oStart:oEnd]\n if tStart > -1 and (tEnd > -1):\n if tEnd > tStart:\n string_cache[org] = correctEncoding(data[tStart:tEnd])\n tStart = -1\n tEnd = -1\n string_cache[org] = correctEncoding(data[tStart:tEnd -1])\n return string_cache",
"def get_strings():\n import re\n import pathlib\n\n # the absolute path to the root directory\n rootdir = pathlib.Path(__file__).resolve().parent\n\n # read README.md and overwrite readme\n with open(rootdir.joinpath(\"README.md\"), 'r') as f:\n readme = f.read()\n\n # read __init__.py\n with open(rootdir.joinpath(\"yasynccli\", \"__init__.py\"), 'r') as f:\n content = f.read()\n\n # version\n version = re.search(\"__version__\\s*?=\\s*?(?P<version>\\S+?)$\", content, re.MULTILINE)\n version = version.group(\"version\").strip(\"\\\"\\'\")\n\n # desc\n desc = re.search(\"^\\\"\\\"\\\"(?P<desc>\\S.*?)$\", content, re.MULTILINE)\n desc = desc.group(\"desc\")\n\n return version, desc, readme",
"def get_lines(file_name):\n phrase_dict = {}\n file_text = file_name.readlines()\n for line_index, line in enumerate(file_text):\n if line.startswith('msgid '):\n line = line[7:-2]\n if line:\n phrase_dict[line_index] = line\n return phrase_dict, file_text",
"def parse_text(self):\n self.text={}\n for i, lang in enumerate(LANGS):\n text=file(self.src).read()\n self.text[lang]=\"\"\n extracted, finish = \"\", 0\n start_string, stop_string = r\"<!--%s-->\" % lang, r\"<!--/%s-->\" % lang\n # Iterates to check multiple blocks of text within the file!\n # Pay attention to infinite loops!\n # AttributeError exception raised when no more blocks to extract exist\n while True:\n try:\n start=re.compile(start_string, re.IGNORECASE).search(text).span()[1]\n finish=re.compile(stop_string, re.IGNORECASE).search(text).span()[0]\n extracted+=text[start:finish]\n text=text[finish+1:]\n except AttributeError:\n break\n self.text[lang]+=extracted",
"def gettext_translate( s ):\n return catalogs.translate(s)",
"def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]",
"def generate_strings():\n\n # used by error pages and in the sidebar for why to create a subverbify\n for category, strings in funny_translatable_strings.iteritems():\n for string in strings:\n print \"# TRANSLATORS: Do not translate literally. Come up with a funny/relevant phrase (see the English version for ideas.) Accepts markdown formatting.\"\n print \"print _('\" + string + \"')\"\n\n # these are used in v1.lib.pages.trafficpages\n INTERVALS = (\"hour\", \"day\", \"month\")\n TYPES = (\"uniques\", \"pageviews\", \"traffic\", \"impressions\", \"clicks\")\n for interval in INTERVALS:\n for type in TYPES:\n print \"print _('%s by %s')\" % (type, interval)",
"def verify_file(self, filename_or_string):\n po = polib.pofile(filename_or_string)\n return [\n self.lint_poentry(entry) for entry in po.translated_entries()\n ]",
"def get_processed_content(self, fn):\n fin = open(os.path.join(self.wiki_path, fn), 'rb')\n text = fin.read()\n fin.close()\n return (x for x in gensim.utils.tokenize(text, lowercase=True, deacc=True, errors=\"ignore\") if x not in STOPLIST)",
"def get_language(fn):\n # FIXME - this expects the fn to be '.../XX/LC_MESSAGES/messages.po'\n return fn.split(os.sep)[-3]",
"def extract_messages_from_javascript_code(code: str) -> list[tuple[int, str, str | None]]:\n\n\tmessages = []\n\n\tfor message in extract_javascript(\n\t\tcode,\n\t\tkeywords=[\"__\"],\n\t\toptions={},\n\t):\n\t\tlineno, _func, args = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = None\n\n\t\tif isinstance(args, tuple) and len(args) == 3 and isinstance(args[2], str):\n\t\t\tcontext = args[2]\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages",
"def translate():\n pass",
"def get_strings(src_file):\n res = []\n try:\n res = open(src_file,'r').readlines()\n res = [x.strip() for x in res]\n except:\n res = []\n return res",
"def translator(filename: str, outfile):\r\n progname = filename[:-3]\r\n vm_code = parser(filename)\r\n for line in vm_code:\r\n out_line = trans_line(line, progname)\r\n outfile.write(out_line) # write out_line to file\r",
"def getStoryString():\n inFile = open(STORY_FILENAME, 'r')\n wordList = inFile.read()\n return wordList",
"def find_langs(args):\n infile = args.source\n langs = {}\n for line in infile:\n name_and_files = line.split()\n name = name_and_files[0]\n if name not in langs:\n langs[name] = []\n langs[name] += read_files.filter_files(name_and_files[1:])\n langs[args.unknown] += read_files.filter_files(args.classify)\n return langs",
"def load(file):\n\n try:\n with open(file) as in_file:\n loaded_text = in_file.read().strip().split(\"\\n\")\n loaded_text = [x.lower() for x in loaded_text]\n return loaded_text\n except IOError as e:\n print(\"{}\\n Error opening {}. Terminationg program.\".format(e,file), file = sys.stderr)\n sys.exit()",
"def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _",
"def get_messages_from_file(path):\n\tapps_path = get_bench_dir()\n\tif os.path.exists(path):\n\t\twith open(path, 'r') as sourcefile:\n\t\t\treturn [(os.path.relpath(path, apps_path),\n\t\t\t\t\tmessage) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(\".py\"))]\n\telse:\n\t\treturn []"
]
| [
"0.68949",
"0.6539807",
"0.6512879",
"0.6115357",
"0.60902613",
"0.6088147",
"0.60030216",
"0.59867567",
"0.5933709",
"0.5923882",
"0.5830527",
"0.5817076",
"0.5807938",
"0.57887924",
"0.57524884",
"0.573279",
"0.5712957",
"0.563107",
"0.56271267",
"0.56074834",
"0.5578763",
"0.5566959",
"0.55570453",
"0.554615",
"0.5535745",
"0.5528213",
"0.54981655",
"0.5490124",
"0.54790133",
"0.54581696"
]
| 0.6918703 | 0 |
Write translation CSV file. | def write_csv_file(path, app_messages, lang_dict):
app_messages.sort(key=lambda x: x[1])
with open(path, "w", newline="") as msgfile:
w = writer(msgfile, lineterminator="\n")
for app_message in app_messages:
context = None
if len(app_message) == 2:
path, message = app_message
elif len(app_message) == 3:
path, message, lineno = app_message
elif len(app_message) == 4:
path, message, context, lineno = app_message
else:
continue
t = lang_dict.get(message, "")
# strip whitespaces
translated_string = CSV_STRIP_WHITESPACE_PATTERN.sub(r"{\g<1>}", t)
if translated_string:
w.writerow([message, translated_string, context]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_csv(self, outputfile):\n d = csv.writer(outputfile, quoting=csv.QUOTE_ALL)\n for row in self.translations.iteritems():\n d.writerow(row)",
"def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)",
"def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return",
"def write_translations_file(app, lang, full_dict=None, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_app_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_csv_file(\n\t\tos.path.join(tpath, lang + \".csv\"), app_messages, full_dict or get_all_translations(lang)\n\t)",
"def write_csv(header_row, data_rows, filename, course_id):\n shared.ensure_directory_exists(utils.ANSWERS_DISTRIBUTION_REPORTS_DIRECTORY,\n course_id.org, course_id.course)\n\n\n path = shared.get_safe_file_path(utils.ANSWERS_DISTRIBUTION_REPORTS_DIRECTORY,\n course_id.org, course_id.course,\n filename)\n ## need to encode the unico path in order to open the file in prod env\n path = path.encode('utf-8')\n\n with open(path, \"wb\") as ofile:\n writer = csv.writer(ofile, quoting=csv.QUOTE_ALL)\n writer.writerow(header_row)\n for datarow in data_rows:\n encoded_row = [cleanup_newlines(unicode(s).encode('utf-8'))\n for s in datarow]\n writer.writerow(encoded_row)",
"def write_csv(self, file):\n # Write header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID\\n')\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) + '\\n')\n file.write(row)",
"def write_csv_label(labels, csv_file):\n with open(csv_file, 'w') as f:\n writer = csv.writer(f)\n for key, value in labels.items():\n writer.writerow([key, value])",
"def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()",
"def write_csv_file(filepath, fieldnames, rows):\n headers = [{'label': field} for field in fieldnames]\n with open(filepath, 'w') as f_buf:\n outfile = CsvWriter()\n outfile.set_headers(headers)\n outfile._datas = rows\n outfile.render(f_buf)",
"def writeToMonthCsv(news_dict):\n\n for k in news_dict:\n output_f = open(k + \".csv\", \"wb\")\n writer = csv.writer(output_f)\n writer.writerow([news_dict[k].replace(\",\", \"\").encode(\"utf-8\")])\n output_f.close()",
"def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")",
"def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))",
"def write_to_csv(self, data):\n with open(\"out.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(self.column_names)\n writer.writerows(data)\n print(\" Updated succesfully \")",
"def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()",
"def write_to_file(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.clean_unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")",
"def writeToCsv(news_dict, f_name):\n\n f_data = open(f_name, \"wb\")\n writer = csv.writer(f_data)\n for k in news_dict:\n writer.writerow([k, news_dict[k].replace(\",\", \"\")])\n f_data.close()",
"def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")",
"def save_csv(filename, rows):\n with open(filename, 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow([\n 'title', 'runtime', 'genre(s)', 'director(s)', 'writer(s)',\n 'actor(s)', 'rating(s)', 'number of rating(s)'\n ])\n\n writer.writerows(rows)",
"def write_csv(d, f):\n with open(f, 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(d[0])\n for row in d[1]:\n row_encode = list()\n for x in row:\n if type(x) == unicode:\n row_encode.append(x.encode('utf8'))\n else:\n row_encode.append(x)\n writer.writerow(row_encode)\n return True",
"def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)",
"def writeToCsv(clue):\n filename = 'new_clue_import_for_editing.csv'\n f = open(filename, 'w')\n fieldnames = list(set([m['Clue_field'] for m in mapping]))\n fieldnames.append('date')\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for c in clue:\n writer.writerow(c)\n f.close()",
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])",
"def write_csv(settings, row, mode):\n with open(settings.output_file_path, mode=mode) as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(row)",
"def write_csv(fn, toCSV):\n keys = toCSV[0].keys()\n with open(fn, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(toCSV)",
"def write_csv(file_name, data):\n\n with open(file_name, \"w\") as fp:\n\n writer = RiscvInstructionTraceCsv(fp)\n writer.start_new_trace()\n\n for entry in data:\n writer.write_trace_entry(entry)",
"def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)",
"def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def write_to_csv(self, output_dir, delimiter, include_language, filename=None):\n\n if filename is not None:\n self.filename = filename\n\n if len(self.values) == 0:\n logger.info(\"Nothing to export.\")\n return\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n file_path = os.path.join(output_dir, self.filename)\n\n # write search results to UTF8-encoded CSV file (see also http://stackoverflow.com/a/844443)\n with codecs.open(file_path, 'w', encoding='utf8') as fp:\n logger.info('Exporting search results to ' + file_path + '...')\n writer = csv.writer(fp, delimiter=delimiter)\n\n column_names = SearchResult.get_column_names(include_language)\n\n # write header of CSV file\n writer.writerow(column_names)\n\n count = 0\n try:\n for row in self.get_rows(include_language):\n if len(row) == len(column_names):\n writer.writerow(row)\n count = count + 1\n else:\n raise IllegalArgumentError(\n str(abs(len(column_names) - len(row))) + ' parameter(s) is/are missing for \"'\n + str(row) + '\"')\n\n except UnicodeEncodeError:\n logger.error('Encoding error while writing data for: ' + str(row))\n\n logger.info(str(count) + ' search results have been exported.')",
"def write_csv(self, filename, cutoff=2):\n f = csv.writer(open(filename, 'wb'))\n for row in self.rows(cutoff=cutoff):\n f.writerow(row)"
]
| [
"0.7842705",
"0.72142833",
"0.67409736",
"0.66681284",
"0.66015655",
"0.6600977",
"0.65728676",
"0.6566953",
"0.6543331",
"0.65170884",
"0.65123487",
"0.6505551",
"0.650296",
"0.64963204",
"0.64958304",
"0.6467368",
"0.6466191",
"0.6463179",
"0.6432045",
"0.64244795",
"0.63858116",
"0.63806915",
"0.63413584",
"0.63395053",
"0.6338162",
"0.6316019",
"0.6310448",
"0.62903905",
"0.6285129",
"0.6284137"
]
| 0.7470537 | 1 |
Returns all untranslated strings for a language and writes them to a file | def get_untranslated(lang, untranslated_file, get_all=False, app="_ALL_APPS"):
clear_cache()
apps = frappe.get_all_apps(True)
if app != "_ALL_APPS":
if app not in apps:
print(f"Application {app} not found!")
return
apps = [app]
messages = []
untranslated = []
for app_name in apps:
messages.extend(get_messages_for_app(app_name))
messages = deduplicate_messages(messages)
def escape_newlines(s):
return s.replace("\\\n", "|||||").replace("\\n", "||||").replace("\n", "|||")
if get_all:
print(str(len(messages)) + " messages")
with open(untranslated_file, "wb") as f:
for m in messages:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
else:
full_dict = get_all_translations(lang)
for m in messages:
if not full_dict.get(m[1]):
untranslated.append(m[1])
if untranslated:
print(str(len(untranslated)) + " missing translations of " + str(len(messages)))
with open(untranslated_file, "wb") as f:
for m in untranslated:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
else:
print("all translated!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_untranslated(lang, untranslated_file=None, get_all=False, app=None, write=True):\n\tclear_cache()\n\n\tmessages = []\n\tuntranslated = defaultdict(lambda: defaultdict(dict))\n\tif app:\n\t\tmessages = get_messages_for_app(app)\n\telse:\n\t\tfor app in frappe.get_all_apps(True):\n\t\t\tmessages.extend(get_messages_for_app(app))\n\n\tmessages = messages\n\n\tdef escape_newlines(s):\n\t\treturn (s.replace(\"\\\\\\n\", \"|||||\")\n\t\t\t\t.replace(\"\\\\n\", \"||||\")\n\t\t\t\t.replace(\"\\n\", \"|||\"))\n\n\tfull_dict = load_lang(lang, [app])\n\tcomparison_dict = reduce(lambda a,b: a.update(b) or a, list(full_dict.values()), {})\n\tif get_all:\n\t\tprint(str(len(messages)) + \" messages\")\n\t\tfor m in messages:\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\tif write:\n\t\t\twrite_json_file(untranslated_file, untranslated)\n\t\telse:\n\t\t\treturn untranslated\n\n\telse:\n\t\tmessages_count = 0\n\t\tuntranslated_count = 0\n\n\t\tfor m in messages:\n\t\t\tmessages_count += 1\n\t\t\tif m[0] and m[0] not in full_dict:\n\t\t\t\tuntranslated_count += 1\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\t\telif m[0] and m[1] not in full_dict[m[0]]:\n\t\t\t\tuntranslated_count += 1\n\t\t\t\tuntranslated[m[0]][escape_newlines(m[1])] = get_existing_translation(escape_newlines(m[1]), comparison_dict)\n\n\t\tif untranslated:\n\t\t\tprint(str(untranslated_count) + \" missing translations of \" + str(messages_count))\n\t\t\t\n\t\t\tif write:\n\t\t\t\twrite_json_file(untranslated_file, untranslated)\n\t\t\telse:\n\t\t\t\treturn untranslated\n\n\t\telse:\n\t\t\tprint(\"all translated!\")",
"def buildTranslation(lang,suppressAlert=False):\n global string_cache\n fName = os.path.join(langPath, lang + \".trn\")\n if verifyLangCode(lang) and not lang == \"en_US\":\n data = open(fName, \"rb\").read() + \"\\x00\"\n trnPattern = re.compile(r\"^o\\d+[ ]|^t\\d+[ ]\", re.M|re.S)\n grps = re.finditer(trnPattern, data)\n oStart = -1\n oEnd = -1\n tStart = -1\n tEnd = -1\n org = None\n for grp in grps:\n g = grp.group()\n if g.startswith(\"o\"):\n oStart = grp.end()\n tEnd = grp.start() -1\n elif g.startswith(\"t\"):\n oEnd = grp.start() -1\n tStart = grp.end()\n if oStart > -1 and oEnd > -1 and tStart > tEnd:\n org = data[oStart:oEnd]\n if tStart > -1 and (tEnd > -1):\n if tEnd > tStart:\n string_cache[org] = correctEncoding(data[tStart:tEnd])\n tStart = -1\n tEnd = -1\n string_cache[org] = correctEncoding(data[tStart:tEnd -1])\n return string_cache",
"def translate_elements(filename = \"en_csv.csv\", filepath = '.\\\\', language = 'en'):\n \n output_str = \"\"\n\n file_exists = False\n\n my_file = Path(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n if my_file.is_file():\n file_exists = True\n file_dict = read_file_into_dict(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n \n with open(filepath + \"\\\\\" + filename, 'r') as f:\n lines = f.readlines()\n\n first_line = lines.pop(0)[:-1].replace('\"', '').split(',')\n \n app_index = first_line.index('app')\n field_name_index = first_line.index('field_name')\n language_id_index = first_line.index('language_id')\n localized_text_index = first_line.index('localized_text')\n \n total = len(lines)\n \n translate_client = translate.Client()\n index = 0\n \n for line in lines:\n data = line.replace('\"', '').split(',')\n \n for i in range(0, len(data)):\n data[i] = data[i].replace('\"', '\\'')\n \n key = data[field_name_index]+data[app_index]+language[:2]\n output_str+='insert into localized_text'\n output_str+=' values(\\''\n output_str+=data[field_name_index]\n output_str+='\\',\\''\n output_str+=data[app_index]\n output_str+='\\',\\''\n output_str+=language[:2]\n output_str+='\\',\\''\n\n if file_exists and key in file_dict.keys():\n output_str+= file_dict[key]\n elif language == 'en':\n output_str+= data[localized_text_index] \n else:\n print('Translation')\n translation = translate_client.translate(data[localized_text_index], source_language = 'en', target_language = language)\n output_str+=translation['translatedText'].replace(\"\\n\", \"\")\n output_str+='\\');\\n'\n index+=1\n print(str(index) + \" out of \" + str(total))\n \n\n with open(filepath + \"\\\\\" + language[:2] + \"_text.txt\", 'w', encoding = 'utf-8') as f:\n f.write(output_str)\n \n return output_str",
"def write_translations_file(app, lang, full_dict=None, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_app_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_csv_file(\n\t\tos.path.join(tpath, lang + \".csv\"), app_messages, full_dict or get_all_translations(lang)\n\t)",
"def write_translations_file(app, lang, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_pymodule_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_json_file(os.path.join(tpath, lang + \".json\"), app_messages)",
"def translationText(language, listOfWords):\n txt = open(language+\".txt\", mode=\"r\").readlines()\n translatedWords = []\n for word in listOfWords:\n for line in txt:\n if line.split()[0] == word:\n translatedWords.append(line.split()[1])\n return translatedWords",
"def prepare_translations():\n output_fn = '/home/jelle/Desktop/django.csv'\n local('po2csv apps/dasa/locale/id/LC_MESSAGES/django.po %(output_fn)s' % locals())\n print 'output written to %(output_fn)s' % locals()",
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def showTranslatedWithoutJoin(cls):\n print (\"ALL WORDS WITH TRANSLATIONS STORED IN DATABASE:\")\n for word1 in EnglishHelper.query(\"SELECT english_word FROM EnglishWords\", fetchAll=True):\n try:\n print word1[0],\" - \", (EnglishHelper.query(\"select polish_word from PolishWords where \"\n \" id_pl=(select id_pl from translations where \"\n \"id_eng = (select id_eng from EnglishWords \"\n \"where english_word = '%s'))\"%word1))[0].encode('utf-8')\n except:\n print \"There is no translation, sorry :(\"",
"def extract_strings():\n\n ap = renpy.arguments.ArgumentParser(description=\"Extracts translated strings.\")\n ap.add_argument(\"language\", help=\"The language to extract translated strings from.\")\n ap.add_argument(\"destination\", help=\"The json file to store the translated strings into.\")\n ap.add_argument(\"--merge\", help=\"If given, the current contents of the file are preserved, and new contents are merged into the file.\", action=\"store_true\")\n ap.add_argument(\"--force\", help=\"If given, noting happens if the language does not exist.\", action=\"store_true\")\n\n args = ap.parse_args()\n\n language = args.language\n\n if language == 'None':\n language = None\n\n extract_strings_core(language, args.destination, args.merge, args.force)\n\n return False",
"def _write_message_files(lang, command='update'):\n BabelCLI().run(['', command, '-i', os.path.join(I18N_PATH, 'messages.pot'), '-d', I18N_PATH,\n '-l', lang])",
"def translate(lang):\n\n\tlangfilename = os.path.join(\"data\", \"translations\", lang + \".json\")\n\tif os.path.exists(langfilename):\n\t\twith open(langfilename, 'r') as langfile:\n\t\t\ttranslations = json.loads(langfile.read())\n\telse:\n\t\ttranslations = {}\n\n\twith open(os.path.join(\"data\", \"translations\", \"message_list.json\"), \"r\") as message_list_file:\n\t\tmessages = json.loads(message_list_file.read())\n\n\tcnt = 0\n\tfor m in messages:\n\t\tcnt += 1\n\t\t#if cnt > 15: break\n\t\tif not translations.get(m):\n\t\t\tprint 'translating: ' + m\n\t\t\tresponse = requests.get(\"\"\"https://www.googleapis.com/language/translate/v2\"\"\",\n\t\t\t\tparams = {\n\t\t\t\t\t\"key\": conf.google_api_key,\n\t\t\t\t\t\"source\": \"en\",\n\t\t\t\t\t\"target\": lang,\n\t\t\t\t\t\"q\": m\n\t\t\t\t}, verify=False)\n\n\t\t\tt = response.json[\"data\"][\"translations\"][0][\"translatedText\"] or m\n\t\t\ttranslations[m] = t.encode('utf-8')\n\n\t\t\twith open(langfilename, 'w') as langfile:\n\t\t\t\tlangfile.write(json.dumps(translations, indent=1, sort_keys=True))",
"def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return",
"def add_translation_to_file(language, translation):\n with open(f'{TRANSLATIONS_DIRECTORY}/{language}', 'a') as f:\n f.write(translation + '\\n')",
"def save_horizon_to_txt(self):\n #for every language\n for i in self.languages_three_dict:\n #get content of the page\n res = requests.get(self.languages_three_dict.get(i))\n html_page = res.content\n soup = BeautifulSoup(html_page, 'html.parser')\n text = soup.find_all(text = True)\n #initialise file to write the output. The name depends on the \n #language of the input url\n output_filename = i + '.txt'\n file = open(output_filename, 'w')\n #define counter for the abstract\n counter_abstract = 0\n #for every line in the page\n for t in text:\n if t.parent.name == 'title':\n file.write('Title: ' + t + '\\n')\n if t.parent.name == 'script' and '\"author\"' in t:\n file.write('Author: ' + \n (((json.loads(str(t))).get('@graph'))[-1]).get('name') + '\\n')\n #if the parent name is 'p' (get only text), the length of the \n #line is greater than 2 (remove empty lines, and creative \n #commons), the line is not the caption of an image (captions \n #have |) and the amount of spaces in >0.5len(line) (to exclude \n #lines not belonging to the main text)\n if (t.parent.name == 'p' and len(t)>2 and '| ' not in t\n and not sum(c.isspace() for c in t) > 0.5*len(t)):\n #write line to txt file\n if counter_abstract == 0:\n file.write('Abstract: ' + t + '\\n\\n')\n counter_abstract +=1\n else:\n file.write(t+'\\n')\n file.close()\n return",
"def saveFilesByLang(self, folderToSaveLangs, fileLangs):\n for lang in self.hashDictByLang:\n fileLangs.write(lang + \"\\n\")\n with open(folderToSaveLangs + \"/\" +lang, \"w\") as fout:\n for hash in self.hashDictByLang[lang]:\n trainobj = self.hashDictByLang[lang][hash]\n text = trainobj.getText()\n id = trainobj.getHbaseDocumentId()\n categories = trainobj.getCategories()\n headTrainObj = str(id)\n for cat in categories:\n headTrainObj += \" \" + cat.replace(\" \", \"_\")\n fout.write(headTrainObj + \"\\n\")\n fout.write(text + \"\\n\")",
"def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]",
"def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp",
"def write_locales(config: Config) -> Config:\n strings_rendered = render_strings(reduce_strings(config.root))\n\n destination_files = []\n\n for key, contents in strings_rendered.items():\n destination_file = os.path.join(\n config.destination,\n \"res\",\n key,\n \"description\",\n \"{}.str\".format(config.name)\n )\n\n contents = \"\\n\".join([COMMENT_C + PREFIX, contents])\n\n assert_directories(destination_file, True)\n\n with open(destination_file, \"w\") as f:\n f.write(contents)\n\n destination_files.append(destination_file)\n\n return config",
"def translate(self, language=None):",
"def generate_dummy_translation(source_pot_file_path, po_file_path):\n with open(po_file_path, \"wb\") as outputfile:\n convertpo(source_pot_file_path, outputfile, None, rewritestyle=\"unicode\")",
"def update_translations(lang, untranslated_file, translated_file, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\n\tdef restore_newlines(s):\n\t\treturn (\n\t\t\ts.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t.replace(\"| | |\", \"\\n\")\n\t\t)\n\n\ttranslation_dict = {}\n\tfor key, value in zip(\n\t\tfrappe.get_file_items(untranslated_file, ignore_empty_lines=False),\n\t\tfrappe.get_file_items(translated_file, ignore_empty_lines=False),\n\t):\n\n\t\t# undo hack in get_untranslated\n\t\ttranslation_dict[restore_newlines(key)] = restore_newlines(value)\n\n\tfull_dict.update(translation_dict)\n\tapps = frappe.get_all_apps(True)\n\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tfor app_name in apps:\n\t\twrite_translations_file(app_name, lang, full_dict)",
"def _extract_18n_messages():\n BabelCLI().run(['', 'extract', '-F', 'babel.cfg', '-k', '_t', '--no-location', '--sort-output',\n '--omit-header', '-o', os.path.join(I18N_PATH, 'messages.pot'), 'aliquis'])",
"def translate(commands, fileName):\n file = open(fileName, \"w\")\n for cmd in commands:\n file.write(cmd.translate() + \"\\n\")\n\n file.close()",
"def translate(self, filepath):\n pass",
"def ner_spacy(filepath):\n\n\n out = \"\"\n\n with codecs.open(filepath,'r','utf-8') as current_file:\n\n text = current_file.readlines()\n\n with codecs.open(filepath+\".ner\",'w','utf-8') as outfile:\n\n for line in text:\n doc = nlp(line.rstrip())\n for word in doc:\n if word.ent_type_ != u\"\":\n outfile.write(word.text+\"|\"+word.ent_type_+' ')\n else:\n outfile.write(word.text+' ')\n outfile.write('\\n')",
"def GetLanguages():\n return GetDataFromCsvFile('languages.csv')",
"def alert_ru(subj, text):\n pos_skip = text.find(\"Ещё результаты\")\n if pos_skip >= 0:\n text = text[:pos_skip]\n\n lines = []\n for line in text.split('\\n'):\n if not any([line.startswith(i) for i in DROP_RU]): # pylint: disable=use-a-generator\n lines.append(make_markdown(line))\n\n return [\n MARKUP,\n clear_markdown(subj),\n '',\n clear_trash(Parser.drop_newlines('\\n'.join(handle_lines(lines)))),\n ]",
"def translate_phrases(translator, phrases, language):\n for phrase in phrases:\n translator.type_phrase_to_translate(phrase)\n sleep(0.5)\n translated_phrase = translator.read_translated_phrase()\n add_translation_to_file(language, translated_phrase)",
"def segment_pofiles(locale):\r\n files_written = set()\r\n for filename, segments in CONFIGURATION.segment.items():\r\n filename = CONFIGURATION.get_messages_dir(locale) / filename\r\n files_written.update(segment_pofile(filename, segments))\r\n return files_written"
]
| [
"0.6584445",
"0.65648997",
"0.65052485",
"0.64634526",
"0.6346493",
"0.62578046",
"0.620105",
"0.61931974",
"0.6145856",
"0.6066353",
"0.6035533",
"0.6004509",
"0.5979703",
"0.5902814",
"0.5893097",
"0.58732826",
"0.5835999",
"0.5832829",
"0.58216834",
"0.5774166",
"0.57482123",
"0.5722486",
"0.5704471",
"0.5687893",
"0.568473",
"0.567589",
"0.5672041",
"0.5649734",
"0.5644386",
"0.5628922"
]
| 0.68825924 | 0 |
Update translations from a source and target file for a given language. | def update_translations(lang, untranslated_file, translated_file, app="_ALL_APPS"):
clear_cache()
full_dict = get_all_translations(lang)
def restore_newlines(s):
return (
s.replace("|||||", "\\\n")
.replace("| | | | |", "\\\n")
.replace("||||", "\\n")
.replace("| | | |", "\\n")
.replace("|||", "\n")
.replace("| | |", "\n")
)
translation_dict = {}
for key, value in zip(
frappe.get_file_items(untranslated_file, ignore_empty_lines=False),
frappe.get_file_items(translated_file, ignore_empty_lines=False),
):
# undo hack in get_untranslated
translation_dict[restore_newlines(key)] = restore_newlines(value)
full_dict.update(translation_dict)
apps = frappe.get_all_apps(True)
if app != "_ALL_APPS":
if app not in apps:
print(f"Application {app} not found!")
return
apps = [app]
for app_name in apps:
write_translations_file(app_name, lang, full_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_translations(lang, translated_data, app, is_file=True):\n\tclear_cache()\n\tfull_dict = load_lang(lang, [app])\n\n\tif full_dict:\n\t\tdef restore_newlines(s):\n\t\t\treturn (s.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t\t\t.replace(\"| | |\", \"\\n\"))\n\n\t\ttranslation_dict = defaultdict(dict)\n\t\tfor k in full_dict:\n\t\t\tfor m in full_dict[k]:\n\t\t\t\ttranslation_dict[k][m] = full_dict[restore_newlines(k)][restore_newlines(m)]\n\n\t\tif is_file:\n\t\t\tnew_translations = frappe._dict(frappe.get_file_json(translated_data))\n\t\telse:\n\t\t\tnew_translations = translated_data\n\n\t\tfor k in new_translations:\n\t\t\tfor m in new_translations[k]:\n\t\t\t\tif new_translations[k][m] != \"\":\n\t\t\t\t\ttranslation_dict[k][restore_newlines(m)] = restore_newlines(new_translations[k][m])\n\n\t\twrite_translations_file(app, lang, translation_dict)",
"def translate(self, source_file):\n \n # Name of output file. The extension of output file is .translated\n if self.pos_tagging:\n output_file = os.path.splitext(source_file)[0] + '_pos.translated'\n else:\n output_file = os.path.splitext(source_file)[0] + '.translated'\n \n try:\n # Open output file for writing\n output_file = open(output_file, 'w')\n except:\n print('Cannot open file' + output_file + ' for writing', file=sys.stderr)\n sys.exit(1)\n \n source_lines = self.read_text_file(source_file)\n # Loop on source file line by line\n for source_line in source_lines:\n # Generate word tokens\n source_words = list(word_tokenize(source_line.strip()))\n # Preform POS tagging\n if self.pos_tagging:\n source_words = pos_tag(source_words)\n \n translated_words = []\n # Generate translated words\n for word in source_words:\n if self.model[word]:\n translated_word = max(self.model[word].items(), key=itemgetter(1))[0]\n translated_words.append(translated_word)\n \n # Remove POS tags\n if self.pos_tagging:\n translated_words = [word[0] for word in translated_words]\n \n # Convert words to sentences\n translated_sentence = self.words_to_sentence(translated_words)\n \n # Write translated sentence to the output file\n output_file.write(translated_sentence + '\\n')",
"def add_translation_to_file(language, translation):\n with open(f'{TRANSLATIONS_DIRECTORY}/{language}', 'a') as f:\n f.write(translation + '\\n')",
"def merge(locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True):\r\n LOG.info('Merging {target} for locale {locale}'.format(target=target, locale=locale))\r\n locale_directory = CONFIGURATION.get_messages_dir(locale)\r\n try:\r\n validate_files(locale_directory, sources)\r\n except Exception, e:\r\n if not fail_if_missing:\r\n return\r\n raise\r\n\r\n # merged file is merged.po\r\n merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources)\r\n execute(merge_cmd, working_directory=locale_directory)\r\n\r\n # clean up redunancies in the metadata\r\n merged_filename = locale_directory.joinpath('merged.po')\r\n clean_pofile(merged_filename)\r\n\r\n # rename merged.po -> django.po (default)\r\n target_filename = locale_directory.joinpath(target)\r\n os.rename(merged_filename, target_filename)",
"def upload_messages_to_transifex(\n self, legalcode, pofile: polib.POFile = None\n ):\n language_code = legalcode.language_code\n resource_slug = legalcode.license.resource_slug\n resource_name = legalcode.license.resource_name\n pofilename = legalcode.translation_filename()\n\n resources = self.get_transifex_resources()\n resource_slugs = [item[\"slug\"] for item in resources]\n\n if pofile is None:\n pofile = legalcode.get_pofile()\n\n pofile_content = get_pofile_content(pofile)\n\n if resource_slug not in resource_slugs:\n if language_code != DEFAULT_LANGUAGE_CODE:\n raise ValueError(\n f\"The resource {resource_slug} does not yet exist in\"\n \" Transifex. Must upload English first to create it.\"\n )\n self.create_resource(\n resource_slug, resource_name, pofilename, pofile_content\n )\n elif language_code == DEFAULT_LANGUAGE_CODE:\n # We're doing English, which is the source language.\n self.update_source_messages(\n resource_slug, pofilename, pofile_content\n )\n else:\n self.update_translations(\n resource_slug, language_code, pofilename, pofile_content\n )",
"def _translation(basename, props_dir, languages, key_language=None):\n props_dir = os.path.abspath(props_dir)\n if os.path.isfile(props_dir):\n props_dir = os.path.dirname(props_dir)\n trans = None\n use_key_as_lang = False\n for lang in languages:\n while True:\n trans = _try_file \\\n (props_dir, basename + \"_\" + lang + \".properties\", lang, trans)\n # Use identity mapping instead (or in addition to) file?\n if lang == key_language:\n use_key_as_lang = True\n # We need no more fallbacks after identity mapping\n break;\n lang_up = lang.rsplit(\"_\", 1)[0]\n if lang_up == lang:\n break\n lang = lang_up\n # Finally try properties file without language specification\n trans = _try_file(props_dir, basename + \".properties\", None, trans)\n if trans:\n trans._add_fallback_unchecked(BaseTranslations()) # last resort\n else:\n if use_key_as_lang:\n trans = BaseTranslations(key_language)\n else:\n trans = BaseTranslations()\n return trans",
"def migrate_translations(source_app, target_app):\n\tclear_cache()\n\tstrings_in_source_app = [m[1] for m in frappe.translate.get_messages_for_app(source_app)]\n\tstrings_in_target_app = [m[1] for m in frappe.translate.get_messages_for_app(target_app)]\n\n\tstrings_in_target_app_but_not_in_source_app = list(\n\t\tset(strings_in_target_app) - set(strings_in_source_app)\n\t)\n\n\tlanguages = frappe.translate.get_all_languages()\n\n\tsource_app_translations_dir = frappe.get_app_path(source_app, \"translations\")\n\ttarget_app_translations_dir = frappe.get_app_path(target_app, \"translations\")\n\n\tif not os.path.exists(target_app_translations_dir):\n\t\tos.makedirs(target_app_translations_dir)\n\n\tfor lang in languages:\n\t\tsource_csv = os.path.join(source_app_translations_dir, lang + \".csv\")\n\n\t\tif not os.path.exists(source_csv):\n\t\t\tcontinue\n\n\t\ttarget_csv = os.path.join(target_app_translations_dir, lang + \".csv\")\n\t\ttemp_csv = os.path.join(source_app_translations_dir, \"_temp.csv\")\n\n\t\twith open(source_csv) as s, open(target_csv, \"a+\") as t, open(temp_csv, \"a+\") as temp:\n\t\t\tsource_reader = reader(s, lineterminator=\"\\n\")\n\t\t\ttarget_writer = writer(t, lineterminator=\"\\n\")\n\t\t\ttemp_writer = writer(temp, lineterminator=\"\\n\")\n\n\t\t\tfor row in source_reader:\n\t\t\t\tif row[0] in strings_in_target_app_but_not_in_source_app:\n\t\t\t\t\ttarget_writer.writerow(row)\n\t\t\t\telse:\n\t\t\t\t\ttemp_writer.writerow(row)\n\n\t\tif not os.path.getsize(target_csv):\n\t\t\tos.remove(target_csv)\n\t\tos.remove(source_csv)\n\t\tos.rename(temp_csv, source_csv)",
"def update_file(dst, src, language, mutator):\n\n # if the source and destination are the same, we're updating in place\n inplace = dst == src\n\n if isinstance(src, str):\n # if a filename was provided, open the file\n if inplace:\n mode = \"r+\"\n else:\n mode = \"r\"\n src = open(src, mode)\n\n orig_lines = []\n\n # grab all of the lines of the file and strip them of their line ending\n old_lines = list(line.rstrip(\"\\r\\n\") for line in src)\n new_lines = list(mutator(old_lines, src.name, language))\n\n for line in src:\n line = line\n\n if inplace:\n # if we're updating in place and the file hasn't changed, do nothing\n if old_lines == new_lines:\n return\n\n # otherwise, truncate the file and seek to the beginning.\n dst = src\n dst.truncate(0)\n dst.seek(0)\n elif isinstance(dst, str):\n # if we're not updating in place and a destination file name\n # was provided, create a file object\n dst = open(dst, \"w\")\n\n for line in new_lines:\n dst.write(line)\n dst.write(\"\\n\")",
"def update_templates():\n logging.info(\"Copying english po files to %s\" % POT_PATH)\n\n # post them to exposed URL\n ensure_dir(POT_PATH)\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"django.po\"), os.path.join(POT_PATH, \"kalite.pot\"))\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"djangojs.po\"), os.path.join(POT_PATH, \"kalitejs.pot\"))",
"def translations_import(pod_path, source, locale, include_obsolete, untranslated):\n if source.endswith('.po') and locale is None:\n text = 'Must specify --locale when --source is a .po file.'\n raise click.ClickException(text)\n if not source.endswith('.po') and locale is not None:\n text = 'Cannot specify --locale when --source is not a .po file.'\n raise click.ClickException(text)\n source = os.path.expanduser(source)\n root = os.path.abspath(os.path.join(os.getcwd(), pod_path))\n pod = pods.Pod(root, storage=storage.FileStorage)\n if not pod.exists:\n raise click.ClickException('Pod does not exist: {}'.format(pod.root))\n source = glob.glob(source)\n with pod.profile.timer('translations_grow_i'):\n for path in source:\n pod.catalogs.import_translations(\n path, locale=locale, include_obsolete=include_obsolete,\n untranslated=untranslated)\n return pod",
"def i18nupdate():\n click.echo('-> Updating i18n message files...')\n _extract_18n_messages()\n langs = app.config['BABEL_LANGUAGES']\n for lang in langs:\n _write_message_files(lang)\n click.echo('-> i18n message files updated.\\n')\n click.echo('You should now edit translations in following files:')\n for lang in langs:\n click.echo(os.path.join(I18N_PATH, lang, 'LC_MESSAGES', 'messages.po'))",
"def translate(self, to_lang: str = TARGET_LANG):\n if not self.language:\n self.detect_language()\n if not all([self.clean, self.language != to_lang]):\n return\n self.payload += '&source={}&target={}'.format(self.language, to_lang)\n resp = requests.request('POST', self.url_translation, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.translation = json.loads(resp.text)['data']['translations'][0]['translatedText']\n except KeyError:\n return",
"def set_translation_to_cache ( self, text, src_lang, target_lang, translated_text ):\n self.app_cache.set_translation_to_cache ( text, src_lang, target_lang, translated_text )",
"def translate(self, filepath):\n pass",
"def translate(self, language=None):",
"def translate_elements(filename = \"en_csv.csv\", filepath = '.\\\\', language = 'en'):\n \n output_str = \"\"\n\n file_exists = False\n\n my_file = Path(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n if my_file.is_file():\n file_exists = True\n file_dict = read_file_into_dict(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n \n with open(filepath + \"\\\\\" + filename, 'r') as f:\n lines = f.readlines()\n\n first_line = lines.pop(0)[:-1].replace('\"', '').split(',')\n \n app_index = first_line.index('app')\n field_name_index = first_line.index('field_name')\n language_id_index = first_line.index('language_id')\n localized_text_index = first_line.index('localized_text')\n \n total = len(lines)\n \n translate_client = translate.Client()\n index = 0\n \n for line in lines:\n data = line.replace('\"', '').split(',')\n \n for i in range(0, len(data)):\n data[i] = data[i].replace('\"', '\\'')\n \n key = data[field_name_index]+data[app_index]+language[:2]\n output_str+='insert into localized_text'\n output_str+=' values(\\''\n output_str+=data[field_name_index]\n output_str+='\\',\\''\n output_str+=data[app_index]\n output_str+='\\',\\''\n output_str+=language[:2]\n output_str+='\\',\\''\n\n if file_exists and key in file_dict.keys():\n output_str+= file_dict[key]\n elif language == 'en':\n output_str+= data[localized_text_index] \n else:\n print('Translation')\n translation = translate_client.translate(data[localized_text_index], source_language = 'en', target_language = language)\n output_str+=translation['translatedText'].replace(\"\\n\", \"\")\n output_str+='\\');\\n'\n index+=1\n print(str(index) + \" out of \" + str(total))\n \n\n with open(filepath + \"\\\\\" + language[:2] + \"_text.txt\", 'w', encoding = 'utf-8') as f:\n f.write(output_str)\n \n return output_str",
"def load_language(self, language_file):\n try:\n if self._translator is not None:\n self.removeTranslator(self._translator)\n\n self.language_name = 'en_us'\n if os.path.isfile(language_file):\n self._translator = QTranslator() # I18N 관련\n self._translator.load(language_file)\n self.installTranslator(self._translator)\n self.language_name = os.path.splitext(os.path.basename(language_file))[0]\n finally:\n pass",
"def _read_translations(self):\n print('Reading original translations')\n self.translations_map = {}\n n_translations = 0\n with open(os.path.join(self.src_dir, 'translations.txt'),\n 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self.translations_map.setdefault(\n row['trans_id'], {})[row['lang']] = row['translation']\n n_translations += 1\n print('\\ttotal original translations: %s' % n_translations)",
"def _apply_patch_odoo(self):\n paths = [os.path.join('openerp', 'tools', 'translate.py'),\n os.path.join('odoo', 'tools', 'translate.py')]\n for path in paths:\n s_file = os.path.join(self._server_path, path)\n if not os.path.isfile(s_file):\n continue\n cmd = [\"sed\", \"-i\", \"-e\",\n r\"s/translation'] = src/translation'] = ''/g\",\n s_file]\n print \" \".join(cmd)\n subprocess.call(cmd)",
"def load_language(self, file):\n from Models.Configuration import Configuration\n\n try:\n qm_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'translate', '{0}.qm'.format(file))\n QtWidgets.qApp.load_language(qm_file)\n\n app_doc_data = AppDocData.instance()\n configs = [Configuration('app', 'language', file)]\n app_doc_data.save_app_configs(configs)\n\n for action in self.menuLanguage.actions():\n if action.text().lower() == file.lower():\n continue\n action.setChecked(False)\n finally:\n self.retranslateUi(self)\n self.propertyTableWidget.retranslateUi()",
"def convert_translations(self, dest_dir):\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n total_translation_rows = 0\n with open(os.path.join(dest_dir, 'translations.txt'),\n 'w+b') as out_file:\n writer = csv.DictWriter(\n out_file, fieldnames=NEW_TRANSLATIONS_FIELDS)\n writer.writeheader()\n for filename in sorted(os.listdir(self.src_dir)):\n if not (filename.endswith('.txt') and\n os.path.isfile(os.path.join(self.src_dir, filename))):\n print('Skipping %s' % filename)\n continue\n table_name = filename[:-len('.txt')]\n if table_name == 'translations':\n continue\n total_translation_rows += self._translate_table(\n dest_dir, table_name, writer)\n print('Total translation rows: %s' % total_translation_rows)",
"def translate(lang):\n\n\tlangfilename = os.path.join(\"data\", \"translations\", lang + \".json\")\n\tif os.path.exists(langfilename):\n\t\twith open(langfilename, 'r') as langfile:\n\t\t\ttranslations = json.loads(langfile.read())\n\telse:\n\t\ttranslations = {}\n\n\twith open(os.path.join(\"data\", \"translations\", \"message_list.json\"), \"r\") as message_list_file:\n\t\tmessages = json.loads(message_list_file.read())\n\n\tcnt = 0\n\tfor m in messages:\n\t\tcnt += 1\n\t\t#if cnt > 15: break\n\t\tif not translations.get(m):\n\t\t\tprint 'translating: ' + m\n\t\t\tresponse = requests.get(\"\"\"https://www.googleapis.com/language/translate/v2\"\"\",\n\t\t\t\tparams = {\n\t\t\t\t\t\"key\": conf.google_api_key,\n\t\t\t\t\t\"source\": \"en\",\n\t\t\t\t\t\"target\": lang,\n\t\t\t\t\t\"q\": m\n\t\t\t\t}, verify=False)\n\n\t\t\tt = response.json[\"data\"][\"translations\"][0][\"translatedText\"] or m\n\t\t\ttranslations[m] = t.encode('utf-8')\n\n\t\t\twith open(langfilename, 'w') as langfile:\n\t\t\t\tlangfile.write(json.dumps(translations, indent=1, sort_keys=True))",
"def load_language(self, file: str):\n from App import App\n\n try:\n qm_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'translate', '{0}.qm'.format(file))\n App.instance().load_language(qm_file)\n\n app_doc_data = AppDocData.instance()\n configs = [Config('app', 'language', file)]\n app_doc_data.save_app_configs(configs)\n finally:\n self.retranslateUi(self)",
"def import_translations(pod_path, source, locale):\n if source.endswith('.po') and locale is None:\n text = 'Must specify --locale when --source is a .po file.'\n raise click.ClickException(text)\n if not source.endswith('.po') and locale is not None:\n text = 'Cannot specify --locale when --source is not a .po file.'\n raise click.ClickException(text)\n source = os.path.expanduser(source)\n root = os.path.abspath(os.path.join(os.getcwd(), pod_path))\n pod = pods.Pod(root, storage=storage.FileStorage)\n if not pod.exists:\n raise click.ClickException('Pod does not exist: {}'.format(pod.root))\n pod.catalogs.import_translations(source, locale=locale)",
"def update_project(builder):\r\n\r\n projectfile = join(THISDIR, \"ringo-wp8.csproj\")\r\n\r\n dom = parse(projectfile)\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None )\r\n\r\n if not Languages is None:\r\n Languages = [lan.replace('en-US', 'en') for lan in Languages]\r\n print \"Modified languages\", \",\".join( Languages )\r\n \r\n Languages = [] if Languages is None else Languages\r\n update_project_with_values(dom,\r\n Languages = Languages)\r\n\r\n with open(projectfile, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n if len(Languages) > 0 :\r\n default_language = Languages[0]\r\n if default_language != \"en\" and default_language.lower() != \"en-us\" :\r\n temppath = join(THISDIR, \"src\", \"MobileSecurity\",\"resources\");\r\n print \"Renaming: \", temppath\r\n try:\r\n os.remove(join(temppath,\"Localized.en.resx\"))\r\n except:\r\n pass\r\n os.rename(join(temppath,\"Localized.resx\"), join(temppath,\"Localized.en.resx\"))\r\n try:\r\n os.remove(join(temppath, \"Localized.resx\"))\r\n except:\r\n pass\r\n os.rename(join(temppath,\"Localized.%s.resx\" %(default_language)), join(temppath, \"Localized.resx\"))",
"def translate(self,phrase, **kwargs):\n \n #Load the input and output languages\n if 'output_language' in kwargs:\n out_lang = kwargs.pop('output_language')\n else:\n out_lang = self.language\n \n if 'input_language' in kwargs:\n in_lang = kwargs.pop('input_language')\n else:\n in_lang = 'english'\n \n #Identify the language based on intput\n if out_lang in ['Spanish', 'spanish', 'Espanol', 'espanol', 's', 'S']:\n output_language = 'spanish'\n elif out_lang in ['Portuguese', 'portuguese', 'Português', 'português', 'p', 'P']:\n output_language = 'portuguese'\n elif out_lang in ['English', 'english', 'E', 'e']:\n output_language = 'english'\n else:\n output_language = 'english'\n print('Unable to find language:', out_lang)\n \n #Open CSV with translations\n with open(self.translations, encoding='ISO-8859-15') as csv_file:\n csvread = csv.DictReader(csv_file)\n found = 0\n for row in csvread:\n if row[in_lang] == phrase:\n output_phrase = row[output_language] #translate phrase\n found = 1 #set flag indicating that the phrase was successfully translated\n\n #If no translation was found, return original phrase and present an error message\n if found == 0:\n output_phrase = phrase\n print('Unable to find phrase ', phrase, \"in language \", out_lang)\n \n return output_phrase",
"def translate_dataset(target_lang_iso_1, config):\n logger.debug('Translating dataset: {}'.format(\n config['datasets']['source']))\n logger.debug(' from: {}'.format(config['from']))\n logger.debug(' to: {}'.format(target_lang_iso_1))\n os.makedirs(os.path.dirname(config['datasets']['target']),\n exist_ok=True)\n with open(config['datasets']['source'], 'r')as source_stream,\\\n open(config['datasets']['target'], 'w') as target_stream:\n\n logger.debug(\n 'Saving translations to output file: {} '.format(target_stream))\n translated_mentries = _translate_dataset(source_stream,\n target_lang_iso_1,\n config)\n coverage = _get_recall_metrics(translated_mentries)\n logger.info('{} words translated out of {}'.format(coverage[0],\n coverage[1]))\n\n num_errors = _get_accuracy_metrics(translated_mentries)\n logger.info(\n '{} errors out of {} entries'.format(num_errors,\n len(translated_mentries)))\n\n accuracy = ((len(translated_mentries) - num_errors) /\n len(translated_mentries)) * 100\n recall = (coverage[0] / coverage[1]) * 100\n f1_score = ((accuracy * recall) / (accuracy + recall)) * 2\n\n logger.info('Accuracy = {}%'.format(round(accuracy, 1)))\n logger.info('Recall = {}%'.format(round(recall, 1)))\n logger.info('F1 = {}%'.format(round(f1_score, 1)))\n\n print(const.NEW_LINE.join(['{} {}'.format(trans.pair.first,\n trans.pair.last)\n for trans in translated_mentries]),\n file=target_stream)",
"def import_translations(lang, path):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\tfull_dict.update(get_translation_dict_from_file(path, lang, \"import\"))\n\n\tfor app in frappe.get_all_apps(True):\n\t\twrite_translations_file(app, lang, full_dict)",
"def loadLanguage(request, lang):\n request.clock.start('loadLanguage')\n from MoinMoin import caching\n # farm notice: for persistent servers, only the first wiki requesting some language\n # gets its cache updated - a bit strange and redundant, but no problem.\n cache = caching.CacheEntry(request, arena='i18n', key=lang)\n import MoinMoin.request\n langfilename = os.path.join(MoinMoin.request.prefix + \"/i18n\", '%s.py' % filename(lang))\n needsupdate = cache.needsUpdate(langfilename)\n if debug: request.log(\"i18n: langfilename %s needsupdate %d\" % (langfilename, needsupdate))\n if not needsupdate:\n try:\n (uc_texts, uc_unformatted) = pickle.loads(cache.content())\n except (IOError, ValueError, pickle.UnpicklingError): # bad pickle data, no pickle\n if debug: request.log(\"i18n: pickle %s load failed\" % lang)\n needsupdate = 1\n\n if needsupdate: \n from MoinMoin.util import pysupport\n lang_module = \"MoinMoin.i18n.%s\" % filename(lang)\n try:\n # Language module without text dict will raise AttributeError\n texts = pysupport.importName(lang_module, \"text\")\n except ImportError:\n if debug: request.log(\"i18n: import of module %s failed.\" % lang_module)\n request.clock.stop('loadLanguage')\n return None, None\n meta = pysupport.importName(lang_module, \"meta\") \n encoding = meta['encoding']\n\n # convert to unicode\n if debug: request.log(\"i18n: processing unformatted texts of lang %s\" % lang)\n uc_unformatted = {}\n for key, text in texts.items():\n ukey = key.decode(encoding)\n utext = text.decode(encoding)\n uc_unformatted[ukey] = utext\n\n if meta.get('wikimarkup', False):\n if debug: request.log(\"i18n: processing formatted texts of lang %s\" % lang)\n # use the wiki parser now to replace some wiki markup with html\n uc_texts = {}\n for key, text in uc_unformatted.items():\n try:\n uc_texts[key] = formatMarkup(request, text)\n except: # infinite recursion or crash\n if debug:\n request.log(\"i18n: crashes in language %s on string: %s\" % (lang, text))\n uc_texts[key] = \"%s*\" % text\n else:\n uc_texts = uc_unformatted\n if debug: request.log(\"i18n: dumping lang %s\" % lang)\n cache.update(pickle.dumps((uc_texts, uc_unformatted), PICKLE_PROTOCOL))\n request.clock.stop('loadLanguage')\n return uc_texts, uc_unformatted",
"def translate(self, action):\r\n self.current_language = str(action.data().toString()).strip(\"tr_\").rstrip(\".qm\")\r\n\r\n log.info(\"Switching language to: %s\" % action.text())\r\n self.uiTranslator.load(\":/languages/tr_%s.qm\" % self.current_language)\r\n self.app.installTranslator(self.uiTranslator)\r\n\r\n self.retranslateFreeseerApp()\r\n self.aboutDialog.aboutWidget.retranslate(self.current_language)\r\n self.retranslate()"
]
| [
"0.66791344",
"0.63966095",
"0.6078378",
"0.60729563",
"0.6036594",
"0.6028217",
"0.59740347",
"0.5968465",
"0.59569347",
"0.5926012",
"0.59227216",
"0.5869459",
"0.5791227",
"0.5777387",
"0.5757889",
"0.57465124",
"0.57136136",
"0.57036763",
"0.5703097",
"0.56558466",
"0.5653505",
"0.56280106",
"0.55726314",
"0.5570107",
"0.5563746",
"0.55532926",
"0.55428976",
"0.5519222",
"0.55083406",
"0.54513335"
]
| 0.6637544 | 1 |
Import translations from file in standard format | def import_translations(lang, path):
clear_cache()
full_dict = get_all_translations(lang)
full_dict.update(get_translation_dict_from_file(path, lang, "import"))
for app in frappe.get_all_apps(True):
write_translations_file(app, lang, full_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate(self, filepath):\n pass",
"def read_po(self, inputfile):\n is_index = False\n lines = inputfile.readlines()\n index = ''\n value = ''\n for line in lines:\n if line.startswith('#'):\n continue\n elif line.startswith('msgid'):\n is_index = True\n self.translations[index] = value\n index = ''\n value = ''\n elif line.startswith('msgstr'):\n is_index = False\n\n v = re.match('.*\"(.*)\".*', line)\n if v:\n if is_index:\n index += ''.join(v.groups())\n else:\n value += ''.join(v.groups())",
"def _read_translations(self):\n print('Reading original translations')\n self.translations_map = {}\n n_translations = 0\n with open(os.path.join(self.src_dir, 'translations.txt'),\n 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self.translations_map.setdefault(\n row['trans_id'], {})[row['lang']] = row['translation']\n n_translations += 1\n print('\\ttotal original translations: %s' % n_translations)",
"def load_language(self, file: str):\n from App import App\n\n try:\n qm_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'translate', '{0}.qm'.format(file))\n App.instance().load_language(qm_file)\n\n app_doc_data = AppDocData.instance()\n configs = [Config('app', 'language', file)]\n app_doc_data.save_app_configs(configs)\n finally:\n self.retranslateUi(self)",
"def translations_import(pod_path, source, locale, include_obsolete, untranslated):\n if source.endswith('.po') and locale is None:\n text = 'Must specify --locale when --source is a .po file.'\n raise click.ClickException(text)\n if not source.endswith('.po') and locale is not None:\n text = 'Cannot specify --locale when --source is not a .po file.'\n raise click.ClickException(text)\n source = os.path.expanduser(source)\n root = os.path.abspath(os.path.join(os.getcwd(), pod_path))\n pod = pods.Pod(root, storage=storage.FileStorage)\n if not pod.exists:\n raise click.ClickException('Pod does not exist: {}'.format(pod.root))\n source = glob.glob(source)\n with pod.profile.timer('translations_grow_i'):\n for path in source:\n pod.catalogs.import_translations(\n path, locale=locale, include_obsolete=include_obsolete,\n untranslated=untranslated)\n return pod",
"def prepare_translations():\n output_fn = '/home/jelle/Desktop/django.csv'\n local('po2csv apps/dasa/locale/id/LC_MESSAGES/django.po %(output_fn)s' % locals())\n print 'output written to %(output_fn)s' % locals()",
"def load_trans(self, fname):\n info = read_trans(fname)\n head_mri_trans = info['trans']\n self.set_trans(head_mri_trans)",
"def import_translations(pod_path, source, locale):\n if source.endswith('.po') and locale is None:\n text = 'Must specify --locale when --source is a .po file.'\n raise click.ClickException(text)\n if not source.endswith('.po') and locale is not None:\n text = 'Cannot specify --locale when --source is not a .po file.'\n raise click.ClickException(text)\n source = os.path.expanduser(source)\n root = os.path.abspath(os.path.join(os.getcwd(), pod_path))\n pod = pods.Pod(root, storage=storage.FileStorage)\n if not pod.exists:\n raise click.ClickException('Pod does not exist: {}'.format(pod.root))\n pod.catalogs.import_translations(source, locale=locale)",
"def from_cheetah_file(cls, filename):\n return translate.load_cheetah(cls, filename)",
"def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"",
"def read_json(self, inputfile):\n transtransfile = json.load(inputfile)\n self.language = transfile['lang']\n self.translations = transfile['strings']",
"def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs",
"def read_csv(self, inputfile):\n d = csv.reader(inputfile)\n for row in d.read():\n self.translations[row[0]] = row[1]",
"def load_language(self, file):\n from Models.Configuration import Configuration\n\n try:\n qm_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'translate', '{0}.qm'.format(file))\n QtWidgets.qApp.load_language(qm_file)\n\n app_doc_data = AppDocData.instance()\n configs = [Configuration('app', 'language', file)]\n app_doc_data.save_app_configs(configs)\n\n for action in self.menuLanguage.actions():\n if action.text().lower() == file.lower():\n continue\n action.setChecked(False)\n finally:\n self.retranslateUi(self)\n self.propertyTableWidget.retranslateUi()",
"def load(file):\n\n try:\n with open(file) as in_file:\n loaded_text = in_file.read().strip().split(\"\\n\")\n loaded_text = [x.lower() for x in loaded_text]\n return loaded_text\n except IOError as e:\n print(\"{}\\n Error opening {}. Terminationg program.\".format(e,file), file = sys.stderr)\n sys.exit()",
"def translate_elements(filename = \"en_csv.csv\", filepath = '.\\\\', language = 'en'):\n \n output_str = \"\"\n\n file_exists = False\n\n my_file = Path(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n if my_file.is_file():\n file_exists = True\n file_dict = read_file_into_dict(filepath + \"\\\\\" + language[:2] + \"_text.txt\")\n \n with open(filepath + \"\\\\\" + filename, 'r') as f:\n lines = f.readlines()\n\n first_line = lines.pop(0)[:-1].replace('\"', '').split(',')\n \n app_index = first_line.index('app')\n field_name_index = first_line.index('field_name')\n language_id_index = first_line.index('language_id')\n localized_text_index = first_line.index('localized_text')\n \n total = len(lines)\n \n translate_client = translate.Client()\n index = 0\n \n for line in lines:\n data = line.replace('\"', '').split(',')\n \n for i in range(0, len(data)):\n data[i] = data[i].replace('\"', '\\'')\n \n key = data[field_name_index]+data[app_index]+language[:2]\n output_str+='insert into localized_text'\n output_str+=' values(\\''\n output_str+=data[field_name_index]\n output_str+='\\',\\''\n output_str+=data[app_index]\n output_str+='\\',\\''\n output_str+=language[:2]\n output_str+='\\',\\''\n\n if file_exists and key in file_dict.keys():\n output_str+= file_dict[key]\n elif language == 'en':\n output_str+= data[localized_text_index] \n else:\n print('Translation')\n translation = translate_client.translate(data[localized_text_index], source_language = 'en', target_language = language)\n output_str+=translation['translatedText'].replace(\"\\n\", \"\")\n output_str+='\\');\\n'\n index+=1\n print(str(index) + \" out of \" + str(total))\n \n\n with open(filepath + \"\\\\\" + language[:2] + \"_text.txt\", 'w', encoding = 'utf-8') as f:\n f.write(output_str)\n \n return output_str",
"def load_language(self, language_file):\n try:\n if self._translator is not None:\n self.removeTranslator(self._translator)\n\n self.language_name = 'en_us'\n if os.path.isfile(language_file):\n self._translator = QTranslator() # I18N 관련\n self._translator.load(language_file)\n self.installTranslator(self._translator)\n self.language_name = os.path.splitext(os.path.basename(language_file))[0]\n finally:\n pass",
"def _extract_18n_messages():\n BabelCLI().run(['', 'extract', '-F', 'babel.cfg', '-k', '_t', '--no-location', '--sort-output',\n '--omit-header', '-o', os.path.join(I18N_PATH, 'messages.pot'), 'aliquis'])",
"def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)",
"def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)",
"def get_translation_dict_from_file(path, lang, app, throw=False) -> dict[str, str]:\n\ttranslation_map = {}\n\tif os.path.exists(path):\n\t\tcsv_content = read_csv_file(path)\n\n\t\tfor item in csv_content:\n\t\t\tif len(item) == 3 and item[2]:\n\t\t\t\tkey = item[0] + \":\" + item[2]\n\t\t\t\ttranslation_map[key] = strip(item[1])\n\t\t\telif len(item) in [2, 3]:\n\t\t\t\ttranslation_map[item[0]] = strip(item[1])\n\t\t\telif item:\n\t\t\t\tmsg = \"Bad translation in '{app}' for language '{lang}': {values}\".format(\n\t\t\t\t\tapp=app, lang=lang, values=cstr(item)\n\t\t\t\t)\n\t\t\t\tfrappe.log_error(message=msg, title=\"Error in translation file\")\n\t\t\t\tif throw:\n\t\t\t\t\tfrappe.throw(msg, title=\"Error in translation file\")\n\n\treturn translation_map",
"def load_simplified_conversation_text(filename, conv_number):\n pass",
"def get_translation_dict_from_file(path, lang, app):\n\tjson_content = {}\n\tif os.path.exists(path):\n\t\twith open(path, 'r') as f:\n\t\t\tjson_content = json.loads(f.read())\n\n\treturn json_content",
"def load_language_codes(self, path): \n try:\n fp = open(path, 'r+')\n languages = [line for line in fp.readlines()]\n fp.close\n except:\n print('Failed to read languages file.')\n return\n \n self.languages = languages",
"def translate_files(input_file, output_file, translate_dict, delete_symbols):\n\n for line in input_file:\n result = translate(line, translate_dict, delete_symbols)\n output_file.write(result)",
"def upload_messages_to_transifex(\n self, legalcode, pofile: polib.POFile = None\n ):\n language_code = legalcode.language_code\n resource_slug = legalcode.license.resource_slug\n resource_name = legalcode.license.resource_name\n pofilename = legalcode.translation_filename()\n\n resources = self.get_transifex_resources()\n resource_slugs = [item[\"slug\"] for item in resources]\n\n if pofile is None:\n pofile = legalcode.get_pofile()\n\n pofile_content = get_pofile_content(pofile)\n\n if resource_slug not in resource_slugs:\n if language_code != DEFAULT_LANGUAGE_CODE:\n raise ValueError(\n f\"The resource {resource_slug} does not yet exist in\"\n \" Transifex. Must upload English first to create it.\"\n )\n self.create_resource(\n resource_slug, resource_name, pofilename, pofile_content\n )\n elif language_code == DEFAULT_LANGUAGE_CODE:\n # We're doing English, which is the source language.\n self.update_source_messages(\n resource_slug, pofilename, pofile_content\n )\n else:\n self.update_translations(\n resource_slug, language_code, pofilename, pofile_content\n )",
"def _translation(basename, props_dir, languages, key_language=None):\n props_dir = os.path.abspath(props_dir)\n if os.path.isfile(props_dir):\n props_dir = os.path.dirname(props_dir)\n trans = None\n use_key_as_lang = False\n for lang in languages:\n while True:\n trans = _try_file \\\n (props_dir, basename + \"_\" + lang + \".properties\", lang, trans)\n # Use identity mapping instead (or in addition to) file?\n if lang == key_language:\n use_key_as_lang = True\n # We need no more fallbacks after identity mapping\n break;\n lang_up = lang.rsplit(\"_\", 1)[0]\n if lang_up == lang:\n break\n lang = lang_up\n # Finally try properties file without language specification\n trans = _try_file(props_dir, basename + \".properties\", None, trans)\n if trans:\n trans._add_fallback_unchecked(BaseTranslations()) # last resort\n else:\n if use_key_as_lang:\n trans = BaseTranslations(key_language)\n else:\n trans = BaseTranslations()\n return trans",
"def gettext(filename):\n doc = Document(filename)\n fulltext = []\n for paragraph in doc.paragraphs:\n fulltext.append(paragraph.text)\n\n zipf_file(' '.join(fulltext), filename)",
"def __loadTranslator(self):\n if self.__ui is not None:\n loc = self.__ui.getLocale()\n if loc and loc != \"C\":\n locale_dir = os.path.join(\n os.path.dirname(__file__), \"ProjectDjango\", \"i18n\")\n translation = \"django_{0}\".format(loc)\n translator = QTranslator(None)\n loaded = translator.load(translation, locale_dir)\n if loaded:\n self.__translator = translator\n e5App().installTranslator(self.__translator)\n else:\n print(\"Warning: translation file '{0}' could not be\"\n \" loaded.\".format(translation))\n print(\"Using default.\")",
"def _read_translation_file(language_code: str, filename: str):\n twlight_home = settings.TWLIGHT_HOME\n filepath = \"{twlight_home}/locale/{language_code}/{filename}.json\".format(\n twlight_home=twlight_home, language_code=language_code, filename=filename\n )\n if os.path.isfile(filepath):\n with open(filepath, \"r\") as translation_file:\n translation_dict = json.load(translation_file)\n\n # Remove the \"@metadata\" key from the dictionary\n if \"@metadata\" in translation_dict:\n translation_dict.pop(\"@metadata\")\n return translation_dict\n else:\n return {}"
]
| [
"0.6626948",
"0.660755",
"0.65344864",
"0.650112",
"0.64576405",
"0.6426083",
"0.6205688",
"0.6152179",
"0.61160636",
"0.60194945",
"0.59757906",
"0.5965752",
"0.59382874",
"0.59208184",
"0.5886727",
"0.58723736",
"0.586663",
"0.5846847",
"0.5812325",
"0.5812325",
"0.57960385",
"0.57704717",
"0.57405543",
"0.5732787",
"0.5725022",
"0.57133645",
"0.57124716",
"0.5693306",
"0.565001",
"0.5640246"
]
| 0.663314 | 0 |
Migrate target-app-specific translations from source-app to target-app | def migrate_translations(source_app, target_app):
clear_cache()
strings_in_source_app = [m[1] for m in frappe.translate.get_messages_for_app(source_app)]
strings_in_target_app = [m[1] for m in frappe.translate.get_messages_for_app(target_app)]
strings_in_target_app_but_not_in_source_app = list(
set(strings_in_target_app) - set(strings_in_source_app)
)
languages = frappe.translate.get_all_languages()
source_app_translations_dir = frappe.get_app_path(source_app, "translations")
target_app_translations_dir = frappe.get_app_path(target_app, "translations")
if not os.path.exists(target_app_translations_dir):
os.makedirs(target_app_translations_dir)
for lang in languages:
source_csv = os.path.join(source_app_translations_dir, lang + ".csv")
if not os.path.exists(source_csv):
continue
target_csv = os.path.join(target_app_translations_dir, lang + ".csv")
temp_csv = os.path.join(source_app_translations_dir, "_temp.csv")
with open(source_csv) as s, open(target_csv, "a+") as t, open(temp_csv, "a+") as temp:
source_reader = reader(s, lineterminator="\n")
target_writer = writer(t, lineterminator="\n")
temp_writer = writer(temp, lineterminator="\n")
for row in source_reader:
if row[0] in strings_in_target_app_but_not_in_source_app:
target_writer.writerow(row)
else:
temp_writer.writerow(row)
if not os.path.getsize(target_csv):
os.remove(target_csv)
os.remove(source_csv)
os.rename(temp_csv, source_csv) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _apply_patch_odoo(self):\n paths = [os.path.join('openerp', 'tools', 'translate.py'),\n os.path.join('odoo', 'tools', 'translate.py')]\n for path in paths:\n s_file = os.path.join(self._server_path, path)\n if not os.path.isfile(s_file):\n continue\n cmd = [\"sed\", \"-i\", \"-e\",\n r\"s/translation'] = src/translation'] = ''/g\",\n s_file]\n print \" \".join(cmd)\n subprocess.call(cmd)",
"def pkg_app_translator(self, translation_dict, rows_list):\n for ls in rows_list:\n # transform the applist str to a list for each row\n app_list = ls[1].split(',')\n for i in range(len(app_list)):\n # translation of app_list on each row\n # if the pkg is in the dict, then translate; otw, stick with\n # the pkg\n # get rid of the space, [, and ] in applist\n processed_key = app_list[i].strip().strip('[').strip(']')\n app_list[i] = translation_dict.get(processed_key,\n processed_key)\n ls[1] = ', '.join(app_list)\n\n # also translate the first row\n for i in range(len(rows_list[0])):\n rows_list[0][i] = translation_dict.get(rows_list[0][i], rows_list[0][i])\n\n return rows_list",
"def pkg_app_translator(self, translation_dict, rows_list):\n for list in rows_list:\n # transform the applist str to a list for each row\n app_list = list[1].split(',')\n for i in range(len(app_list)):\n # translation of app_list on each row\n # if the pkg is in the dict, then translate; otw, stick with\n # the pkg\n # get rid of the space, [, and ] in applist\n processed_key = app_list[i].strip().strip('[').strip(']')\n app_list[i] = translation_dict.get(processed_key,\n processed_key)\n list[1] = ', '.join(app_list)\n\n # also translate the first row\n for i in range(len(rows_list[0])):\n rows_list[0][i] = translation_dict.get(rows_list[0][i], rows_list[0][i])\n\n return rows_list",
"def update_translations(lang, untranslated_file, translated_file, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\n\tdef restore_newlines(s):\n\t\treturn (\n\t\t\ts.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t.replace(\"| | |\", \"\\n\")\n\t\t)\n\n\ttranslation_dict = {}\n\tfor key, value in zip(\n\t\tfrappe.get_file_items(untranslated_file, ignore_empty_lines=False),\n\t\tfrappe.get_file_items(translated_file, ignore_empty_lines=False),\n\t):\n\n\t\t# undo hack in get_untranslated\n\t\ttranslation_dict[restore_newlines(key)] = restore_newlines(value)\n\n\tfull_dict.update(translation_dict)\n\tapps = frappe.get_all_apps(True)\n\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tfor app_name in apps:\n\t\twrite_translations_file(app_name, lang, full_dict)",
"def merge(locale, target='django.po', sources=('django-partial.po',), fail_if_missing=True):\r\n LOG.info('Merging {target} for locale {locale}'.format(target=target, locale=locale))\r\n locale_directory = CONFIGURATION.get_messages_dir(locale)\r\n try:\r\n validate_files(locale_directory, sources)\r\n except Exception, e:\r\n if not fail_if_missing:\r\n return\r\n raise\r\n\r\n # merged file is merged.po\r\n merge_cmd = 'msgcat -o merged.po ' + ' '.join(sources)\r\n execute(merge_cmd, working_directory=locale_directory)\r\n\r\n # clean up redunancies in the metadata\r\n merged_filename = locale_directory.joinpath('merged.po')\r\n clean_pofile(merged_filename)\r\n\r\n # rename merged.po -> django.po (default)\r\n target_filename = locale_directory.joinpath(target)\r\n os.rename(merged_filename, target_filename)",
"def translate_to(common_form, target):\r\n # retrieve the correct translation dictionary\r\n target_dict = get_dict(target)\r\n # recreate the form with the translated keys\r\n target_form = {target_dict[key]: common_form[key]\r\n for key in target_dict.keys()}\r\n return target_form",
"def translate(translate_from, translate_to, string_to_translate=\"\"):\n dictionary = DICTIONARIES.get(\"%s_%s\" % (translate_from, translate_to))\n if not dictionary:\n print(\"Offline: No such translation direction in dictionary: %s-%s\" % (translate_from, translate_to))\n else:\n words = [dictionary.get(w, w) for w in string_to_translate.split(' ')]\n print(\"Offline: %s\" % (' '.join(words)))",
"def fixturize(app=\"All\"):\n\n if app == \"All\":\n local('python manage.py dumpdata resources > resources/fixtures/resources.json')\n local('python manage.py dumpdata military > military/fixtures/military.json')\n local('python manage.py dumpdata arenas > arenas/fixtures/arena.json')\n local('python manage.py dumpdata sciences > sciences/fixtures/technologies.json')\n local('python manage.py dumpdata auth.Group > fixtures/groups.json')\n elif app == \"resource\":\n local('python manage.py dumpdata resources > resources/fixtures/resources.json')\n elif app == \"military\":\n local('python manage.py dumpdata military > military/fixtures/military.json')\n elif app == \"arena\":\n local('python manage.py dumpdata arenas > arenas/fixtures/arena.json')\n elif app == \"sciences\":\n local('python manage.py dumpdata sciences > sciences/fixtures/technologies.json')\n elif app == \"groups\":\n local('python manage.py dumpdata auth.Group > fixtures/groups.json')",
"def translate_text(target, text):\n return text",
"def translate(self, action):\r\n self.current_language = str(action.data().toString()).strip(\"tr_\").rstrip(\".qm\")\r\n\r\n log.info(\"Switching language to: %s\" % action.text())\r\n self.uiTranslator.load(\":/languages/tr_%s.qm\" % self.current_language)\r\n self.app.installTranslator(self.uiTranslator)\r\n\r\n self.retranslateFreeseerApp()\r\n self.aboutDialog.aboutWidget.retranslate(self.current_language)\r\n self.retranslate()",
"def translate_text(query, source_lang_code, target_lang_code):\n\n try:\n translations = TRANSLATION_SERVICE.translations().list(\n source=source_lang_code,\n target=target_lang_code,\n q=query\n ).execute()\n translation = translations['translations'][0]\n if 'detectedSourceLanguage' in translation.keys():\n source_lang_code = translation['detectedSourceLanguage']\n resp = random.choice(_TRANSLATE_RESULT).format(\n text=translation['translatedText'],\n fromLang=language_code_dict[source_lang_code],\n toLang=language_code_dict[target_lang_code])\n except (HTTPError, URLError, HTTPException):\n resp = random.choice(_TRANSLATE_NETWORK_ERROR)\n except Exception:\n resp = random.choice(_TRANSLATE_ERROR)\n return resp",
"def install_translations(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n if where == 'local':\n # if we are local, we also generate new po files\n with cd('apps/dasa/'):\n run('../../bin/django makemessages -l id')\n run('../../bin/django makemessages -l en')\n run('../../bin/django compilemessages')\n with cd('project'):\n# run('../bin/django makemessages -l id')\n run('../bin/django makemessages -l en')\n run('../bin/django compilemessages')\n else: # otherwise, we just compile\n run('git pull')\n with cd('apps/dasa/'):\n run('../../bin/django compilemessages')\n with cd('project'):\n run('../bin/django compilemessages')\n restart(where)",
"def trans_app(value):\n return _(value.title())",
"def translate(self, text: str, src_lang: str, target_lang: str) -> str:\n result = self.__translate(text, src_lang, target_lang)\n obj_result = json.loads(result)\n\n list_sentence = [x[0] for x in obj_result[0][:-1]]\n\n return ''.join(list_sentence)",
"def main():\n # Parse args\n parser = argparse.ArgumentParser(description=\"Open-source offline translation.\\n\")\n parser.add_argument(\n \"text\",\n nargs=\"?\",\n metavar=\"TEXT\",\n help=\"The text to translate. Read from standard input if missing.\",\n )\n parser.add_argument(\n \"--from-lang\",\n \"-f\",\n help=\"The code for the language to translate from (ISO 639-1)\",\n )\n parser.add_argument(\n \"--to-lang\", \"-t\", help=\"The code for the language to translate to (ISO 639-1)\"\n )\n args = parser.parse_args()\n\n from_and_to_lang_provided = args.from_lang is not None and args.to_lang is not None\n\n # Get text to translate\n if args.text:\n # argos-translate-cli --from-lang en --to-lang es \"Text to translate\"\n text_to_translate = args.text\n elif from_and_to_lang_provided:\n # echo \"Text to translate\" | argos-translate-cli --from-lang en --to-lang es\n text_to_translate = \"\".join(sys.stdin)\n else:\n # argos-translate\n parser.print_help()\n return\n\n # Perform translation\n if from_and_to_lang_provided:\n installed_languages = {\n lang.code: lang for lang in translate.load_installed_languages()\n }\n if args.from_lang not in installed_languages:\n parser.error(\"{!r} is not an installed language.\".format(args.from_lang))\n if args.to_lang not in installed_languages:\n parser.error(\"{!r} is not an installed language.\".format(args.to_lang))\n from_lang = installed_languages[args.from_lang]\n to_lang = installed_languages[args.to_lang]\n translation = from_lang.get_translation(to_lang)\n if translation is None:\n parser.error(\n f\"No translation installed from {args.from_name} to {args.to_name}\"\n )\n else:\n translation = translate.IdentityTranslation(\"\")\n\n # Print translation\n print(translation.translate(text_to_translate))",
"def copy_translations(self, oldinstance, language=None):\n query = CourseRunTranslation.objects.filter(master=oldinstance)\n if language:\n query = query.filter(language_code=language)\n\n for translation_object in query:\n try:\n target_pk = CourseRunTranslation.objects.filter(\n master=self, language_code=translation_object.language_code\n ).values_list(\"pk\", flat=True)[0]\n except IndexError:\n translation_object.pk = None\n else:\n translation_object.pk = target_pk\n translation_object.master = self\n translation_object.save()",
"def upload_messages_to_transifex(\n self, legalcode, pofile: polib.POFile = None\n ):\n language_code = legalcode.language_code\n resource_slug = legalcode.license.resource_slug\n resource_name = legalcode.license.resource_name\n pofilename = legalcode.translation_filename()\n\n resources = self.get_transifex_resources()\n resource_slugs = [item[\"slug\"] for item in resources]\n\n if pofile is None:\n pofile = legalcode.get_pofile()\n\n pofile_content = get_pofile_content(pofile)\n\n if resource_slug not in resource_slugs:\n if language_code != DEFAULT_LANGUAGE_CODE:\n raise ValueError(\n f\"The resource {resource_slug} does not yet exist in\"\n \" Transifex. Must upload English first to create it.\"\n )\n self.create_resource(\n resource_slug, resource_name, pofilename, pofile_content\n )\n elif language_code == DEFAULT_LANGUAGE_CODE:\n # We're doing English, which is the source language.\n self.update_source_messages(\n resource_slug, pofilename, pofile_content\n )\n else:\n self.update_translations(\n resource_slug, language_code, pofilename, pofile_content\n )",
"def map_to_app_safedeployment(self, app):\n self.safedeployment.form.map_to_app(app)",
"def migrate(manager, orig, to):\n #TODO: Fix logic, expose to user.\n if manager not in pacman:\n sys.exit(\"wrong package manager\")\n pass\n #is package installed?\n orig_pkgs = []\n packages_raw = Popen(['fink', 'list', orig], stdout=PIPE).communicate()[0]\n for line in packages_raw.splitlines():\n package_meta = line.split('\\t')\n if package_meta[0].strip():\n #yes package is installed.\n name = (line.split('\\t')[1])\n # fink's variants are denoted by -py25 so seperate by - and pop last value\n orig_pkgs.append(name)\n #does package match from\n #does package have a counterpart\n new_pkgs = []\n for pkg in orig_pkgs:\n new_pkgs.append(pkg[:-4] + to)\n #install counterparts\n fink_install(' '.join(new_pkgs))\n if remove:\n pass",
"def translate_raw(self, text: str, src_lang: str, target_lang: str) -> str:\n return self.__translate(text, src_lang, target_lang)",
"def migrate_new_apps():\n new_apps = run('%s %s/fabfiles/django_scripts/get_apps_without_migration.py'\n % (env.PYTHON_BIN, env.SRC_PATH))\n # The script denotes the start of its output by \"{% output %}\" tag so we\n # only take whatever's after that\n new_apps = new_apps.split('{% output %}')[1].split()\n with cd(env.SRC_PATH):\n for app in new_apps:\n sudo(\"%s manage.py schemamigration %s --initial\" %\n (env.PYTHON_BIN, app.strip()))\n sudo(\"%s manage.py migrate %s --no-initial-data\" %\n (env.PYTHON_BIN, app.strip()))",
"def binaryTranslationsCallback(self, filename):\n if filename.endswith(\".po\"):\n filename = filename.replace(\".po\", \".mo\")\n return filename",
"def question_new_translate():",
"def southify(app):\n managepy('migrate %s 0001 --fake' % app)\n managepy('migrate %s' % app)",
"def translate_text(Text=None, TerminologyNames=None, SourceLanguageCode=None, TargetLanguageCode=None):\n pass",
"def translate(self, to_lang: str = TARGET_LANG):\n if not self.language:\n self.detect_language()\n if not all([self.clean, self.language != to_lang]):\n return\n self.payload += '&source={}&target={}'.format(self.language, to_lang)\n resp = requests.request('POST', self.url_translation, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.translation = json.loads(resp.text)['data']['translations'][0]['translatedText']\n except KeyError:\n return",
"def translate_to(self, lang):\n TranslatableWindow.translate_all(lang)",
"def cmd_lingvo(ensoapi, word_from_lang_to_lang = \"\"):\n \n translate_word(ensoapi, word_from_lang_to_lang)",
"def map_to_app_resources(self, app):\n # TODO: Extract resources app data\n pass",
"def test_translate_module(self, _info):\n # Check that we get all the build targets we expect.\n self.args.tests = [uc.MODULE_NAME, uc.CLASS_NAME]\n targets, test_infos = self.ctr.translate(self.args)\n unittest_utils.assert_strict_equal(\n self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)\n unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,\n uc.CLASS_INFO})"
]
| [
"0.5715308",
"0.55745673",
"0.55168104",
"0.5490492",
"0.54586124",
"0.5419781",
"0.5308909",
"0.52588713",
"0.5256012",
"0.5236541",
"0.522904",
"0.5202649",
"0.5195046",
"0.51658565",
"0.51555526",
"0.5112733",
"0.50993305",
"0.50826365",
"0.50750506",
"0.50692075",
"0.50494075",
"0.50385326",
"0.5032169",
"0.50265175",
"0.5026408",
"0.49979135",
"0.49870342",
"0.49854892",
"0.49295992",
"0.49225947"
]
| 0.78916484 | 0 |
Append the translated dict to `frappe.local.response` | def send_translations(translation_dict):
if "__messages" not in frappe.local.response:
frappe.local.response["__messages"] = {}
frappe.local.response["__messages"].update(translation_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def get_translation(self):",
"def get_lang_js(fortype: str, name: str) -> str:\n\treturn f\"\\n\\n$.extend(frappe._messages, {json.dumps(get_dict(fortype, name))})\"",
"def get_lang_js(fortype, name):\n\treturn \"\\n\\n$.extend(frappe._messages, %s)\" % json.dumps(get_dict(fortype, name))",
"def get_dict(fortype, name=None):\n\tfortype = fortype.lower()\n\tcache = frappe.cache()\n\tasset_key = fortype + \":\" + (name or \"-\")\n\ttranslation_assets = cache.hget(\"translation_assets\", frappe.local.lang, shared=True) or {}\n\n\tif not asset_key in translation_assets:\n\t\tif fortype==\"doctype\":\n\t\t\tmessages = get_messages_from_doctype(name)\n\t\telif fortype==\"page\":\n\t\t\tmessages = get_messages_from_page(name)\n\t\telif fortype==\"report\":\n\t\t\tmessages = get_messages_from_report(name)\n\t\telif fortype==\"include\":\n\t\t\tmessages = get_messages_from_include_files()\n\t\telif fortype==\"jsfile\":\n\t\t\tmessages = get_messages_from_file(name)\n\t\telif fortype==\"template\":\n\t\t\tmessages = get_all_messages_from_template_files()\n\t\telif fortype==\"boot\":\n\t\t\tmessages = get_messages_from_include_files()\n\t\t\tmessages += get_all_messages_from_js_files()\n\t\t\tmessages += frappe.db.sql(\"select concat('Print Format: ', name), name from `tabPrint Format`\")\n\t\t\tmessages += frappe.db.sql(\"select concat('DocType: ', name), name from tabDocType\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Role: ', name), name from tabRole\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Module: ', name), name from `tabModule Def`\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Page: ', name), name from `tabPage`\")\n\t\t\tmessages += frappe.db.sql(\"select concat('Report: ', name), name from `tabReport`\")\n\t\t\tmessages += \"null\"\n\n\t\tmessage_dict = make_dict_from_messages(messages)\n\t\tmessage_dict.update(get_dict_from_hooks(fortype, name))\n\n\t\ttry:\n\t\t\t# get user specific translation data\n\t\t\tuser_translations = get_user_translations(frappe.local.lang)\n\t\texcept Exception:\n\t\t\tuser_translations = None\n\n\t\tif user_translations:\n\t\t\tmessage_dict.update(user_translations)\n\n\t\ttranslation_assets[asset_key] = message_dict\n\n\t\tcache.hset(\"translation_assets\", frappe.local.lang, translation_assets, shared=True)\n\n\treturn translation_assets[asset_key]",
"def mocked_translate(lur):\n lur = {\n \"success\": {\"total\": 1},\n \"contents\": {\n \"translated\": \"Hi,Zaafira, I am\",\n \"text\": \"Hi, I am Zaafira\",\n \"translation\": \"yoda\",\n },\n }\n\n json_response_mock = mock.Mock()\n json_response_mock.json.return_value = lur\n return json_response_mock",
"def get_report_translations(request):\n\n id_report = request.GET.get('id_report',None)\n if id_report is not None:\n languages = []\n lang = Report.objects.filter(id_report = id_report)\n for el in lang:\n if el.language not in languages:\n languages.append(el.language)\n\n json_resp = {}\n # print(languages)\n json_resp['languages'] = languages\n return JsonResponse(json_resp)",
"def translate():\n\n # Logging the input payload\n json_payload = request.json\n my_word = json_payload['word']\n LOG.info(f\"Word to be translated: \\n{my_word}\")\n\n sql = f\"select * from translation.translator where origin='{my_word}';\"\n result = db.engine.execute(sql)\n result = result.fetchall()\n if len(result) > 0:\n LOG.info(f\"Results: \\n{result}\")\n json_result = [{column: value for column, value in rowproxy.items()}\n for rowproxy in result]\n json_result[0][\"translated_from\"] = \"translator_db\"\n else:\n json_result = dict()\n json_result[\"translated_from\"] = \"google_api\"\n translator = Translator()\n result = translator.translate(my_word)\n json_result[\"origin\"] = my_word\n json_result[\"origin_language\"] = result.src\n json_result[\"translation\"] = result.text\n json_result[\"translation_language\"] = result.dest\n sql_statement = f\"insert into translation.translator(origin, origin_language, translation, translation_language) values('{my_word}', '{json_result['origin_language']}','{json_result['translation']}', '{json_result['translation_language']}')\"\n result = db.engine.execute(sql_statement)\n\n db.session.commit()\n\n return jsonify({'result': json_result})",
"def add_renderer_globals(event):\r\n request = event['request']\r\n event['_'] = request.translate\r\n event['ungettext'] = request.ungettext\r\n event['localizer'] = request.localizer",
"def add_localizer(event):\n def auto_translate(string):\n \"\"\" Use the message factory to translate strings.\"\"\"\n return localizer.translate(MessageFactory(string))\n\n def gettext_translate(string):\n \"\"\" Translate untranslated strings with FormEncode.\"\"\"\n # Try default translation first\n translation = localizer.old_translate(i18n.TranslationString(string))\n if translation == string:\n # translation failed then use FormEncode\n translation = formencode_api._stdtrans(string)\n return translation\n\n request = event.request\n localizer = i18n.get_localizer(request)\n request.localizer = localizer\n request.translate = auto_translate\n\n if not hasattr(localizer, \"old_translate\"):\n localizer.old_translate = localizer.translate\n locale_name = i18n.get_locale_name(request)\n formencode_api.set_stdtranslation(languages=[locale_name])\n localizer.translate = gettext_translate",
"def _transform_and_send_one_resource(self, cr, uid, external_session, resource, resource_id,\n update_date, mapping, mapping_id, defaults=None, context=None):\n for key_lang in resource:\n resource[key_lang] = self._transform_one_resource(cr, uid, external_session, 'from_openerp_to_external',\n resource[key_lang], mapping=mapping, mapping_id=mapping_id,\n defaults=defaults, context=context)\n return self.send_to_external(cr, uid, external_session, {resource_id : resource}, mapping, mapping_id, update_date, context=context)",
"def _from_dict_to_destination(self):\n self._translated_xml_tree = etree.Element(\"root\")\n for key, value in self._translated_dict[\"root\"].items():\n etree.SubElement(self._translated_xml_tree, key).text = value",
"def _build_localization(self, package):\n for lang in package:\n localization = package[lang]\n language = localization['language']\n del localization['language']\n language = self.get_language(language) # gets the existing language container or creates a new one\n language.update(localization)",
"def machine_translation(request):\n log.debug(\"Get translation from machine translation service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n check = request.GET['check']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n if hasattr(settings, 'MICROSOFT_TRANSLATOR_API_KEY'):\n api_key = settings.MICROSOFT_TRANSLATOR_API_KEY\n else:\n log.error(\"MICROSOFT_TRANSLATOR_API_KEY not set\")\n return HttpResponse(\"apikey\")\n\n obj = {}\n\n # On first run, check if target language supported\n if check == \"true\":\n supported = False\n languages = settings.MICROSOFT_TRANSLATOR_LOCALES\n\n if locale in languages:\n supported = True\n\n else:\n for lang in languages:\n if lang.startswith(locale.split(\"-\")[0]): # Neutral locales\n supported = True\n locale = lang\n break\n\n if not supported:\n log.debug(\"Locale not supported.\")\n return HttpResponse(\"not-supported\")\n\n obj['locale'] = locale\n\n url = \"http://api.microsofttranslator.com/V2/Http.svc/Translate\"\n payload = {\n \"appId\": api_key,\n \"text\": text,\n \"from\": \"en\",\n \"to\": locale,\n \"contentType\": \"text/html\",\n }\n\n try:\n r = requests.get(url, params=payload)\n log.debug(r.content)\n\n # Parse XML response\n root = ET.fromstring(r.content)\n translation = root.text\n obj['translation'] = translation\n\n return HttpResponse(json.dumps(obj), content_type='application/json')\n\n except Exception as e:\n log.error(e)\n return HttpResponse(\"error\")",
"def tr(text, sourcelang, targetlang):\n request = urllib2.Request(url.format(text, sourcelang, targetlang),\n headers={ 'User-Agent': 'Mozilla/5.0', 'Accept-Charset': 'utf-8' })\n response = urllib2.urlopen(request).read()\n fixedJSON = re.sub(r',{2,}', ',', response).replace(',]', ']')\n data = json.loads(fixedJSON)\n result = {}\n result[\"definition\"] = data[0][0]\n for row in data[1]:\n try:\n result[row[0]] = row[1]\n except:\n pass\n return result",
"def translate():\n pass",
"def translate(self, language=None):",
"def add_translations(self, translations):\n for translation in translations:\n self.add_field_translation(translation)",
"def question_new_translate():",
"def update_translations(lang, untranslated_file, translated_file, app=\"_ALL_APPS\"):\n\tclear_cache()\n\tfull_dict = get_all_translations(lang)\n\n\tdef restore_newlines(s):\n\t\treturn (\n\t\t\ts.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t.replace(\"| | |\", \"\\n\")\n\t\t)\n\n\ttranslation_dict = {}\n\tfor key, value in zip(\n\t\tfrappe.get_file_items(untranslated_file, ignore_empty_lines=False),\n\t\tfrappe.get_file_items(translated_file, ignore_empty_lines=False),\n\t):\n\n\t\t# undo hack in get_untranslated\n\t\ttranslation_dict[restore_newlines(key)] = restore_newlines(value)\n\n\tfull_dict.update(translation_dict)\n\tapps = frappe.get_all_apps(True)\n\n\tif app != \"_ALL_APPS\":\n\t\tif app not in apps:\n\t\t\tprint(f\"Application {app} not found!\")\n\t\t\treturn\n\t\tapps = [app]\n\n\tfor app_name in apps:\n\t\twrite_translations_file(app_name, lang, full_dict)",
"def include_fields_in_response(self, *args):\n logger.info(\"Adding to desired response fields: \" + str(args))\n self.desired_response_fields.extend([a.lower() for a in args]) # lowercase for convenience",
"def addTrans(self, track_dict):\n for i in ['order_id', 'total']: # fix required ; let javascript show errors if null\n if i not in track_dict:\n track_dict[i] = ''\n for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning\n if i not in track_dict:\n track_dict[i] = ''\n self.data_struct['_addTrans'].append(\"\"\"['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']\"\"\" % track_dict)",
"def _handle_localise(self, data):\n result = self._do_localisation()\n return_vel = Bool()\n return_vel.data = result\n return LocaliseResponse(return_vel)",
"def translate(self):\n pass",
"def current_translations(request):\n list = []\n t = UserHistory.objects.all().exclude(submission_timestamp=None)\n for i in t:\n a = datetime.datetime.now()-i.submission_timestamp\n if a.seconds <= 3600:\n dict = {}\n dict['username'] = i.user\n dict['original_sentence'] = i.original_sentence\n dict['translated_sentence'] = i.translated_sentence\n list.append(dict)\n \n data = {\n 'current_translations':list,\n 'count':len(list)\n }\n return render_to_response('my_admin_tools/menu/current_translations.html',data,context_instance=RequestContext(request))",
"def write_translations_file(app, lang, full_dict=None, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_app_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_csv_file(\n\t\tos.path.join(tpath, lang + \".csv\"), app_messages, full_dict or get_all_translations(lang)\n\t)",
"def update_translations(lang, translated_data, app, is_file=True):\n\tclear_cache()\n\tfull_dict = load_lang(lang, [app])\n\n\tif full_dict:\n\t\tdef restore_newlines(s):\n\t\t\treturn (s.replace(\"|||||\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"| | | | |\", \"\\\\\\n\")\n\t\t\t\t\t.replace(\"||||\", \"\\\\n\")\n\t\t\t\t\t.replace(\"| | | |\", \"\\\\n\")\n\t\t\t\t\t.replace(\"|||\", \"\\n\")\n\t\t\t\t\t.replace(\"| | |\", \"\\n\"))\n\n\t\ttranslation_dict = defaultdict(dict)\n\t\tfor k in full_dict:\n\t\t\tfor m in full_dict[k]:\n\t\t\t\ttranslation_dict[k][m] = full_dict[restore_newlines(k)][restore_newlines(m)]\n\n\t\tif is_file:\n\t\t\tnew_translations = frappe._dict(frappe.get_file_json(translated_data))\n\t\telse:\n\t\t\tnew_translations = translated_data\n\n\t\tfor k in new_translations:\n\t\t\tfor m in new_translations[k]:\n\t\t\t\tif new_translations[k][m] != \"\":\n\t\t\t\t\ttranslation_dict[k][restore_newlines(m)] = restore_newlines(new_translations[k][m])\n\n\t\twrite_translations_file(app, lang, translation_dict)",
"def __init__(self, language=None):\n self.language = language\n self.translations = {}",
"def addExtension(self, extension_response):\n extension_response.toMessage(self.fields)",
"def get_translation ( self ):\n self.verify_post_data ( )\n\n text = request.json[ 'text' ]\n src_lang = request.json[ 'source_lang' ]\n target_lang = request.json[ 'target_lang' ]\n\n # if translation is available in cache, just fetch it from there. Otherwise use translation service.\n translated_text = self.get_set_translation_from_cache ( text, src_lang, target_lang )\n\n return jsonify ( {\"Translation\": translated_text} )"
]
| [
"0.5645178",
"0.5510957",
"0.5468224",
"0.5429747",
"0.54025203",
"0.5355868",
"0.5319116",
"0.5250902",
"0.5245807",
"0.5243739",
"0.5239572",
"0.5220736",
"0.521488",
"0.5180243",
"0.5142529",
"0.5140901",
"0.511876",
"0.51048046",
"0.509845",
"0.50925344",
"0.50731796",
"0.5072658",
"0.5044095",
"0.5035866",
"0.49781778",
"0.4972934",
"0.49642596",
"0.49599937",
"0.49548736",
"0.4953586"
]
| 0.7026861 | 0 |
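A minimal usage sketch for the `send_translations` entry above. The plain dict standing in for `frappe.local.response` is an assumption so the snippet runs outside a real Frappe request; inside Frappe the function merges straight into the request's response object.

```python
# Sketch of the same merge pattern, with `local_response` standing in for
# frappe.local.response (assumption: no Frappe request context available).
local_response = {}

def send_translations_demo(translation_dict, response=local_response):
    # Create the "__messages" bucket on first use, then merge new entries in.
    if "__messages" not in response:
        response["__messages"] = {}
    response["__messages"].update(translation_dict)

send_translations_demo({"Submit": "Enviar"})
send_translations_demo({"Cancel": "Cancelar"})
print(local_response)  # {'__messages': {'Submit': 'Enviar', 'Cancel': 'Cancelar'}}
```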
Getter for the grade value of the current grade instance | def get_grade(self):
return self.__grade_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_grade(self) -> int :\n return self.grade",
"def get_value(\n self\n ) -> float:\n\n return self.average",
"def get_g_score(self):\n return self._g_score",
"def publish_grade(self):\r\n score = self.lcp.get_score()\r\n self.runtime.publish(\r\n self,\r\n 'grade',\r\n {\r\n 'value': score['score'],\r\n 'max_value': score['total'],\r\n }\r\n )\r\n\r\n return {'grade': score['score'], 'max_grade': score['total']}",
"def grade(self):\n if round(self.numAvg,0) >= 70:\n return round(self.numAvg,0)\n elif self.PassSummer:\n return 70\n elif round(self.numAvg,0) >= 55 and not self.PassSummer:\n return round(self.numAvg,0)\n else:\n return 55",
"def getValue(self):\n return self.value",
"def get_score(self):\n return self.score",
"def what_is_the_grade(self):\n\t\treturn_dict = {\n\t\t\t'section_title': self.title, \n\t\t\t'section_weight': self.weight,\n\t\t\t'grade_value' : self.current_grade_value,\n\t\t\t'comment_text' : self.current_comment_text,\n\t\t\t'default_comments_text' : self.current_default_comment_text,\n\t\t\t'example_comments_text' : self.current_example_comment_text,\n\t\t\t'is_complete': self.is_complete\n\t\t}\n\n\t\treturn return_dict",
"def getValue(self):\n return self.value",
"def get_score(self):\n return self.score",
"def get_score(self):\n return self.score",
"def get_score(self):\n return self.score",
"def getValue(self):\n raise Exception(\"getValue function not defined with class {0}\".format(self.__class__.__name__))",
"def get_value(self):\n return self.value",
"def get_value(self):\n return self.value",
"def get_value(self):\n return self.value",
"def get_grading_id(self):\n return self._grading_id",
"def value(self):\n return self.compute_value()",
"def get_value(self) -> float:\n return self._data_provider.get_value()",
"def _get_value(self):\n \n return self._value",
"def _get_value(self):\n return self.__value",
"def get_score(self):\n return self.__score",
"def getValue(self):\n raise NotImplementedError(\"Define in derived class\")",
"def get_value(self):\n raise NotImplementedError",
"def get_value(self):\n return self._value",
"def value (self) :\n\n return self.__value__",
"def value(self):\n\n\t\treturn self.__value",
"def get_value(self):\n pass",
"def value(self) -> float:\n return self._value",
"def value(self) -> float:\n return self._value"
]
| [
"0.8369609",
"0.7082165",
"0.6822362",
"0.67607737",
"0.6667259",
"0.6656464",
"0.66217035",
"0.66202563",
"0.661658",
"0.65875876",
"0.65875876",
"0.65875876",
"0.65620965",
"0.6537988",
"0.6537988",
"0.6537988",
"0.6512228",
"0.6496693",
"0.6479231",
"0.64789337",
"0.6476344",
"0.64723057",
"0.64497083",
"0.6438209",
"0.6431396",
"0.6417061",
"0.6405054",
"0.64034194",
"0.640135",
"0.640135"
]
| 0.8781585 | 0 |
Getter for the student ID of the current grade instance | def get_student_id(self):
return self.__student_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_student_id(self):\n return self._student_id",
"def get_student(self):\n return self.student",
"def get_student(self):\n return self._student",
"def get_grading_id(self):\n return self._grading_id",
"def get_student(self):\n return db.get(self.student_key)",
"def getIdent (self) :\n return self.id",
"def id_getter(self):\n return self._id",
"def get_current():\n return Student.query.get(session.get('id'))",
"def get_grade(self) -> int :\n return self.grade",
"def get_ID(self):\n return self.ID",
"def get_student_name(self):\n return self.__student_name",
"def identifier(self):\r\n return self.id",
"def _get_id(self):\n return self.id",
"def get_id(self):\n\n\t\treturn self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id"
]
| [
"0.8487692",
"0.73362076",
"0.7248596",
"0.7178025",
"0.7065409",
"0.68116",
"0.679066",
"0.66875654",
"0.65918547",
"0.6590938",
"0.6582891",
"0.65591073",
"0.6555714",
"0.65166616",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383",
"0.6502383"
]
| 0.8555902 | 0 |
Getter for the discipline ID of the current grade instance | def get_discipline_id(self):
return self.__discipline_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_course_id(self):\n return self.ID",
"def get_discipline_name(self):\n return self.__discipline_name",
"def get_student_id(self):\n return self.__student_id",
"def get_student_id(self):\n return self._student_id",
"def cisid(self):\n return self._cisid",
"def getIdent (self) :\n return self.id",
"def get_grading_id(self):\n return self._grading_id",
"def get_ID(self):\n return self.ID",
"def getID(self) -> int:\n ...",
"def get_id(self):\n\n\t\treturn self.__id",
"def getID(self):\n return self.__id",
"def getId(self):\n return self._id",
"def getId(self):\n return self.identifier",
"def getid(self):\n return self.__id",
"def getId(self):\n\n return self.__id",
"def _get_id(self):\n return self.id",
"def getID(self):\n return self._id",
"def identifier(self):\r\n return self.id",
"def getId(self):\n return self.id",
"def id_getter(self):\n return self._id",
"def identifier(self):\n return self.__id",
"def id(self):\r\n return self.location.course_key",
"def get_id(self):\n return self.__id",
"def get_id(self):\n return self.__id",
"def id(self) -> int:\n return pulumi.get(self, \"id\")",
"def identifier(self):\n return self._id",
"def GetID(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id"
]
| [
"0.6913118",
"0.6868929",
"0.6817603",
"0.67311543",
"0.66436386",
"0.6582481",
"0.6561987",
"0.64400834",
"0.6425438",
"0.6415259",
"0.6405952",
"0.64044946",
"0.6386278",
"0.63797176",
"0.63546896",
"0.6352328",
"0.63472956",
"0.63415945",
"0.63360286",
"0.6332231",
"0.6309721",
"0.6307438",
"0.6296139",
"0.6296139",
"0.6292334",
"0.62840647",
"0.6282219",
"0.6271423",
"0.6271423",
"0.6271423"
]
| 0.8454621 | 0 |
Getter for the discipline's name for the current Grade instance | def get_discipline_name(self):
return self.__discipline_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_student_name(self):\n return self.__student_name",
"def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)",
"def _get_name(self):\n return self.name",
"def get_name(self) -> str:\r\n return self.name",
"def get_name(self): \r\n return self.name",
"def get_name(self):\n\n return self.name",
"def get_name(self):\r\n return self.name",
"def get_name(self):\n\t\treturn self.name",
"def get_name(self):\n\t\treturn self.name",
"def get_name(self):\n\t\treturn self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self) -> str:\n return self.name"
]
| [
"0.7079603",
"0.6880193",
"0.6759028",
"0.6706063",
"0.6699811",
"0.6699467",
"0.6690337",
"0.6684808",
"0.6684808",
"0.6684808",
"0.66412586",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6621796",
"0.6617315"
]
| 0.8392133 | 0 |
Getter for the student's name for the current Grade instance | def get_student_name(self):
return self.__student_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_student(self):\n return self.student",
"def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)",
"def get_name(self) -> str:\r\n return self.name",
"def get_name(self): \r\n return self.name",
"def _get_name(self):\n return self.name",
"def get_name(self):\r\n return self.name",
"def get_name(self):\n\n return self.name",
"def get_name(self):\n\t\treturn self.name",
"def get_name(self):\n\t\treturn self.name",
"def get_name(self):\n\t\treturn self.name",
"def get_name(self):\n return self.name",
"def get_name(self) -> str:\n return self.name",
"def get_name(self) -> str:\n return self.name",
"def get_name(self) -> str:\n return self.name",
"def get_name(self):\n return self.name # return the name",
"def get_name(self):\n return self.name # return the name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name",
"def get_name(self):\n return self.name"
]
| [
"0.7320124",
"0.7260825",
"0.7234937",
"0.72178173",
"0.7213346",
"0.71864283",
"0.7184898",
"0.7140616",
"0.7140616",
"0.7140616",
"0.71247065",
"0.7109712",
"0.7109712",
"0.7109712",
"0.7109598",
"0.7109598",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534",
"0.70767534"
]
| 0.8675116 | 0 |
Overrides the __str__ method for Grade instances | def __str__(self):
return str(self.__student_name) + " has grade " + str(self.__grade_value) + " at " + str(self.__discipline_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n return f\"{self.semester} | {self.school} | {self.position} | {self.class_name}\"",
"def __str__(self):\n return \"student:\"+str(self.name)+\":\"+str(self.age)+\":\"+str(self.major)",
"def __str__(self) -> str:",
"def __str__(self) -> str:",
"def __str__(self) -> str:",
"def __str__(self) -> str:",
"def __str__(self):\n raise NotImplementedError(\"Implemented in a subclass\")",
"def __str__(self):\n return self.format()",
"def __str__(self) :\n raise NotImplementedError",
"def __str__(self):\n raise NotImplementedError(\"Should be implemented by subclass\")",
"def __str__(self):\n raise NotImplementedError(\"__str__ not implemented for \"+str(type(self)))",
"def __str__(self):\n return \"%s (graded by %s at %s)\" % (self.submission, self.grader, self.date)",
"def __str__(self) -> str:\n raise NotImplementedError",
"def __str__(self):\n return str((self.code, self.fitness,))",
"def __str__(self):\n return self.get_string()",
"def __str__(self):\n raise RuntimeError(\"Needs to be implemented in base class\")",
"def __str__(self) -> str:\n pass",
"def __str__(self):\n # print(self.get_string())\n return self.get_string()",
"def __str__(self):\n # print(self.get_string())\n return self.get_string()",
"def __str__(self):\n return self.get_str()",
"def __str__(self):\n # for values that should be recorded exactly e.g. iteration number\n if self.count == 0:\n return str(self.val)\n # for stats\n return '%.4f (%.4f)' % (self.val, self.avg)",
"def __str__(self):\n return self.s",
"def __str__(self):\n return \"{}\".format(super().__str__())",
"def toString():",
"def __str__(self):\n return str(self.get_rating())",
"def __str__(self):\r\n # Use the properties or you are not executing the formatting\r\n return f'{self.lname}, {self.fname} - {self.major}'",
"def __str__(self):\n return \"Name: \" + self.name + \"\\nScores: \" + \\\n \" \".join(map(str, self.scores))",
"def __str__(self):\n return \"Name: \" + self._name + \"\\nScores: \" + \\\n \" \".join(map(str, self._scores))",
"def __str__(self):\n return self.string",
"def __str__(self):\n # for values that should be recorded exactly e.g. iteration number\n if self.count == 0:\n return str(self.val)\n # for stats\n return '%.4f (%.4f)' % (self.val, self.avg)"
]
| [
"0.71322924",
"0.70280325",
"0.69138056",
"0.69138056",
"0.69138056",
"0.69138056",
"0.68971586",
"0.6842432",
"0.6839214",
"0.68321973",
"0.6827448",
"0.6816284",
"0.67801344",
"0.6777593",
"0.6743071",
"0.67238766",
"0.6723739",
"0.66961354",
"0.66961354",
"0.6677916",
"0.66637784",
"0.6660705",
"0.6659193",
"0.66536814",
"0.6642383",
"0.6636448",
"0.66343313",
"0.6620359",
"0.6614352",
"0.6614318"
]
| 0.8083111 | 0 |
Use torchaudio.transforms to extract MFCC features and labels in a PyTorch-friendly format | def features_and_labels(soundfile, frag_length=128):
label = soundfile.split('\\')[-1].split('_')[0]
waveform, sample_rate = torchaudio.load(soundfile)
MFCCs = transforms.MFCC(n_mfcc=128, melkwargs={'n_mels':128, 'win_length':320, 'hop_length':160, 'n_fft':1024 })(waveform[0][:])
MFCCs = MFCCs.T.reshape((-1, frag_length, 128)) # reshape into (fragment index, time frames, mel bands); reshape (not view) because the transpose is non-contiguous
frag_nums = MFCCs.shape[0]
labels = int(label)*np.ones(frag_nums, dtype=np.int8)
labels = torch.from_numpy(labels)
return MFCCs, labels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, audio, feat_kinds=['sp','mcc','f0','ap','en']):\n device = audio.device\n audio = audio.detach().cpu().numpy()\n feat = dict()\n for feat_kind in feat_kinds:\n feat[feat_kind] = list()\n\n for x in audio:\n # Preprocess\n x = x * MAX_WAV_VALUE\n x = self.low_cut_filter(x, cutoff=self.cutoff_freq)\n # Extract f0\n f0, time_axis = pyworld.harvest(x, self.fs, f0_floor=self.minf0, f0_ceil=self.maxf0, frame_period=self.shiftms)\n\n # Extract sp \n sp = pyworld.cheaptrick(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n if 'sp' in feat_kinds:\n feat['sp'].append(torch.from_numpy(sp).float().t())\n\n # Extract ap\n if 'ap' in feat_kinds:\n ap = pyworld.d4c(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n feat['ap'].append(torch.from_numpy(ap).float().t())\n\n # Extract mcc\n if 'mcc' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n feat['mcc'].append(torch.from_numpy(mcc).float().t())\n\n # Extract energy\n if 'en' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n en = pysptk.mc2e(mcc, alpha=self.mcc_alpha, irlen=256)\n # en = np.clip(en, 1e-10, None)\n feat['en'].append(torch.from_numpy(en).float().view(-1)) \n\n # Fix f0\n if 'f0' in feat_kinds:\n f0[f0 < 0] = 0\n feat['f0'].append(torch.from_numpy(f0).float().view(-1))\n\n for key, val_list in feat.items():\n feat[key] = torch.cat([val.unsqueeze(0) for val in val_list],dim=0).to(device)\n\n return feat",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def preprocess_data(num_mfcc_coeffs, num_filters, window_len, window_step, max_num_frames):\n inputs = [] \n labels = [] \n \n SOURCE_DIR = '../data/cmu_arctic/scottish-english-male-awb/wav/' \n TARGET_DIR = '../data/cmu_arctic/us-english-male-bdl/wav/'\n index = 0\n for source_fname, target_fname in zip(os.listdir(SOURCE_DIR), os.listdir(TARGET_DIR)):\n if index >= 20:\n break\n index += 1\n\n if source_fname == '.DS_Store' or target_fname == '.DS_Store':\n continue\n\n (source_sample_rate, source_wav_data) = wav.read(SOURCE_DIR + source_fname) \n (target_sample_rate, target_wav_data) = wav.read(TARGET_DIR + target_fname)\n\n source_mfcc_features = np.array(mfcc(source_wav_data, samplerate=source_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n target_mfcc_features = np.array(mfcc(target_wav_data, samplerate=target_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n\n # align with FastDTW\n source_mfcc_features, target_mfcc_features = get_dtw_series(source_mfcc_features, target_mfcc_features)\n\n # pad MFCC feature matrices (rows) to max_num_frames\n source_padded_frames = pad_sequence(source_mfcc_features, max_num_frames)\n target_padded_frames = pad_sequence(target_mfcc_features, max_num_frames)\n\n inputs.append(source_padded_frames) \n labels.append(target_padded_frames) \n\n return inputs, labels",
"def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]",
"def forward(self, audio):\n feature_extractor = self.feature_extractor\n wave_gan = self.wave_gan\n pqmf = self.pqmf\n use_noise_input = self.use_noise_input\n config = self.config\n pad_fn = self.pad_fn\n\n # Added for processing single audio file as in deepspeech armory [Sonal 29Oct20]\n if audio.ndim == 1:\n num_samples = audio.shape[0]\n mel_spectrogram = feature_extractor.transform(audio)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=mel_spectrogram.device,\n )\n inputs += (noise,)\n\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n return reconstructed_audio\n\n else:\n reconstructions = []\n num_samples = audio.shape[1]\n for idx in range(audio.shape[0]):\n recording = audio[idx, :]\n mel_spectrogram = feature_extractor.transform(recording)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=recording.device,\n )\n inputs += (noise,)\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:, :num_samples]\n reconstructions.append(reconstructed_audio)\n return torch.stack(reconstructions)",
"def get_mfcc_transform(conf):\n\n mel_spectro_args = {\n 'win_length': conf['data']['transform']['win_length'],\n 'hop_length': conf['data']['transform']['hop_length'],\n 'n_fft': conf['data']['transform']['n_fft'],\n 'f_min': conf['data']['transform']['f_min'],\n 'f_max': conf['data']['transform']['f_max'],\n 'n_mels': conf['data']['transform']['n_mels'],\n }\n\n mfcc_transform = torchaudio.transforms.MFCC(sample_rate=conf['data']['transform']['sample_rate'],\n n_mfcc=conf['data']['transform']['n_mfcc'],\n melkwargs=mel_spectro_args,\n )\n\n return mfcc_transform",
"def extract_feature_1d(file_name, **kwargs):\n mfcc = kwargs.get(\"mfcc\")\n mel = kwargs.get(\"mel\")\n audio = kwargs.get(\"audio\")\n\n y, sr = librosa.load(file_name, duration=8, sr=16000, dtype=np.float32)\n result = np.array([])\n\n if mfcc:\n # O np mean é utilizado para transformar a matriz em vetor, tirando a media de cada linha\n mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=128).T, axis=0)\n result = np.hstack((result, mfccs))\n\n if mel:\n mel1d = np.mean(librosa.feature.melspectrogram(y, sr=sr).T,axis=0)\n mel = librosa.power_to_db(mel1d ** 2)\n\n result = np.hstack((result, mel))\n if audio:\n result = np.hstack((result, y))\n\n return result",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def forward(self, audio: Tensor) -> Tensor:\r\n\r\n nb_sources = self.nb_targets\r\n nb_samples = audio.shape[0]\r\n\r\n # getting the STFT of mix:\r\n # (nb_samples, nb_channels, nb_bins, nb_frames, 2)\r\n mix_stft = self.stft(audio)\r\n X = self.complexnorm(mix_stft)\r\n\r\n # initializing spectrograms variable\r\n spectrograms = torch.zeros(X.shape + (nb_sources,), dtype=audio.dtype, device=X.device)\r\n\r\n for j, (target_name, target_module) in enumerate(self.target_models.items()):\r\n # apply current model to get the source spectrogram\r\n target_spectrogram = target_module(X.detach().clone())\r\n spectrograms[..., j] = target_spectrogram\r\n\r\n # transposing it as\r\n # (nb_samples, nb_frames, nb_bins,{1,nb_channels}, nb_sources)\r\n spectrograms = spectrograms.permute(0, 3, 2, 1, 4)\r\n\r\n # rearranging it into:\r\n # (nb_samples, nb_frames, nb_bins, nb_channels, 2) to feed\r\n # into filtering methods\r\n mix_stft = mix_stft.permute(0, 3, 2, 1, 4)\r\n\r\n # create an additional target if we need to build a residual\r\n if self.residual:\r\n # we add an additional target\r\n nb_sources += 1\r\n\r\n if nb_sources == 1 and self.niter > 0:\r\n raise Exception(\r\n \"Cannot use EM if only one target is estimated.\"\r\n \"Provide two targets or create an additional \"\r\n \"one with `--residual`\"\r\n )\r\n\r\n nb_frames = spectrograms.shape[1]\r\n targets_stft = torch.zeros(\r\n mix_stft.shape + (nb_sources,), dtype=audio.dtype, device=mix_stft.device\r\n )\r\n for sample in range(nb_samples):\r\n pos = 0\r\n if self.wiener_win_len:\r\n wiener_win_len = self.wiener_win_len\r\n else:\r\n wiener_win_len = nb_frames\r\n while pos < nb_frames:\r\n cur_frame = torch.arange(pos, min(nb_frames, pos + wiener_win_len))\r\n pos = int(cur_frame[-1]) + 1\r\n\r\n targets_stft[sample, cur_frame] = wiener(\r\n spectrograms[sample, cur_frame],\r\n mix_stft[sample, cur_frame],\r\n self.niter,\r\n softmask=self.softmask,\r\n residual=self.residual,\r\n )\r\n\r\n # getting to (nb_samples, nb_targets, channel, fft_size, n_frames, 2)\r\n targets_stft = targets_stft.permute(0, 5, 3, 2, 1, 4).contiguous()\r\n\r\n # inverse STFT\r\n estimates = self.istft(targets_stft, length=audio.shape[2])\r\n\r\n return estimates",
"def ExtractFeatures(self):\n\n self.MFCC = librosa.feature.mfcc(self.sample, sr=self.sample_rate, n_mfcc=13)\n self.MFCC_DELTA = librosa.feature.delta(self.MFCC)\n self.MEL_SPECTROGRAM = librosa.feature.melspectrogram(self.sample, sr=self.sample_rate)\n f, t, SPECTRO = signal.spectrogram(self.sample)\n self.SPECTRO\n self.LPC = np.array(audiolazy.lazy_lpc.lpc.autocor(self.sample, 2).numerator)\n self.FFT = np.fft.fft(self.sample)\n widths = np.arange(1, 31)\n self.CWT = signal.cwt(self.sample, signal.ricker, widths)",
"def make_returnn_audio_features_func():\n return _extract",
"def convert2mel(audio,base_path,fs, n_fft,fmax,n_mels,hop_length_samples, window_lenght,type_training):\n\n path = os.path.join(base_path, audio)\n if type_training != \"train\":\n if os.path.isfile(os.path.join(base_path,\"processed_wavs_train\",audio)):\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_train\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_test\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data, _ = librosa.core.load(path, sr=fs, res_type=\"kaiser_best\")\n data = normalize_amplitude(data)\n\n powSpectrum = np.abs(stft(data+ 0.00001,n_fft,hop_length = hop_length_samples, win_length = window_lenght, window = windowing(window_lenght, sym=False), center=True, pad_mode='reflect'))**2\n\n mels = melspectrogram(y= None,n_fft=n_fft ,sr=fs ,S= powSpectrum, hop_length= hop_length_samples ,n_mels=n_mels,fmax=fmax , fmin = 0.0).T\n mels = librosa.core.power_to_db(mels, ref=np.min(mels))\n mels = mels / np.max(mels)\n\n return mels.T",
"def test():\r\n le = preprocessing.LabelEncoder()\r\n le.fit([\"Door Knocking\",\"Shower Running\",\"Toilet Flushing\",\"Vacuum Cleaning\",\"Keyboard Typing\", # encode class labels as numeric id values\r\n \"Coughing\",\"Neutral\"])\r\n \r\n if torch.cuda.is_available():\r\n device = \"cuda:0\"\r\n use_cuda = True\r\n else:\r\n device = \"cpu\"\r\n use_cuda = False\r\n \r\n myModel, start_epoch, train_hist = loadCheckpoint(31, use_cuda)\r\n \r\n #myModel = myModel.double()\r\n myModel = myModel.to(device, dtype=torch.double)\r\n next(myModel.parameters()).device # Check that it is on Cuda\r\n \r\n file_names = []\r\n class_ids = []\r\n max_s = 1\r\n sr = 44100 \r\n for entry in os.scandir(\"test wavs/\"): # for each folder corresponding to a class in dataset\r\n class_id = entry.name # get class numeric id according to label encoder\r\n relative_path = \"test wavs/\"+entry.name # get path location of data sample for loading audio\r\n file_names.append(relative_path) # append to list\r\n class_ids.append(class_id)\r\n\r\n max_s = 1\r\n sr = 44100\r\n X_test = [] \r\n for i in range(len(file_names)):\r\n audio = LoadAudio.load(file_names[i]) # load audio file\r\n audio = LoadAudio.resample(audio, sr) # resample audio\r\n audio = LoadAudio.mono(audio) # make audio stereo\r\n audio = LoadAudio.resize(audio, max_s) # resize audio \r\n sgram = LoadAudio.spectrogram(audio, n_mels=128, n_fft=1024, hop_len=None) # create spectrogram \r\n sgram = LoadAudio.hpssSpectrograms(audio,sgram)\r\n sgram_tensor = torch.tensor(sgram)\r\n X_test.append(sgram_tensor)\r\n\r\n pred = np.array([])\r\n for i in range(len(X_test)):\r\n inputs = X_test[i]\r\n # Normalize the inputs\r\n inputs_m, inputs_s = inputs.mean(), inputs.std()\r\n inputs = (inputs - inputs_m) / inputs_s\r\n inputs = inputs.unsqueeze(0)\r\n inputs = inputs.double()\r\n \r\n # Get predictions\r\n outputs = myModel(inputs)\r\n\r\n # Get the predicted class with the highest score\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n pred = np.append(pred, le.inverse_transform(predicted.detach().cpu().numpy()))\r\n \r\n\r\n df = pd.DataFrame(pred, columns=[\"Predicted\"]) # save predictions as a datafram column\r\n df['True'] = class_ids # save true class as a datafram column\r\n print(\"\\nPredicted:\", df)",
"def extract_features(wavfile, feature, sampling_rate=16000):\n\n raw_signal, sr = librosa.core.load(wavfile,\n sampling_rate,\n mono=True,\n dtype='float'\n )\n\n\n if feature == 'MFCC':\n feat_seq = librosa.feature.mfcc(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=13,\n fmin=75,\n fmax=5999\n )\n # Numerical Stability\n #feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n\n elif feature == 'FBANK':\n feat_seq = librosa.feature.melspectrogram(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mels=13,\n fmin=75,\n fmax=5999\n )\n\n # Numerical Stability\n feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n # 20 * log | convert to Me-Scale\n feat_seq = 20*np.log10(feat_seq)\n\n # z-norm: feature normalization\n feat_norm = preprocessing.scale(feat_seq, axis=1)\n\n return feat_norm",
"def mfcc_features(self, audio, rate, numcep = 20, nfft = 2000, N = 2):\n self.mfcc = python_speech_features.mfcc(audio, rate, numcep = numcep, nfft = nfft)\n #self.mfcc = preprocessing.scale(self.mfcc)\n \n self.delta_mfcc = python_speech_features.delta(self.mfcc, N)\n \n self.mfcc_feature = np.hstack((self.mfcc, self.delta_mfcc))\n \n return self.mfcc_feature",
"def prepare_features(self, wavs, stage):\n wavs, lens = wavs\n if stage == sb.Stage.TRAIN:\n if hasattr(self.modules, \"env_corrupt\"):\n wavs_noise = self.modules.env_corrupt(wavs, lens)\n wavs = torch.cat([wavs, wavs_noise], dim=0)\n lens = torch.cat([lens, lens])\n\n if hasattr(self.hparams, \"augmentation\"):\n wavs = self.hparams.augmentation(wavs, lens)\n\n # Choose what features we want to use\n # todo: support multiple features and feature concat\n target_feats = self.hparams.embedding_features\n\n FEATURE_EXTRACTOR = {\n # 'cqt': self.modules.cqt,\n # 'fbanks': self.modules.fbanks\n 'fastaudiogauss': self.modules.fastaudiogauss\n # 'ifr': self.modules.ifr\n # 'mag': self.modules.mag\n # 'mfcc': self.modules.mfcc\n # 'leaf': self.modules.leaf\n # 'tdfbanks': self.modules.tdfbanks\n # 'pcen': self.modules.pcen\n # 'sincnet': self.modules.sincnet\n # 'trainable_fbanks': self.modules.trainable_fbanks\n }\n\n if len(target_feats) == 1:\n # wavs = wavs.unsqueeze(1).cuda()\n feats = FEATURE_EXTRACTOR[target_feats[0]](wavs)\n # feats = torch.unsqueeze(feats, 1)\n # feats = torch.transpose(feats, 1,2)\n if target_feats[0]=='cqt':\n log_spec = 10.0 * torch.log10(torch.clamp(feats, min=1e-30))\n log_spec -= 10.0\n feats=log_spec\n feats = torch.transpose(feats, 1,2)\n else:\n feats = []\n for target in target_feats:\n temp = FEATURE_EXTRACTOR[target](wavs)\n if target=='cqt':\n temp = torch.transpose(temp, 1,2)\n feats.append(temp)\n f =feats[0]\n for i in range(1, len(feats)):\n f = torch.cat((f, feats[i]), dim=2)\n feats = f\n feats = self.modules.mean_var_norm(feats, lens)\n return feats, lens",
"def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0",
"def Prediction(self):\n # converts first 19 chunks of audio bytes into 16 bit int values\n in_data = np.fromstring(np.array(self.frames[:19]), 'Int16')\n\n # extract MFCCs from the 19 chunks of audio\n audio_sig = np.array([mfcc(in_data, self.rate, self.window,\n self.stride, self.mfcc, self.filter_banks,\n self.fft_num, 0, None, True)])\n\n # makes predictions\n prediction = self.ww_model.model.predict(audio_sig)\n\n if(self.print_pred):\n print(prediction)\n\n return prediction",
"def speech_recog(event, context):\n \n \n try:\n\n DEVICE=torch.device('cpu')\n model = SpeechRNN()\n model = model.to(DEVICE)\n model.load_state_dict(torch.load('weights_cpu_voicerec.pt', map_location=DEVICE))\n\n wav_file = random.choice(sample_file_list)\n waveform,_ = torchaudio.load(wav_file, normalization=True)\n \n # if the waveform is too short (less than 1 second) we pad it with zeroes\n if waveform.shape[1] < 16000:\n waveform = F.pad(input=waveform, pad=(0, 16000 - waveform.shape[1]), mode='constant', value=0)\n \n mfcc_transform = torchaudio.transforms.MFCC(n_mfcc=12, log_mels=True)\n mfcc = mfcc_transform(waveform).squeeze(0).transpose(0,1)\n x = mfcc.unsqueeze(0)\n\n model.eval()\n y = model(x)\n predicted_label = classes[y.max(1)[1].numpy().item()]\n\n input_text = wav_file.split(\"/\")[-1]\n output = f'Prediction of input file {wav_file.split(\"/\")[-1]} is {predicted_label}.'\n\n fields = {'input': input_text,\n 'predicted': output}\n\n return {\"statusCode\": 200, \"headers\": headers, \"body\": json.dumps(fields)}\n\n except ValueError as ve:\n # logger.exception(ve)\n print(ve)\n return {\n \"statusCode\": 422,\n \"headers\": headers,\n \"body\": json.dumps({\"error\": repr(ve)}),\n }\n except Exception as e:\n # logger.exception(e)\n print(e)\n return {\n \"statusCode\": 500,\n \"headers\": headers,\n \"body\": json.dumps({\"error\": repr(e)}),\n }",
"def convert_linear_spectrogram_to_audio(self, spec: 'torch.tensor', **kwargs) -> 'torch.tensor':",
"def forward(self, features, captions, lengths): # for training\n embeddings = self.embed(captions) # [B, 10, 256] for captions = [B, 10]\n embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)\n packed = pack_padded_sequence(embeddings, lengths, batch_first=True) \n hiddens, _ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n return outputs",
"def forward(self, features, captions, lengths):\n embeddings = self.embed(captions)\n # print(features.unsqueeze(0).unsqueeze(0))\n # print(features)\n # print(embeddings)\n embeddings = torch.cat((features.unsqueeze(0), embeddings), 1)\n # print(embeddings.transpose(0,1).size(1))\n # print()\n packed = pack_padded_sequence(embeddings, lengths, batch_first=True) \n hiddens, _ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n # print(lengths)\n # print(outputs)\n # exit(0)\n return outputs",
"def sample(args):\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n _, dataset = get_cub2011_data_loader()\n d_vocab = len(dataset.dict)\n\n # Build models\n model = Img2Txt(d_embed=args.embed_size, d_hidden=args.hidden_size, d_vocab=d_vocab, d_layers=args.num_layers).eval().to(device) # eval mode (batchnorm uses moving mean/variance)\n\n # Load the trained model parameters\n model.enc.load_state_dict(torch.load(args.encoder_path))\n model.dec.load_state_dict(torch.load(args.decoder_path))\n\n # Prepare images.\n images_dir = args.images\n assert os.path.isdir(images_dir), f'{images_dir} is not a directory.'\n img_filenames = os.listdir(images_dir)\n img_filenames = [f for f in img_filenames if os.path.splitext(f)[1] in {'.jpg', '.png'}]\n img_transforms = get_standard_img_transforms()\n images = []\n for img_filename in img_filenames:\n img_path = os.path.join(images_dir, img_filename)\n img = Image.open(img_path)\n img = img_transforms(img).to(device)\n images.append(img)\n images = torch.stack(images, dim=0)\n print('images:', images.size())\n\n # Generate captions.\n with torch.no_grad():\n captions = model.sample(images).cpu()\n captions = [dataset.dict.decode(c) for c in captions]\n\n # Print out the generated captions.\n print('The generated captions:')\n print(captions)\n\n # Create and save captioned images.\n outputs_dir = args.outputs\n mkdir_p(outputs_dir)\n for img_filename, caption in zip(img_filenames, captions):\n img_path = os.path.join(images_dir, img_filename)\n out_path = os.path.join(outputs_dir, img_filename)\n plt.imshow(plt.imread(img_path))\n plt.title(caption)\n plt.savefig(out_path)",
"def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n )\n features = fe.get_features(audio, sample_rate)\n return features",
"def forward(self,features,captions):\n captions = captions[:,:-1]\n embeddings = self.embed(captions)\n inputs = torch.cat((features.unsqueeze(1),embeddings),1)\n hiddens,_ = self.lstm(inputs)\n outputs = self.linear(hiddens)\n \n return outputs",
"def wav2mfcc(file_path, max_len=44, n_mfcc=20):",
"def proc_one(filename):\n (rate, sig) = wav.read(filename)\n assert rate == samp_rate\n # since templates have max value of 32768, normalise it\n if sig.max() > 1:\n sig = sig / 32768\n # Normalise so that max-value is 1\n sig = sig / max(sig)\n\n # calculate MFCC\n feat = mfcc(sig, samplerate=samp_rate, winlen=win_length / 1000, winstep=hop / 1000, preemph=0.95, numcep=14,\n winfunc=np.hamming)\n # print(sig.shape, feat.shape)\n return feat",
"def forward(self, features, captions, lengths):\n embeddings = self.embed(captions)\n embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)\n packed = pack_padded_sequence(embeddings, lengths, batch_first=True) \n hiddens, _ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n return outputs",
"def forward(self, features, captions, lengths):\n embeddings = self.embed(captions)\n embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)\n packed = pack_padded_sequence(embeddings, lengths, batch_first=True) \n hiddens, _ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n return outputs",
"def calc_mfccs(audio_data, samplerate, n_mfcc=13, n_fft=400, hop_length=160):\n mfcc = librosa.feature.mfcc(audio_data, sr=samplerate, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length)\n\n # add derivatives and normalize\n mfcc_delta = librosa.feature.delta(mfcc)\n mfcc_delta2 = librosa.feature.delta(mfcc, order=2)\n mfcc = np.concatenate((normalize(mfcc),\n normalize(mfcc_delta),\n normalize(mfcc_delta2)), axis=0)\n\n return mfcc.T"
]
| [
"0.6545687",
"0.64840394",
"0.6444227",
"0.64008653",
"0.6340683",
"0.62353003",
"0.6219182",
"0.61814654",
"0.60809934",
"0.5951422",
"0.59291077",
"0.5893627",
"0.5873762",
"0.58557135",
"0.5834606",
"0.58306265",
"0.5822783",
"0.58100766",
"0.57669765",
"0.57355034",
"0.57300764",
"0.5717785",
"0.56930184",
"0.567085",
"0.5668582",
"0.56684613",
"0.5657138",
"0.5636188",
"0.5636188",
"0.5616563"
]
| 0.7304945 | 0 |
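A short, hedged sketch of how the `(MFCCs, labels)` fragments returned by `features_and_labels` above can be batched and shuffled; the random tensors are placeholders for real MFCC output, and the shapes mirror the assumed `(fragments, frag_length, n_mfcc)` layout of that entry.

```python
# Assumed shapes only: random tensors stand in for real MFCC fragments.
import torch
from torch.utils.data import TensorDataset, DataLoader

MFCCs = torch.randn(10, 128, 128)            # (fragments, frag_length, n_mfcc)
labels = torch.zeros(10, dtype=torch.long)   # one class id per fragment

loader = DataLoader(TensorDataset(MFCCs, labels), batch_size=4, shuffle=True)
for batch_feats, batch_labels in loader:
    print(batch_feats.shape, batch_labels.shape)  # e.g. torch.Size([4, 128, 128]) torch.Size([4])
```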
Process the whole directory. For each file we use features_and_labels to extract the MFCCs and labels, then concatenate them across files; the fragments are later shuffled in a DataLoader for better representation and training. | def dir_resolution(self, src_path, frag_length=128):
src_path = os.path.join(self.root_path, src_path)
files = os.listdir(src_path)
MFCCs = None
labels = None
cnt = 1
total_num = len(files)
for wav in files:
wav_path = os.path.join(src_path, wav)
MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)
if MFCCs is not None:
MFCCs = torch.cat((MFCCs, MFCCs_each))
labels = torch.cat((labels, labels_each))
else:
MFCCs, labels = MFCCs_each, labels_each
if cnt % 1000 == 0:
print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))
cnt += 1
np.save(self.feature_file, MFCCs.numpy())
np.save(self.label_file, labels.numpy())
print('Loading into files finished!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)",
"def load_data(folders):\n features, labels = np.zeros(0), np.zeros(0, dtype=int)\n for folder_id in folders:\n folder = \"fold%d\"%(folder_id)\n for fn in glob.glob(os.path.join(RAW_DATA_DIR, folder, \"*.wav\")):\n just_fn_name = fn.split('/')[-1]\n class_id = just_fn_name.split('-')[1]\n #print(\"fn\", fn, just_fn_name, class_id)\n mfcc2 = _extract_features_from_one_file(fn)\n if mfcc2 is None:\n continue\n features = np.append(features, mfcc2)\n labels= np.append(labels, int(class_id))\n features = features.reshape(-1, N_MFCC)\n #labels = labels.reshape(-1, 1)\n #print(\"features.shape\", features.shape, \"labels.shape\", labels.shape)\n labels = one_hot_encode(labels)\n return features, labels",
"def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n #use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)",
"def convert_all_data(mfccPath, fbankPath, labeldict, datadir):\n\n inputmfcc, inputnamemfcc = ark_parser(mfccPath, 'train.ark')\n inputfbank, inputnamefbank = ark_parser(fbankPath, 'train.ark')\n\n label = []\n inputlist = []\n assert len(inputnamemfcc) == len(labeldict.keys()) and len(inputnamefbank) == len(labeldict.keys())\n\n for fb, mfcc in zip(inputfbank, inputmfcc):\n fb = pp.normalize_mfcc(fb)\n mfcc = pp.normalize_mfcc(mfcc)\n inputlist.append(np.concatenate((fb, mfcc), axis=1))\n\n for name in inputnamemfcc:\n label.append(labeldict[name])\n\n with open('./train_data.pkl', 'wb') as train_data:\n pickle.dump(inputlist, train_data)\n\n convert_label_to_int(datadir, datadir + '48phone_char.map', label)",
"def data_generator(labels: list, folder_options: list, codex_list: list, padded_size: int = 300, batch_size: int = 1,\n dataset_path: str = 'D:\\\\Datasets\\\\bms-molecular-translation\\\\train\\\\', return_name_str: bool = False,\n folder_loop: int = 1, augment_data: bool = True, invert_image: bool = True, repeat_image: int = 1):\n\n image_name = ''\n # Limitations on the Augmentation performed on the training and validation inputs\n translation_mag = 10\n rotations_mag = 180\n\n while True:\n # Shuffle the folder order\n random.shuffle(folder_options)\n\n # Iterate through all folder paths\n for folder_path in folder_options:\n # Grab all files under a particular folder path\n full_path = dataset_path + folder_path[0] + '\\\\' + folder_path[1] + '\\\\' + folder_path[2] + '\\\\'\n file_list = [f for f in listdir(full_path) if isfile(join(full_path, f))]\n\n # Re-iterate over the same folder, shuffling the order each time\n for folder_itr in range(folder_loop):\n random.shuffle(file_list)\n\n # Iterate through each file, preprocess and yield each\n for file in file_list:\n # Repeat each training input as many times as desired\n for repeat in range(repeat_image):\n\n # Instantiate the batch\n image_data_batch = np.zeros(shape=(batch_size, 1500, 1500, 1))\n output_str_batch = np.zeros(shape=(batch_size, padded_size, len(codex_list) + 1))\n output_num_batch = np.zeros(shape=(batch_size, padded_size, 1))\n\n # Generate as big a batch of data as is requested\n for batch_num in range(batch_size):\n\n # Prepare Image augmentations\n rand_trans_mag_vert = round(np.random.uniform(-translation_mag, translation_mag))\n rand_trans_mag_horizontal = round(np.random.uniform(-translation_mag, translation_mag))\n rand_rotation = np.random.uniform(-rotations_mag, rotations_mag)\n\n # Load image in Black and White with a constant size of 1500 x 1500\n file_path = full_path + file\n image_data = Image.open(file_path)\n\n bg_colour = 1\n\n if invert_image:\n # Invert image colour\n image_data = ImageOps.invert(image_data)\n bg_colour = 0\n\n image_data = image_data.convert('1')\n\n if augment_data:\n # Perform Augmentation\n image_data = image_data.rotate(angle=rand_rotation,\n translate=(\n rand_trans_mag_vert, rand_trans_mag_horizontal),\n fillcolor=bg_colour,\n expand=True)\n\n image_data = ImageOps.pad(image_data, (1500, 1500), color=bg_colour)\n image_data_array = np.array(image_data).astype(np.float32).reshape((1, 1500, 1500, 1))\n\n # Find the correct label from the csv file data\n image_name = file[0:-4]\n output_string = ''\n for label in labels:\n if label[0] == image_name:\n output_string = label[1]\n break\n\n output_encoded = encode_inchi_name(output_string, codex_list, padded_size)\n\n # Extract all encoded Str and Num information separately.\n output_str = []\n output_num = []\n for char in output_encoded:\n output_str.append(char[0])\n output_num.append(char[1])\n\n # Cast Output Str and Num data to Numpy arrays and reshape to suit\n output_str_encoded = np.array(output_str).reshape((1, padded_size, len(codex_list) + 1))\n output_num_encoded = np.array(output_num).reshape((1, padded_size, 1))\n\n # Add new data to batch\n image_data_batch[batch_num] = image_data_array\n output_str_batch[batch_num] = output_str_encoded\n output_num_batch[batch_num] = output_num_encoded\n\n if return_name_str:\n yield image_data_batch, [output_str_batch, output_num_batch], output_string\n else:\n yield image_data_batch, [output_str_batch, output_num_batch]",
"def read_classified_data(root_path, to_size = (200,200), transformation = transforms.ToTensor()):\n label_dict = {}\n # for each folder in the dataset\n # get the label\n for i, label in tqdm(enumerate(sorted(os.listdir(root_path))), desc = \"Read in...\", leave = False):\n if len(os.listdir(sub_path)) == 0:\n continue\n sub_path = os.path.join(root_path, label)\n # write the label in the label dict\n label_dict[i] = label\n # find the csv, there should be one and only one csv\n csv_path = glob.glob(os.path.join(sub_path,\"*.csv\"))[0]\n df = pd.read_csv(csv_path)\n # the csv should have a image_name list indicating the 1-1 correspondense\n image_origin = df[\"image_name\"]\n # get the rest and the features\n df.drop(labels = \"image_name\", axis = \"columns\", inplace = True)\n # concate them to our dataset\n if i == 0:\n features = torch.from_numpy(df.to_numpy())\n images = torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin])\n labels = torch.ones(image_origin.shape[0])*label\n else:\n features = torch.cat((features,torch.from_numpy(df.to_numpy())))\n images = torch.cat(images,torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin]))\n labels = torch.cat(labels, torch.ones(image_origin.shape[0])*label)\n # return the dataset with our label_dict\n return TensorDataset(images,features, labels),label_dict",
"def convert_all_test_data(mfccPath, fbankPath, datadir):\n inputmfcc, inputnamemfcc = ark_parser(mfccPath, 'test.ark')\n inputfbank, inputnamefbank = ark_parser(fbankPath, 'test.ark')\n\n label = []\n inputlist = []\n assert len(inputnamemfcc) == len(inputnamefbank)\n\n for fb, mfcc in zip(inputfbank, inputmfcc):\n fb = pp.normalize_mfcc(fb)\n mfcc = pp.normalize_mfcc(mfcc)\n inputlist.append(np.concatenate((fb, mfcc), axis=1))\n\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n\n with open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamefbank, test_name)",
"def prepare(self):\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data \\\n and VALIDATION in self.data:\n return\n\n # step 1: load the file names\n file_list = sorted(glob.glob(self.location+\"*.mhd\"))\n # count the number of data points\n\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in file_list]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n\n # load the filenames and put into the right dataset\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n for patient_file in file_list:\n patient_name = self.patient_name_from_file_name(patient_file)\n\n if patient_name in validation_patients:\n s = VALIDATION\n else:\n s = TRAINING\n label = labels_as_dict[str(patient_name)]\n if self.only_positive and not label:\n continue\n self.data[s].append(patient_file)\n \n if self.pick_nodule:\n self.labels[s].append([random.choice(label)]) \n else:\n self.labels[s].append(label)\n \n \n self.names[s].append(patient_name)\n\n # give every patient a unique number\n last_index = -1\n for s in self.datasets:\n self.indices[s] = range(last_index+1,last_index+1+len(self.data[s]))\n if len(self.indices[s]) > 0:\n last_index = self.indices[s][-1]\n print s, len(self.indices[s]), \"samples\"",
"def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")",
"def read_mastcam_dir(self, filepath, suffix, unit, feature, extension = '.IMG', lblext='.LBL_label', eye='LR', margin=6):\n \n if eye == 'L':\n eyez = 'ML'\n elif eye == 'R':\n eyez = 'MR'\n elif eye == 'LR':\n eyez = ''\n pass\n else:\n raise ValueError('Eye name %s is not valid! Use L, R, or LR.' % eye)\n \n # GET ALL FILES WITH GIVEN EXTENSION IN FILEPATH\n files = sorted(glob.glob(str(filepath) + \"*\" + eyez + \"*\" + str(suffix) + \"*\" + str(extension)))\n fileprefixes = sorted(list(set([f.split('/')[-1][0:12] for f in files])))\n print(fileprefixes)\n \n print(\"found %d files among %d sequences with eye %s and extension %s in %s:\" % (len(files), len(fileprefixes), eye, extension, filepath))\n assert len(files) > 0\n \n numfiles = len(fileprefixes)\n seen = 0\n percent = 0.0\n printed = [False for foo in range(1000)]\n \n fullimages = {}\n segmentation = {}\n\n data = []\n self.labels = []\n \n for fileprefix in fileprefixes:\n print(\" \" + fileprefix)\n \n thissequence = sorted(glob.glob(str(filepath) + fileprefix + \"*\" + str(suffix) + \"*\" + str(extension)))\n asdfghjkl = 0\n \n parser = Parser()\n seqfiltstr = \"\"\n dimlist = []\n for w in thissequence:\n labels = parser.parse(open_pds(w.replace(extension, lblext))) \n filt = labels['INSTRUMENT_STATE_PARMS']['FILTER_NAME'][9]\n seqfiltstr += filt\n h = int(labels['IMAGE']['LINES'])\n w = int(labels['IMAGE']['LINE_SAMPLES'])\n dimlist.append([h, w])\n #print(\" %s %s %s\" % (filt, h, w))\n\n print(\"Filter name:\", labels['INSTRUMENT_STATE_PARMS']['FILTER_NAME'])\n \n #print(seqfiltstr)\n # print(dimlist)\n seqstocombine = []\n \n # Handle cases which appear to be several series of observations\n if len(seqfiltstr) % 7 == 0:\n for i in range(len(seqfiltstr) // 7):\n subseq = thissequence[7*i:7*i+7]\n subseqfilt = seqfiltstr[7*i:7*i+7]\n if subseqfilt == '0123456':\n cont = False\n for j in range(7*i, 7*i+7):\n if dimlist[7*i] != dimlist[j]:\n print(\"SIZE ERROR\")\n cont = True\n if cont:\n continue\n \n seqstocombine.append(subseq)\n \n else:\n if seqfiltstr == '00112233445566':\n seq1 = [thissequence[2*i] for i in range(len(thissequence) // 2)]\n seq2 = [thissequence[2*i+1] for i in range(len(thissequence) // 2)]\n \n seqstocombine.append(seq1)\n seqstocombine.append(seq2)\n \n break\n else:\n print(\"Length multiple of 7 but bad sequence\")\n\n # Non-7 number of observations\n else:\n for i in range(len(seqfiltstr)):\n subseq = thissequence[i:i+7]\n subseqfilt = seqfiltstr[i:i+7]\n if subseqfilt == '0123456':\n cont = False\n for j in range(i, i+7):\n if dimlist[i] != dimlist[j]:\n print(\"SIZE ERROR\")\n cont = True\n if cont: continue\n \n seqstocombine.append(subseq)\n \n # No actual multispectral images exist, so use all RGB (sol 388)\n if len(seqstocombine) == 0 and 'sol388' in self.archive:\n seqstocombine = [[f] for f in thissequence]\n \n # Now, download each sequence with this prefix\n for subseq in seqstocombine:\n qwertyuiop = 0\n bigimage = None\n \n err = False\n # Get each image within sequence\n for filename in subseq:\n namestem = filename.split('.')[0].split('/')[-1]\n\n try:\n (image, lbls) = self.load_image(namestem, filepath, ext=extension, lblext=lblext)\n except ValueError as e:\n #print(\"An error happened while processing %s\" % filename)\n err = True\n break\n\n (h, w, b) = image.shape\n \n if b == 3:\n self.rgbdict[fileprefix + str(asdfghjkl)] = namestem\n fullimages[fileprefix + str(asdfghjkl)] = image\n #print(\"Stored %s to rgbdict\" % (fileprefix + str(asdfghjkl)))\n \n if bigimage == None 
and 'sol388' not in filepath:\n bigimage = np.zeros([h, w, 9], dtype='uint8')\n elif bigimage == None:\n bigimage = np.zeros([h, w, b], dtype='uint8')\n \n bigimage[:,:,qwertyuiop:qwertyuiop+b] = image\n\n qwertyuiop += b\n \n\n # Reorder images based on camera so filters are ordered\n if eye in ['L', 'R']:\n bi = np.zeros([h, w, 9], dtype='uint8')\n if eye == 'L':\n bi[:, :, 0] = bigimage[:, :, 0]\n bi[:, :, 1] = bigimage[:, :, 1]\n bi[:, :, 2] = bigimage[:, :, 2]\n bi[:, :, 3] = bigimage[:, :, 4]\n bi[:, :, 4] = bigimage[:, :, 3]\n bi[:, :, 5] = bigimage[:, :, 6]\n bi[:, :, 6] = bigimage[:, :, 5]\n bi[:, :, 7] = bigimage[:, :, 7]\n bi[:, :, 8] = bigimage[:, :, 8]\n elif eye == 'R':\n bi[:, :, 0] = bigimage[:, :, 2]\n bi[:, :, 1] = bigimage[:, :, 1]\n bi[:, :, 2] = bigimage[:, :, 0]\n bi[:, :, 3] = bigimage[:, :, 4]\n bi[:, :, 4] = bigimage[:, :, 3]\n bi[:, :, 5] = bigimage[:, :, 5]\n bi[:, :, 6] = bigimage[:, :, 6]\n bi[:, :, 7] = bigimage[:, :, 7]\n bi[:, :, 8] = bigimage[:, :, 8]\n bigimage = bi\n\n if err:\n print(\" ...didn't load sequence. There was an error.\")\n continue\n \n print(\" ...loaded one sequence:\", (fileprefix + str(asdfghjkl)))\n \n if 'sol388' not in self.archive:\n name = fileprefix + str(asdfghjkl) + '_' + unit + '_' + feature\n else:\n name = namestem + '_' + unit + '_' + feature\n\n \n (segments, segmentlabels) = self.segment_image(bigimage, unit=unit)\n segmentation[fileprefix + str(asdfghjkl)] = segments[0][1]\n\n for i in range(len(segments)):\n data += [[float(x) for x in self.process_image(segments[i], name + segmentlabels[i], feature=feature)]]\n \n asdfghjkl += 1\n \n ###########################################\n \n seen += 1\n \n # output read-in progress\n if percent < 100:\n if (round((seen / float(numfiles)) * 100, 1) >= percent) and (printed[int(percent * 10)] == False):\n #print(\"...%3.1f%%...\" % percent)\n printed[int(percent * 10)] == True\n percent = round(((seen / float(numfiles)) * 100), 1) + 1\n print(\"...100%...\")\n print(\"Transposing data...\")\n data = np.array(data).T\n self.xvals.sort()\n \n # Output the pickle\n print(\"Writing pickle to \" + self.archive + \" ...\")\n outf = open(self.archive, 'w')\n pickle.dump((data, fullimages, segmentation, self.labels, self.xlabel, self.ylabel, self.xvals, self.rgbdict, self.lblext, self.initdata, self.initfilename), outf)\n outf.close()\n print(\"Wrote pickle to \" + self.archive)",
"def read_process_labelled(src_dir, window=0.2, overlap=0.5, debug=True):\n\n arr_features = []\n\n # Read files from the folders\n for x, _ in GENRES.items():\n folder = src_dir + x\n \n for root, subdirs, files in os.walk(folder):\n for file in files:\n # Read the audio file\n file_name = folder + \"/\" + file\n signal, sr = librosa.load(file_name)\n signal = signal[:660000]\n \n # Debug process\n if debug:\n print(f\"Reading file: {file_name}\")\n \n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n features['genre'] = GENRES[x]\n arr_features.append(features)\n\n return arr_features",
"def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)",
"def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))",
"def load_defects(self, val_dir):\n \n img_list_1 = os.listdir(val_dir+'/'+'1')\n img_list_2 = os.listdir(val_dir+'/'+'2')\n img_list_3 = os.listdir(val_dir+'/'+'3')\n img_list_4 = os.listdir(val_dir+'/'+'4')\n\n\n\n img_list_1 = self.make_imgs_list(val_dir + '/' + '1', img_list_1)\n img_list_2 = self.make_imgs_list(val_dir + '/' + '2', img_list_2)\n img_list_3 = self.make_imgs_list(val_dir + '/' + '3', img_list_3)\n img_list_4 = self.make_imgs_list(val_dir + '/' + '4', img_list_4)\n\n\n img_list_1 = self.load_imgsLabels(img_list_1)\n img_list_2 = self.load_imgsLabels(img_list_2)\n img_list_3 = self.load_imgsLabels(img_list_3)\n img_list_4 = self.load_imgsLabels(img_list_4)\n\n\n img_list_1 = self.features_to_np_array(img_list_1)\n img_list_2 = self.features_to_np_array(img_list_2)\n img_list_3 = self.features_to_np_array(img_list_3)\n img_list_4 = self.features_to_np_array(img_list_4)\n\n lbl_list_1 = img_list_1.shape[0]*[1]\n lbl_list_2 = img_list_2.shape[0]*[2]\n lbl_list_3 = img_list_3.shape[0]*[3]\n lbl_list_4 = img_list_4.shape[0]*[4]\n\n\n imgs = np.concatenate((img_list_1, img_list_2, img_list_3, img_list_4))\n lbls = lbl_list_1 + lbl_list_2 + lbl_list_3 + lbl_list_4\n\n\n lbls = np.array(lbls)\n \n lbls = lbls - 1\n \n lbls = to_categorical(lbls)\n \n return imgs, lbls",
"def _setup(self, used_sample_id_list):\n self.file_lengths = dict()\n self.len = 0\n\n files_to_remove = []\n for file_path in reversed(self.file_paths):\n data = np.load(file_path)\n\n index_list = self._get_index_list(data, used_sample_id_list)\n if not index_list:\n files_to_remove.append(file_path)\n continue\n\n self.data = data\n self.current_loaded_file = file_path\n self.index_list = index_list\n\n self.index_dict[file_path] = index_list\n\n self.file_lengths[file_path] = int(np.ceil(\n float(len(index_list))/float(self.batch_size)))\n\n self.len += self.file_lengths[file_path]\n\n for f_p in files_to_remove:\n self.file_paths.remove(f_p)\n\n self.has_labels = len(self.data[0]) >= 3",
"def load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] != 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': \"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train",
"def prepare_training_data(self, data_folder_path):\n\n #get the directories (one directory for each subject) in data folder\n dirs = os.listdir(data_folder_path)\n\n #list to hold all subject faces\n faces = []\n #list to hold labels for all subjects\n labels = []\n #List to hold subject names\n subjects = []\n\n label = -1;\n #let's go through each directory and read images within it\n for dir_name in dirs:\n\n #ignore system files like .DS_Store\n if dir_name.startswith(\".\"):\n continue;\n\n label += 1\n subjects.append(dir_name)\n logger.info(\"label=%d subject=%s\" %(label, dir_name))\n\n #build path of directory containing images for current subject subject\n #sample subject_dir_path = \"training-data/Bruce\"\n subject_dir_path = data_folder_path + \"/\" + dir_name\n\n #get the images names that are inside the given subject directory\n subject_images_names = os.listdir(subject_dir_path)\n\n #go through each image name, read image,\n #detect face and add face to list of faces\n for image_name in subject_images_names:\n\n #ignore system files like .DS_Store\n if image_name.startswith(\".\"):\n continue;\n\n #sample image path = training-data/Bruce/face1.png\n image_path = subject_dir_path + \"/\" + image_name\n image = cv2.imread(image_path)\n logger.info(\"file size: %d. numpy image size: %d\" %(os.path.getsize(image_path), len(image)))\n face, rect = self.detect_face(image)\n\n #we will ignore faces that are not detected\n if face is not None:\n #add face to list of faces\n faces.append(face)\n #add label for this face\n labels.append(label)\n\n return faces, labels, subjects",
"def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))",
"def __init__(self, file_paths_list, batch_size=1, patient_shape=(128, 128, 128), shuffle=True,\n mode_name='training_model', trans=[128]):\n # Set file_loader specific attributes\n self.rois = dict(oars=['Brainstem', 'SpinalCord', 'RightParotid', 'LeftParotid',\n 'Esophagus', 'Larynx', 'Mandible'], targets=['PTV56', 'PTV63', 'PTV70'])\n\n #self.batch_size = batch_size # Number of patients to load in a single batch\n self.patient_shape = patient_shape # Shape of the patient\n self.indices = np.arange(len(file_paths_list)) # Indices of file paths\n self.file_paths_list = file_paths_list # List of file paths\n self.shuffle = shuffle # Indicator as to whether or not data is shuffled\n self.full_roi_list = sum(map(list, self.rois.values()), []) # make a list of all rois\n self.num_rois = len(self.full_roi_list)\n self.patient_id_list = ['pt_{}'.format(k.split('/pt_')[1].split('/')[0].split('.csv')[0]) for k in\n self.file_paths_list] # the list of patient ids with information in this data loader\n self.trans = trans\n # Set files to be loaded\n self.required_files = None\n # self.aff_trans = MyRandomTransform()#transformation()\n self.mode_name = mode_name # Defines the mode for which data must be loaded for\n self.set_mode(self.mode_name) # Set load mode to prediction by default\n \n self.ct_scaling_factor = 4000. #Added by Ramsy\n self.dose_scaling_factor = 100. #Added by Ramsy",
"def train_iter(self, shuffle):\n paths = glob.glob(os.path.join(self.train_dir, \"*.jpg\"))\n if shuffle:\n random.shuffle(paths)\n for path in paths:\n label = os.path.basename(path).partition(\".\")[0]\n yield (path, label)",
"def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels",
"def ReadData(self, path):\n os.chdir(path)\n folders=os.listdir()\n if 'data.hdf5' in folders:\n print('Loading data from hdf5 file! Might take some time, be patient!')\n file=h5py.File('data.hdf5','r+')\n data=(np.array(list(file['imgs'])),np.array(list(file['lables'])))\n self.real_labels=list(file['real_labels'])\n file.close()\n\n else:\n print('1. Collecting data.')\n err_logs = []\n img=[]\n lable=[]\n for folder in tqdm(folders):\n\n os.chdir(os.path.join(path,folder))\n for file in os.listdir():\n try:\n dat=(plt.imread(open(file,'rb')))\n img.append(resize_image(dat, (resize_x, resize_y),\n mode='constant',\n ))\n lable.append(folder)\n if folder not in self.real_labels:\n self.real_labels.append(folder)\n \n except OSError:\n err_logs.append([folder, file])\n print('\\nError logs:')\n for e in range(len(err_logs)):\n print('\\tFolder: {} | Some OSError for file: {}'.format(err_logs[e][0],\n err_logs[e][0]))\n \n \n print('2. Encoding data to categorical.')\n # Encode Letters into numerical categories.\n le = LabelEncoder()\n le.fit(lable)\n lable = le.transform(lable)\n lable = np.array(lable).reshape(-1, 1)\n \n print('3. Onehot encoding.')\n # Onehot encoding.\n ohe = OneHotEncoder(sparse=False)\n ohe.fit(lable)\n lable = ohe.transform(lable)\n \n # Shaffle data.\n print('4. Shuffle data.')\n img, lable = shuffle(img, lable)\n\t\t \n print('5. Saving data.')\n data=(np.asarray(img), np.asarray(lable))\n os.chdir(path)\n \n file=h5py.File('data.hdf5','w')\n x=file.create_dataset('imgs',data=np.array(img))\n y=file.create_dataset('lables',data=np.array(lable))\n print(self.real_labels)\n rl=file.create_dataset('real_labels',data=np.string_(self.real_labels))\n file.close()\n print('Data set is stored in Data.hdf5 file. ')\n\n return data",
"def main(args):\n data_transform = transforms.Compose([\n transforms.Scale((256, 256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n dataset = datasets.ImageFolder(root=args.root_dir, transform=data_transform)\n dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, \n shuffle=False, num_workers=0, pin_memory=True)\n net = get_feature_extractor()\n\n if torch.cuda.is_available():\n net = net.cuda()\n\n features_out = np.zeros((len(dataset), 4096))\n labels_out = np.zeros(len(dataset))\n \n p = progressbar.ProgressBar(widgets=[progressbar.ETA(), ' ', progressbar.Percentage()])\n for i, samples in p(enumerate(dataloader)):\n images, labels = samples\n if torch.cuda.is_available():\n images = images.cuda()\n images = Variable(images)\n features = net(images).cpu().data.numpy()\n features_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = features\n labels_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = labels.int().numpy()\n print(i)\n\n with open(os.path.join(args.out, 'features.pickle'),'wb') as f:\n pickle.dump(features_out, f)\n with open(os.path.join(args.out, 'labels.pickle'),'wb') as f:\n pickle.dump(labels_out, f)",
"def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)",
"def ExtractDataSetFeatures(dir_name,imageExt ='pgm',features_type='hog',cnn_model_path='C:\\\\Users\\\\IssaMawad\\\\Documents\\\\DSIP_ML_Project\\\\cnn\\\\models\\\\20170512-110547.pb'):\n if(features_type=='facenet'):\n return EvaluateCNN(dir_name,cnn_model_path)\n features = [];\n classes = [];\n for subdir, dirs, files in os.walk(dir_name):\n for dir in dirs:\n dirFull = os.path.join( dir_name,dir)\n for innerSubDir,innerDirs,innerFiles in os.walk(dirFull):\n #if(len(innerFiles)<8):\n # continue;\n #print(dir_name)\n for file in innerFiles:\n if(not file.endswith(imageExt)):\n continue;\n fullFile = os.path.join(dirFull,file)\n if(features_type=='hog'):\n features.append(extractHOGFeatures(ReadImage(fullFile)))\n if(features_type=='gbrdct'):\n features.append(extractGaborDCT(ReadImage(fullFile)))\n if(features_type=='lgbphs'):\n features.append(extractLGBPHS(ReadImage(fullFile)))\n if(features_type=='gabor'):\n features.append(extractGabor(ReadImage(fullFile)))\n #if(features_type=='gbrzk'):\n # features.append(extractGaborZernike(ReadImage(fullFile)))\n if(features_type=='dum'):\n features.append(ReadImage(fullFile).ravel())\n classes.append(dir)\n return np.asarray(features),np.asanyarray(classes)",
"def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')",
"def prepare(self):\n bcolz.set_nthreads(2)\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data and VALIDATION in self.data: return\n\n # step 1: load the file names\n patients = sorted(glob.glob(self.location+'/*.*/'))\n print len(patients), \"patients\"\n\n # step 1: load the file names\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in patients]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n self.spacings[s] = []\n self.origins[s] = []\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'spacings.pkl.gz') as f:\n spacings = cPickle.load(f)\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'origins.pkl.gz') as f:\n origins = cPickle.load(f)\n\n # load the filenames and put into the right dataset\n for i, patient_folder in enumerate(patients):\n patient_id = str(patient_folder.split(path.sep)[-2])\n if patient_id in validation_patients:\n dataset = VALIDATION\n else:\n dataset = TRAIN\n\n\n label = labels_as_dict[patient_id]\n if self.only_positive and not label:\n continue\n\n self.data[dataset].append(patient_folder)\n self.labels[dataset].append(label)\n self.names[dataset].append(patient_id)\n self.spacings[dataset].append(spacings[patient_id])\n self.origins[dataset].append(origins[patient_id])\n\n # give every patient a unique number\n last_index = -1\n for set in self.datasets:\n self.indices[set] = range(last_index+1,last_index+1+len(self.data[set]))\n if len(self.indices[set]) > 0:\n last_index = self.indices[set][-1]\n print set, len(self.indices[set]), \"samples\"",
"def load_data_from_dir(self,\n dir_list=[],\n exclude=[]):\n dir_list_ = dir_list[:]\n\n if len(dir_list) == 0:\n eprint(\"CANNOT load data generator with an empty list of directories: {}\".format(dir_list))\n return\n\n for directory in dir_list_:\n if not os.path.isdir(directory):\n eprint(\"\\t\\t {}: {} is not a directory\".format(self.load_data_from_dir.__name__, directory))\n return\n\n # Read Data from current directory\n while dir_list_:\n # Pop first directory name and create dataloader if its a valid folder\n current_dir = dir_list_.pop(0)\n valid_dir = True\n for name in exclude:\n if name in current_dir and valid_dir:\n valid_dir = False\n data_file = current_dir + \"/data.mat\"\n if os.path.isfile(data_file) and \"takktile_\" in current_dir and valid_dir:\n self.dataloaders.append(takktile_dataloader(data_dir=current_dir,\n config=self.config,\n augment=self.augment))\n\n # Find all child directories of current directory and recursively load them\n data_dirs = [os.path.join(current_dir, o) for o in os.listdir(current_dir)\n if os.path.isdir(os.path.join(current_dir, o))]\n for d in data_dirs:\n dir_list_.append(d)\n\n self.num_dl = len(self.dataloaders)\n\n if self.transform_type:\n self.__calculate_data_transforms()\n\n # Create Eval Data\n if self.create_eval_data:\n self.eval_len = (self.__len__())//10\n self.create_eval_data = False\n\n # Calculate class number and ratios\n # Also calculate class diffs\n if not self.config['label_type'] == 'value':\n self.__class_nums = self.dataloaders[0].get_data_class_numbers(self.__get_data_idx(0))\n for i, dl in enumerate(self.dataloaders[1:]):\n self.__class_nums += dl.get_data_class_numbers(self.__get_data_idx(i+1))\n self.__class_ratios = self.__class_nums / float(np.mean(self.__class_nums))\n self.__class_diff = np.max(self.__class_nums) - self.__class_nums\n self.__class_diff = [d if n > 0 else 0 for n,d in zip(self.__class_nums, self.__class_diff)]\n\n # Reset and prepare data\n self.on_epoch_end()"
]
| [
"0.6891191",
"0.67281985",
"0.6459952",
"0.6419726",
"0.6413108",
"0.62061477",
"0.61670274",
"0.6143902",
"0.60984755",
"0.60692763",
"0.60558665",
"0.60204107",
"0.60094136",
"0.59958917",
"0.5993219",
"0.59861505",
"0.5970218",
"0.59578747",
"0.5953404",
"0.59519756",
"0.59349",
"0.59302926",
"0.5920986",
"0.59018797",
"0.58802724",
"0.58650386",
"0.585076",
"0.58411217",
"0.5839111",
"0.58291465"
]
| 0.67848784 | 1 |
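The dir_resolution record above extracts MFCC fragments and labels, concatenates them, and saves them to .npy files, while its docstring defers the shuffling step to a DataLoader. Below is a minimal sketch of that follow-on step, assuming the saved file names and the 128x128 fragment shape (borrowed from the load_features_labels negative in the same record); these names and shapes are illustrative assumptions, not part of the record itself.

```python
# Minimal sketch (assumed file names and shapes): reload the saved MFCC
# features and labels, wrap them in a TensorDataset, and let the DataLoader
# shuffle them each epoch, as the docstring above describes.
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

features = torch.from_numpy(np.load("mfcc_features.npy")).view(-1, 1, 128, 128).float()
labels = torch.from_numpy(np.load("mfcc_labels.npy")).long()

loader = DataLoader(TensorDataset(features, labels), batch_size=64, shuffle=True)

for batch_features, batch_labels in loader:
    # feed each shuffled batch to the training loop
    pass
```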
Test permuting FrameSet axes using a SkyFrame. Permuting the axes of the current frame of a frame set in situ (by calling `permAxes` on the frame set itself) should update the connected mappings. | def test_FrameSetPermutationSkyFrame(self):
# test with arbitrary values that will not be wrapped by SkyFrame
x = 0.257
y = 0.832
frame1 = ast.Frame(2)
unitMap = ast.UnitMap(2)
frame2 = ast.SkyFrame()
frameSet = ast.FrameSet(frame1, unitMap, frame2)
self.assertAlmostEqual(frameSet.applyForward([x, y]), [x, y])
self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y])
# permuting the axes of the current frame also permutes the mapping
frameSet.permAxes([2, 1])
self.assertAlmostEqual(frameSet.applyForward([x, y]), [y, x])
self.assertAlmostEqual(frameSet.applyInverse([x, y]), [y, x])
# permuting again puts things back
frameSet.permAxes([2, 1])
self.assertAlmostEqual(frameSet.applyForward([x, y]), [x, y])
self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_FrameSetPermutationUnequal(self):\n # Initial mapping: 3 inputs, 2 outputs: 1-1, 2-2, 3=z\n # Test using arbitrary values for x,y,z\n x = 75.1\n y = -53.2\n z = 0.123\n frame1 = ast.Frame(3)\n permMap = ast.PermMap([1, 2, -1], [1, 2], [z])\n frame2 = ast.Frame(2)\n frameSet = ast.FrameSet(frame1, permMap, frame2)\n self.assertAlmostEqual(frameSet.applyForward([x, y, z]), [x, y])\n self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y, z])\n\n # permuting the axes of the current frame also permutes the mapping\n frameSet.permAxes([2, 1])\n self.assertAlmostEqual(frameSet.applyForward([x, y, z]), [y, x])\n self.assertAlmostEqual(frameSet.applyInverse([x, y]), [y, x, z])\n\n # permuting again puts things back\n frameSet.permAxes([2, 1])\n self.assertAlmostEqual(frameSet.applyForward([x, y, z]), [x, y])\n self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y, z])",
"def permute(self):\n raise NotImplementedError()",
"def permute(ts: Tensor, axes) -> Tensor:\n permute_op = PermuteOp(axes)\n return permute_op(ts, None)",
"def run_permute(self):\n # CH5+ = [[1,2,3,4,5]] , or for testing [[1,2,3],[4,5]]\n # H5O2+ = [[2,3],[4,5]] [O_left,O_left,H_left,H_left,H_right,H_right,H_center]\n like_atoms = self.technique_kwargs['like_atoms']\n ensemble = self.technique_kwargs['ensemble']\n\n # Get ensemble size\n if ensemble is None:\n walkers = np.tile(self.coord, (self.num_walkers, 1, 1))\n else:\n walkers = ensemble\n\n # For each tuple of like atoms, we will randomly permute them\n for pair in like_atoms:\n cds_to_randomize = walkers[:, pair]\n [np.random.shuffle(x) for x in cds_to_randomize]\n # Assign the stack of permuted atom coordinates to the appropriate place in the walker array\n walkers[:, pair] = cds_to_randomize\n return walkers",
"def permutation(self):\n perm = np.random.permutation(self.n_samples)\n self.data = self.data.iloc[perm]\n self.labels = self.labels.iloc[perm]\n self.labels_onehot = self.labels_onehot.iloc[perm]\n self.df_perm = self.df_perm.iloc[perm]",
"def test_set_perms(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n perms1 = self.perms\n perms2 = set(['Perm1', 'Perm2'])\n perms3 = set(['Perm2', 'Perm3'])\n perms4 = []\n \n # grant single property\n set_group_perms(group0, perms1, object0)\n self.assertEqual(perms1, set(get_group_perms(group0, object0)))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group0, perms2, object0)\n self.assertEqual(perms2, set(get_group_perms(group0, object0)))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group0, perms3, object0)\n self.assertEqual(perms3, set(get_group_perms(group0, object0)))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n # remove perms\n set_group_perms(group0, perms4, object0)\n self.assertEqual(perms4, get_group_perms(group0, object0))\n self.assertFalse(group0.TestModel_gperms.filter(obj=object0).exists())\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group0, perms2, object1)\n self.assertEqual(perms4, get_group_perms(group0, object0))\n self.assertEqual(perms2, set(get_group_perms(group0, object1)))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group1, perms1, object0)\n self.assertEqual(perms4, get_group_perms(group0, object0))\n self.assertEqual(perms2, set(get_group_perms(group0, object1)))\n self.assertEqual(perms1, set(get_group_perms(group1, object0)))",
"def test_random_permute_inverse_changes_group(self):\n # reproducible arbitrariness\n np.random.seed(232)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(groups0 != groups1), rho*self.N)",
"def test_to_multiframe_xyz_openeye(self):\n from openff.toolkit.utils import OpenEyeToolkitWrapper\n\n tkw = OpenEyeToolkitWrapper()\n # load in an SDF of butane with multiple conformers in it\n molecules = Molecule.from_file(\n get_data_file_path(\"molecules/butane_multi.sdf\"),\n \"sdf\",\n toolkit_registry=tkw,\n )\n # now we want to combine the conformers to one molecule\n butane = molecules[0]\n for mol in molecules[1:]:\n butane.add_conformer(mol._conformers[0])\n\n # make sure we have the 7 conformers\n assert butane.n_conformers == 7\n with NamedTemporaryFile(suffix=\".xyz\") as iofile:\n # try and write out the xyz file\n butane.to_file(iofile.name, \"xyz\", toolkit_registry=tkw)\n\n # now lets check whats in the file\n with open(iofile.name) as xyz_data:\n data = xyz_data.readlines()\n # make sure we have the correct amount of lines writen\n assert len(data) == 112\n # make sure all headers and frame data was writen\n assert data.count(\"14\\n\") == 7\n for i in range(1, 8):\n assert f\"C4H10 Frame {i}\\n\" in data\n\n # now make sure the first line of the coordinates are correct in every frame\n coords = [\n \"C 1.8902000189 0.0425999984 0.2431000024\\n\",\n \"C 1.8976000547 -0.0232999995 0.2845999897\\n\",\n \"C -1.8794000149 -0.1792999953 -0.2565000057\\n\",\n \"C -1.5205999613 -0.0164999999 0.2786999941\\n\",\n \"C -1.4889999628 -0.2619000077 0.4871000051\\n\",\n \"C -1.4940999746 -0.2249000072 -0.0957999974\\n\",\n \"C -1.8826999664 -0.0372000001 0.1937000006\\n\",\n ]\n for coord in coords:\n assert coord in data",
"def test_to_multiframe_xyz_rdkit(self):\n from openff.toolkit.utils import RDKitToolkitWrapper\n\n tkw = RDKitToolkitWrapper()\n # load in an SDF of butane with multiple conformers in it\n molecules = Molecule.from_file(\n get_data_file_path(\"molecules/butane_multi.sdf\"),\n \"sdf\",\n toolkit_registry=tkw,\n )\n # now we want to combine the conformers to one molecule\n butane = molecules[0]\n for mol in molecules[1:]:\n butane.add_conformer(mol._conformers[0])\n\n # make sure we have the 7 conformers\n assert butane.n_conformers == 7\n with NamedTemporaryFile(suffix=\".xyz\") as iofile:\n # try and write out the xyz file\n butane.to_file(iofile.name, \"xyz\", toolkit_registry=tkw)\n\n # now lets check whats in the file\n with open(iofile.name) as xyz_data:\n data = xyz_data.readlines()\n # make sure we have the correct amount of lines writen\n assert len(data) == 112\n # make sure all headers and frame data was writen\n assert data.count(\"14\\n\") == 7\n for i in range(1, 8):\n assert f\"C4H10 Frame {i}\\n\" in data\n\n # now make sure the first line of the coordinates are correct in every frame\n coords = [\n \"C 1.8902000000 0.0426000000 0.2431000000\\n\",\n \"C 1.8976000000 -0.0233000000 0.2846000000\\n\",\n \"C -1.8794000000 -0.1793000000 -0.2565000000\\n\",\n \"C -1.5206000000 -0.0165000000 0.2787000000\\n\",\n \"C -1.4890000000 -0.2619000000 0.4871000000\\n\",\n \"C -1.4941000000 -0.2249000000 -0.0958000000\\n\",\n \"C -1.8827000000 -0.0372000000 0.1937000000\\n\",\n ]\n for coord in coords:\n assert coord in data",
"def test_permute_W(self):\n\t\tN, M = 4096, 4096\n\t\titerator = self.watcher.make_layer_iterator(layers=[self.fc2_layer])\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertEqual(ww_layer.layer_id,self.fc2_layer)\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W.shape,(N,M))\n\t\t\t\n\t\t\tself.watcher.apply_permute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertNotEqual(W[0,0],W2[0,0])\n\t\t\t\n\t\t\tself.watcher.apply_unpermute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W2.shape,(N,M))\n\t\t\tself.assertEqual(W[0,0],W2[0,0])",
"def _adjustFramesAsAxes(self, frames, idx, framesAsAxes):\n axisRange = {}\n slen = len(frames)\n check = 1\n for stride, axis in sorted([[v, k] for k, v in framesAsAxes.items()], reverse=True):\n axisRange[axis] = slen // stride\n slen = stride\n check *= axisRange[axis]\n if check != len(frames) and not hasattr(self, '_warnedAdjustFramesAsAxes'):\n self.logger.warning('framesAsAxes strides do not use all frames.')\n self._warnedAdjustFramesAsAxes = True\n frame = frames[idx].copy()\n for axis in self._axesList:\n frame.pop('Index' + axis.upper(), None)\n for axis, stride in framesAsAxes.items():\n frame['Index' + axis.upper()] = (idx // stride) % axisRange[axis]\n return frame",
"def test_to_multiframe_xyz(self):\n\n # load in an SDF of butane with multiple conformers in it\n molecules = Molecule.from_file(\n get_data_file_path(\"molecules/butane_multi.sdf\"), \"sdf\"\n )\n # now we want to combine the conformers to one molecule\n butane = molecules[0]\n for mol in molecules[1:]:\n butane.add_conformer(mol._conformers[0])\n\n # make sure we have the 7 conformers\n assert butane.n_conformers == 7\n with NamedTemporaryFile(suffix=\".xyz\") as iofile:\n # try and write out the xyz file\n butane.to_file(iofile.name, \"xyz\")\n\n # now lets check whats in the file\n with open(iofile.name) as xyz_data:\n data = xyz_data.readlines()\n # make sure we have the correct amount of lines writen\n assert len(data) == 112\n # make sure all headers and frame data was writen\n assert data.count(\"14\\n\") == 7\n for i in range(1, 8):\n assert f\"C4H10 Frame {i}\\n\" in data\n\n # now make sure the first line of the coordinates are correct in every frame\n coords = [\n \"C 1.8902000189 0.0425999984 0.2431000024\\n\",\n \"C 1.8976000547 -0.0232999995 0.2845999897\\n\",\n \"C -1.8794000149 -0.1792999953 -0.2565000057\\n\",\n \"C -1.5205999613 -0.0164999999 0.2786999941\\n\",\n \"C -1.4889999628 -0.2619000077 0.4871000051\\n\",\n \"C -1.4940999746 -0.2249000072 -0.0957999974\\n\",\n \"C -1.8826999664 -0.0372000001 0.1937000006\\n\",\n ]\n for coord in coords:\n assert coord in data",
"def test_perm(self):\n fun = get_problem('perm', dimension=2)\n self.assertAlmostEqual(fun(np.array([1.0, 0.5])), 0.0)",
"def set_RotationsInTiltSeries(self, TiltSeries_):\n kk = 0\n for Proj in TiltSeries_.Projections:\n Proj.rotInPlane = self.rotInPlane[kk]\n kk = kk + 1",
"def test_permutation(perm):\n n_src = len(perm)\n perm_tensor = torch.Tensor(perm)\n source_base = torch.ones(1, n_src, 10)\n sources = torch.arange(n_src).unsqueeze(-1) * source_base\n est_sources = perm_tensor.unsqueeze(-1) * source_base\n\n loss_func = PITLossWrapper(pairwise_mse)\n loss_value, reordered = loss_func(est_sources, sources, return_est=True)\n\n assert loss_value.item() == 0\n assert_allclose(sources, reordered)",
"def assertSetAllEqual(self, x, y):\n x = tf.expand_dims(x, axis=0)\n y = tf.expand_dims(y, axis=0)\n self.assertEqual(\n tf.size(tf.sets.difference(x, y)), 0, msg='Input sets are not equal.')\n self.assertEqual(\n tf.size(tf.sets.difference(y, x)), 0, msg='Input sets are not equal.')",
"def permute(p, dims, perm):\n if issparse(p):\n return _permute_sparse(p, dims, perm)\n return _permute_dense(p, dims, perm)",
"def test_permute_2d(self):\r\n a = reshape(arange(9), (3, 3))\r\n self.assertEqual(permute_2d(a, [0, 1, 2]), a)\r\n self.assertEqual(permute_2d(a, [2, 1, 0]),\r\n array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]))\r\n self.assertEqual(permute_2d(a, [1, 2, 0]),\r\n array([[4, 5, 3], [7, 8, 6], [1, 2, 0]]))",
"def permutation(self, **kwargs):\n self.locator.recognise_grid()\n red = self.locator.detect_colour(0, 'red')\n rospy.loginfo(\"permutation(): looking for red object: %s\" % str(red))\n blue = self.locator.detect_colour(0, 'blue')\n rospy.loginfo(\"permutation(): looking for blue object: %s\" % str(blue))\n if red[0] < blue[0]:\n sequence = [('red','M'),('blue','A'),('red','D')]\n else:\n sequence = [('red','M'),('blue','D'),('red','A')]\n\n colours = self.locator.tetris_blocks.keys() \n self.target_locations['D']\n self.target_locations['A']\n self.target_locations['M']\n answer = 'n'\n action_number = 0\n while action_number < len(sequence):\n\n (colour,pos) = sequence[action_number]\n \n rospy.loginfo('permutation(): %s to position %s' % (colour,pos))\n\n self.locator.update_pose()\n goal_pose = self.locator.pose[:]\n goal_pose[0:2] = self.target_locations[pos][0:2]\n success = self.locator.locate(colour, goal_pose)\n if not success:\n answer = raw_input('Failed to execute action. Try again? (y/n): ')\n if answer in ('y'):\n action_number -= 1\n continue\n \n action_number += 1",
"def set_fk_pose(*args):\n # Number of robot axes; could include external axes potentially.\n num_axes = 6\n\n try:\n for i in range(num_axes):\n set_axis(i + 1)\n except:\n pm.warning('Error setting FK pose')",
"def _share_setup(self):\n # Panel axes sharing, between main subplot and its panels\n def shared(paxs):\n return [\n pax for pax in paxs\n if not pax._panel_filled and pax._panel_share\n ]\n\n if not self._panel_side: # this is a main axes\n # Top and bottom\n bottom = self\n paxs = shared(self._bpanels)\n if paxs:\n bottom = paxs[-1]\n for iax in (self, *paxs[:-1]):\n # parent is *bottom-most* panel\n iax._sharex_setup(bottom, 3)\n paxs = shared(self._tpanels)\n for iax in paxs:\n iax._sharex_setup(bottom, 3)\n # Left and right\n left = self\n paxs = shared(self._lpanels)\n if paxs:\n left = paxs[0]\n for iax in (*paxs[1:], self):\n iax._sharey_setup(left, 3) # parent is *bottom-most* panel\n paxs = shared(self._rpanels)\n for iax in paxs:\n iax._sharey_setup(left, 3)\n\n # Main axes, sometimes overrides panel axes sharing\n # TODO: This can get very repetitive, but probably minimal impact?\n # Share x axes\n parent, *children = self._get_extent_axes('x')\n for child in children:\n child._sharex_setup(parent)\n # Share y axes\n parent, *children = self._get_extent_axes('y')\n for child in children:\n child._sharey_setup(parent)",
"def set(self, **kwargs):\n for ax in self.axes.flat:\n ax.set(**kwargs)\n return self",
"def compare_frames(frame1, frame2):\n for attr in (\"naxes\", \"axes_type\", \"axes_order\", \"unit\", \"axes_names\"):\n assert getattr(frame1, attr) == getattr(frame2, attr)",
"def permute_sentence(sentence, permutation_set):\n pass",
"def permute_qubits(self, axes, inverse=False):\n\n self._t = self._permuted_tensor(axes, inverse=inverse)\n return self",
"def test_permute_W_with_model(self):\n\t\tN, M = 4096, 4096\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, layers=[self.fc2_layer])\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertEqual(ww_layer.layer_id,self.fc2_layer)\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W.shape,(N,M))\n\t\t\t\n\t\t\tself.watcher.apply_permute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertNotEqual(W[0,0],W2[0,0])\n\t\t\t\n\t\t\tself.watcher.apply_unpermute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W2.shape,(N,M))\n\t\t\tself.assertEqual(W[0,0],W2[0,0])",
"def set_TranslationsInTiltSeries(self, TiltSeries_):\n for (kk, Proj) in enumerate(TiltSeries_._ProjectionList):\n Proj._alignmentTransX = self._alignmentTransX[kk]\n Proj._alignmentTransY = self._alignmentTransY[kk]",
"def _updateProjectedPts(self):\n for pointSetName in self.pointSets:\n pointSet = self.pointSets[pointSetName]\n proj_pts = self._evaluatePoints(\n pointSet.u,\n pointSet.v,\n pointSet.t,\n pointSet.uvlimits0,\n pointSet.tlimits0,\n pointSet.bodyID,\n pointSet.faceID,\n pointSet.edgeID,\n pointSet.nPts,\n )\n pointSet.proj_pts = proj_pts",
"def Mat_CorrectPerm(X0,X):\n\n Xout = dp(X)\n\n nX = np.shape(X)\n\n for rx in range(nX[2]):\n for ry in range(nX[3]):\n Xt = X[:,:,rx,ry]\n xx,p=CorrectPerm(X0,Xt)\n Xout[:,:,rx,ry]=xx\n\n return Xout",
"def _frameDictToFrames(self, frameDict):\n frames = []\n if not frameDict['axesAllowed']:\n frameCount = max(frameDict['byFrame']) + 1\n for frameIdx in range(frameCount):\n frame = {'sources': frameDict['byFrame'].get(frameIdx, [])}\n frames.append(frame)\n else:\n axesCount = [max(aKey[idx] for aKey in frameDict['byAxes']) + 1\n for idx in range(len(self._axesList))]\n for aKey in itertools.product(*[range(count) for count in axesCount][::-1]):\n aKey = tuple(aKey[::-1])\n frame = {\n 'sources': frameDict['byAxes'].get(aKey, []),\n }\n for idx, axis in enumerate(self._axesList):\n if axesCount[idx] > 1:\n frame[f'Index{axis.upper()}'] = aKey[idx]\n frames.append(frame)\n return frames"
]
| [
"0.76181346",
"0.49969777",
"0.49213955",
"0.4911864",
"0.48749465",
"0.48103952",
"0.4778155",
"0.47578478",
"0.4733813",
"0.47031477",
"0.4684239",
"0.46478426",
"0.4607625",
"0.4590625",
"0.4569246",
"0.4523995",
"0.4502329",
"0.44952422",
"0.44672385",
"0.4460318",
"0.44435185",
"0.4437263",
"0.4432789",
"0.44258267",
"0.44133762",
"0.44067916",
"0.44039533",
"0.44013292",
"0.43878335",
"0.4384773"
]
| 0.82028717 | 0 |
Test that permuting FrameSet axes with nIn != nOut. Permuting the axes of the current frame of a frame set in situ (by calling `permAxes` on the frame set itself) should update the connected mappings. Make nIn != nOut in order to test DM9899: FrameSet.permAxes would fail if nIn != nOut. | def test_FrameSetPermutationUnequal(self):
# Initial mapping: 3 inputs, 2 outputs: 1-1, 2-2, 3=z
# Test using arbitrary values for x,y,z
x = 75.1
y = -53.2
z = 0.123
frame1 = ast.Frame(3)
permMap = ast.PermMap([1, 2, -1], [1, 2], [z])
frame2 = ast.Frame(2)
frameSet = ast.FrameSet(frame1, permMap, frame2)
self.assertAlmostEqual(frameSet.applyForward([x, y, z]), [x, y])
self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y, z])
# permuting the axes of the current frame also permutes the mapping
frameSet.permAxes([2, 1])
self.assertAlmostEqual(frameSet.applyForward([x, y, z]), [y, x])
self.assertAlmostEqual(frameSet.applyInverse([x, y]), [y, x, z])
# permuting again puts things back
frameSet.permAxes([2, 1])
self.assertAlmostEqual(frameSet.applyForward([x, y, z]), [x, y])
self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y, z]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_FrameSetPermutationSkyFrame(self):\n # test with arbitrary values that will not be wrapped by SkyFrame\n x = 0.257\n y = 0.832\n frame1 = ast.Frame(2)\n unitMap = ast.UnitMap(2)\n frame2 = ast.SkyFrame()\n frameSet = ast.FrameSet(frame1, unitMap, frame2)\n self.assertAlmostEqual(frameSet.applyForward([x, y]), [x, y])\n self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y])\n\n # permuting the axes of the current frame also permutes the mapping\n frameSet.permAxes([2, 1])\n self.assertAlmostEqual(frameSet.applyForward([x, y]), [y, x])\n self.assertAlmostEqual(frameSet.applyInverse([x, y]), [y, x])\n\n # permuting again puts things back\n frameSet.permAxes([2, 1])\n self.assertAlmostEqual(frameSet.applyForward([x, y]), [x, y])\n self.assertAlmostEqual(frameSet.applyInverse([x, y]), [x, y])",
"def test_random_permute_inverse_changes_group(self):\n # reproducible arbitrariness\n np.random.seed(232)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(groups0 != groups1), rho*self.N)",
"def permute(self):\n raise NotImplementedError()",
"def test_perm(self):\n fun = get_problem('perm', dimension=2)\n self.assertAlmostEqual(fun(np.array([1.0, 0.5])), 0.0)",
"def test_permutation(self):\r\n rng_R = random_state_type()\r\n post_r, out = permutation(rng_R, size=(9,), n=6)\r\n print 'OUT NDIM', out.ndim\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n # Check over two calls to see if the random state is correctly updated.\r\n # numpy_rng.permutation outputs one vector at a time,\r\n # so we call it iteratively to generate all the samples.\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy.asarray([numpy_rng.permutation(6)\r\n for i in range(9)])\r\n numpy_val1 = numpy.asarray([numpy_rng.permutation(6)\r\n for i in range(9)])\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))",
"def test_random_permute_inverse_subdivide(self):\n # reproducible arbitrariness\n np.random.seed(121)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/2\n subdiv = 2\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho, subdivide_by=subdiv)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n n_per_subgroup = self.N/(subdiv*nchan)\n subgroups0 = np.arange(self.N)/n_per_subgroup\n subgroups1 = controller.permute_inverse/n_per_subgroup\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(subgroups0 != subgroups1), rho*self.N)\n \n # but that some of the mismatches end up *within the same group*\n # (though they come from different subgroups)\n self.assertNotEqual(np.sum(groups0 != groups1), rho*self.N)",
"def test_permutation_helper(self):\r\n # permutation_helper needs \"ndim_added=1\", because its output\r\n # is one dimension more than its \"shape\" argument (and there's\r\n # no way to determine that automatically).\r\n # Check the working case, over two calls to see if the random\r\n # state is correctly updated.\r\n rf = RandomFunction(permutation_helper, tensor.imatrix, 8,\r\n ndim_added=1)\r\n rng_R = random_state_type()\r\n post_r, out = rf(rng_R, (7,), 8)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n # numpy_rng.permutation outputs one vector at a time,\r\n # so we call it iteratively to generate all the samples.\r\n numpy_val0 = numpy.asarray([numpy_rng.permutation(8)\r\n for i in range(7)])\r\n numpy_val1 = numpy.asarray([numpy_rng.permutation(8)\r\n for i in range(7)])\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))\r\n\r\n # This call lacks \"ndim_added=1\", so ndim_added defaults to 0.\r\n # A ValueError should be raised.\r\n rf0 = RandomFunction(permutation_helper, tensor.imatrix, 8)\r\n post_r0, out0 = rf0(rng_R, (7,), 8)\r\n f0 = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r0, mutable=True)],\r\n [out0], accept_inplace=True)\r\n self.assertRaises(ValueError, f0)\r\n\r\n # Here, ndim_added is 2 instead of 1. A ValueError should be raised.\r\n rf2 = RandomFunction(permutation_helper, tensor.imatrix, 8,\r\n ndim_added=2)\r\n post_r2, out2 = rf2(rng_R, (7,), 8)\r\n f2 = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r2, mutable=True)],\r\n [out2], accept_inplace=True)\r\n self.assertRaises(ValueError, f2)",
"def permutation(self):\n perm = np.random.permutation(self.n_samples)\n self.data = self.data.iloc[perm]\n self.labels = self.labels.iloc[perm]\n self.labels_onehot = self.labels_onehot.iloc[perm]\n self.df_perm = self.df_perm.iloc[perm]",
"def permute(ts: Tensor, axes) -> Tensor:\n permute_op = PermuteOp(axes)\n return permute_op(ts, None)",
"def test_permute_2d(self):\r\n a = reshape(arange(9), (3, 3))\r\n self.assertEqual(permute_2d(a, [0, 1, 2]), a)\r\n self.assertEqual(permute_2d(a, [2, 1, 0]),\r\n array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]))\r\n self.assertEqual(permute_2d(a, [1, 2, 0]),\r\n array([[4, 5, 3], [7, 8, 6], [1, 2, 0]]))",
"def test_permutation(perm):\n n_src = len(perm)\n perm_tensor = torch.Tensor(perm)\n source_base = torch.ones(1, n_src, 10)\n sources = torch.arange(n_src).unsqueeze(-1) * source_base\n est_sources = perm_tensor.unsqueeze(-1) * source_base\n\n loss_func = PITLossWrapper(pairwise_mse)\n loss_value, reordered = loss_func(est_sources, sources, return_est=True)\n\n assert loss_value.item() == 0\n assert_allclose(sources, reordered)",
"def Mat_CorrectPerm(X0,X):\n\n Xout = dp(X)\n\n nX = np.shape(X)\n\n for rx in range(nX[2]):\n for ry in range(nX[3]):\n Xt = X[:,:,rx,ry]\n xx,p=CorrectPerm(X0,Xt)\n Xout[:,:,rx,ry]=xx\n\n return Xout",
"def permutation_helper(random_state, n, shape):\r\n # n should be a 0-dimension array\r\n assert n.shape == ()\r\n # Note that it is important to convert `n` into an integer, because if it\r\n # is a long, the numpy permutation function will crash on Windows.\r\n n = int(n.item())\r\n\r\n if shape is None:\r\n # Draw only one permutation, equivalent to shape = ()\r\n shape = ()\r\n out_shape = list(shape)\r\n out_shape.append(n)\r\n out = numpy.empty(out_shape, int)\r\n for i in numpy.ndindex(*shape):\r\n out[i] = random_state.permutation(n)\r\n\r\n #print 'RETURNING', out.shape\r\n return out",
"def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.permutation((20,), 10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)",
"def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.permutation((20,), 10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)",
"def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_no_channel_axis)\n t_axes = conv1d_no_channel_axis.axes + channel_axis\n assert output.axes.is_equal_set(t_axes), (\"Output axes are not input axes + channel axis:\"\n \"{} != {} + {}\").format(output.axes,\n conv1d_no_channel_axis.axes,\n channel_axis)",
"def test_bit_flip_mixer_output(self, graph, n, target_hamiltonian):\n\n mixer_hamiltonian = qaoa.bit_flip_mixer(graph, n)\n assert decompose_hamiltonian(mixer_hamiltonian) == decompose_hamiltonian(target_hamiltonian)",
"def test_random_permute_inverse_fraction(self):\n # reproducible arbitrariness\n np.random.seed(12325)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(controller.permute_inverse == np.arange(self.N)),\n (1.0 - rho)*self.N)",
"def test_permute_W(self):\n\t\tN, M = 4096, 4096\n\t\titerator = self.watcher.make_layer_iterator(layers=[self.fc2_layer])\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertEqual(ww_layer.layer_id,self.fc2_layer)\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W.shape,(N,M))\n\t\t\t\n\t\t\tself.watcher.apply_permute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertNotEqual(W[0,0],W2[0,0])\n\t\t\t\n\t\t\tself.watcher.apply_unpermute_W(ww_layer)\n\t\t\tW2 = ww_layer.Wmats[0]\n\t\t\tself.assertEqual(W2.shape,(N,M))\n\t\t\tself.assertEqual(W[0,0],W2[0,0])",
"def permute(p, dims, perm):\n if issparse(p):\n return _permute_sparse(p, dims, perm)\n return _permute_dense(p, dims, perm)",
"def test_set_perms(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n perms1 = self.perms\n perms2 = set(['Perm1', 'Perm2'])\n perms3 = set(['Perm2', 'Perm3'])\n perms4 = []\n \n # grant single property\n set_group_perms(group0, perms1, object0)\n self.assertEqual(perms1, set(get_group_perms(group0, object0)))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group0, perms2, object0)\n self.assertEqual(perms2, set(get_group_perms(group0, object0)))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group0, perms3, object0)\n self.assertEqual(perms3, set(get_group_perms(group0, object0)))\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n # remove perms\n set_group_perms(group0, perms4, object0)\n self.assertEqual(perms4, get_group_perms(group0, object0))\n self.assertFalse(group0.TestModel_gperms.filter(obj=object0).exists())\n self.assertEqual([], get_group_perms(group0, object1))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group0, perms2, object1)\n self.assertEqual(perms4, get_group_perms(group0, object0))\n self.assertEqual(perms2, set(get_group_perms(group0, object1)))\n self.assertEqual([], get_group_perms(group1, object0))\n \n set_group_perms(group1, perms1, object0)\n self.assertEqual(perms4, get_group_perms(group0, object0))\n self.assertEqual(perms2, set(get_group_perms(group0, object1)))\n self.assertEqual(perms1, set(get_group_perms(group1, object0)))",
"def assertSetAllEqual(self, x, y):\n x = tf.expand_dims(x, axis=0)\n y = tf.expand_dims(y, axis=0)\n self.assertEqual(\n tf.size(tf.sets.difference(x, y)), 0, msg='Input sets are not equal.')\n self.assertEqual(\n tf.size(tf.sets.difference(y, x)), 0, msg='Input sets are not equal.')",
"def manual_perm_test(model: 'Fitted sklearn estimator',\n X: 'Pandas df',\n y: 'Pandas series',\n true_score: float,\n n_permutations: int=10000,\n plot: bool=True,\n clf: bool=False) -> 'p-value, null_counts':\n\n scores = [] # Empty list for null distribution scores\n n_perms = range(1, n_permutations, 1) # Range of values to permute\n for n in tqdm(n_perms, desc='Permutation test'): # tqdm for progress bar\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.90, random_state=n\n )\n model.fit(X_train, y_train)\n y_test_perm = np.random.permutation(y_test) # Permuting class labels\n chance_scores = round(model.score(X=X_test, y=y_test_perm), 4)\n scores.append(chance_scores)\n\n # Converting to a pandas dataframe\n perm_scores_df = pd.DataFrame(data=scores, columns=['null_dist'])\n perm_scores_df['null_dist'] *= 100\n null_counts = (\n perm_scores_df # Counts greater than or equal to our test set score\n .loc[(perm_scores_df['null_dist']) >= true_score]\n .count()\n .iloc[0]\n )\n p_value = (null_counts + 1) / (n_permutations + 1)\n p_value = np.round(p_value, decimals=5)\n\n if plot is True: # Plotting a histogram of permutation scores\n plt.figure(figsize=(10, 10))\n sns.distplot(a=perm_scores_df['null_dist'],\n hist=True,\n label='Permutation scores')\n ylim = plt.ylim()\n if clf is False:\n # True classifier score and p-value\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='R2 score %s (pvalue : %s)' %\n (true_score, p_value))\n else:\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='Multimodal AUC score: %s (pvalue = %s)' %\n (true_score, p_value))\n n_classes = np.unique(y).size\n chance = 2 * [100. / n_classes]\n plt.plot(chance,\n ylim,\n '--k',\n linewidth=3,\n label='Null model mean AUC score: %s' % 50.00)\n \n plt.ylim(ylim)\n plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.38))\n plt.tight_layout()\n\n if clf is False:\n plt.xlabel(xlabel='R2 Scores')\n else:\n plt.xlabel(xlabel='AUC Scores')\n plt.title(label='Null Distribution')\n plt.savefig('quadratic_null_dist.png', dpi=300, bbox_inches='tight')\n plt.show()\n\n return p_value, null_counts",
"def test_random_permute_inverse_is_random(self):\n # reproducible arbitrariness\n np.random.seed(2325)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n perm1 = np.copy(controller.permute_inverse)\n\n controller.set_random_permute_inverse(rho)\n perm2 = controller.permute_inverse\n\n self.assertNotEqual(np.sum(perm1 == perm2), self.N)",
"def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size)\r\n #print \"NDIM\", ndim, size\r\n op = RandomFunction(permutation_helper,\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),\r\n ndim_added=1)\r\n return op(random_state, size, n)",
"def permute(x, in_shape='BCD', out_shape='BCD', **kw):\n if (in_shape == out_shape) or (out_shape is None):\n return x\n if isinstance(out_shape, (list, tuple, torch.Size)):\n return x.permute(*out_shape)\n if isinstance(in_shape, str) and isinstance(out_shape, str) :\n assert set(in_shape) == set(out_shape) <= {'B', 'C', 'D'}, 'In and out shapes must have save set of chars among B, C, and D.'\n in_shape = in_shape.lower().replace('d', '...')\n out_shape = out_shape.lower().replace('d', '...')\n return torch.einsum(f'{in_shape}->{out_shape}', x)\n return x",
"def testSameNumberOfOutputAndInputChannels(self, use_bias):\n\n input_channels = random.randint(1, 32)\n inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, input_channels])\n conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias)\n\n # Before conv1 is connected, we cannot know how many `output_channels`\n # conv1 should have.\n err = \"Variables in in_plane_conv2d not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv1.output_channels\n\n # After connection, should match `input_channels`.\n conv1(inputs)\n self.assertEqual(conv1.output_channels, input_channels)",
"def permutation(data, dataLabel=None, nperm=10000, decimals=4):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.permutation: data must be'\n + ' a dictionary with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n\n g1 = data[k[0]]\n g2 = data[k[1]]\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n\n combined = np.concatenate((g1, g2))\n diffobs = np.mean(g2)-np.mean(g1)\n diffs = np.zeros(nperm)\n nperm = nperm\n index = range(0, combined.shape[0])\n for i in range(nperm):\n # draw from combined data set without replacement\n #shuff = np.random.randint(combined.shape[0], size=combined.shape[0])\n shuff = np.random.permutation(index)\n ar = combined[shuff[0:len(g1)]]\n br = combined[shuff[len(g1):]]\n diffs[i] = np.mean(br) - np.mean(ar)\n pvalue = np.sum(np.abs(diffs) >= np.abs(diffobs)) / float(nperm)\n if dataLabel is not None:\n print ('\\n%s: Permutation Test (Nperm = %d)' % (dataLabel, nperm))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (Permutation test does not depend on distribution)')\n \n n = max([len(l) for l in k])\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[0].rjust(n), np.mean(g1), np.std(g1, ddof=1),\n len(g1), pc=decimals))\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[1].rjust(n), np.mean(g2), np.std(g2, ddof=1),\n len(g2), pc=decimals))\n summarizeData(data, decimals=decimals)\n # iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n # iqr2 = np.subtract(*np.percentile(g2, [75, 25]))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[0].rjust(n), np.median(g1), iqr1))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[1].rjust(n), np.median(g2), iqr2))\n print(u' Observed difference: {:8.4f}'.format(diffobs))\n print(u' p={:8.6f}, Nperm={:8d}\\n'.format(float(pvalue), int(nperm)))\n return(pvalue, nperm)",
"def test_permute_inverse(self):\n # reproducible arbitrariness\n np.random.seed(12321)\n\n nsteps = 20\n nchan = 3\n tmax = nsteps*self.dt\n sequence = np.random.randn(nsteps, self.N)\n\n permutation = np.arange(self.N)\n n1a = 3\n n1b = 5\n n2a = 13\n n2b = 4\n permutation[n1a], permutation[n1b] = (permutation[n1b], permutation[n1a])\n permutation[n2a], permutation[n2b] = (permutation[n2b], permutation[n2a])\n\n target = np.random.randn(nchan, nsteps)\n controller = LinearController(self.G, target, tau=None)\n controller.W = np.random.randn(*controller.W.shape)\n\n self.G.out_fct = lambda i: sequence[i]\n\n class SourceErrorGrabber(object):\n def __init__(self, target):\n self.target = target\n self.order = 10\n \n def prepare(self, tmax, dt):\n nsteps = int_r(tmax/dt)\n self.motor_error = np.zeros((nsteps, self.target.source.N))\n\n def evolve(self, t, dt):\n i = int_r(t/dt)\n self.motor_error[i, :] = self.target.get_source_error()\n\n ME1 = SourceErrorGrabber(controller)\n\n sim1 = simulation.Simulation(self.G, controller, ME1, dt=self.dt)\n sim1.run(tmax)\n\n controller.permute_inverse = permutation\n ME2 = SourceErrorGrabber(controller)\n\n sim2 = simulation.Simulation(self.G, controller, ME2, dt=self.dt)\n sim2.run(tmax)\n\n # test that the correct source error outputs have been swapped\n expected = np.copy(ME1.motor_error)\n expected[:, [n1a, n1b]] = expected[:, [n1b, n1a]]\n expected[:, [n2a, n2b]] = expected[:, [n2b, n2a]]\n\n self.assertAlmostEqual(np.mean(np.abs(expected - ME2.motor_error)), 0.0)",
"def lift_perm(p: Dict[int, int]) -> np.ndarray:\n n = len(p)\n pm = np.zeros((1 << n, 1 << n), dtype=complex)\n for i in range(1 << n):\n j = 0\n mask = 1 << n\n for q in range(n):\n mask >>= 1\n if (i & mask) != 0:\n j |= 1 << (n - 1 - p[q])\n pm[j][i] = 1\n return pm"
]
| [
"0.67414784",
"0.5697938",
"0.55126363",
"0.55059415",
"0.54638445",
"0.5372107",
"0.5241461",
"0.5231766",
"0.51561236",
"0.5102879",
"0.50862706",
"0.5055971",
"0.5043112",
"0.4973088",
"0.4970074",
"0.4965453",
"0.48989153",
"0.48948184",
"0.48939347",
"0.48901424",
"0.4887672",
"0.48789713",
"0.4875809",
"0.48730266",
"0.48573515",
"0.4834561",
"0.4833955",
"0.48175284",
"0.48152915",
"0.48083052"
]
| 0.78115654 | 0 |
Function predicts area value in an unknown location based on the area-to-area or area-to-point Poisson Kriging. | def predict(self, unknown_area_points, number_of_neighbours, max_search_radius):
prediction = self.k_func.predict(unknown_area_points,
number_of_neighbours,
max_search_radius)
return prediction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predictor(path):\n # get keypoints from the image in a DF\n TEST_keypoints = []\n path = cv2.cvtColor(path, cv2.COLOR_BGR2RGB)\n img = movenet_inference_flat_v10(hub_model, path)\n TEST_keypoints.append(img)\n TEST_keypoints_df = pd.DataFrame(TEST_keypoints)\n\n # Rename columns in the DataFrames according to the values\n columns = []\n for point in kp_descriptions:\n for value in ('y', 'x', 'score'):\n columns.append(f'{point}_{value}')\n\n TEST_keypoints_df.columns = columns\n \n # add additional positional features\n TEST_keypoints_df = add_pos_features(TEST_keypoints_df, drop_scores=True)\n # predict the asana\n prediction_existing = model_fl.predict(TEST_keypoints_df)\n # initialize the predicted_asana to 107 (no asan found)\n predicted_asana = 107\n\n # assign the precited asana if accuracy more than threshold (12.5%)\n for i in range(1):\n mx = 0\n mx_label = -1\n for j in range(107):\n if(prediction_existing[i, j] > mx):\n mx_label = j\n mx = prediction_existing[i, j]\n predicted_asana = mx_label\n predicted_accuracy = prediction_existing[0, mx_label]\n if(predicted_accuracy < 0.125):\n predicted_asana = 107\n\n # print(predicted_asana)\n \n # find label from the json\n a = inv_map[str(predicted_asana)]\n # b = \"null\"\n\n print(\"predicted pose --> \", a)\n print(\"confidence = \", predicted_accuracy)\n # print(\"actual pose -->\", b)\n return a, img",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n print(response)\n df=pd.read_csv(TRAIN_DATA_URL).T.reset_index()\n df.columns=['area','price']\n df=df.iloc[1:]\n df['area']=pd.to_numeric(df['area'],downcast='float')\n df['price']=pd.to_numeric(df['price'],downcast='float')\n X=df['area'].values\n Y=df['price'].values\n z=numpy.polyfit(X,Y,1)\n p=numpy.poly1d(z)\n predictions=p(area)\n return predictions",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n # YOUR IMPLEMENTATION HERE\n #print(response.content)\n d = pd.read_csv(TRAIN_DATA_URL, header = None)\n d_T = d.T\n #d_T = d_T[:].values()\n d_T.drop(d_T.index[1])\n #print(d_T)\n '''x_a = [row[0] for row in d]\n y_a = [row[1] for row in d]\n x_s = np.array(x_a[1:])\n y_s = np.array(y_a[1:])'''\n x_1 = d_T[0][1:]\n y_1 = d_T[1][1:]\n x_min = x_1.min()\n x_max = x_1.max()\n y_min = y_1.min()\n y_max = y_1.max()\n x = np.array((x_1-x_min)/(x_max-x_min))\n y = np.array((y_1-y_min)/(y_max-y_min))\n x_mean, y_mean = mean(x), mean(y)\n b1 = covariance(x, x_mean, y, y_mean/variance(x, x_mean))\n b0 = y_mean - b1*x_mean\n print(b0, b1)\n return np.array(b0+b1*area)",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n # YOUR IMPLEMENTATION HERE\n ...\n # print(response.content)\n x = str(response.content)\n # print(x)\n x = x.split('\\\\n')\n # print(len(x))\n # print(x[2])\n a = x[0].split(',')\n p = x[1].split(',')\n a = a[1:]\n p = p[1:]\n a = np.array([float(i) for i in a])\n p = np.array([float(i) for i in p])\n print(a.shape)\n clf = LR()\n clf.fit(a, p)\n\n ans = []\n for i in area:\n \tans.append(i)\n return ans\n # print(a)\n\n return 0",
"def get_area(area=None):\n # Set global vars that can be accessed outside of function for debuging\n global sorted_areas, chosen_area, mask\n # Create a dicionary where keys --> brain areas and values --> list of neurons\n sorted_areas = sort_by_area()\n # Choose area of interest\n chosen_area = sorted_areas[area]\n # Create a mask to keep only rows of interest\n mask = np.isin(spiketimes[:,0], chosen_area)\n # Fetch only desired spiketimes\n area_spikes = spiketimes[mask]\n # Find number of neurons\n N_neurons = len(chosen_area)\n \n return area_spikes",
"def prepare_prediction_data(self, unknown_areal_data_row, unknown_areal_data_centroid,\n weighted=False, verbose=False):\n\n areal_id = unknown_areal_data_centroid[0][-1]\n\n cx_cy = unknown_areal_data_centroid[0][:2]\n r = np.array([cx_cy])\n known_centroids = self.centroids_of_areal_data\n kc = known_centroids[:, :2]\n\n # Build set for Poisson Kriging\n\n if weighted:\n weighted_distances = self._calculate_weighted_distances(unknown_areal_data_row,\n areal_id)\n s = []\n for wd in weighted_distances:\n for k in known_centroids:\n if wd[1] in k:\n s.append(wd[0])\n break\n else:\n pass\n s = np.array(s).T\n\n kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, weighted_dist_to_unkn]\n else:\n distances_array = np.zeros(kc.shape)\n for i in range(0, r.shape[1]):\n distances_array[:, i] = (kc[:, i] - r[:, i]) ** 2\n s = distances_array.sum(axis=1)\n s = np.sqrt(s)\n s = s.T\n kriging_data = np.c_[known_centroids, s] # [coo_x, coo_y, val, id, dist_to_unkn]\n\n # remove nans\n kriging_data = kriging_data[~np.isnan(kriging_data).any(axis=1)]\n\n # sort by distance\n kriging_data = kriging_data[kriging_data[:, -1].argsort()]\n\n # set output by distance params\n\n # search radius\n\n max_search_pos = np.argmax(kriging_data[:, -1] > self.max_search_radius)\n output_data = kriging_data[:max_search_pos]\n\n # check number of observations\n\n if len(output_data) < self.min_no_of_observations:\n output_data = kriging_data[:self.min_no_of_observations]\n # TODO: info to the app logs\n # print('Dataset has been set based on the minimum number of observations')\n\n # set final dataset\n\n self.prepared_data = output_data\n if verbose:\n print('Predictions data prepared')",
"def mapk(y_pred, y, k=10):\n return np.mean([apk(a, p, k) for a, p in zip(y, y_pred)])",
"def predict(self):\n if((self.kalman_filter.x[6]+self.kalman_filter.x[2])<=0):\n self.kalman_filter.x[6] *= 0.0\n \n # Predecimos\n print(\"[KALMAN-INFO] Before prediction on tracker {}: {}\".format(self.id, self.get_state()))\n self.kalman_filter.predict()\n print(\"[KALMAN-INFO] After prediction: on tracker {}: {}\".format(self.id, self.get_state()))\n\n # Obtenemos el nuevo estado\n bbox = self.get_state().astype(int)\n \n # Calculate centroid of bounding box\n centroid_x = int((bbox[0] + bbox[2]) / 2)\n centroid_y = int((bbox[1] + bbox[3]) / 2)\n\n self.time_since_update += 1\n print(\"[KALMAN-INFO] New time since last update: {}\".format(self.time_since_update))\n self.last_centroid = np.array([centroid_x, centroid_y], dtype=int)",
"def pick_area(data ,total_process, interval ,list_of_vars, list_of_areas, init_time=0, pr_height=None, ):\n \n \n \n #trying if the longitude values change from 0 to 360 or -180 to 180?\n \n if data['lon'].values[0] < 0:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [-142,-42,0,60],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [-100,-75,18,31],\n 'carribeans' : [-85,-60,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [-180, 180 ,0,90]}\n \n # -180 to 180 change the values given in the dictionary to relevant\n else:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [218,318,-10,70],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [260,285,14,37],\n 'carribeans' : [275,300,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [0, 360 ,0,90]}\n \n \n \n places_dict = {}\n #looping in the list of areas\n say_pl = 1\n for pl in list_of_areas:\n variables_l = {}\n #looping in the list of variables\n say_var =1\n for var in list_of_vars:\n #check if data contains 'lev' coords.\n try:\n \n #wrap the data\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]), \n lev=pr_height).isel(time=slice(init_time, total_process, interval))\n \n #if no 'lev' coords exist.\n except:\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]),).isel(time=slice(init_time, total_process, interval))\n \n #append a single variable given by the user\n variables_l[var] = single\n \n \n #append all the variables with respect to their area of interest.\n places_dict[pl] = variables_l\n \n #return\n return places_dict",
"def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observation[i]\n\t\t\t\tpossiblity *= self.gaussians_calc(value, mean, std)\n\t\t\tif p_max < possiblity:\n\t\t\t\tp_max = possiblity\n\t\t\t\tpredict = label\n\n\t\treturn predict",
"def _handle_amcl_pose(self, data):\n cov = data.pose.covariance\n if cov != None and len(data.pose.covariance) == 36:\n try:\n cov = np.reshape(cov,(6,6))\n if cov.shape[0] == 6 and cov.shape[1] == 6:\n a, b, _ = self._calc_ellipse(cov)\n\n self.area_ellips = a * b * math.pi\n else:\n rospy.loginfo(\"shape wrong\")\n except:\n rospy.loginfo(\"covariance exception\")\n else:\n rospy.logerr(\"wrong length of array\")",
"def mapk(actual, predicted, k):\n return round(np.mean([apk(a,p,k) for a,p in zip(actual, predicted)]), 4) * 100",
"def create_area_entery(conn,var, case, model, type_avg,area, keys,var_entery, avg_over_lev=False, pressure_coords='',\n to_lev='', at_lev=''):\n var_info= fetch_var_case(conn, model, case, var)\n lev_is_dim = bool(var_info['lev_is_dim'].values)\n id_name = make_area_mean_id(area, type_avg, var, case, model, bool(lev_is_dim), pressure_coords=bool(pressure_coords),\n to_lev=to_lev, at_lev=at_lev, avg_over_lev=bool(avg_over_lev))\n dict = {'var':var, 'case_name':case,'model':model, 'type_avg':type_avg,'model_case': model+' '+case,\n 'case_var':case+' '+var,'area':area}\n if pressure_coords!='':#isinstance(pressure_coords, bool):\n dict['pressure_coords'] = boolstr2int(pressure_coords)\n if bool(lev_is_dim):\n if isinstance(avg_over_lev, bool) or isinstance(avg_over_lev, int):\n dict['avg_over_lev'] = int(avg_over_lev)\n if isinstance(to_lev, float) and avg_over_lev:\n dict['to_lev'] = to_lev\n if isinstance(at_lev, float) and not avg_over_lev:\n dict['at_lev'] = at_lev\n\n keys = list(keys)\n var_entery= list(var_entery)\n for key in dict.keys():\n if key not in keys:\n keys.append(key)\n var_entery.append(dict[key])\n keys.append('var_case_model_avgtype_pressure_coords_to_lev')\n var_entery.append(id_name)\n key_str = '('\n val_str = '('#%s, '%id_name\n for i in np.arange(len(keys)):\n key_str= key_str+ keys[i] +', '\n val_str = val_str + '?,'\n key_str=key_str[:-2]+')'\n val_str= val_str[:-1]+')'\n\n sql = ''' INSERT INTO Area_means %s \n VALUES%s'''%(key_str, val_str)\n #print(sql)\n try:\n cur = conn.cursor()\n cur.execute(sql, tuple(var_entery))\n return cur.lastrowid\n except Error as e:\n #print(e)\n #print('Tries updating')\n out = update_area_table_entery(conn, keys, var_entery, id_name)\n return out#update_area_entery(conn, model,case,var,keys, var_entery)\n except Error as e:\n print(e)\n return",
"def define_areas(\n pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]",
"def mapk(y_true, y_pred, k):\n \n # initialize empty list for apk values \n apk_values = []\n \n # loop over all samples\n for i in range(len(y_true)):\n # store apk values for every sample\n apk_values.append(\n apk(y_true[i], y_pred[i], k=k)\n )\n \n # return mean of apk values list\n return sum(apk_values) / len(apk_values)",
"def ellipse_area(preds, target):\n # unpack target = (target, mask)\n target, mask = target\n if preds.size(2) != 4:\n raise ValueError('Prediction must be 4-dimensional (x, y, r1, r2), '\n f'but got preds.shape[2] = {preds.size(2)}')\n areas = preds[:, :, 2] * preds[:, :, 3] * math.pi\n if mask is not None:\n return areas.masked_select(mask).mean().float()\n return areas.mean().float()",
"def predict(self, t_in, zone, action, time=-1, outside_temperature=None, interval=None):\n X = self._datapoint_to_dataframe(action, t_in) # TODO which t_in are we really assuming?\n return super(AverageMPCThermalModel, self).predict(X)",
"def sensor_model(particle_poses, beacon_pose, beacon_loc):\n \n \n\n M = particle_poses.shape[0]\n particle_weights = np.zeros(M)\n \n # TODO. For each particle calculate its weight based on its pose,\n # the relative beacon pose, and the beacon location.\n\n\n if sensor_model_on:\n #becon conditioning\n #camera_to_robot = (0.1, 0.1, 0)\n becon_pose_robot = transform_pose(camera_to_robot, beacon_pose )\n\n #print(beacon_pose)\n #print(becon_pose_robot)\n \n #liekelyhood functions\n becon_range = np.sqrt((becon_pose_robot[0])**2 + (becon_pose_robot[1])**2)\n becon_angle = becon_pose_robot[2]\n\n #print(becon_range)\n #print(becon_angle * 180 / np.pi)\n \n \n for m in range(M):\n\n if sensor_model_on:\n \n x_b = beacon_loc[0]\n y_b = beacon_loc[1]\n x_p = particle_poses[m][0] #particle position in map frame\n y_p = particle_poses[m][1]\n theta_p = particle_poses[m][2]\n \n \n range_p2b = np.sqrt((x_b - x_p)**2 + (y_b - y_p)**2) #range from particle to becon\n b_angle_map = arctan2((y_b - y_p), (x_b-x_p))\n\n \n angle_p2b = angle_difference(theta_p, b_angle_map)\n\n rangeerror = gauss(becon_range - range_p2b, 0, sigma_r)\n angleerror = gauss(becon_angle - angle_p2b, 0, sigma_theta)\n \n particle_weights[m] = rangeerror * angleerror\n\n #print(rangeerror, angleerror)\n\n else:\n particle_weights[m] = 1\n\n return particle_weights",
"def predict_proba(self):\n ...",
"def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r",
"def get_prediction(self, datapoints, employee, location, device, next_risk):\n raise NotImplementedError",
"def predict(self, X):\n # ! You could simply call the hypothesis here\n predictions= self._hypothesis(X)\n #empty_predictions = np.zeros((1,X.shape[1]))\n return predictions",
"def get_interest_map(far):\n\n # --- horizontal locations on 5 meter high in world coordinate\n height = -3.5\n x = np.arange(-4, 12, 1)\n x = x.reshape((-1, 1))\n high_horizon = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n # --- {3, 7, 11} meters right and 2.5 meter high in world coordinate\n height = -1.\n x = np.arange(3, 12, 4)\n x = x.reshape((-1, 1))\n right_candidate = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n p_world = np.concatenate([high_horizon, right_candidate], 0)\n p_img = project_pts3_to_image(p_world, K)\n\n # --- if close, search for top region in image coordinate\n if far < 8:\n x = np.arange(600, 1280, 50)\n x = x.reshape((-1, 1))\n y = 5\n close = np.concatenate([x, np.ones_like(x) * y], 1)\n p_img = np.concatenate([p_img, close], 0)\n\n # --- consider only locations in image\n ll = np.array([0, 0]) # lower-left\n ur = np.array([img_width, img_height]) # upper-right\n inidx = np.all(np.logical_and(ll <= p_img, p_img <= ur), axis=1)\n inbox = p_img[inidx]\n inbox = inbox.astype(np.int)\n\n interest = np.zeros((img_height, img_width))\n interest[inbox[:, 1], inbox[:, 0]] = 1\n interest = scipy.ndimage.morphology.distance_transform_edt(interest-1)\n interest = np.exp(-interest / 30**2)\n interest = (interest - np.min(interest)) / (np.max(interest) - np.min(interest))\n return interest",
"def fit_peak_az_and_el(self,data):\n\n az = data['level1/spectrometer/pixel_pointing/pixel_az'][0,:]\n el = data['level1/spectrometer/pixel_pointing/pixel_el'][0,:]\n tod_model = self.model.func(self.avg_map_fits['Values'][:], (az,el))\n imax = np.argmax(tod_model)\n az_max = az[imax]\n el_max = el[imax]\n self.az_el_peak = {'AZ_PEAK': np.array([az_max]),\n 'EL_PEAK': np.array([el_max])}",
"def __init__(self, location, threshold_zenith_angle, area, total_exposure, kappa_c, label):\n\n self.label = label\n \n self.location = location\n \n self.threshold_zenith_angle = Angle(threshold_zenith_angle, 'rad')\n\n self._view_options = ['map', 'decplot']\n\n self.kappa_c = kappa_c\n \n self.num_points = 500\n\n self.params = [np.cos(self.location.lat.rad),\n np.sin(self.location.lat.rad),\n np.cos(self.threshold_zenith_angle.rad)]\n\n self.exposure()\n\n self.area = area\n\n self.alpha_T = total_exposure\n \n self.M, err = integrate.quad(m_integrand, 0, np.pi, args = self.params)\n \n self.params.append(self.alpha_T)\n self.params.append(self.M)",
"def predict(self, predPoints=None):",
"def area(self):\n raise Exception('area() is not implemented')",
"def area(self):\n raise Exception('area() is not implemented')",
"def area(self):\n raise NotImplementedError",
"def anomaly():\n\n #Load anomaly dataset\n anomaly_data = LoadDataset(\"dataset/kaggle_anomalies/\",0)\n anomaly_data, anomaly_label, val, val_label = anomaly_data.load_data()\n for i in range (len(anomaly_label)):\n anomaly_label[i] = anomaly_label[i] + 5\n\n #Concatinate test and anomaly\n test_anomaly_data = np.vstack((test_data,anomaly_data))\n test_anomaly_label = np.hstack((test_label, anomaly_label))\n\n \"\"\"# Get k-means cluster distance\n cluster_model = KMeansClustering()\n cluster_model.train(encoded_train,None)\n cluster_dist = cluster_model.transform(encoded_test_anomaly)\n\n correct = 0\n wrong = 0\n total = 0\n for i in range(len(cluster_dist)):\n min_distance = np.amin(cluster_dist[i])\n if(min_distance > 4):\n if(test_anomaly_label[i] > 4):\n correct = correct +1\n else:\n wrong = wrong +1\n\n print(\"Dist \",min_distance,\" True label \", test_anomaly_label[i])\n print(\"Found \",correct,\" anomalies and \",wrong,\" wrong\")\n\n decoded = auto.predict(test_anomaly_data)\n errors = []\n # loop over all original images and their corresponding\n # reconstructions\n for (image, recon) in zip(test_anomaly_data, decoded):\n \t# compute the mean squared error between the ground-truth image\n \t# and the reconstructed image, then add it to our list of errors\n \tmse = np.mean((image - recon) ** 2)\n \terrors.append(mse)\n # compute the q-th quantile of the errors which serves as our\n # threshold to identify anomalies -- any data point that our model\n # reconstructed with > threshold error will be marked as an outlier\n thresh = np.quantile(errors, 0.4)\n idxs = np.where(np.array(errors) >= thresh)[0]\n print(\"[INFO] mse threshold: {}\".format(thresh))\n print(\"[INFO] {} outliers found\".format(len(idxs)))\n correct = 0\n wrong = 0\n for i in idxs:\n if(test_anomaly_label[i] > 4):\n correct = correct +1\n else:\n wrong = wrong +1\n print(\"Found \",correct,\" anomalies and \",wrong,\" wrong\")\n\n ds = np.zeros(len(test_anomaly_data))\n for i in idxs:\n ds[i] = 1\n tsne(enc, test_anomaly_data,ds,\"anomaly_plot\",\"anomaly_plot\")\"\"\""
]
| [
"0.5723355",
"0.5665625",
"0.5470485",
"0.54139686",
"0.5410499",
"0.53469545",
"0.5344502",
"0.5193771",
"0.51919246",
"0.5158969",
"0.51501316",
"0.5096641",
"0.50602984",
"0.5057834",
"0.5034465",
"0.5033032",
"0.5012514",
"0.4920955",
"0.4918968",
"0.49158666",
"0.49049526",
"0.49048597",
"0.49017417",
"0.4899119",
"0.4896051",
"0.48624843",
"0.48166487",
"0.48166487",
"0.48149142",
"0.48103002"
]
| 0.6549179 | 0 |
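The row above is a thin `predict` wrapper that delegates to an underlying Poisson Kriging object (`self.k_func`). A runnable sketch of that call shape, with a mock standing in for the real kriging function; all class names, argument values, and return values here are illustrative placeholders, not the library's real API:

```python
import numpy as np

class MockKrigingFunc:
    """Stand-in for the wrapped kriging object (`self.k_func` in the row above)."""
    def predict(self, unknown_area_points, number_of_neighbours, max_search_radius):
        # Dummy (estimate, error) pair, only to show the call shape.
        return np.array([100.0, 5.0])

class ArealPrediction:
    """Hypothetical holder mirroring the wrapper shown in the row above."""
    def __init__(self, k_func):
        self.k_func = k_func

    def predict(self, unknown_area_points, number_of_neighbours, max_search_radius):
        # Same delegation pattern as the row above.
        return self.k_func.predict(unknown_area_points, number_of_neighbours, max_search_radius)

model = ArealPrediction(MockKrigingFunc())
print(model.predict(np.zeros((3, 3)), number_of_neighbours=8, max_search_radius=20_000))
```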
Function regularizes the whole dataset and creates new values and error maps based on the kriging type. If the chosen type is area-to-area, the function returns a GeoPandas GeoDataFrame with area id, areal geometry, estimated value, estimated prediction error, and RMSE of prediction. If the chosen type is area-to-point, the function returns a GeoPandas GeoDataFrame with area id, point coordinates, estimated value, estimated prediction error, and RMSE of areal prediction. The function does not predict unknown values; areas with NaN are skipped. | def regularize_data(self, number_of_neighbours, max_search_radius, data_crs="EPSG:4326"):
areas_ids = self.areal_data_known[:, 0]
list_of_vals = []
for a_id in areas_ids:
prediction_rows = self._get_prediction_row(a_id, number_of_neighbours, max_search_radius)
# Add id and geometry into a list
if self.ktype == 'ata':
geometry = self.areal_data_known[self.areal_data_known[:, 0] == a_id]
geometry = geometry[0][1]
data_row = [a_id, geometry, prediction_rows[0], prediction_rows[1], prediction_rows[2]]
list_of_vals.append(data_row)
else:
for val in prediction_rows:
xy = Point(val[0])
list_of_vals.append([a_id, xy, val[1], val[2], val[3]])
# Transform array into a dataframe
gdf = gpd.GeoDataFrame(list_of_vals)
gdf.columns = ['id', 'geometry', 'reg.est', 'reg.err', 'rmse']
gdf.crs = data_crs
return gdf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_regression_map(df, only_coal=True):\n regression_map = pd.DataFrame(columns=[\"Fuel\", \"slope\", \"intercept\", \"min_val\", \"max_val\"])\n if only_coal:\n df = df[df.fuel_type == \"coal\"]\n for fuel in df.fuel_type.unique():\n temp_df = df[df[\"fuel_type\"] == fuel].copy()\n regression_hr = heat_rate_regression(temp_df, [\"heat_rate\"], \"delta_heatrate\")\n map_dict = {}\n map_dict[\"Fuel\"] = fuel\n map_dict[\"slope\"] = regression_hr.coef_[0]\n map_dict[\"intercept\"] = regression_hr.intercept_\n map_dict[\"min_val\"] = temp_df[\"heat_rate\"].min()\n map_dict[\"max_val\"] = temp_df[\"heat_rate\"].max()\n regression_map = regression_map.append(map_dict, ignore_index=True)\n return regression_map",
"def _map_raw_data_to_standard(self):\n data_df = self._raw_data.copy()\n\n # if only 1 RT col, split into 2\n if self._map_cols['goRT'] == self._map_cols['stopRT']:\n data_df[self._standards['columns']['goRT']] = np.where(\n data_df[self._map_cols['condition']] == self._map_codes['go'],\n data_df[self._map_cols['goRT']],\n None)\n data_df[self._standards['columns']['stopRT']] = np.where(\n data_df[self._map_cols['condition']] ==\n self._map_codes['stop'],\n data_df[self._map_cols['stopRT']],\n None)\n del data_df[self._map_cols['goRT']]\n else:\n data_df.loc[\n data_df[self._map_cols['condition']] !=\n self._map_codes['go'],\n self._map_cols['goRT']] = None\n data_df.loc[\n data_df[self._map_cols['condition']] !=\n self._map_codes['stop'],\n self._map_cols['stopRT']] = None\n\n # drop SSDs of non-stop Trials\n data_df.loc[\n data_df[self._map_cols['condition']] != self._map_codes['stop'],\n self._map_cols['SSD']] = None\n\n # add block column if not present\n if self._map_cols['block'] not in data_df.columns:\n data_df[self._map_cols['block']] = 1\n\n # recompute choice accuracy if missing / flagged\n if (self._map_cols['choice_accuracy'] not in self._raw_data.columns) |\\\n self._compute_acc_col:\n corr_code = self._map_codes['correct']\n incorr_code = self._map_codes['incorrect']\n data_df[self._map_cols['choice_accuracy']] = np.where(\n data_df[self._map_cols['response']] == data_df[\n self._map_cols['correct_response']],\n corr_code,\n incorr_code)\n\n # map columns, key codes to standard\n rename_column_dict = {self._map_cols[col]: self._standards['columns']\n [col] for col in self._map_cols.keys()}\n data_df = data_df.rename(columns=rename_column_dict)\n\n # map key codes to various columns\n condition_map = {\n self._map_codes['go']: self._standards['key_codes']['go'],\n self._map_codes['stop']: self._standards['key_codes']['stop'],\n }\n acc_map = {\n self._map_codes['correct']: self._standards['key_codes']['correct'],\n self._map_codes['incorrect']: self._standards['key_codes']['incorrect'],\n }\n no_response_map = {\n self._map_codes['noResponse']: self._standards['key_codes']['noResponse']\n }\n cols_n_maps = [(self._standards['columns']['condition'], condition_map),\n (self._standards['columns']['choice_accuracy'], acc_map),\n (self._standards['columns']['goRT'], no_response_map),\n (self._standards['columns']['stopRT'], no_response_map)]\n for col, map_dict in cols_n_maps:\n data_df[col] = data_df[col].map(lambda x: map_dict.get(x,x))\n\n assert self._is_preprocessed(data_df)\n self._transformed_data = data_df",
"def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]",
"def rasterize_geometry(\n self,\n gdf: gpd.GeoDataFrame,\n method: Optional[str] = \"fraction\",\n mask_name: Optional[str] = None,\n name: Optional[str] = None,\n nodata: Optional[Union[int, float]] = -1,\n keep_geom_type: Optional[bool] = False,\n ) -> xr.DataArray:\n ds_like = self._obj.copy()\n # Create vector grid (for calculating fraction and storage per grid cell)\n logger.debug(\n \"Creating vector grid for calculating coverage fraction per grid cell\"\n )\n gdf[\"geometry\"] = gdf.geometry.buffer(0) # fix potential geometry errors\n gdf_grid_all = ds_like.raster.vector_grid()\n if mask_name is None:\n gdf_grid = gdf_grid_all\n else:\n msktn = ds_like[mask_name]\n idx_valid = np.where(msktn.values.flatten() != msktn.raster.nodata)[0]\n gdf_grid = gdf_grid_all.loc[idx_valid]\n\n # intersect the gdf data with the grid\n gdf = gdf.to_crs(gdf_grid.crs)\n gdf_intersect = gdf.overlay(\n gdf_grid, how=\"intersection\", keep_geom_type=keep_geom_type\n )\n\n # find the best UTM CRS for area computation\n if gdf_intersect.crs.is_geographic:\n crs_utm = gis_utils.parse_crs(\n \"utm\", gdf_intersect.to_crs(4326).total_bounds\n )\n else:\n crs_utm = gdf_intersect.crs\n\n # compute area using same crs for frac\n gdf_intersect = gdf_intersect.to_crs(crs_utm)\n gdf_intersect[\"area\"] = gdf_intersect.area\n # convert to point (easier for stats)\n gdf_intersect[\"geometry\"] = gdf_intersect.representative_point()\n\n # Rasterize area column with sum\n da_area = ds_like.raster.rasterize(\n gdf_intersect,\n col_name=\"area\",\n nodata=0,\n all_touched=True,\n merge_alg=MergeAlg.add,\n )\n\n if method == \"area\":\n da_out = da_area\n else: # fraction\n # Mask grid cells that actually do intersect with the geometry\n idx_area = np.where(da_area.values.flatten() != da_area.raster.nodata)[0]\n gdf_grid = gdf_grid_all.loc[idx_area]\n # Convert to frac using gdf grid in same crs\n # (area error when using ds_like.raster.area_grid)\n gdf_grid = gdf_grid.to_crs(crs_utm)\n gdf_grid[\"area\"] = gdf_grid.area\n da_gridarea = ds_like.raster.rasterize(\n gdf_grid, col_name=\"area\", nodata=0, all_touched=False\n )\n\n da_out = da_area / da_gridarea\n # As not all da_gridarea were computed, cover with zeros\n da_out = da_out.fillna(0)\n da_out.name = \"fraction\"\n\n da_out.raster.set_crs(ds_like.raster.crs)\n da_out.raster.set_nodata(nodata)\n # Rename da_area\n if name is not None:\n da_out.name = name\n\n return da_out",
"def read_all(return_type = 'np', scaling = 'None',\n remove_GrLivArea_outliers = True,\n normal_sales_only = True,\n feature_subset = 'all'):\n data = pd.read_csv(basepath + '/Ames_Housing/train.csv')\n # Postprocessing\n if remove_GrLivArea_outliers:\n # See remark in the top\n data = data[data['GrLivArea'] < 4000]\n if normal_sales_only:\n data = data[data['SaleCondition'] == 'Normal']\n if feature_subset == 'numerical':\n data = data[__idx_numerical_features__]\n elif feature_subset == 'intuitive':\n data = data[__idx_intuitive_features__]\n cols = data.columns.tolist()\n if scaling == 'MinMax':\n minmaxscaler = MinMaxScaler(feature_range=(-1, 1))\n data[cols[:-1]] = minmaxscaler.fit_transform(data[cols[:-1]])\n elif scaling == 'MeanVar':\n data[cols[:-1]] = scale(data[cols[:-1]])\n if return_type == 'np':\n return data.values\n else:\n raise RuntimeError(\"Choose return_type = 'np' to read data.\")",
"def dict_type_area(self, workspace_unique_id=None, subset_unique_id=None, type_area=None, request=None):\n \n workspace_object = self._get_workspace_object(unique_id=workspace_unique_id) \n subset_object = workspace_object.get_subset_object(subset_unique_id) \n if not subset_object:\n self._logger.warning('Could not find subset object {}. Subset is probably not loaded.'.format(subset_unique_id))\n return {\"label\": \"\",\n \"value\": \"\",\n \"type\": \"\",\n \"status\": \"\", \n \"active\": False, \n \"children\": []}\n \n if request: \n # Add active water bodies to include filter. \n # Set the data filter for water body here instead of adding one by one in dict_water_body \n active_water_bodies = self._get_active_values_in_list_with_dicts(request['children'])\n subset_object.set_data_filter(step='step_1', \n filter_type='include_list', \n filter_name='WATER_BODY', \n data=active_water_bodies, \n append_items=True)\n return request\n else:\n active = False \n data_filter_object = subset_object.get_data_filter_object('step_1')\n water_body_active_list = data_filter_object.get_include_list_filter('WATER_BODY')\n water_body_mapping = self.mapping_objects['water_body']\n \n type_area_active_list = water_body_mapping.get_list('type_area', water_body=water_body_active_list)\n \n if type_area in type_area_active_list:\n active = True\n \n return_dict = {\"label\": water_body_mapping.get_display_name(type_area=type_area),\n \"value\": type_area,\n \"type\": \"type_area\",\n \"status\": \"selectable\", \n \"active\": active, \n \"children\": []}\n \n children_list = []\n for water_body in water_body_mapping.get_list('water_body', type_area=type_area):\n children_list.append(self.dict_water_body(workspace_unique_id=workspace_unique_id, \n subset_unique_id=subset_unique_id, \n water_body=water_body, \n request=request)) \n # request not active here...\n return_dict['children'] = children_list \n \n return return_dict",
"def pick_area(data ,total_process, interval ,list_of_vars, list_of_areas, init_time=0, pr_height=None, ):\n \n \n \n #trying if the longitude values change from 0 to 360 or -180 to 180?\n \n if data['lon'].values[0] < 0:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [-142,-42,0,60],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [-100,-75,18,31],\n 'carribeans' : [-85,-60,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [-180, 180 ,0,90]}\n \n # -180 to 180 change the values given in the dictionary to relevant\n else:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [218,318,-10,70],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [260,285,14,37],\n 'carribeans' : [275,300,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [0, 360 ,0,90]}\n \n \n \n places_dict = {}\n #looping in the list of areas\n say_pl = 1\n for pl in list_of_areas:\n variables_l = {}\n #looping in the list of variables\n say_var =1\n for var in list_of_vars:\n #check if data contains 'lev' coords.\n try:\n \n #wrap the data\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]), \n lev=pr_height).isel(time=slice(init_time, total_process, interval))\n \n #if no 'lev' coords exist.\n except:\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]),).isel(time=slice(init_time, total_process, interval))\n \n #append a single variable given by the user\n variables_l[var] = single\n \n \n #append all the variables with respect to their area of interest.\n places_dict[pl] = variables_l\n \n #return\n return places_dict",
"def grid_data(self, method='mean'): \n \n # Add a few extra variables in that I print out each month as I can then\n # compare these with the IDL output to see if there are discrepancies in\n # the number of profiles being used:\n rej_profiles = 0.\n nWOD9 = 0.\n nGTSPP0 = 0.\n nGTSPP999 = 0.\n index = np.where(self.qc == False)\n print('No. of rejected temperature values', np.shape(index))\n \n # Make the salinity values missing where the salinity QC flag is bad:\n fv = self.z.fill_value\n \n # Having set fv to be the general fill_value, check that this is the \n # recognised fill value for the other variables that are assessed by it\n # and raise a ValueError if not:\n if not self.data.fill_value == fv:\n raise ValueError('Incompatible fill value between depth and data')\n \n if not self.ps.fill_value == fv:\n raise ValueError('Incompatible fill value between depth and psal.')\n \n # Now make the salinity values missing where the QC flag is bad:\n self.ps[np.where(self.psalqc == False)] = fv\n index = np.where(self.psalqc == False)\n print('No. of rejected salinity values', np.shape(index))\n\n # Restrict to profiles with good posqc:\n self.data = self.data[self.posqc] \n self.x = self.x[self.posqc]\n self.y = self.y[self.posqc]\n self.p = self.p[self.posqc]\n self.pn = self.pn[self.posqc]\n self.ir = self.ir[self.posqc]\n self.ps = self.ps[self.posqc]\n self.z = self.z[self.posqc]\n self.qc = self.qc[self.posqc]\n self.posqc = self.posqc[self.posqc]\n \n # Store the maximum depth (with a good quality flag) of each profile:\n zvar1 = np.ma.masked_where(self.qc == False, self.z)\n maxdepth = np.amax(zvar1, axis = 1)\n # Get only the profiles that aren't entirely bad:\n self.data = self.data[maxdepth.mask == False]\n self.x = self.x[maxdepth.mask == False]\n self.y = self.y[maxdepth.mask == False]\n self.p = self.p[maxdepth.mask == False]\n self.pn = self.pn[maxdepth.mask == False]\n self.ir = self.ir[maxdepth.mask == False]\n self.ps = self.ps[maxdepth.mask == False]\n self.z = self.z[maxdepth.mask == False]\n self.qc = self.qc[maxdepth.mask == False]\n self.posqc = self.posqc[maxdepth.mask == False] \n maxdepth = maxdepth[maxdepth.mask == False]\n \n # I could thin to the second element of zbounds as if profiles don't go\n # that deep then I won't be able to use them (this would be true even if\n # the first element of zbounds wasn't zero) - it also removes the need \n # for an OHC dep value in my configuration file:\n self.data = self.data[maxdepth >= self.zbounds[1]]\n self.x = self.x[maxdepth >= self.zbounds[1]]\n self.y = self.y[maxdepth >= self.zbounds[1]]\n self.p = self.p[maxdepth >= self.zbounds[1]]\n self.pn = self.pn[maxdepth >= self.zbounds[1]]\n self.ir = self.ir[maxdepth >= self.zbounds[1]]\n self.ps = self.ps[maxdepth >= self.zbounds[1]]\n self.z = self.z[maxdepth >= self.zbounds[1]]\n self.qc = self.qc[maxdepth >= self.zbounds[1]]\n self.posqc = self.posqc[maxdepth >= self.zbounds[1]]\n year = int(self.fname[-9:-5])\n \n # Filter out low quality XBTs:\n rem = []\n for p in range(len(self.p)):\n xbt = inst_type.is_xbt(self.pn[p], self.ir[p], self.ps[p], fv, \n self.z[p], fv)\n if xbt[0][0] >= 0:\n # Remove any XBTs sourced from WOD where the fall rate equation\n # is unknown:\n if xbt[3] == 9:\n rem.append(p)\n nWOD9 += 1\n # Remove any GTSPP XBTs where the type is unknown and year is \n # >= 1995. \n # Or if type is unknown and it may not be a T4/T6/T7/DB because \n # the depth it reaches is too deep. 
Some of these will have been\n # given the Hanawa correction and so be inaccurate - this \n # happens in EN processing if probe code is zero:\n projectName = ''.join(self.pn[p])\n if projectName[0:5] == 'GTSPP':\n if (xbt[4] == 0 or xbt[4] == 99 or xbt[4] == 999) and year >= 1995:\n rem.append(p)\n if xbt[4] == 0:\n nGTSPP0 += 1\n else:\n nGTSPP999 += 1\n if (xbt[4] == 0 and xbt[1] > 900):\n rem.append(p)\n \n # Get rid of the low quality XBTs:\n nolowxbt = np.array(np.setdiff1d(range(len(self.p)),rem))\n self.data = self.data[nolowxbt]\n self.x = self.x[nolowxbt]\n self.y = self.y[nolowxbt]\n self.p = self.p[nolowxbt]\n self.pn = self.pn[nolowxbt]\n self.ps = self.ps[nolowxbt]\n self.ir = self.ir[nolowxbt]\n self.z = self.z[nolowxbt]\n self.qc = self.qc[nolowxbt]\n self.posqc = self.posqc[nolowxbt]\n \n # Do the vertical averaging:\n self.p = np.array(range(len(self.p)))\n \n # Check if the maxgap really is fixed:\n print('Fixed maxgap? ', self.fixedgap == True)\n \n # Loop over these profiles and do vertical averages:\n # Storage vectors need to start off filled with fill values otherwise\n # you get zero values being included in the averages instead of being\n # discarded. Make these storage matrices, not vectors so you can store \n # avergae values for multiple depth levels in them.\n all_mT = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_mT.fill(fv)\n all_lT = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_lT.fill(fv)\n all_dep = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_dep.fill(fv)\n all_x = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_x.fill(fv)\n all_y = np.zeros((len(self.p),np.shape(self.zbounds)[0]-1))\n all_y.fill(fv)\n for p in range(len(self.p)):\n # 1. Select the profile of interest and make sure no tar_t1 values\n # are carried over from a previous profile:\n x_p = self.x[p]\n y_p = self.y[p]\n qc_p = np.where(np.logical_and(self.qc[p] == True, self.z[p].mask == False))\n data_p = self.data[p][qc_p]\n z_p = self.z[p][qc_p].data\n tar_t1 = fv\n # 1a. Sanity check to make sure there are no missing data going into\n # the averaging process:\n tempanddeppres = np.where(np.logical_and(data_p != fv, z_p != fv))[0]\n data_p = data_p[tempanddeppres]\n z_p = z_p[tempanddeppres]\n # 2. Make sure that depths are in correct order, but sorting takes time\n # therefore only sort if I've identified non-ascending depths:\n if np.any((z_p[1:] - z_p[:-1]) < 0):\n sortz = np.argsort(z_p)\n data_p = data_p[sortz]\n z_p = z_p[sortz]\n # To be in line with the IDL code also need to check for very wrong \n # depth values that might have slipped through:\n udep = np.where(np.logical_and(z_p > -99.9, z_p < 10000))\n z_p = z_p[udep]\n data_p = data_p[udep]\n # 3. 
Find the temperature at the exact depth level - this is now \n # more in depth as there are multiple depth levels to look at - AT \n # THE MOMENT THIS STILL ASSUMES THAT THE FIRST LEVEL IS 0 TO SOME\n # DEPTH - THIS IS STILL A SLIGHT SIMPLIFICATION:\n dval = 0\n for dep in self.zbounds[1:]:\n # Have added in this switch to allow you to specify whether\n # you're using a fixed gap or not:\n if self.fixedgap == True:\n maxgap = 200\n else:\n maxgap = max(0.3*(self.zbounds[dval+1]),100)\n # Get only the levels of the profile in the depth range of \n # interest:\n LTi1 = np.where(np.logical_and(z_p < dep, \n z_p >= self.zbounds[dval]))\n GEi1 = np.where(z_p >= dep)\n if (np.shape(LTi1)[1] != 0 and np.shape(GEi1)[1] != 0):\n # Get the depth differences between layers and the mean temps across\n # layers:\n nk = np.shape(LTi1)[1] + 1\n dz = np.zeros(nk)\n mt = np.zeros(nk)\n if dval == 0:\n dz[0] = z_p[0]\n mt[0] = data_p[0]\n for kk in range(1, nk):\n dz[kk] = z_p[kk] - z_p[kk-1]\n mt[kk] = 0.5 * (data_p[kk] + data_p[kk-1])\n else:\n # Effectively missing the first layer as dz will be zero\n # there if you've calculated a temperature at that depth\n # for tar_t1 on the previous loop, but it won't exist if\n # you haven't been able to calculate a tar_t1 value, so\n # then you'll have to do what you do when you're at the \n # first depth level and aren't garunteed a value at 0m.\n if tar_t1 != fv:\n dz[0] = z_p[LTi1[0][0]] - self.zbounds[dval]\n mt[0] = 0.5 * (data_p[LTi1[0][0]] + tar_t1)\n else:\n dz[0] = z_p[LTi1[0][0]] - self.zbounds[dval]\n mt[0] = data_p[LTi1[0][0]]\n for kk in range(0, nk-1):\n dz[kk+1] = z_p[LTi1[0][kk]+1] - z_p[LTi1[0][kk]]\n mt[kk+1] = 0.5 * (data_p[LTi1[0][kk]+1] + data_p[LTi1[0][kk]])\n \n # Work out the temp at the target depth:\n if z_p[GEi1[0][0]] == dep:\n #print('A sampled depth is equal to the desired level')\n tar_t1 = data_p[GEi1[0][0]]\n else:\n deltaT = data_p[GEi1[0][0]] - data_p[GEi1[0][0] -1]\n deltaZ = z_p[GEi1[0][0]] - z_p[GEi1[0][0] -1]\n tar_t1 = (dep - z_p[GEi1[0][0] -1])*(deltaT/deltaZ) + data_p[GEi1[0][0] -1]\n dz[nk -1] = dep - z_p[GEi1[0][0] -1]\n mt[nk -1] = 0.5*(data_p[GEi1[0][0] -1] + tar_t1)\n \n # Check if there are unacceptable gaps between layers:\n test_gap = np.where(dz > maxgap)\n if np.shape(test_gap)[1] != 0:\n mean_t1 = fv\n mean_t2 = fv\n tar_t1 = fv\n else:\n mean_t1 = sum(np.multiply(mt,dz))/(self.zbounds[dval+1] - self.zbounds[dval])\n \n # Make sure there are no crazy mean values:\n if (abs(mean_t1) > 100 and mean_t1 != fv):\n raise ValueError('Extreme values found')\n # Save the mean temperature at that depth and the temp at the target \n # depth, also save the depth and profile number:\n all_mT[p,dval] = mean_t1\n all_lT[p,dval] = tar_t1\n all_dep[p,dval] = dep # Lower bound of depth\n all_x[p,dval] = x_p\n all_y[p,dval] = y_p\n else:\n tar_t1 = fv # Make sure that if you have no data in a \n # specific depth range you don't carry an old tar_t1 value\n # over.\n \n dval +=1\n\n # Reshape\n self.data_1d = self.reshape_1d(self.data)\n self.x_1d = self.reshape_1d(self.x)\n self.y_1d = self.reshape_1d(self.y)\n self.p_1d = self.reshape_1d(self.p)\n self.z_1d = self.reshape_1d(self.z)\n self.qc_1d = self.reshape_1d(self.qc)\n self.posqc_1d = self.reshape_1d(self.posqc)\n \n # Apply QC - Technically the self.posqc_1d step shouldn't be needed as \n # these profiles will have got filtered out with the earlier QC, but \n # it's good to leave it in there as then if I remove the depth \n # restriction step, this step will still catch 
badly positioned profiles:\n #qcind = (self.qc_1d == True) & (self.posqc_1d == True)\n qcind = (self.qc_1d == True)\n self.qc_1d = self.qc_1d[qcind]\n self.posqc_1d = self.posqc_1d[qcind]\n self.data_1d = self.data_1d[qcind] # Still seems to have 99999 values in\n # it, and I can't see where they get filtered out, but they must get \n # filtered out somewhere or the mean values wouldn't be sensible.\n self.x_1d = self.x_1d[qcind]\n self.y_1d = self.y_1d[qcind]\n self.p_1d = self.p_1d[qcind]\n self.z_1d = self.z_1d[qcind]\n # Trying to work out why I use self.posqc here, not qcind, I think it's \n # because qcind will be one value for each level for each profile,\n # whereas posqc is one value for a whole profile - though I don't think\n # there should be any values left with a bad self.posqc - so this step \n # is probably redundant:\n #all_mTqc = all_mT[self.posqc]\n # Make sure I'm not including values where a mean_t1 value couldn't be \n # calculated:\n #all_mTqc1 = all_mTqc[np.where(all_mTqc != 99999.0)]\n # Need to remember that all_mT may well now be an array not a vector...\n all_mTqc1 = all_mT[all_mT != 99999.0]\n all_depqc = all_dep[all_mT != 99999.0]\n all_xqc = all_x[all_mT != 99999.0]\n all_yqc = all_y[all_mT != 99999.0]\n \n # Prepare data for gridding\n self.init_xgrid()\n self.init_ygrid()\n self.init_zgrid()\n \n # Getting the unique profile references:\n punique = np.unique(self.p_1d, return_index = True)[1]\n \n # From the unique profile references selecting only those profiles that \n # have a mean temperature over the depth of interest:\n #puniqueqc = punique[all_mT != 99999.0]\n \n # Getting references for all the points - so every temp value will have \n # a depth, lat and long i.e. you will still have multiple points per \n # profile:\n points = np.vstack([self.z_1d, self.y_1d, self.x_1d]).transpose()\n \n # Getting a single reference for each profile, this will just take the \n # first value in points, for each profile => points2 will have a \n # latitude, a longitude and the shallowest accepted depth of the profile\n # => This point will always (pretty much) be put into the top set of \n # grid boxes, would therefore need to consider this further if I wanted\n # to populate multiple depth level grid boxes at once:\n points2 = np.vstack([self.z_1d[punique], self.y_1d[punique], \n self.x_1d[punique]]).transpose()\n \n # points3 is like points 2, but gets coordinates for only profiles that\n # have a mean temperature down to the depth of interest:\n #points3 = np.vstack([np.ones(len(puniqueqc)), self.y_1d[puniqueqc], \n # self.x_1d[puniqueqc]]).transpose()\n points3 = np.vstack([all_depqc - 1, all_yqc, all_xqc]).transpose()\n \n # Pretty self explanatory - the boundaries of the grid boxes:\n bins = [self.zbounds, self.ybounds, self.xbounds]\n \n # Grid data:\n grid_count, binedges, binno = scipy.stats.binned_statistic_dd(\n points, self.data_1d, statistic='count', bins=bins)\n grid_sum, binedges, binno = scipy.stats.binned_statistic_dd(\n points, self.data_1d, statistic='sum', bins=bins)\n grid_meansum, binedges, binno = scipy.stats.binned_statistic_dd(\n points3, all_mTqc1, statistic = 'sum', bins = bins)\n grid_pcount, binedges, binno = scipy.stats.binned_statistic_dd(\n points3, all_mTqc1, statistic='count', bins=bins)\n# grid_pcount, binedges, binno = scipy.stats.binned_statistic_dd(\n# points3, self.data_1d[puniqueqc], statistic='count', bins=bins)\n# grid_max, binedges, binno = scipy.stats.binned_statistic_dd(\n# points, self.data_1d, statistic = 'max', 
bins=bins)\n# grid_min, binedges, binno = scipy.stats.binned_statistic_dd(\n# points, self.data_1d, statistic = 'min', bins=bins)\n# grid_med, binedges, binno = scipy.stats.binned_statistic_dd(\n# points, all_mTqc1, statistic = 'median', bins = bins)\n \n # Sum of valid temps/ number of valid obs:\n grid_tmean = grid_sum / grid_count\n grid_tmean = np.ma.MaskedArray(grid_tmean, mask = (grid_count == 0))\n # Sum of valid mean temps/ number of valid profiles:\n grid_meantmean = grid_meansum / grid_pcount\n grid_meantmean = np.ma.MaskedArray(grid_meantmean, mask = (grid_pcount == 0))\n self.grid_tmean = grid_tmean\n self.grid_count = grid_count\n self.grid_sum = grid_sum\n self.grid_meansum = grid_meansum\n self.grid_pcount = grid_pcount\n self.grid_meantmean = grid_meantmean",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n # YOUR IMPLEMENTATION HERE\n #print(response.content)\n d = pd.read_csv(TRAIN_DATA_URL, header = None)\n d_T = d.T\n #d_T = d_T[:].values()\n d_T.drop(d_T.index[1])\n #print(d_T)\n '''x_a = [row[0] for row in d]\n y_a = [row[1] for row in d]\n x_s = np.array(x_a[1:])\n y_s = np.array(y_a[1:])'''\n x_1 = d_T[0][1:]\n y_1 = d_T[1][1:]\n x_min = x_1.min()\n x_max = x_1.max()\n y_min = y_1.min()\n y_max = y_1.max()\n x = np.array((x_1-x_min)/(x_max-x_min))\n y = np.array((y_1-y_min)/(y_max-y_min))\n x_mean, y_mean = mean(x), mean(y)\n b1 = covariance(x, x_mean, y, y_mean/variance(x, x_mean))\n b0 = y_mean - b1*x_mean\n print(b0, b1)\n return np.array(b0+b1*area)",
"def get_geocen_df(quality = str(), year = str(), area_type = str()):\n try:\n url = f\"https://raw.githubusercontent.com/uscensusbureau/citysdk/master/v2/GeoJSON/{quality}/{year}/{area_type}.json\"\n df = gpd.read_file(url)\n return df\n except Exception as err:\n print(f'An error occured. All parameters must be in string format and must exist in the Census GeoJSON database.: {err}')",
"def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)",
"def make_area_plots(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\"):\n\n df = df.copy()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) if \"evidence\" in i else i for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n trials = 'trial_number'\n methods = 'Method'\n\n # Make area plot\n uniq_methods = set(df[\"Method\"].values)\n method_order = sorted(uniq_methods,\n key=lambda x : x if isinstance(x, float) else -1)\n method_df = []\n datasets = set()\n for data, sub_df in df.groupby(\"Data\"):\n # Add datasets\n datasets.add(data)\n x_vals = sub_df[x_input]\n y_vals = sub_df[y_input]\n methods_sub = sub_df[\"Method\"]\n trials_sub= sub_df['trial_number']\n for method_idx, method in enumerate(method_order):\n # Now summarize these lines\n bool_select = (methods_sub == method)\n lines_y = y_vals[bool_select]\n lines_x = x_vals[bool_select]\n trials_temp = trials_sub[bool_select]\n areas = []\n # create area!\n for trial, line_x, line_y in zip(trials_sub, lines_x, lines_y):\n new_y = np.abs(np.array(line_y) - np.array(line_x))\n area = simps(new_y, line_x)\n to_append = {\"Area from parity\": area,\n \"Regularizer Coeff, $\\lambda$\": method,\n \"method_name\": method,\n \"Data\": data,\n \"Trial\" : trial}\n method_df.append(to_append)\n method_df = pd.DataFrame(method_df)\n method_df_evidence = method_df[[isinstance(i, float) for i in\n method_df['method_name']]].reset_index()\n method_df_ensemble = method_df[[\"ensemble\" in str(i) for i in\n method_df['method_name']]].reset_index()\n data_colors = {\n dataset : sns.color_palette()[index]\n for index, dataset in enumerate(datasets)\n }\n\n min_x = np.min(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n max_x= np.max(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n\n sns.lineplot(x=\"Regularizer Coeff, $\\lambda$\", y=\"Area from parity\",\n hue=\"Data\", alpha=0.8, data=method_df_evidence,\n palette = data_colors)\n\n for data, subdf in method_df_ensemble.groupby(\"Data\"):\n\n color = data_colors[data]\n area = subdf[\"Area from parity\"].mean()\n std = subdf[\"Area from parity\"].std()\n plt.hlines(area, min_x, max_x, linestyle=\"--\", color=color, alpha=0.8)\n\n ensemble_line = plt.plot([], [], color='black', linestyle=\"--\",\n label=\"Ensemble\")\n # Now make ensemble plots\n plt.legend(bbox_to_anchor=(1.1, 1.05))",
"def sample_gp(\n params: Dict[str, float],\n data: pd.DataFrame,\n model: ModelType,\n t_val: np.ndarray,\n n: int = 1,\n):\n\n if n < 1:\n raise ValueError(\"'n' must be equal or greater 1.\")\n\n resid = predict_resid(params, data, model)\n pos_resid = resid[[\"x\", \"x_err\", \"y\", \"y_err\", \"technique\"]].dropna()\n\n gpx, gpy = create_gp(params)\n\n sample = pd.DataFrame(index=t_val)\n for technique, df in pos_resid.groupby(\"technique\"):\n if technique == \"interferometry\":\n # unaffected by source confusion\n pass\n else:\n assert technique == \"imaging\"\n gpx.compute(df.index, df[\"x_err\"])\n gpy.compute(df.index, df[\"y_err\"])\n if n == 1:\n sample[\"x\"] = gpx.sample_conditional(df[\"x\"], t_val)\n sample[\"y\"] = gpy.sample_conditional(df[\"y\"], t_val)\n else:\n # average over multiple realizations\n for i in range(n):\n sample[f\"x-{i}\"] = gpx.sample_conditional(df[\"x\"], t_val)\n sample[f\"y-{i}\"] = gpy.sample_conditional(df[\"y\"], t_val)\n sample[\"x\"] = sample.filter(regex=\"x-\").mean(axis=1)\n sample[\"y\"] = sample.filter(regex=\"y-\").mean(axis=1)\n\n return sample",
"def coarse_dataframe(geodf, side_square):\n\n # initialise the categories\n\n geodf['category'] = -1\n\n # do calculations on the first date, then extrapolate to the rest\n data_df = geodf[geodf['date'] == np.unique(geodf['date'])[0]]\n\n data_df = data_df.sort_values(by=['longitude', 'latitude'])\n\n n_grids = int(math.sqrt(data_df.shape[0]))\n\n category = 0\n\n for n in range(data_df.shape[0]):\n\n # only process lat,long point that do not have a category\n if data_df['category'].iloc[n] == -1:\n\n # get the side_square^2 nearest indexes to the point.\n indexes = []\n for i in range(side_square):\n for j in range(side_square):\n\n if n + n_grids * i + j < n_grids * n_grids and data_df['category'].iloc[n + n_grids * i + j] == -1:\n indexes.append(n + n_grids * i + j)\n\n # assing them all to the same categorty\n data_df['category'].iloc[indexes] = str(category)\n\n # get the geometry points of that catery\n cat_geometry = data_df[data_df['category'] == str(category)]['geometry']\n\n # get indexes of each point belonging to the category\n indexes_all = []\n for point in cat_geometry:\n indexes_all.append(geodf[geodf['geometry'] == point].index.tolist())\n\n indexes_all_flat = [item for sublist in indexes_all for item in sublist]\n\n geodf['category'].iloc[indexes_all_flat] = str(category)\n\n category = category + 1\n\n geodf['category'] = (geodf['category'].astype(str)).str.cat(geodf['date'], sep=\"_\")\n\n geodf = geodf.dissolve(by=['category', 'date'], aggfunc='mean')\n\n # re-assing the date because we are losing it\n geodf['date'] = [i[1] for i in geodf.index]\n\n geodf['category'] = [i[0] for i in geodf.index]\n\n return geodf",
"def process_dataframe(df):\n\n if isinstance(df, pd.DataFrame):\n df2 = df.copy()\n required_columns = {'name', 'wkt', 'lower_limit', 'upper_limit'}\n if not required_columns <= set(df2.columns):\n raise ValueError(\"DataFrame must contain columns 'name', 'wkt', 'lower_limit', 'upper_limit'.\")\n if not 'geometry' in list(df2.columns):\n logger.info(\"Converting WKT representation of geometry to geometry objects.\")\n df2['geometry'] = df2.wkt.apply(shapely.wkt.loads)\n gdf = geopandas.GeoDataFrame(df2, geometry=df2.geometry)\n elif isinstance(df, geopandas.GeoDataFrame):\n df2 = df.copy()\n required_columns = {'name', 'lower_limit', 'upper_limit'}\n if not required_columns <= set(df2.columns):\n raise ValueError(\"GeoDataFrame must contain columns 'name', 'lower_limit', 'upper_limit'.\")\n if not 'wkt' in list(df2.columns):\n logger.info(\"Converting geometry objects to their WKT representations.\")\n df2['wkt'] = df2.geometry.apply(lambda g: g.wkt)\n gdf = df2\n else:\n raise ValueError(\"df must be a DataFrame or GeoDataFrame!\")\n\n return gdf",
"def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n print(response)\n df=pd.read_csv(TRAIN_DATA_URL).T.reset_index()\n df.columns=['area','price']\n df=df.iloc[1:]\n df['area']=pd.to_numeric(df['area'],downcast='float')\n df['price']=pd.to_numeric(df['price'],downcast='float')\n X=df['area'].values\n Y=df['price'].values\n z=numpy.polyfit(X,Y,1)\n p=numpy.poly1d(z)\n predictions=p(area)\n return predictions",
"def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n count_miss = df.isnull().sum(axis=0).values #find number of nans for each column\n count_miss = [val for val in count_miss]\n \n drop_cols = []\n\n for ind, val in enumerate(count_miss):\n if val > 200000:\n drop_cols.append(ind)\n \n df_drop_cols = list(azdias.columns[drop_cols])\n df = df.drop(df_drop_cols, axis=1)\n \n for col in range(df.shape[1]): #loop through columns\n column_name = df.columns[col] #get column name\n missing_list = feat_info.iloc[col,3] #get missing_or_unknown column from feature info\n missing_list = missing_list.replace('[','') #remove left bracket from string\n missing_list = missing_list.replace(']','') #remove right bracket from string\n missing_list = missing_list.split(',') #split into individual strings\n \n #find data that is natually missing and continue loop to omit\n if missing_list == ['']:\n continue\n \n else:\n for dat_type in missing_list: \n if df[column_name].dtype == 'object': #find values that contain x\n df.loc[df[column_name] == dat_type, column_name] = np.nan #replace x with nan\n \n else:\n dat_type = int(dat_type) #if no x, convert to integer and replace with nan\n df.loc[df[column_name] == dat_type, column_name] = np.nan\n \n # select, re-encode, and engineer column values.\n \n # encode OST_WEST_KZ\n df.loc[df['OST_WEST_KZ'] == 'W','OST_WEST_KZ'] = 0\n df.loc[df['OST_WEST_KZ'] == 'O','OST_WEST_KZ'] = 1\n \n # Re-encode categorical variable(s) to be kept in the analysis.\n \n \n #get list of attributes with type categorical\n feat_info[feat_info['type'] == 'categorical']\n \n cat_new_cols = [] #initialize\n for i in feat_info[feat_info['type'] == 'categorical']['attribute']:\n cat_new_cols.append(i)\n \n for cols in df.columns:\n if cols in cat_new_cols:\n if df[cols].nunique(dropna=True) > 2: #if the number of unique values is greater than 2 \n df = df.drop(cols, axis=1) #drop from the analysis\n print(\"more than 2 categories: {}\".format(cols))\n \n else:\n if not df[cols].unique()[0] > 0:\n #if not df[cols].unique()[0] > 0:\n dummies = pd.get_dummies(df[cols], prefix=cols)\n df = df.drop(cols, axis=1) #create dummy variable\n df = df.join(dummies)\n print(\"transformed to dummy variable: {}\".format(cols))\n \n # create variable: MOVEMENT\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]),'MOVEMENT'] = 1\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([2,4,6,7,9,11,13,15]),'MOVEMENT'] = 2\n \n #Capture Decade\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,2]), 'DECADE'] = 40\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([3,4]), 'DECADE'] = 50\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([5,6,7]), 'DECADE'] = 60\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([8,9]), 'DECADE'] = 70\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([10,11,12,13]), 'DECADE'] = 80\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([14,15]), 'DECADE'] = 90\n \n df['CAMEO_INTL_2015'] = df['CAMEO_INTL_2015'].astype(float)\n\n # create new variable: WEALTH\n df.loc[df['CAMEO_INTL_2015'].isin([51,52,53,54,55]), 'WEALTH'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([41,42,43,44,45]), 'WEALTH'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([31,32,33,34,35]), 'WEALTH'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([21,22,23,24,25]), 'WEALTH'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([11,12,13,14,15]), 'WEALTH'] = 5\n \n # create new variable: LIFE_STAGE\n df.loc[df['CAMEO_INTL_2015'].isin([11,21,31,41,51]),'LIFE_STAGE'] = 1\n 
df.loc[df['CAMEO_INTL_2015'].isin([12,22,32,42,52]),'LIFE_STAGE'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([13,23,33,43,53]),'LIFE_STAGE'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([14,24,34,44,54]),'LIFE_STAGE'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([15,25,35,45,55]),'LIFE_STAGE'] = 5\n \n # remove selected columns and rows, ...\n df = df.drop('PRAEGENDE_JUGENDJAHRE', axis=1)\n df = df.drop('CAMEO_INTL_2015',axis=1)\n \n # Return the cleaned dataframe.\n return df",
"def output_chosen_design(\n pretest_data: pd.DataFrame,\n geo_level_eval_data: pd.DataFrame,\n response: str,\n spend_proxy: str,\n time_window_for_eval: TimeWindow,\n group_control: int = common_classes.GeoAssignment.CONTROL,\n group_treatment: int = common_classes.GeoAssignment.TREATMENT\n) -> np.ndarray:\n geo_treatment = geo_level_eval_data[geo_level_eval_data['assignment'] ==\n group_treatment]\n geo_control = geo_level_eval_data[geo_level_eval_data['assignment'] ==\n group_control]\n treatment_geo = geo_treatment['geo'].to_list()\n control_geo = geo_control['geo'].to_list()\n\n treatment_time_series = pretest_data[pretest_data['geo'].isin(\n treatment_geo)].groupby(\n 'date', as_index=False)[[response, spend_proxy]].sum()\n\n control_time_series = pretest_data[pretest_data['geo'].isin(\n control_geo)].groupby(\n 'date', as_index=False)[[response, spend_proxy]].sum()\n\n _, axes = plt.subplots(2, 2, figsize=(15, 7.5))\n\n sns.regplot(\n x=np.sqrt(geo_treatment['response']),\n y=np.sqrt(geo_control['response']),\n ax=axes[0, 0],\n fit_reg=False)\n axes[0, 0].set_title(response + ' (in square root)')\n axes[0, 0].set_xlabel('treatment')\n axes[0, 0].set_ylabel('control')\n lim = np.sqrt([\n min([min(geo_control['response']),\n min(geo_treatment['response'])]) * 0.97,\n max([max(geo_control['response']),\n max(geo_treatment['response'])]) * 1.03\n ])\n axes[0, 0].plot(lim, lim, linestyle='--', color='gray')\n axes[0, 0].set_xlim(lim)\n axes[0, 0].set_ylim(lim)\n\n sns.regplot(\n x=np.sqrt(geo_treatment['spend']),\n y=np.sqrt(geo_control['spend']),\n ax=axes[0, 1],\n fit_reg=False)\n axes[0, 1].set_title(spend_proxy + ' (in square root)')\n axes[0, 1].set_xlabel('treatment')\n axes[0, 1].set_ylabel('control')\n lim = np.sqrt([\n min([min(geo_control['spend']),\n min(geo_treatment['spend'])]) * 0.97,\n max([max(geo_control['spend']),\n max(geo_treatment['spend'])]) * 1.03\n ])\n axes[0, 1].plot(lim, lim, linestyle='--', color='gray')\n axes[0, 1].set_xlim(lim)\n axes[0, 1].set_ylim(lim)\n\n treatment_time_series.plot(\n x='date',\n y=response,\n color='black',\n label='treatment',\n ax=axes[1, 0])\n control_time_series.plot(\n x='date', y=response, color='red', label='control', ax=axes[1, 0])\n\n axes[1, 0].axvline(\n x=time_window_for_eval.first_day,\n color='blue',\n ls='-',\n label='evaluation window')\n axes[1, 0].axvline(x=time_window_for_eval.last_day, color='blue', ls='-')\n axes[1, 0].legend()\n axes[1, 0].set_ylabel(response)\n axes[1, 0].set_xlabel('date')\n\n treatment_time_series.plot(\n x='date',\n y=spend_proxy,\n color='black',\n label='treatment',\n ax=axes[1, 1])\n control_time_series.plot(\n x='date', y=spend_proxy, color='red', label='control', ax=axes[1, 1])\n axes[1, 1].axvline(\n x=time_window_for_eval.first_day,\n color='blue',\n ls='-',\n label='evaluation window')\n axes[1, 1].axvline(x=time_window_for_eval.last_day, color='blue', ls='-')\n axes[1, 1].legend()\n axes[1, 1].set_ylabel(spend_proxy)\n axes[1, 1].set_xlabel('date')\n\n return axes",
"def get_places() -> DataFrame:\n df = pd.read_csv('./data/geoplaces2.csv', encoding='utf-8')\n\n # drop useless columns\n df.drop(columns=['the_geom_meter', 'name', 'address',\n 'city', 'state', 'country', 'fax',\n 'zip', 'url', 'accessibility', 'franchise',\n 'other_services'],\n inplace=True)\n\n # select categorical column names\n categorical_columns = [column for column in df.columns\n if df[column].dtype.name == 'object'\n if column not in ['userID', 'smoker']]\n\n # replace categorical columns with one hot encoding\n for column_name in categorical_columns:\n dummies = pd.get_dummies(df[column_name])\n\n for dummy_column_name in dummies.columns:\n df[column_name + \"_\" + dummy_column_name] = dummies[dummy_column_name]\n\n df.drop(columns=[column_name], inplace=True)\n\n categorical_columns = [column for column in df.columns if df[column].dtype.name == 'object']\n\n for column in categorical_columns:\n df[column] = df[column].astype('category')\n\n df_cuisine = get_place_secondary_df('cuisine', 'Rcuisine')\n df_payment = get_place_secondary_df('accepts', 'Rpayment')\n df_hours = get_place_hours()\n\n payment_columns = list(filter(lambda x: x.startswith(\"Raccepts_\"), df_payment.columns))\n\n # some restaurants don't have specified payment ... but why\n # left join payment options and set cash option\n new_df = df.merge(df_payment, on='placeID', how='left')\n new_df[payment_columns] = new_df[payment_columns].fillna(0)\n new_df['Raccepts_cash'] = 1\n\n # left join cuisines and fill missing values with 0\n new_df = new_df.merge(df_cuisine, on='placeID', how='left')\n cuisine_columns = list(filter(lambda x: \"Rcuisine\" in x, new_df.columns))\n new_df[cuisine_columns] = new_df[cuisine_columns].fillna(0)\n\n new_df = new_df.merge(df_hours, on='placeID', how='inner')\n\n return new_df",
"def get_data(n):\n data = pd.read_csv('map_data/lior_results_2.csv')\n data = data.drop(['estimated_mass', 'estimated_pop'], axis=1)\n data = data[data.binomial != 'Sus scrofa'] # Wild Boar\n data = data[data.binomial != 'Ursus maritimus'] # Polar bear\n data = data[data.binomial != 'Sus bucculentus'] # EX\n data = data[data.binomial != 'Melomys rubicola'] # EX\n data = data.assign(total_mass=data.AdultBodyMassG * data.pop_density * data.Range,\n total_mass_density=data.AdultBodyMassG * data.pop_density)\n data = data.sort_values(by='total_mass_density', ascending=False)\n data = data.iloc[0:n - 1]\n geo_data = gpd.read_file('TERRESTRIAL_MAMMALS/TERRESTRIAL_MAMMALS.shp').to_crs(\"EPSG:6933\")\n geo_data = geo_data[geo_data.category != 'EX']\n range_polygons = geo_data.loc[(geo_data['legend'] == 'Extant & Introduced (resident)') |\n (geo_data['legend'] == 'Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Extant & Reintroduced (resident)') |\n (geo_data['legend'] == 'Extant & Vagrant (seasonality uncertain)') |\n (geo_data['legend'] == 'Extant (non breeding)') |\n (geo_data['legend'] == 'Extant (resident)') |\n (geo_data['legend'] == 'Probably Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Probably Extant (resident)') |\n (geo_data['legend'] == 'Reintroduced')]\n range_polygons = range_polygons.merge(data, on='binomial')\n range_polygons = range_polygons.to_crs(\"EPSG:6933\")\n return range_polygons",
"def setgeo(rundata):\n#-------------------\n\n try:\n geodata = rundata.geodata\n except:\n print \"*** Error, this rundata has no geodata attribute\"\n raise AttributeError(\"Missing geodata attribute\")\n\n # == setgeo.data values ==\n\n geodata.variable_dt_refinement_ratios = True\n\n geodata.igravity = 1\n geodata.gravity = 9.81\n geodata.icoordsys = 2\n geodata.Rearth = 6367.5e3\n geodata.icoriolis = 0\n\n # == settsunami.data values ==\n geodata.sealevel = 0.\n geodata.drytolerance = 1.e-3\n geodata.wavetolerance = 1.e-1\n geodata.depthdeep = 1.e2\n geodata.maxleveldeep = 4\n geodata.ifriction = 1\n geodata.coeffmanning =.025\n geodata.frictiondepth = 200.\n\n\n # == settopo.data values ==\n geodata.topofiles = []\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n geodata.topofiles.append([3, 1, 1, 0., 1e10, 'ebanda.asc'])\n \n\n # == setdtopo.data values ==\n geodata.dtopofiles = []\n # for moving topography, append lines of the form: (<= 1 allowed for now!)\n # [topotype, minlevel, maxlevel, fname]\n geodata.dtopofiles.append([3,1,3,'BandaArc1852.tt3'])\n\n geodata.iqinit = 0\n geodata.qinitfiles = []\n\n # == setgauges.data values ==\n geodata.gauges = []\n # for gauges append lines of the form [gaugeno,x,y,t1,t2]\n geodata.gauges.append([1, 109.000, -7.789, 0., 1e10]) #Cialciap\n geodata.gauges.append([2, 109.040, -7.722, 0., 1e10]) #Cialciap Bay\n geodata.gauges.append([3, 110.292, -8.027, 0., 1e10]) #Bantul\n geodata.gauges.append([4, 111.086, -8.233, 0., 1e10]) #Pacitan\n geodata.gauges.append([5, 111.558, -8.319, 0., 1e10]) #Pelang Beach\n geodata.gauges.append([6, 111.968, -8.286, 0., 1e10]) #Sine Beach\n geodata.gauges.append([7, 112.982, -8.326, 0., 1e10]) #Guying\n geodata.gauges.append([8, 113.176, -8.286, 0., 1e10]) #Muara\n geodata.gauges.append([9, 113.461, -8.383, 0., 1e10]) #Puger\n geodata.gauges.append([10, 113.336, -8.506, 0., 1e10]) #Barung Island\n geodata.gauges.append([11, 114.110, -8.621, 0., 1e10]) #Lampon\n geodata.gauges.append([12, 114.396, -8.231, 0., 1e10]) #Banyuwani\n geodata.gauges.append([13, 112.880, -7.278, 0., 1e10]) #Surabiya\n geodata.gauges.append([14, 114.965, -8.533, 0., 1e10]) #Tabanan\n geodata.gauges.append([15, 115.144, -8.697, 0., 1e10]) #Kuta\n geodata.gauges.append([16, 115.193, -8.848, 0., 1e10]) #Nusa Dua\n geodata.gauges.append([17, 116.064, -8.586, 0., 1e10]) #Mataram\n geodata.gauges.append([18, 115.260, -8.727, 0., 1e10]) #Sanur\n geodata.gauges.append([19, 116.031, -8.873, 0., 1e10]) #Sepi Bay\n geodata.gauges.append([20, 116.135, -8.872, 0., 1e10]) #Serangan Beach\n geodata.gauges.append([21, 116.283, -8.902, 0., 1e10]) #Kuta Lombok\n geodata.gauges.append([22, 116.400, -8.868, 0., 1e10]) #Awang Bay\n geodata.gauges.append([23, 116.466, -8.924, 0., 1e10]) #Surga Beach\n geodata.gauges.append([24, 116.744, -8.918, 0., 1e10]) #Maluk\n geodata.gauges.append([25, 116.833, -9.047, 0., 1e10]) #Tongo\n geodata.gauges.append([26, 117.199, -9.023, 0., 1e10]) #Linyuk\n geodata.gauges.append([27, 117.762, -8.939, 0., 1e10]) #Leppu\n geodata.gauges.append([28, 118.377, -8.785, 0., 1e10]) #Huu\n geodata.gauges.append([29, 118.172, -8.780, 0., 1e10]) #Rontu Beach\n geodata.gauges.append([30, 119.403, -8.729, 0., 1e10]) #Mantea Alley\n geodata.gauges.append([31, 119.374, -9.788, 0., 1e10]) #Nihiwatu\n geodata.gauges.append([32, 119.466, -9.742, 0., 1e10]) #Waigalli\n geodata.gauges.append([33, 119.945, -9.975, 0., 1e10]) #Tarimbang Beach\n geodata.gauges.append([34, 120.183, -10.233, 0., 1e10]) 
#Lalindi\n geodata.gauges.append([35, 120.264, -10.257, 0., 1e10]) #Manoekangga\n geodata.gauges.append([36, 120.546, -10.241, 0., 1e10]) #Baing\n geodata.gauges.append([37, 120.312, -9.661, 0., 1e10]) #Waingapu\n geodata.gauges.append([38, 119.871, -8.501, 0., 1e10]) #Labun Badjo\n geodata.gauges.append([39, 120.604, -8.822, 0., 1e10]) #Mborong\n geodata.gauges.append([40, 123.560, -10.166, 0., 1e10]) #Kupang\n geodata.gauges.append([41, 121.824, -10.491, 0., 1e10]) #Baa",
"def create():\n df = prepare_dataframe(io[\"harmonization_df_output_path\"], index_col=\"label\")\n assumption_map = create_assumption_map(columns, df)\n assumption_map.to_csv(io[\"harmonization_output_assumption_path\"], index=False)\n\n # Heat Rate regression Map, Valid only for the Coal\n regression_map = create_regression_map(df)\n\n res = other_regression(df[df[\"fuel_type\"] == \"coal\"], [\"heat_rate\"], \"delta_heatrate\")\n regression_map[\"intersect_err\"] = res.bse[0]\n regression_map[\"slope_err\"] = res.bse[1]\n print(regression_map)\n regression_map.to_csv(io[\"harmonization_output_regression_path\"], index=False)",
"def get_results(self, preprocess=True):\n logger.debug('Calculating GSEA')\n measurement_df = self._get_measurement_df(preprocess)\n\n annot_df = self.data_source.get_annotations()\n joined = pd.merge(left=measurement_df, right=annot_df, left_index=True, right_index=True)\n joined = joined.set_index('entity_id')\n unique_ids = [self.data_source._get_unique_id(x) for x in joined.index.values]\n joined.index = unique_ids\n joined = joined.drop_duplicates(keep='first').sort_index()\n\n # gene_sets is a dict. key is pw name, values are a list of entries in that pathway\n gene_sets = {}\n assert len(self.data_source.dataset_pathways) > 0, 'No pathways found in the dataset'\n pathways = list(self.data_source.dataset_pathways)\n for pw in pathways:\n pathway_row_ids = self.data_source.dataset_pathways_to_row_ids[pw]\n pw_unique_ids = []\n for row_id in pathway_row_ids:\n pw_unique_ids.extend(self.data_source.dataset_row_id_to_unique_ids[row_id])\n pw_unique_ids = list(set(pw_unique_ids))\n gene_sets[pw] = pw_unique_ids\n\n # run GSEA for all comparisons\n all_dfs = []\n for comp in self.data_source.comparisons:\n if not is_comparison_used(comp, self.case, self.control):\n continue\n case = comp['case']\n control = comp['control']\n logger.debug('Running comparison case=%s control=%s' % (case, control))\n pheno_cols = set(self.data_source.get_experimental_design()['groups'][case])\n df_cols = measurement_df.columns.values\n\n # for each comparison, we need to create C (phenotype labels)\n # Loop over df_cols and store an indicator into C.\n # Entries in C is 1 if that column belongs to the case group, otherwise it's a 0\n C = []\n for col in df_cols:\n if col in pheno_cols:\n C.append(1)\n else:\n C.append(0)\n C = np.array(C)\n\n # actually runs GSEA here\n data = joined\n cls = C.tolist()\n outdir = None\n min_size = 1\n max_size = 1000\n permutation_num = self.num_resamples\n weighted_score_type = 1\n permutation_type = 'phenotype'\n method = self.method\n ascending = True\n processes = 1\n figsize = (6.5, 6)\n format = 'pdf',\n graph_num = 20\n no_plot = True\n seed = None\n verbose = False\n\n msea = MSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num,\n weighted_score_type, permutation_type, method, ascending, processes,\n figsize, format, graph_num, no_plot, seed, verbose)\n msea.run()\n\n # convert GSEA results to dataframe\n df = msea.res2d\n df = df.reset_index()\n selected = df[['Term', 'pval', 'fdr', 'es']]\n selected = selected.rename(columns={'Term': 'mapids'}).set_index('mapids')\n\n col_name = comp['name'] + ' p-value'\n es_colname = comp['name'] + ' ES_score'\n if self.data_source.database_name is not None:\n comb_col_name = '%s %s %s' % (self.data_source.database_name, comp['name'], 'comb_p')\n else:\n comb_col_name = '%s %s' % (comp['name'], 'comb_p')\n\n pathway_df = selected.rename(columns={\n 'pval': col_name,\n 'es': es_colname,\n 'fdr': comb_col_name\n })\n all_dfs.append(pathway_df)\n\n # combine all the results across all comparisons\n combined_df = pd.concat(all_dfs, axis=1, sort=False)\n combined_df.index.name = 'mapids'\n\n # create a dataframe of pathway mapids and names\n pw_name_df = []\n for map_id in pathways:\n pw_name = self.data_source.pathway_dict[map_id]['display_name']\n pw_name_df.append((map_id, pw_name))\n pw_name_df = pd.DataFrame(pw_name_df, columns=['mapids', 'pw_name']).set_index(['mapids'])\n combined_df = pw_name_df.merge(combined_df, left_index=True, right_index=True)\n\n # add formula coverage information\n mapids = 
combined_df.index.values.tolist()\n cov_df = self.data_source._calculate_coverage_df(mapids)\n coverage_df = cov_df.reindex(combined_df.index) # make sure dfs are in same order before merging\n\n # Merge the two dfs together\n pathway_df = pd.merge(combined_df, coverage_df, left_index=True, right_index=True, how='outer')\n\n # del pathway_df.index.name\n pathway_df.rename_axis(None, inplace=True)\n\n # post-processing to filter pathway dataframe by the minimum number of hits\n pathway_df = post_filter_df_by_min_hits(pathway_df, self.data_source.min_hits)\n return pathway_df",
"def prep_data(df, datatype):\n\n start = time.time()\n if datatype == \"training\":\n df = drop_cols(df, \"gross_bookings_usd\")\n df = add_category(df)\n df = get_train_data(df)\n\n df = stats(df)\n print(\"(1/6 - train only) add categories and downsample train data: \", np.round((time.time() - start)*1000 / 60, 2), \"min\")\n\n start = time.time()\n df = competitors(df)\n print(\"(2/6) competitors: \", np.round((time.time() - start) / 60, 2), \"min\")\n\n start = time.time()\n df = seasonality(df)\n print(\"(3/6) seasons: \", np.round((time.time() - start)*1000 / 60, 2), \"min\")\n\n start = time.time()\n df = missing_values(df)\n print(\"(4/6) missing values: \", np.round((time.time() - start)*1000 / 60, 2), \"min\")\n\n start = time.time()\n df = price_star_diff(df)\n print(\"(5/6) price and star difference: \", np.round((time.time() - start)*1000 / 60, 2), \"min\")\n\n # print(\"er zijn nog zoveel nans(2): \", df.isnull().sum().sum())\n # pd.set_option('display.max_rows', None)\n # print(df.isnull().sum())\n\n\n start = time.time()\n df = scale(df)\n print(\"(6/6) scaling: \", np.round((time.time() - start) / 60, 2), \"min\")\n\n start = time.time()\n df = categorical(df)\n print(\"(6/6) categorical transformation: \", np.round((time.time() - start) / 60, 2), \"min\")\n\n return df",
"def run(self, country: Countries, action: str = 'evaluate') -> Union[pd.DataFrame, Dict[\n Union[Literal[\"Singapore\"], Literal[\"China\"], Literal[\"India\"]], Dict[str, Union[Union[float, str], Any]]]]:\n assert country in Countries.__args__, \\\n f\"{country} is not supported, please choose between {Countries.__args__}\"\n X_train, y_train = self.split_sequence(self.train[country].values, self.n_steps_in, self.n_steps_out)\n X_valid, y_valid = self.split_sequence(self.valid_arb[country].values, self.n_steps_in, self.n_steps_out)\n\n X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], self.n_features))\n X_valid = X_valid.reshape((X_valid.shape[0], X_valid.shape[1], self.n_features))\n\n model = self.make_model()\n model.fit(X_train, y_train, epochs=200, verbose=0,\n callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)],\n validation_data=(X_valid, y_valid))\n\n if action == 'predict':\n input = (self.df[country][-self.n_steps_in:].values).reshape((1, self.n_steps_in, self.n_features))\n pred = model.predict(input, verbose=0)\n\n return pd.DataFrame(pred, columns=['2008', '2009'], index=[country]).T\n\n else:\n pred_valid = model.predict(X_valid, verbose=0)\n pred_train = model.predict(X_train, verbose=0)\n\n return {country: {'rmse_train': sqrt(mean_squared_error([y_train[i][0] for i in range(0, len(y_train))],\n [pred_train[i][0] for i in\n range(0, len(pred_train))])),\n 'rmse_val': sqrt(mean_squared_error([y_valid[i][0] for i in range(0, len(y_valid))],\n [pred_valid[i][0] for i in\n range(0, len(pred_valid))])),\n 'mae_train': mean_absolute_error([y_train[i][0] for i in range(0, len(y_train))],\n [pred_train[i][0] for i in range(0, len(pred_train))]),\n 'mae_val': mean_absolute_error([y_valid[i][0] for i in range(0, len(y_valid))],\n [pred_valid[i][0] for i in range(0, len(pred_valid))]),\n 'mape_train': f'{self.mean_absolute_percentage_error([y_train[i][0] for i in range(0, len(y_train))], [pred_train[i][0] for i in range(0, len(pred_train))])} %',\n 'mape_val': f'{self.mean_absolute_percentage_error([y_valid[i][0] for i in range(0, len(y_valid))], [pred_valid[i][0] for i in range(0, len(pred_valid))])} %'}}",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def BuildArchetypeDict(BuildingData):\n\n #Manually Set Lighting Control\n lighting_control_d = { \n \"MULTI_RES\": 250.,\n \"OFFICE\": 350.,\n \"SCHOOL\": 350.,\n }\n\n #Set Mean Occupancy as calculated from occupancy profiles. Maybe redundant\n mean_occupancy_d = { \n \"MULTI_RES\": 0.014355,\n \"OFFICE\": 0.009951,\n \"SCHOOL\": 0.010913,\n }\n\n #Recreated the above dictionary into a dataframe manually because im an idiot and short of time\n mean_occupancy_df = pd.DataFrame({\"Code\": [\"MULTI_RES\",\"OFFICE\",\"SCHOOL\"], \"people_sqm\": [0.014355,0.009951,0.010913]})\n\n volume = (BuildingData['room_width'] / 1000.) * (BuildingData['room_depth'] / 1000.) * (\n BuildingData['room_height'] / 1000.)\n area = (BuildingData['room_width'] / 1000.) * (BuildingData['room_depth'] / 1000.)\n\n # read thermal properties for RC model\n arch = pd.read_excel(paths['Archetypes_properties'], sheetname='THERMAL')\n r = re.compile(\"([a-zA-Z_]+)\") # generate regular expression of letters to strip numbers\n\n # Strip numbers off the building archetypes for matching later on\n arch[\"code1\"] = pd.DataFrame([r.match(string).groups() for string in arch.Code])\n arch.set_index(['code1'], inplace=True)\n print arch\n\n # Delete uneeded archetypes and\n arch.drop(['SERVERROOM', 'PARKING', 'SWIMMING', 'COOLROOM', \"SINGLE_RES\", \"HOTEL\", \"RETAIL\", \"FOODSTORE\", \"RESTAURANT\", \"INDUSTRIAL\", \"HOSPITAL\", \"GYM\"], inplace=True)\n arch.reset_index(drop=False, inplace=True)\n arch.drop('Es', axis=1, inplace=True) # Ratio of floor area that has electricity not needed\n arch.drop('Hs', axis=1, inplace=True) # ratio of gross floor area heated or cooled not needed\n arch.drop('U_roof', axis=1, inplace=True) # roof u value not needed, assume only facade loss\n arch.drop('U_base', axis=1, inplace=True) # heat transfer through basement not needed\n\n # read internal loads for RC model from CEA excel sheet and keep necessary loads\n int_loads = pd.read_excel(paths['Archetypes_properties'], sheetname='INTERNAL_LOADS')\n int_loads = int_loads[['Code', 'Qs_Wp', 'Ea_Wm2', 'El_Wm2']]\n\n # read thermal set points and ventilation rates\n thermal_setpoint_ventelation = pd.read_excel(paths['Archetypes_properties'], sheetname='INDOOR_COMFORT')\n\n thermal_setpoint_ventelation = thermal_setpoint_ventelation.merge(mean_occupancy_df)\n\n #Set a ventilation rate in air changes per hour. 
However this doesn't work with average occupancy\n #TODO: Set a dynamic ventilation strategy in the ASF Simulation Model\n #thermal_setpoint_ventelation['ACH_vent']=thermal_setpoint_ventelation['Ve_lps']*thermal_setpoint_ventelation['people_sqm'] * area * 3.6/volume\n\n # Combine everything into a single dataframe\n b_props = arch.merge(int_loads, how='left', left_on='code1', right_on='Code')\n b_props = b_props.merge(thermal_setpoint_ventelation, how='left', left_on='code1', right_on='Code')\n\n\n b_props = b_props.drop(['Code_y', 'Code'], axis=1)\n\n # Create set back temperature definition to match with the ASF_Simulation\n b_props['setBackTempC'] = b_props['Tcs_setb_C'] - b_props['Tcs_set_C']\n b_props['setBackTempH'] = b_props['Ths_set_C'] - b_props['Ths_setb_C']\n\n\n\n #Ventilation rate per person\n b_props['ACH_vent'] = b_props['Ve_lps'] * 3.6 / volume\n\n # Assign values for Cm from ISO13790:2008, Table 12, based on archetypes\n c_m = []\n for i in range(0, len(b_props['th_mass'])):\n # c_m.append(165.0*10**3) just testing default value\n if b_props['th_mass'][i] == \"T1\":\n c_m.append(110.0 * 10 ** 3) # Light\n elif b_props['th_mass'][i] == \"T2\":\n c_m.append(165.0 * 10 ** 3) # Medium\n elif b_props['th_mass'][i] == \"T3\":\n c_m.append(260.0 * 10 ** 3) # Heavy\n b_props['c_m_A_f'] = pd.DataFrame(c_m)\n\n # declare variables\n occupancy = []\n lighting_control = []\n mean_occupancy = []\n # declare constants\n glass_solar_transmittance = []\n glass_light_transmittance = []\n Lighting_Utilisation_Factor = []\n Lighting_Maintenance_Factor = []\n ACH_vent = []\n ACH_infl = []\n ventilation_efficiency = []\n phi_c_max_A_f = []\n phi_h_max_A_f = []\n heatingSupplySystem = []\n coolingSupplySystem = []\n heatingEmissionSystem = []\n coolingEmissionSystem = []\n heatingEfficiency = []\n coolingEfficiency = []\n ActuationEnergy = []\n COP_H = []\n COP_C = []\n\n print b_props['code1']\n #TODO: Change a lot of thees with df.assign(column_name=constant)\n for code in b_props['code1']:\n # variables\n occupancy.append('schedules_occ_%s.csv' % code)\n lighting_control.append(lighting_control_d.get(code))\n mean_occupancy.append(mean_occupancy_d.get(code))\n glass_solar_transmittance.append(0.6)\n glass_light_transmittance.append(0.6)\n Lighting_Utilisation_Factor.append(0.45)\n Lighting_Maintenance_Factor.append(0.9)\n ACH_vent.append(2.0) # TODO: Shoudlnt this be a variable\n ACH_infl.append(0.5)\n ventilation_efficiency.append(0.6)\n phi_c_max_A_f.append(-np.inf)\n phi_h_max_A_f.append(np.inf)\n heatingSupplySystem.append(COP42Heater) # DirectHeater, #ResistiveHeater #HeatPumpHeater\n coolingSupplySystem.append(COP81Cooler) # DirectCooler, #HeatPumpCooler\n heatingEmissionSystem.append(FloorHeating)\n coolingEmissionSystem.append(FloorHeating)\n heatingEfficiency.append(1.0)\n coolingEfficiency.append(1.0)\n ActuationEnergy.append(False)\n COP_H.append(1.0)\n COP_C.append(1.0)\n\n\n b_props['Qs_Wm2'] = mean_occupancy * b_props['Qs_Wp'] # occupancy: p/m2, qs_wp: W/p\n b_props['Occupancy'] = occupancy\n b_props['ActuationEnergy'] = ActuationEnergy\n\n\n #Build Building Properties dataframe with building inputs with the same variable definition as the ASF simulation engine\n BuildingPropertiesDF = pd.DataFrame({'Code': []})\n BuildingPropertiesDF['Code'] = b_props.loc[:, 'Code_x']\n BuildingPropertiesDF.loc[:, 'lighting_load'] = b_props.loc[:, 'El_Wm2']\n BuildingPropertiesDF.loc[:, 'lighting_control'] = lighting_control\n BuildingPropertiesDF.loc[:, 'U_em'] = b_props.loc[:, 'U_wall']\n 
BuildingPropertiesDF.loc[:, 'U_w', ] = b_props.loc[:, 'U_win']\n BuildingPropertiesDF.loc[:, 'theta_int_h_set'] =b_props.loc[:,'Ths_set_C'].apply(pd.to_numeric)\n BuildingPropertiesDF.loc[:, 'theta_int_c_set'] = b_props.loc[:, 'Tcs_set_C'].apply(pd.to_numeric)\n BuildingPropertiesDF.loc[:, 'c_m_A_f'] = b_props.loc[:, 'c_m_A_f']\n BuildingPropertiesDF.loc[:, 'Qs_Wp'] = b_props.loc[:, 'Qs_Wp']\n BuildingPropertiesDF.loc[:, 'Ea_Wm2'] = b_props.loc[:, 'Ea_Wm2']\n BuildingPropertiesDF.loc[:, 'glass_solar_transmittance'] = glass_solar_transmittance\n BuildingPropertiesDF.loc[:, 'glass_light_transmittance'] = glass_light_transmittance\n BuildingPropertiesDF.loc[:, 'Lighting_Utilisation_Factor'] = Lighting_Utilisation_Factor\n BuildingPropertiesDF.loc[:, 'Lighting_Maintenance_Factor'] = Lighting_Maintenance_Factor\n BuildingPropertiesDF.loc[:, 'ACH_vent'] = ACH_vent\n BuildingPropertiesDF.loc[:, 'ACH_infl'] = ACH_infl\n BuildingPropertiesDF.loc[:, 'ventilation_efficiency'] = ventilation_efficiency\n BuildingPropertiesDF.loc[:, 'phi_c_max_A_f'] = phi_c_max_A_f\n BuildingPropertiesDF.loc[:, 'phi_h_max_A_f'] = phi_h_max_A_f\n BuildingPropertiesDF.loc[:, 'heatingSupplySystem'] = heatingSupplySystem\n BuildingPropertiesDF.loc[:, 'coolingSupplySystem'] = coolingSupplySystem\n BuildingPropertiesDF.loc[:, 'heatingEmissionSystem'] = heatingEmissionSystem\n BuildingPropertiesDF.loc[:, 'coolingEmissionSystem'] = coolingEmissionSystem\n # BuildingPropertiesDF.loc[:, 'heatingEfficiency'] = heatingEfficiency\n # BuildingPropertiesDF.loc[:, 'coolingEfficiency'] = coolingEfficiency\n # BuildingPropertiesDF.loc[:, 'COP_H'] = COP_H\n # BuildingPropertiesDF.loc[:, 'COP_C'] = COP_C\n BuildingPropertiesDF.set_index(['Code'], inplace=True)\n\n #Build Simulation Options dataframe with the same variable definitions as the ASF Simulation tool\n SimulationOptionsDF = b_props[['Code_x', 'setBackTempC', 'setBackTempH', 'Occupancy', 'ActuationEnergy']]\n SimulationOptionsDF= SimulationOptionsDF.assign(human_heat_emission = 0.12)\n SimulationOptionsDF= SimulationOptionsDF.assign(Temp_start = 20.0)\n SimulationOptionsDF.set_index(['Code_x'], inplace=True)\n\n # Temp: only analyse the first two lines for testing purposes. Delete the next two lines:\n SimulationOptionsDF = SimulationOptionsDF[12:18]\n BuildingPropertiesDF=BuildingPropertiesDF[12:18]\n # Temp complete\n\n print BuildingPropertiesDF\n print SimulationOptionsDF\n\n #Convert dataframes to dictionaries\n SimulationOptions = SimulationOptionsDF.to_dict(orient='index')\n BuildingProperties = BuildingPropertiesDF.to_dict(orient='index')\n BuildingPropertiesDF.to_csv('Builtdictionaries2.csv')\n SimulationOptionsDF.to_csv('SOdictionaries.csv')\n\n return BuildingProperties, SimulationOptions",
"def get_gev_fit(data):\n md = mode(data)[0][0]\n std = np.std(data)\n # first try with loc=mode\n shape, loc, scale = gev.fit(data, loc=md)\n # if bad try again with mean\n if loc > md+std:\n shape, loc, scale = gev.fit(data, loc=np.mean(data))\n else:\n print('GEV fit with mode')\n # if still bad (ugh), try again with mode - std\n if loc > md+std:\n shape, loc, scale = gev.fit(data, loc=md-std)\n else:\n print('GEV fit with mean')\n if loc > md+std:\n print('GEV fit with c=0')\n shape, loc, scale = gev.fit(data, 0)\n else:\n print('GEV fit with mode minus std deviation')\n return shape, loc, scale",
"def get_gev_fit(data):\n md = mode(data)[0][0]\n std = np.std(data)\n # first try with loc=mode\n shape, loc, scale = gev.fit(data, loc=md)\n # if bad try again with mean\n if loc > md+std:\n shape, loc, scale = gev.fit(data, loc=np.mean(data))\n else:\n print('GEV fit with mode')\n # if still bad (ugh), try again with mode - std\n if loc > md+std:\n shape, loc, scale = gev.fit(data, loc=md-std)\n else:\n print('GEV fit with mean')\n if loc > md+std:\n print('GEV fit with c=0')\n shape, loc, scale = gev.fit(data, 0)\n else:\n print('GEV fit with mode minus std deviation')\n return shape, loc, scale",
"def get_gev_fit(data):\n md = mode(data)[0][0]\n std = np.std(data)\n # first try with loc=mode\n shape, loc, scale = gev.fit(data, loc=md)\n # if bad try again with mean\n if loc > md+std:\n shape, loc, scale = gev.fit(data, loc=np.mean(data))\n else:\n print('GEV fit with mode')\n # if still bad (ugh), try again with mode - std\n if loc > md+std:\n shape, loc, scale = gev.fit(data, loc=md-std)\n else:\n print('GEV fit with mean')\n if loc > md+std:\n print('GEV fit with c=0')\n shape, loc, scale = gev.fit(data, 0)\n else:\n print('GEV fit with mode minus std deviation')\n return shape, loc, scale"
]
| [
"0.52539074",
"0.5209256",
"0.51522666",
"0.51052946",
"0.50703835",
"0.49784595",
"0.49778527",
"0.49533156",
"0.48517737",
"0.4843977",
"0.48254356",
"0.48217475",
"0.48072037",
"0.48047912",
"0.47924858",
"0.47781545",
"0.4767692",
"0.47657946",
"0.47596288",
"0.47494495",
"0.4747459",
"0.4742245",
"0.47385314",
"0.47220722",
"0.47218266",
"0.47187454",
"0.46898362",
"0.46853858",
"0.46853858",
"0.46853858"
]
| 0.59640026 | 0 |
Test module journals.py by downloading journals.csv and testing that the shape of the extracted data is 180 rows and 10 columns | def test_journals():
test_path = tempfile.mkdtemp()
x_train, metadata = journals(test_path)
try:
assert x_train.shape == (180, 10)
except:
shutil.rmtree(test_path)
        raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_furniture(self):\n\n add_furniture('invoice.csv', 'Elisa Miles', 'LR04', 'Leather Sofa', 25)\n add_furniture('invoice.csv', 'Edward Data', 'KT78', 'Kitchen Table', 10)\n add_furniture('invoice.csv', 'Alex Gonzales', 'BR02', 'Queen Mattress', 17)\n\n # Generate list of rentals\n with open('invoice.csv', 'r') as csvfile:\n rentals = []\n for row in csvfile:\n rentals.append(row)\n\n print(rentals)\n\n # Assert statements\n self.assertEqual(rentals[0], ('Elisa Miles,LR04,Leather Sofa,25\\n'))\n self.assertEqual(rentals[1], ('Edward Data,KT78,Kitchen Table,10\\n'))\n self.assertEqual(rentals[2], ('Alex Gonzales,BR02,Queen Mattress,17\\n'))",
"def test_narrative_data(\n self, setup_folder_structure, config_handler, get_narrative\n ):\n basefolder = setup_folder_structure\n filepath = os.path.join(\n str(basefolder), \"data\", \"narratives\", \"central_planning.csv\"\n )\n with open(filepath, \"w\") as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=[\"homogeneity_coefficient\"])\n writer.writeheader()\n writer.writerow({\"homogeneity_coefficient\": 8})\n\n spec = Spec.from_dict(\n {\n \"name\": \"homogeneity_coefficient\",\n \"description\": \"How homegenous the centralisation process is\",\n \"absolute_range\": [0, 1],\n \"expected_range\": [0, 1],\n \"unit\": \"percentage\",\n \"dtype\": \"float\",\n }\n )\n\n actual = config_handler.read_narrative_variant_data(\"central_planning\", spec)\n assert actual == DataArray(spec, np.array(8, dtype=float))",
"def test_read_in_file(self):\r\n filename = \"CrimeDataSmall.csv\"\r\n\r\n lst = cds.read_in_file(filename)\r\n\r\n self.assertIsInstance(lst, list, \"Returned datatype should be a list\")\r\n self.assertEqual(len(lst), 4, \"There should be 4 rows returned from CrimeDataSmall 1 header and 3 data rows\")\r\n self.assertEqual(len(lst[0]), 23, \"Each row should have 23 columns\")\r\n self.assertEqual(lst[0][1], \"Reported_Date\", \"Column 1 was incorrect header\")\r\n self.assertEqual(lst[0][7], \"Offense\", \"Column 7 was incorrect header\")\r\n self.assertEqual(lst[0][13], \"Zip Code\", \"Column 13 header was incorrect\")\r\n self.assertEqual(lst[1][1], \"03/19/2019\", \"Column 1 was incorrect in first data row\")\r\n self.assertEqual(lst[1][7], \"Vehicular – Non-Injury\", \"Column 7 was incorrect in first data row\")\r\n self.assertEqual(lst[1][13], \"64161\", \"Column 13 in first data row was incorrect\")\r\n self.assertEqual(lst[3][1], \"03/27/2019\", \"Column 1 was incorrect in 3rd data row\")\r\n self.assertEqual(lst[3][7], \"Embezzlement\", \"Column 7 was incorrect 3rd data row\")\r\n self.assertEqual(lst[3][13], \"64112\", \"Column 13 3rd data row was incorrect\")\r\n self.assertEqual(lst[3][11], \"4600, S WORNALL RD\", \"Column 11 3rd data row was incorrect. Use csv module to read \")",
"def extractionTitlePrincipals(cur, conn):\n fh = open(pathTitlePrincipals)\n reader = csv.reader(fh, delimiter = '\\t')\n firstLine = True\n idActor_list = []\n idJugar = 1\n for row in reader:\n if firstLine : firstLine = False # Read header\n else :\n if (row[3]=='actor' or row[3]=='actress'): #only record actors\n idTitulo = int(row[0][2:])\n idActor = int(row[2][2:])\n idActor_list.append(idActor)\n idJugar +=1\n # print(jugarInsert.format(idJugar, idTitulo, idActor))\n # REGISTER DATA IN JUGAR TABLE\n cur.execute(jugarInsert.format(idJugar, idTitulo, idActor))\n conn.commit()\n return idActor_list",
"def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)",
"def main():\n\n # Read the CSV and get its content\n jobOfferList, professionsList = usefulFunctions.readCsv()\n \n # Create an empty output tab with the right number of lines and columns\n finalTab = usefulFunctions.createEmpty(jobOfferList, professionsList)\n \n # Fill the tab\n finalTab = usefulFunctions.fillTabExceptTotals(jobOfferList, professionsList, finalTab)\n \n # Update the totals \n finalTab = usefulFunctions.fillTotals(finalTab)\n \n print(\"\\nTable des métiers par profession et type de contrat : \")\n for line in finalTab:\n print(line)",
"def test_single_customer(self):\n\n create_invoice = single_customer(\"Susan Wong\", \"invoice.csv\")\n create_invoice(\"test_items.csv\")\n\n # Generate list of rentals\n with open('invoice.csv', 'r') as csvfile:\n rentals = []\n for row in csvfile:\n rentals.append(row)\n\n print(rentals)\n\n # Assert statements\n self.assertEqual(rentals[3], ('Susan Wong,AT92,Office Chair,13\\n'))\n self.assertEqual(rentals[4], ('Susan Wong,KE25,Espresso Machine,30\\n'))",
"def test_journals_paged_fields(self, api_client):\n rv = api_client.get(\"/journals-paged\")\n json_data = rv.get_json()\n sample = next(\n (item for item in json_data[\"results\"] if item[\"issn_l\"] == \"1907-1760\"),\n None,\n )\n top_level_keys = [\n \"id\",\n \"issn_l\",\n \"issns\",\n \"title\",\n \"publisher\",\n \"previous_issn_ls\",\n \"other_titles\",\n \"journal_metadata\",\n \"total_dois\",\n \"dois_by_issued_year\",\n \"sample_dois\",\n \"subscription_pricing\",\n \"apc_pricing\",\n \"open_access\",\n \"status\",\n \"status_as_of\",\n ]\n\n i = 0\n for key in sample.keys():\n assert key == top_level_keys[i]\n i += 1",
"def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data",
"def main():\n s = content.DataFiles()\n \n date_list = generate.get_list_dates(2016, 2016, 500)\n prod_list = list(s.get_collist_by_name(os.path.join(content.data_fldr,'food','garden_produce.csv'), 'name')[0])\n \n tbl_cust = generate.TableGenerator(8, ['STRING','PEOPLE', 'PEOPLE', 'PLACE'], ['Customer ID', 'First Name', 'Surname', 'Country'])\n tbl_cust.save_table('customers.csv')\n cust_list = list(s.get_collist_by_name('customers.csv', 'Customer ID')[0])\n \n tbl_sales = generate.TableGenerator(25, [date_list, cust_list, prod_list, 'CURRENCY'], ['Date of sale', 'Customer ID', 'Product', 'Amount'])\n tbl_sales.save_table('sales.csv')",
"def _fetch_jail_data(year=None):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n if year is None:\n year = _get_max_year([22]) + 1\n raw = pd.read_excel(f'P:\\DATA\\JAIL\\{year} ICJIA County SUB Totals.xls')\n \n filtered = raw[~raw['Month'].isna() & ~raw['Facility'].str.contains('Alton')]\n filtered = filtered[['Facility', 'TOTAL Number of Bookings', 'Average Monthly Pop']]\n filtered.columns = ['county', '1500', '1510']\n \n aggregated = filtered.groupby('county').agg({'1500': 'sum', '1510': 'mean'}).reset_index(drop=True)\n aggregated['year'] = year\n \n pivoted = pd.melt(\n aggregated,\n id_vars = ['county', 'year'],\n value_vars= ['1500', '1510'],\n var_name = 'fk_simplecount_indicator'\n ).reset_index(drop=True)\n\n pivoted['county'] = pivoted['county'].str.extract('(.*) County.*')\n pivoted.loc[pivoted['county'].isna(), 'county'] = 'Tri-County'\n pivoted.loc[pivoted['county'] == 'DeWitt', 'county'] = 'De Witt'\n pivoted.loc[pivoted['county'] == 'Tri-County', 'county'] = 'Tri-County Jail'\n\n county = database.fetch_tables(['County'])[0]\n county_id_dict = dict(zip(county['county_name'].str.lower(), county['id'].astype(int)))\n county_to_id = lambda x: county_id_dict[x.lower()]\n\n pivoted['fk_simplecount_county'] = pivoted['county'].apply(county_to_id)\n pivoted['fk_simplecount_indicator'] = pivoted['fk_simplecount_indicator'].astype(int)\n pivoted['year'] = pivoted['year'].astype(int)\n\n return pivoted[_SIMPLECOUNT_COLUMNS]\n except FileNotFoundError:\n raise ValueError(\"WARNING: Jail data is up to date.\")\n except:\n raise",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def test_law_pages_extract(self, mock_extract_rows, mock_get_rows):\n\n mock_get_rows.return_value = [\"row1\", \"row2\", \"row3\"]\n mock_extract_rows.return_value = {'number': 'number1', 'docs': 'doc1'}\n result = self.lp.extract()\n self.assertEqual(result, [{'docs': 'doc1', 'number': 'number1'}])",
"def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)",
"def open_actuarial_data(sex, years):\n filename = \"adj_act_data_2014.txt\" #file generated using get_actdata_2014.py\n\n all_data = []\n\n with open(filename) as f:\n text = csv.reader(f) \n for line in text:\n all_data.append(line)\n\n #I discovered the csv reader after I had already\n #written the get_singlename_year() function\n \n alive_prob = []\n shift = min(years) - 1880\n for index, item in enumerate(years):\n index +=shift\n if sex == \"M\":\n #Male alive prob. data is stored in column 4 of actuary file\n alive_prob.append(float(all_data[index][4])) \n elif sex == \"F\":\n #Female alive prob. data is stored in column 8 of actuary file\n alive_prob.append(float(all_data[index][8]))\n else:\n print \"Neither F or M chosen\"\n return False\n\n\n return alive_prob #returns the probability of being alive in 2017",
"def test_get_opening_balance_journals(self):\n pass",
"def tour_data(self):\n\n start_time = time.time()\n\n # load tour list into Pandas DataFrame\n tours = pd.read_csv(self.scenario_path + \"/output/jointTourData_\" + str(self.iteration) + \".csv\",\n usecols=[\"hh_id\", \"tour_id\", \"tour_participants\"])\n\n end_time = time.time()\n time_taken = end_time - start_time\n print(\"read tour data: \", str(datetime.timedelta(seconds=round(time_taken))))\n\n # return fields of interest\n return tours[[\"hh_id\",\n \"tour_id\",\n \"tour_participants\"]]",
"def test_data():\n return [Donor(\"David Andrews\", [200.50, 400.00, 250.75]),\n Donor(\"John Goodfellow\", [25.00, 175.50]),\n Donor(\"Mary Suzuki\", [75.00, 125.00, 250.00]),\n Donor(\"Bonney Lake\", [500.50, 700.75, 500.25]),\n Donor(\"DeMarcus Rollins\", [155.00, 165.00])\n ]",
"def main():\n # LoL = readcsv( \"wds.csv\" )\n # print(LoL[:10])\n\n # test writing\n # write_to_csv( LoL[:10], \"tenrows.csv\" )\n\n # text csv_to_html_table_starter\n # output_html = csv_to_html_table_starter( LoL[:10] )\n # print(\"\\noutput_html is\\n\\n\" + output_html)\n # create_html_page(output_html, \"test.html\")\n Wcount_first()\n Wcount_last()\n Wcount_middle()",
"def big9salary(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'big9salary.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/wooldridge/big9salary.csv'\n maybe_download_and_extract(path, url,\n save_file_name='big9salary.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata",
"def test_number_of_rows_with_header(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv\"\n\n reader=requester.url_to_df(url)\n rows,columns=reader.shape\n self.assertEqual(rows,517)",
"def test_data():\n return {\"David Andrews\" : [200.50, 400.00, 250.75],\n \"John Goodfellow\" : [25.00, 175.50],\n \"Mary Suzuki\" : [75.00, 125.00, 250.00],\n \"Bonney Lake\" : [500.50, 700.75, 500.25],\n \"DeMarcus Rollins\" : [155.00, 165.00]\n }",
"def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))",
"def test_companies_company_id_data_journal_entries_get(self):\n pass",
"def test_export(self):\n response = self.client.get('%s?export' % reverse('users_report'))\n self.assertEqual(\n response['Content-Disposition'],\n 'attachment; filename=users.csv'\n )\n self.assertEqual(\n response['Content-Type'],\n 'text/csv'\n )\n data = import_set(response.content)\n # There should be at least the header row and one user row\n self.assertGreater(data.height, 2)\n self.assertEqual(data.width, 14)",
"def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test",
"def load_data(nlp, cue_verbs, poly):\n train_dicts, _ = load_quote_authors(nlp)\n author_prediction_dataset = AuthorPredictionDataset(train_dicts, cue_verbs, poly)\n return np.array(train_dicts), author_prediction_dataset",
"def test_parser():\n data = parse_csv(TEST_DATA)\n assert data['2020-01-03'] == ['recycle']\n assert data['2020-01-08'] == ['bio', 'trash']\n assert data['2021-01-09'] == ['christmas']",
"def read_data(self):\n fpath = './data/surveys.csv'\n self.data = pd.read_csv(fpath, header=0, low_memory=False)\n #print(self.data.head(n=5))\n print(self.data.shape)",
"def get_data(data_basename: str = f'{data_folder}/data.csv') -> pd.DataFrame:\n data_path = file_path_relative(data_basename)\n if exists(data_path):\n logger.info(f'reading data from {data_path}')\n moon_data = pd.read_csv(data_path)\n return moon_data\n\n res = requests.get(data_url)\n soup = BeautifulSoup(res.content, features='html.parser')\n\n # get second table from wikipedia\n moon_table = soup.findAll('table', {'class': 'wikitable'})[1]\n # convert to dataframe\n moon_df = pd.read_html(str(moon_table))\n moon_df = pd.DataFrame(moon_df[0])\n\n # sanitize column names\n moon_df.columns = [_sanitize_column_name(\n col) for col in moon_df.columns.values.tolist()]\n\n # sanitize orbital period\n moon_df[orbital_period_column] = moon_df[orbital_period_column].str.replace(\n brackets_remove_regex, '').str.replace('−', '-').str.strip()\n moon_df[orbital_period_column] = pd.to_numeric(\n moon_df[orbital_period_column])\n # days to seconds\n moon_df[orbital_period_column] *= (24 * 60 * 60)\n\n # sanitize semi-major axis\n moon_df[semimajor_axis_column] = moon_df[semimajor_axis_column].str.replace(\n brackets_remove_regex, '').str.strip()\n moon_df[semimajor_axis_column] = pd.to_numeric(\n moon_df[semimajor_axis_column])\n # km to m\n moon_df[semimajor_axis_column] *= 1000\n\n # sanitize mass and sort by it\n mass_column_key: str = 'mass'\n moon_df[mass_column_key] = moon_df[mass_column_key].str.replace(\n '≈', '').str.strip()\n moon_df[mass_column_key] = pd.to_numeric(moon_df[mass_column_key])\n # to kg\n moon_df[mass_column_key] *= 1e16\n moon_df = moon_df.sort_values(by=[mass_column_key], ascending=False)\n\n moon_df.to_csv(data_path, index=False)\n return moon_df"
]
| [
"0.57835835",
"0.55504376",
"0.5542925",
"0.5541012",
"0.55314946",
"0.55193686",
"0.5485054",
"0.5478811",
"0.5470811",
"0.5430811",
"0.5420464",
"0.5403592",
"0.5352353",
"0.5343877",
"0.53379554",
"0.53257203",
"0.5323983",
"0.5321546",
"0.5315496",
"0.5297438",
"0.52928215",
"0.5283095",
"0.5271228",
"0.5269231",
"0.5259525",
"0.5253898",
"0.5237273",
"0.52327853",
"0.5231615",
"0.5221997"
]
| 0.6703763 | 0 |
Opens the Buganizer URL in the Chrome browser and scrapes the webpage's source HTML to get the links for all the issues under the componentid. | def scrape_issues(self, url):
try:
self.driver.get(url)
except common.exceptions.InvalidSessionIdException:
self.driver.close()
error_message = "ERROR: Failed to reach URL, check "\
"specified URL in constants.py\n"
self.logger.log(error_message)
return []
except Exception:
self.driver.close()
error_message = "ERROR: Failed to reach URL, check "\
"specified URL in constants.py\n"
self.logger.log(error_message)
return []
source_html = self.driver.page_source
soup = BeautifulSoup(source_html, "html.parser")
page_title = soup.title.string
buganizer_issues = []
if "Buganizer" not in page_title or "componentid" not in page_title:
if "MOMA Single Sign On" in page_title:
error_message = "ERROR: You must log into your MOMA account "\
"first. Select the 'Use Security Code' option and generate a security code at go/sc.\n"
self.logger.log(error_message)
while "Buganizer" not in page_title:
source_html = self.driver.page_source
soup = BeautifulSoup(source_html, "html.parser")
page_title = soup.title.string
time.sleep(1)
return buganizer_issues
error_message = "ERROR: URL does not link to a Buganizer "\
"componentid, check specified URL "\
"in constants.py\n"
self.logger.log(error_message)
return buganizer_issues
for tbody in soup.find_all('tbody'):
for _tr in tbody.find_all('tr'):
issue_link = "https://b.corp.google.com/issues/" + _tr.get(
'data-row-id')
buganizer_issues.append(issue_link)
return buganizer_issues | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openNewIssueUrl(self):\r\n url = QUrl(\"https://github.com/Freeseer/freeseer/issues/new\")\r\n QDesktopServices.openUrl(url)",
"def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)",
"def open_web_browser(url: str):\n Popen(web_browser + [url], stdout=DEVNULL, stderr=DEVNULL)",
"def open(webpage_url):\n\twith youtube_dl.YoutubeDL(dict(forceurl=True)) as ydl:\n\t\tr = ydl.extract_info(webpage_url, download=False)\n\t\tmedia_url = r['formats'][-1]['url']\n\twebbrowser.open('googlechromes://' + media_url[8:] )",
"def open(url):\r\n webbrowser.open(url)",
"def cli(repo, milestone):\n webbrowser.open_new(repo.milestone(milestone).data[\"html_url\"])",
"def open_web_browser(whac_config: WhacConfig) -> None:\n if whac_config.open_web_browser:\n browser = webbrowser.get('chrome')\n browser.open('http://localhost:' + str(whac_config.host_port), new=2, autoraise=True)",
"def open_chrome(url,chrome_path):\r\n webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path))\r\n webbrowser.get('chrome').open(url)",
"def __init__(self):\n self.URL = 'http://blog.csssr.ru/qa-engineer/'\n self.driver = webdriver.Chrome(executable_path='chromedriver')\n self.driver.maximize_window()\n self.driver.get(self.URL)",
"def do_jira(self, arg):\n jql = self.settings['jira_jql']\n if arg.startswith('b'):\n out.info('Opening browser.')\n webbrowser.open(self.jira_url() + '/issues/?jql=' + jql)\n else:\n open_issues = self.get_open_issues()\n cases = [\n (issue.key, issue.fields.issuetype, issue.fields.summary, self.jira_url() + \"/browse/\" + issue.key)\n for\n issue in open_issues]\n out.table(\"Active Cases\", rows=cases)",
"def open_url(self, url: str):\n self.driver.get(url)",
"def dod():\n file = requests.get(\"https://www.bewakoof.com/design-of-the-day\")\n soup = bs4.BeautifulSoup(file.text, \"lxml\")\n # print(soup)\n\n linkList = soup.select(\"a[class='col-sm-4 col-xs-6'] > div > div > div > img:nth-of-type(2)]\")\n # soup.select(\"div[id=foo] > div > div > div[class=fee] > span > span > a\")\n for i in linkList:\n if \"t-shirt-men\" in str(i):\n # print(i.get('src'))\n webbrowser.open(i.get('src'))",
"def open_url(self, url):\n\n self.driver.get(url)",
"def build_browse(ctx, args):\n for build_id in args:\n data = ctx.obj.get_build_by_build_id(build_id)\n webbrowser.open(data['webUrl'])",
"def openurl(url):\n\n # Open the URL\n webbrowser.open(url)",
"def issueListing(self, v, i):\n #list of URLS within the issue\n# links = []\n issURL = self.link(vol = v, iss = i )\n html=urlopen(issURL)\n soup=BeautifulSoup(html,'html.parser')\n URLs = [] #Empty list\n \n# titles = soup.find_all('h5', class_=\"title\")\n# authors = soup.find_all('h6', class_=\"authors\")\n# pubs = soup.find_all('h6', class_=\"pub-info\")\n# for t, a, p in zip(titles, authors, pubs):\n blocks = soup.find_all('div', class_=\"article panel article-result\")\n for b in blocks:\n# print(b)\n titletag = b.find('h5', class_=\"title\")\n title = titletag.get_text()\n #Extract abstract url from title head\n aURL = titletag.find('a', href = True)['href']\n alink = 'https://journals.aps.org' + aURL\n #Print out the scraped information\n print(title)\n print(alink)\n #Extract research area and topic keywords\n kwlist = b.find('ul', class_=\"inline-list subjects\")\n #If the list tag exists\n if kwlist:\n lis = kwlist.find_all('li')\n kws = [li.get_text() for li in lis] \n print(kws)\n #Add utf-8 encode\n# print(kws.encode('utf-8')) \n print('----------------------------------------------------------------') \n #Collect URLs in the issue\n URLs.append('https://journals.aps.org' + aURL)\n return URLs",
"def __init__(self, url_address, type=\"Chrome\"):\n from webdriverwrapper import Chrome\n from selenium import webdriver\n options = webdriver.ChromeOptions()\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n options.add_experimental_option('useAutomationExtension', False)\n self.driver = Chrome(options=options)\n # Open a website\n window_before = self.driver.window_handles[0]\n self.driver.get(url_address)",
"def test_chrome(chrome_browser):\n chrome_browser.get(\"https://habr.com/ru/\")\n print(chrome_browser.title)\n pass",
"def open_browser(url):\n import webbrowser\n webbrowser.open_new(url)",
"def click_link(candidate_urls):\n for url in candidate_urls:\n webbrowser.open(url)",
"def find_url(ticketID):\n uri = 'https://jira.duraspace.org/browse/' + ticketID\n print uri\n return uri",
"def open_link(self):\n try:\n # webbrowser.open(self.url) # if you are on Windows OS\n webbrowser.get('safari').open_new_tab(self.url) # if you are on Mac OS\n except(AttributeError):\n self.ids.label.text = self.error_msg",
"def _open_browser(self, single_doc_html):\n url = os.path.join(\"file://\", DOC_PATH, \"build\", \"html\", single_doc_html)\n webbrowser.open(url, new=2)",
"def open_in_browser(self):\n webbrowser.open(self.url)",
"def get_page(self):\n self.browser.get(self.url)",
"def browser_open(story_id, arguments):\r\n\r\n story = load_story(story_id, arguments)\r\n\r\n webbrowser.open(story.url)",
"def fetch_content_page(driver, url):\n driver.get(url)\n post_urls = [e.get_attribute('href') for e in driver.find_elements_by_xpath(\"//div[@class='ride_list']/a\")]\n return post_urls",
"def _open_browser(self, single_doc_html):\n url = os.path.join(\n \"file://\", DOC_PATH, \"build\", \"html\", single_doc_html\n )\n webbrowser.open(url, new=2)",
"def on_actionReportBug_triggered(self):\n reportBugWindow = WebViewWindow(self, \"Report Bug\", \"http://n900-tune-up.sourceforge.net\")",
"def open_url(name):\n url = localReadConfig.get_webServer(name)\n browser = open_browser()\n browser.get(url)\n return browser"
]
| [
"0.5919638",
"0.58162516",
"0.576486",
"0.5741829",
"0.57077444",
"0.5615245",
"0.55784756",
"0.55586386",
"0.5550047",
"0.55428475",
"0.54789764",
"0.5455549",
"0.54487735",
"0.5448626",
"0.54362655",
"0.5435568",
"0.5407659",
"0.5395814",
"0.5374557",
"0.5369067",
"0.53653467",
"0.5358731",
"0.53094417",
"0.5299841",
"0.52948123",
"0.52870977",
"0.52833986",
"0.5277907",
"0.5247604",
"0.5240305"
]
| 0.69480836 | 0 |
From the list of Buganizer issues, visit each issue with the webdriver, find the reporter from an HTML tag, and send the issue HTML to message_parsing_utility to be parsed. | def visit_all_issues_in_list(self, issues):
for issue in issues:
self.driver.implicitly_wait(3)
self.driver.get(issue)
config_type_text = self.driver.find_element_by_xpath("/html/body/b-service-bootstrap/"\
"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/"\
"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a").text
source_html = self.driver.page_source
soup = BeautifulSoup(source_html, "html.parser")
advanced_fields = {}
advanced_fields["Issue Id"] = issue.replace("https://b.corp.google.com/issues/", "")
reporter_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-reporter")
reporter = reporter_tag["aria-label"].replace(
" value is ", "\n").split("\n")
advanced_fields[reporter[0]] = reporter[1]
assignee_tag = soup.find("div", "bv2-issue-metadata-field-inner bv2-issue-metadata-"\
"field-assignee")
assignee = assignee_tag["aria-label"].replace(
" value is ", "\n").split("\n")
if assignee[1] != "empty":
advanced_fields[assignee[0]] = assignee[1]
if "EnqueueRule" in config_type_text:
config_type = "EnqueueRules"
elif "RoutingTargets" in config_type_text:
config_type = "RoutingTargets"
elif "QueueInfo" in config_type_text:
config_type = "QueueInfo"
advanced_fields["Config Type"] = config_type
if config_type == "QueueInfo":
if assignee[1] != constants.AUTOMATION_USER:
continue
self.scrape_queue_info(advanced_fields)
elif config_type == "RoutingTargets":
if assignee[1] != constants.AUTOMATION_USER:
continue
self.scrape_routing_targets(advanced_fields)
elif config_type == "EnqueueRules":
self._message_parsing_util.parse_page(soup, reporter[1], issue) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues",
"def markdown_report(issues, commits):\n print()\n print('Handled issues:')\n print()\n\n for issue in issues:\n markdown_item(\n '#{0} {1}'.format(\n issue.number,\n issue.title,\n ),\n issue.html_url,\n )\n\n print()\n print('Commits:')\n print()\n\n for commit in commits:\n markdown_item(\n '{0} - {1}'.format(\n commit.sha[:7],\n commit.commit.message.split('\\n')[0]\n ),\n commit.html_url,\n )",
"def parseIssuesEmail(self):\r\n # See if we actually got the file\r\n if not os.path.isfile('issues-email.txt'):\r\n return False\r\n\r\n # Determine the set of libraries that have unresolved failures\r\n date_regex = re.compile('Report time: (.*)')\r\n url_regex = re.compile(' (http://.*)')\r\n library_regex = re.compile('\\|(.*)\\|')\r\n failure_regex = re.compile(' ([^:]*): (.*)')\r\n current_library = None\r\n for line in file('issues-email.txt', 'r'):\r\n # Check for the report time line\r\n m = date_regex.match(line)\r\n if m:\r\n self.date = m.group(1)\r\n continue\r\n\r\n # Check for the detailed report URL\r\n m = url_regex.match(line)\r\n if m:\r\n self.url = m.group(1)\r\n continue\r\n \r\n # Check for a library header\r\n m = library_regex.match(line)\r\n if m:\r\n current_library = Library(m.group(1))\r\n self.libraries[m.group(1)] = current_library\r\n continue\r\n \r\n # Check for a library test and its failures\r\n m = failure_regex.match(line)\r\n if m:\r\n test = Test(current_library, m.group(1))\r\n for platform_name in re.split('\\s*', m.group(2)):\r\n if platform_name != '':\r\n platform = self.getPlatform(platform_name)\r\n failure = Failure(test, platform)\r\n test.addFailure(failure)\r\n platform.addFailure(failure)\r\n pass\r\n current_library.addTest(test)\r\n continue\r\n pass\r\n\r\n return True",
"def issueListing(self, v, i):\n #list of URLS within the issue\n# links = []\n issURL = self.link(vol = v, iss = i )\n html=urlopen(issURL)\n soup=BeautifulSoup(html,'html.parser')\n URLs = [] #Empty list\n \n# titles = soup.find_all('h5', class_=\"title\")\n# authors = soup.find_all('h6', class_=\"authors\")\n# pubs = soup.find_all('h6', class_=\"pub-info\")\n# for t, a, p in zip(titles, authors, pubs):\n blocks = soup.find_all('div', class_=\"article panel article-result\")\n for b in blocks:\n# print(b)\n titletag = b.find('h5', class_=\"title\")\n title = titletag.get_text()\n #Extract abstract url from title head\n aURL = titletag.find('a', href = True)['href']\n alink = 'https://journals.aps.org' + aURL\n #Print out the scraped information\n print(title)\n print(alink)\n #Extract research area and topic keywords\n kwlist = b.find('ul', class_=\"inline-list subjects\")\n #If the list tag exists\n if kwlist:\n lis = kwlist.find_all('li')\n kws = [li.get_text() for li in lis] \n print(kws)\n #Add utf-8 encode\n# print(kws.encode('utf-8')) \n print('----------------------------------------------------------------') \n #Collect URLs in the issue\n URLs.append('https://journals.aps.org' + aURL)\n return URLs",
"def scrape_queue_info(self, advanced_fields):\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n severity_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-severity\")\n severity = severity_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n found_in_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-foundInVersion\")\n found_in = found_in_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n in_prod_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-inProd\")\n in_prod = in_prod_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n verifier_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-verifier\")\n verifier = verifier_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n targeted_to_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-targetedToVersion\")\n targeted_to = targeted_to_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n queue_id_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField688197\")\n queue_id = queue_id_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n mdb_group_name_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686879\")\n mdb_group_name = mdb_group_name_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n ops_owner_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686850\")\n ops_owner = ops_owner_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n gvo_owner_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686358\")\n gvo_owner = gvo_owner_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n tech_owner_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686980\")\n tech_owner = tech_owner_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n is_dashboard_queue_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686718\")\n is_dashboard_queue = is_dashboard_queue_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n reviews_per_item_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField687560\")\n reviews_per_item = reviews_per_item_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n fragment_name_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686833\")\n fragment_name = fragment_name_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n item_expiry_sec_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686748\")\n item_expiry_sec = item_expiry_sec_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n is_experimental_review_enabled_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField688166\")\n is_experimental_review_enabled = is_experimental_review_enabled_tag[\"aria-label\"].replace(\n \" value is \", 
\"\\n\").split(\"\\n\")\n experimental_probability_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686699\")\n experimental_probability = experimental_probability_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n\n advanced_fields[severity[0]] = severity[1]\n if verifier[1] != \"empty\":\n advanced_fields[verifier[0]] = verifier[1]\n if found_in[1] != \"empty\":\n advanced_fields[found_in[0]] = found_in[1]\n if in_prod[1] != \"empty\":\n if in_prod[1] == \"Yes\":\n advanced_fields[in_prod[0]] = True\n else:\n advanced_fields[in_prod[0]] = False\n if targeted_to[1] != \"empty\":\n advanced_fields[targeted_to[0]] = targeted_to[1]\n if queue_id[1] != \"empty\":\n advanced_fields[queue_id[0]] = int(queue_id[1])\n if mdb_group_name[1] != \"empty\":\n advanced_fields[mdb_group_name[0]] = mdb_group_name[1]\n if ops_owner[1] != \"empty\":\n advanced_fields[ops_owner[0]] = ops_owner[1]\n if gvo_owner[1] != \"empty\":\n advanced_fields[gvo_owner[0]] = gvo_owner[1]\n if tech_owner[1] != \"empty\":\n advanced_fields[tech_owner[0]] = tech_owner[1]\n if is_dashboard_queue[1] != \"empty\":\n if is_dashboard_queue[1] == \"true\":\n advanced_fields[is_dashboard_queue[0]] = True\n else:\n advanced_fields[is_dashboard_queue[0]] = False\n if reviews_per_item[1] != \"empty\":\n advanced_fields[reviews_per_item[0]] = int(reviews_per_item[1])\n if fragment_name[1] != \"empty\":\n advanced_fields[fragment_name[0]] = fragment_name[1]\n if item_expiry_sec[1] != \"empty\":\n advanced_fields[item_expiry_sec[0]] = int(item_expiry_sec[1])\n if is_experimental_review_enabled[1] != \"empty\":\n if is_dashboard_queue[1] == \"true\":\n advanced_fields[is_experimental_review_enabled[0]] = True\n else:\n advanced_fields[is_experimental_review_enabled[0]] = False\n if experimental_probability[1] != \"empty\":\n advanced_fields[experimental_probability[0]] = int(experimental_probability[1])\n\n self._message_parsing_util.publish_buganizer_fields(advanced_fields)",
"def getIssuesEmail(self):\r\n base_url = \"http://beta.boost.org/development/tests/\"\r\n base_url += self.branch\r\n base_url += \"/developer/\";\r\n got_issues = False\r\n\r\n # Ping the server by looking for an HTML file\r\n print \"Pinging the server to initiate extraction...\"\r\n ping_url = base_url + \"issues.html\"\r\n os.system('curl -O ' + ping_url)\r\n os.system('rm -f issues.html')\r\n \r\n for x in range(30):\r\n # Update issues-email.txt\r\n url = base_url + \"issues-email.txt\"\r\n print 'Retrieving issues email from ' + url\r\n os.system('rm -f issues-email.txt')\r\n os.system('curl -O ' + url)\r\n\r\n if self.parseIssuesEmail():\r\n return True\r\n\r\n print 'Failed to fetch issues email. '\r\n time.sleep (30)\r\n\r\n return False",
"def report(issues, show_urls=False):\r\n # titles may have unicode in them, so we must encode everything below\r\n if show_urls:\r\n for i in issues:\r\n role = 'ghpull' if 'merged' in i else 'ghissue'\r\n print('* :%s:`%d`: %s' % (role, i['number'],\r\n i['title'].encode('utf-8')))\r\n else:\r\n for i in issues:\r\n print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))",
"def issues(self):\n if self.pull_request.body is not None:\n regex = r\"(?<=closes: #|elated: #)\\d{5}\"\n issue_strs = re.findall(regex, self.pull_request.body)\n self.issue_nums = [eval(s) for s in issue_strs]",
"def parse_status_file(jira, filename, issues):\n # Regexp to match Jira issue on a single line, i.e:\n # [SWG-28]\n # [LITE-32]\n # ...\n regex = r\"^\\[([A-Z]+-[0-9]+).*\\]\\n$\"\n\n # Regexp to match a tag that indicates we should stop processing, ex:\n # [STOP]\n # [JIPDATE-STOP]\n # [OTHER]\n # ...\n regex_stop = r\"^\\[.*\\]\\n$\"\n\n # Regexp to mach a tag that indicates to stop processing completely:\n # [FIN]\n regex_fin = r\"^\\[FIN\\]\\n$\"\n\n # Regexp to match for a status update, this will remove 'Status' from the\n # match:\n regex_status = r\"(?:^Status:) *(.+)\\n$\"\n\n # Contains the status text, it could be a file or a status email\n status = \"\"\n\n # Regexp to match for a time spent update, this will remove 'Time spent:'\n # from the match:\n regex_timespent = r\"(^Time spent:) \\d+\\w\\n$\"\n\n # List of resolutions (when doing a transition to Resolved). Query once globally.\n resolution_map = dict([(t.name.title(), t.id) for t in jira.resolutions()])\n\n with open(filename) as f:\n status = f.readlines()\n\n myissue = \"\"\n mycomment = \"\"\n\n # build list of {issue,comment} tuples found in status\n issue_comments = []\n for line in status:\n # New issue?\n match = re.search(regex, line)\n\n # Evaluate and save the transition regex for later. We have to do this\n # here, since we cannot assign and save the variable in the if\n # construction as you can do in C for example.\n transition = re.search(regex_status, line)\n\n if match:\n myissue = match.group(1)\n validissue = True\n\n # if we ran a query, we might already have fetched the issue\n # let's try to find the issue there first, otherwise ask Jira\n try:\n issue = [x for x in issues if str(x) == myissue][0]\n issue_comments.append((issue, \"\", \"\", None))\n\n # IndexError: we had fetched already, but issue is not found\n # TypeError: issues is None, we haven't queried Jira yet, at all\n except (IndexError, TypeError) as e:\n try:\n issue = jira.issue(myissue)\n issue_comments.append((issue, \"\", \"\", None))\n except Exception as e:\n if \"Issue Does Not Exist\" in e.text:\n print(\"[{}] : {}\".format(myissue, e.text))\n validissue = False\n\n # Stop parsing entirely. This needs to be placed before regex_stop\n # or the .* will match and [FIN] won't be processed\n elif re.search(regex_fin, line):\n break\n # If we have non-JIRA issue tags, stop parsing until we find a valid tag\n elif re.search(regex_stop, line):\n validissue = False\n elif transition and validissue:\n # If we have a match, then the new status should be first in the\n # group. Jira always expect the name of the state transitions to be\n # word capitalized, hence the call to the title() function. This\n # means that it doesn't matter if the user enter all lower case,\n # mixed or all upper case. 
All of them will work.\n new_status = transition.groups()[0].title()\n (i, c, _, ts) = issue_comments[-1]\n issue_comments[-1] = (i, c, new_status, ts)\n elif re.search(regex_timespent, line, re.IGNORECASE):\n timespent = line.split(\":\")[1].strip()\n (i, c, t, _) = issue_comments[-1]\n issue_comments[-1] = (i, c, t, timespent)\n else:\n # Don't add lines with comments\n if line[0] != \"#\" and issue_comments and validissue:\n (i, c, t, ts) = issue_comments[-1]\n issue_comments[-1] = (i, c + line, t, ts)\n\n issue_upload = []\n print(\"These JIRA cards will be updated as follows:\\n\")\n for idx, t in enumerate(issue_comments):\n (issue, comment, transition, timespent) = issue_comments[idx]\n\n # Strip beginning and trailing blank lines\n comment = comment.strip(\"\\n\")\n\n # initialize here to avoid unassigned variables and useless code complexity\n resolution_id = transition_id = None\n resolution = transition_summary = \"\"\n\n if transition != \"\" and transition != str(issue.fields.status):\n # An optional 'resolution' attribute can be set when doing a transition\n # to Resolved, using the following pattern: Resolved / <resolution>\n if (\n transition.startswith(\"Resolved\") or transition.startswith(\"Closed\")\n ) and \"/\" in transition:\n (transition, resolution) = map(str.strip, transition.split(\"/\"))\n if not resolution in resolution_map:\n print(\n 'Invalid resolution \"{}\" for issue {}'.format(resolution, issue)\n )\n print(\"Possible resolution: {}\".format([t for t in resolution_map]))\n sys.exit(1)\n resolution_id = resolution_map[resolution]\n\n transition_map = dict(\n [(t[\"name\"].title(), t[\"id\"]) for t in jira.transitions(issue)]\n )\n if not transition in transition_map:\n print('Invalid transition \"{}\" for issue {}'.format(transition, issue))\n print(\"Possible transitions: {}\".format([t for t in transition_map]))\n sys.exit(1)\n\n transition_id = transition_map[transition]\n if resolution:\n transition_summary = \" %s => %s (%s)\" % (\n issue.fields.status,\n transition,\n resolution,\n )\n else:\n transition_summary = \" %s => %s\" % (issue.fields.status, transition)\n\n if comment == \"\" and not transition_id:\n log.debug(\n \"Issue [%s] has no comment or transitions, not updating the issue\"\n % (issue)\n )\n continue\n\n issue_upload.append(\n (\n issue,\n comment,\n {\"transition\": transition_id, \"resolution\": resolution_id},\n timespent,\n )\n )\n print(\n \"[%s]%s\\n %s\"\n % (issue, transition_summary, \"\\n \".join(comment.splitlines()))\n )\n if timespent:\n print(\" Time spent: %s\" % timespent)\n print(\"\")\n\n issue_comments = issue_upload\n if issue_comments == [] or cfg.args.dry_run or should_update() == \"n\":\n if issue_comments == []:\n print(\"No change, Jira was not updated!\\n\")\n else:\n print(\"Comments will not be written to Jira!\\n\")\n if not cfg.args.s:\n print_status(status)\n sys.exit()\n\n # if we found something, let's update jira\n for issue, comment, transition, timespent in issue_comments:\n update_jira(jira, issue, comment, transition, timespent)\n\n print(\"Successfully updated your Jira tickets!\\n\")\n if not cfg.args.s:\n print_status(status)",
"def test_correct_issue_parsing(self):\n issue = SAMPLE_ISSUE.copy()\n parsed = parse_issue(issue['body'])\n for item in parsed:\n self.assertTrue(item)",
"def _render_reported(self) -> dict:\n logging.debug(f\"Fetching reported bugs for {self.user.display_name}\")\n reported = defaultdict(list)\n tasks = self.user.searchTasks(\n bug_reporter=self.user, status=self.status, created_since=self.since\n )\n tasks = [LPWrap(t) for t in tasks]\n for t in tasks:\n if in_window(self.window, t.bug.date_created):\n reported[t.bug_target_name].append(\n {t.bug.id: t.title,}\n )\n return reported",
"def bugs_to_html(string):\n string = string.strip()\n string = escape(string)\n index = 0\n string = BUG_REGEX.sub('<a href=\"%s\\\\3\">\\\\1</a>' % BUG_URL, string)\n match = DATE_REGEX.search(string, index)\n if match:\n start = match.start()\n next = DATE_REGEX.search(string, match.end() + 1)\n if next:\n end = next.start() - 1\n else:\n end = len(string)\n substring = string[start:end]\n html = '<span class=\"change\"><span class=\"date\">%s</span>%s</span>' % \\\n (substring[:11], substring[11:])\n (string, index) = mstring.replace_sub(string[:end+1], html, start, end)\n string = GENTOO_DEV.sub('(<a href=\"%s\\\\2\">\\\\1</a>)' % CIA_URL, string)\n return '<span class=\"change\">%s\\n\\n</span>' % string",
"def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n # Extract bug type\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n # Get whether or not the bug was reproduced\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n # Skip the 'Attempted to reproduce' line if exists\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n # Populate the sequence of requests that made the bug\n while line and not line.startswith(BUG_START):\n seq += self._get_request(line)\n line = file.readline()\n # Add the bug sequence to the bug list\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(\"Failed to read bug log. Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException",
"def test_get_updated_issues_one_page(self):\n with open(\"issues_one_page.json\", \"r\") as issues_file:\n mock_response = issues_file.read()\n\n with requests_mock.Mocker() as m:\n m.register_uri('GET', '/rest/api/2/search', text=mock_response)\n issues = jiratimereport.get_updated_issues(\"https://jira_url\", \"user_name\", \"api_token\", \"MYB\",\n \"2020-01-10\", \"2020-01-20\", \"\")\n\n issues_expected_result = [\n Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 20)),\n Issue(10004, \"MYB-4\", \"Summary of issue MYB-4\", \"MYB-3\", \"Summary of the parent issue of MYB-4\", 7200, 600, None)]\n\n self.assertListEqual(issues_expected_result, issues, \"Issues lists are unequal\")",
"def reports(self):\r\n actual_text, ids, eng_list = self.util.get_text_from_xml(self.string_xml, \"Reports\", \"trans-unit\",\r\n Config.selected_language.strip())\r\n text_index = 0\r\n actual_text2 = []\r\n for str1 in actual_text:\r\n if \"<br>\" in str1:\r\n str_li = str1.split(\"<br>\")\r\n for i in str_li:\r\n actual_text2.append(i)\r\n else:\r\n actual_text2.append(str1)\r\n xpath = self.util.read_xpath_list_from_xml(self.object_repo, \"Reports\", self.my_object)\r\n self.object.click(self.util.client, xpath[0]['zone'],\r\n xpath[0]['xpath'],\r\n xpath[0]['index'],\r\n xpath[0]['comment'],\r\n 1, self.logger_name)\r\n self.place_holder(xpath, 1, actual_text, text_index, ids, eng_list)\r\n text_index += 1\r\n\r\n xpath_questions = self.util.read_xpath_list_from_xml(self.object_repo, \"ReportsQues\",\r\n self.my_object)\r\n for loop_index in range(len(xpath_questions)):\r\n pixel = self.dev.p2cy(self.util.client, 15)\r\n self.click(xpath_questions, loop_index)\r\n self.dev.swipe(self.util.client, \"Down\", pixel, 300)\r\n # self.object.touch_down(self.util.client, xpath[1]['zone'], xpath[1]['xpath'], xpath[1][\r\n # 'index'])\r\n # #self.object.touch_move(self.util.client, xpath[2]['zone'], xpath[2]['xpath'],\r\n # xpath[2]['index'])\r\n # self.object.touch_up(self.util.client)\r\n string_inzone = self.object.get_text(self.util.client,\r\n \"WEB\") # this method gets all string in the zone\r\n string_list = string_inzone.splitlines()\r\n string_list = self.remove_empty_lines(\r\n string_list) # this method removes string with empty lines line from list\r\n\r\n for loop_index in range(max(len(actual_text), len(string_list))):\r\n try:\r\n if actual_text2[text_index] and string_list[loop_index]:\r\n self.logger.info(\"Testing StringID == \" + str(ids[text_index]))\r\n self.logger.info(\"English Text == \" + eng_list[text_index])\r\n self.util.text_compare2(self.common, actual_text2[text_index], string_list[loop_index],\r\n ids[text_index],\r\n self.logger_name)\r\n text_index += 1\r\n except:\r\n print \"value error\"\r\n self.click(xpath, 2)",
"def test_issue_list_issues(self):\n pass",
"def main():\n verbose = False\n online = True\n\n if online:\n TOKEN = \"\"\n g = Github(base_url=\"https://github.ibm.com/api/v3\", login_or_token=TOKEN)\n repo = g.get_repo(\"Raphael-Lambert/test_note\")\n\n path = \"C:/Users/RaphaelLambert/Documents/git_issues\"\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n if verbose:\n print(onlyfiles)\n treated = []\n issues = []\n\n with open(join(path, 'log.txt'), 'r') as doc:\n for line in doc:\n treated.append(line.rstrip('\\n'))\n\n with open(join(path, 'issues.txt'), 'r') as doc:\n for line in doc:\n issues.append(int(line.rstrip('\\n')))\n\n for title in onlyfiles:\n if title != 'log.txt' and title != 'issues.txt' and title not in treated:\n with open(join(path, title), 'rb') as fhdl:\n raw_email = fhdl.read()\n\n parsed_eml = eml_parser.eml_parser.decode_email_b(raw_email, include_raw_body=True)\n if verbose:\n print('-----------------')\n print(title)\n print('-----------------')\n print(parsed_eml)\n print('-----------------')\n body = parsed_eml['body']\n if len(body) > 0:\n raw_text = body[0]['content']\n else:\n raw_text = \"unable to retrieve the message\"\n raw_text = link_breaker(raw_text)\n num_get = 0\n if online and title[:4] == 'Re ' and title[4:] in treated:\n cont_issue = repo.get_issue(issues[treated.index(title[4:])])\n num_get = cont_issue.number\n cont_issue.create_comment(body=raw_text)\n elif online:\n new_issue = repo.create_issue(title=\"Conversation number {}: {}\".format(len(treated), title[:10]+\"...\"),\n body=raw_text)\n if verbose:\n print(new_issue)\n num_get = new_issue.number\n treated.append(title)\n issues.append(num_get)\n\n if verbose:\n print(treated)\n\n with open(join(path, 'log.txt'), 'w') as doc:\n for title in treated:\n doc.write(title+'\\n')\n with open(join(path, 'issues.txt'), 'w') as doc:\n for title in issues:\n doc.write(str(title)+'\\n')",
"def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues",
"def display_for_triage(bugs):\n # bug title is like:\n # '\n # Bug #1724025 in openstack-ansible:\n # invalid regular expression...\"\n # '\n for bug in bugs:\n bug_name = u\"\".join(bug.title.split(\":\")[1:])\n print(u\"#link {link}\\n\\t{name}\".format(link=bug.web_link, name=bug_name))",
"def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()",
"def process_bug_links(text):\n text = LP_RE.sub(r\"[\\1](https://bugs.launchpad.net/bugs/\\2)\", text)\n text = BDO_RE.sub(r\"[\\1](https://bugs.debian.org/\\2)\", text)\n text = ISSUE_RE.sub(r\"[\\1](https://github.com/gammu/gammu/\\2)\", text)\n return BUG_RE.sub(r\"[\\1](https://bugs.cihar.com/\\2)\", text)",
"def scrape_routing_targets(self, advanced_fields):\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n try:\n show_all = self.driver.find_element_by_id(\"bv2-issue-metadata-list-4-more\")\n show_all.click()\n show_all = self.driver.find_element_by_id(\"bv2-issue-metadata-list-5-more\")\n show_all.click()\n except common.exceptions.NoSuchElementException:\n pass\n\n severity_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-severity\")\n severity = severity_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n found_in_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-foundInVersion\")\n found_in = found_in_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n in_prod_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-inProd\")\n in_prod = in_prod_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n verifier_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-verifier\")\n verifier = verifier_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n targeted_to_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-targetedToVersion\")\n targeted_to = targeted_to_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n queue_id_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField688193\")\n queue_id = queue_id_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n\n queues_to_add = []\n queues_to_remove = []\n\n for tag in soup.find_all(\"button\", id=lambda value: value and value.startswith(\n \"bv2-issue-metadata-list\")):\n queue_method_string = tag[\"aria-label\"]\n if \"Add Queues to Route To\" in queue_method_string:\n queue_method_string = queue_method_string.replace(\"Remove \", \"\")\n queue_method_string = queue_method_string.replace(\" from Add Queues to Route To\", \"\")\n queues_to_add.append(int(queue_method_string))\n elif \"Remove Queues to Route To\" in queue_method_string:\n queue_method_string = queue_method_string.replace(\"Remove \", \"\")\n queue_method_string = queue_method_string.replace(\" from Queues to Route To\", \"\")\n queues_to_remove.append(int(queue_method_string))\n\n advanced_fields[\"Add Queues to Route To\"] = queues_to_add\n advanced_fields[\"Remove Queues to Route To\"] = queues_to_remove\n\n advanced_fields[severity[0]] = severity[1]\n if verifier[1] != \"empty\":\n advanced_fields[verifier[0]] = verifier[1]\n if found_in[1] != \"empty\":\n advanced_fields[found_in[0]] = found_in[1]\n if in_prod[1] != \"empty\":\n if in_prod[1] == \"Yes\":\n advanced_fields[in_prod[0]] = True\n else:\n advanced_fields[in_prod[0]] = False\n if targeted_to[1] != \"empty\":\n advanced_fields[targeted_to[0]] = targeted_to[1]\n if queue_id[1] != \"empty\":\n advanced_fields[queue_id[0]] = int(queue_id[1])\n\n self._message_parsing_util.publish_buganizer_fields(advanced_fields)",
"def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)",
"def process_issue(\n start, day_to_process, message_index, drive_by, issue_owner, messages,\n user):\n assert isinstance(start, datetime.datetime), start\n assert isinstance(day_to_process, datetime.date), day_to_process\n assert message_index is None or 0 <= message_index < len(messages), (\n message_index)\n assert drive_by in (True, False), drive_by\n assert issue_owner.count('@') == 1, issue_owner\n assert all(isinstance(m, models.Message) for m in messages), messages\n assert user.count('@') == 1, user\n\n lgtms = sum(\n m.sender == user and\n m.find(models.Message.LGTM_RE, owner_allowed=True) and\n not m.find(models.Message.NOT_LGTM_RE, owner_allowed=True)\n for m in messages)\n\n # TODO(maruel): Check for the base username part, e.g.:\n # if user.split('@', 1)[0] == issue_owner.split('@', 1)[0]:\n # For example, many people have both matching @google.com and @chromium.org\n # accounts.\n if user == issue_owner:\n if not any(m.date.date() == day_to_process for m in messages):\n return -1, None, None\n # There's no concept of review latency for OUTGOING reviews.\n return -1, lgtms, models.AccountStatsBase.OUTGOING\n\n if message_index is None:\n # Neither issue_owner nor user sent an email, ignore.\n return -1, None, None\n\n if drive_by:\n # Tricky case. Need to determine the difference between NOT_REQUESTED and\n # DRIVE_BY. To determine if an issue is NOT_REQUESTED, look if the owner\n # never sent a request for review in the previous messages.\n review_type = (\n models.AccountStatsBase.NOT_REQUESTED\n if messages[message_index].sender == user\n else models.AccountStatsBase.DRIVE_BY)\n else:\n review_type = models.AccountStatsBase.NORMAL\n\n for m in messages[message_index:]:\n if m.sender == user:\n if m.date.date() < day_to_process:\n # It was already updated on a previous day. Skip calculation.\n return -1, None, None\n return int((m.date - start).total_seconds()), lgtms, review_type\n\n # 'user' didn't send a message, so no latency can be calculated.\n assert not lgtms, lgtms\n return -1, lgtms, models.AccountStatsBase.IGNORED",
"def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)",
"def get_jira_issues(jira, username):\n exclude_stories = cfg.args.x\n epics_only = cfg.args.e\n all_status = cfg.args.all\n filename = cfg.args.file\n user = cfg.args.user\n last_comment = cfg.args.l\n\n issue_types = [\"Sub-task\", \"Epic\"]\n if not epics_only:\n issue_types.append(\"Initiative\")\n if not exclude_stories:\n issue_types.extend([\"Story\", \"Task\", \"Sub-task\", \"Bug\"])\n issue_type = \"issuetype in (%s)\" % \", \".join(issue_types)\n\n status = 'status in (\"In Progress\")'\n if all_status:\n status = \"status not in (Resolved, Closed)\"\n\n if user is None:\n user = \"currentUser()\"\n else:\n user = '\"%s\"' % add_domain(user)\n\n jql = \"%s AND assignee = %s AND %s\" % (issue_type, user, status)\n log.debug(jql)\n\n my_issues = jira.search_issues(jql)\n if my_issues.total > my_issues.maxResults:\n my_issues = jira.search_issues(jql, maxResults=my_issues.total)\n\n showdate = strftime(\"%Y-%m-%d\", gmtime())\n subject = \"Subject: [Weekly] Week ending \" + showdate + \"\\n\\n\"\n\n msg = get_header()\n if msg != \"\":\n msg += email_to_name(username) + \"\\n\\n\"\n\n f = open_file(filename)\n filename = f.name\n\n f.write(subject)\n\n f.write(msg)\n log.debug(\"Found issue:\")\n for issue in my_issues:\n log.debug(\"%s : %s\" % (issue, issue.fields.summary))\n\n if merge_issue_header():\n f.write(\n \"[%s%s%s]\\n\" % (issue, get_header_separator(), issue.fields.summary)\n )\n else:\n f.write(\"[%s]\\n\" % issue)\n f.write(\"# Header: %s\\n\" % issue.fields.summary)\n\n f.write(\"# Type: %s\\n\" % issue.fields.issuetype)\n f.write(\"# Status: %s\\n\" % issue.fields.status)\n f.write(get_extra_comments())\n if last_comment:\n write_last_jira_comment(f, jira, issue)\n f.write(\"\\n\")\n\n f.close()\n return (filename, my_issues)",
"def run_tests(self):\n self.ignore_errors()\n return self.issues",
"def parse_buginfo(entry):\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)",
"def cleanIssues(issues):\n response = []\n for issue in issues:\n response.append({\"title\": issue[\"title\"], \"body\": issue[\"body\"]})\n return response",
"def process(self):\n for user in self.repos:\n for repo in self.repos[user]:\n self.process_issues(user, repo)"
]
| [
"0.70684487",
"0.62912136",
"0.61083233",
"0.59577924",
"0.5827052",
"0.5784197",
"0.57594097",
"0.56995815",
"0.5654476",
"0.54628205",
"0.54601973",
"0.54352313",
"0.54164195",
"0.5216515",
"0.521272",
"0.5200309",
"0.5200226",
"0.5149228",
"0.5148666",
"0.51370823",
"0.51208967",
"0.5091616",
"0.5090344",
"0.5083169",
"0.50658196",
"0.5039347",
"0.5035902",
"0.50309676",
"0.5015474",
"0.4998231"
]
| 0.7092747 | 0 |
Scrape all advanced fields from a RoutingTarget Buganizer Issue | def scrape_queue_info(self, advanced_fields):
source_html = self.driver.page_source
soup = BeautifulSoup(source_html, "html.parser")
severity_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-severity")
severity = severity_tag["aria-label"].replace(
" value is ", "\n").split("\n")
found_in_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-foundInVersion")
found_in = found_in_tag["aria-label"].replace(
" value is ", "\n").split("\n")
in_prod_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-inProd")
in_prod = in_prod_tag["aria-label"].replace(
" value is ", "\n").split("\n")
verifier_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-verifier")
verifier = verifier_tag["aria-label"].replace(
" value is ", "\n").split("\n")
targeted_to_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-targetedToVersion")
targeted_to = targeted_to_tag["aria-label"].replace(
" value is ", "\n").split("\n")
queue_id_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField688197")
queue_id = queue_id_tag["aria-label"].replace(
" value is ", "\n").split("\n")
mdb_group_name_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686879")
mdb_group_name = mdb_group_name_tag["aria-label"].replace(
" value is ", "\n").split("\n")
ops_owner_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686850")
ops_owner = ops_owner_tag["aria-label"].replace(
" value is ", "\n").split("\n")
gvo_owner_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686358")
gvo_owner = gvo_owner_tag["aria-label"].replace(
" value is ", "\n").split("\n")
tech_owner_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686980")
tech_owner = tech_owner_tag["aria-label"].replace(
" value is ", "\n").split("\n")
is_dashboard_queue_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686718")
is_dashboard_queue = is_dashboard_queue_tag["aria-label"].replace(
" value is ", "\n").split("\n")
reviews_per_item_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField687560")
reviews_per_item = reviews_per_item_tag["aria-label"].replace(
" value is ", "\n").split("\n")
fragment_name_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686833")
fragment_name = fragment_name_tag["aria-label"].replace(
" value is ", "\n").split("\n")
item_expiry_sec_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686748")
item_expiry_sec = item_expiry_sec_tag["aria-label"].replace(
" value is ", "\n").split("\n")
is_experimental_review_enabled_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField688166")
is_experimental_review_enabled = is_experimental_review_enabled_tag["aria-label"].replace(
" value is ", "\n").split("\n")
experimental_probability_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField686699")
experimental_probability = experimental_probability_tag["aria-label"].replace(
" value is ", "\n").split("\n")
advanced_fields[severity[0]] = severity[1]
if verifier[1] != "empty":
advanced_fields[verifier[0]] = verifier[1]
if found_in[1] != "empty":
advanced_fields[found_in[0]] = found_in[1]
if in_prod[1] != "empty":
if in_prod[1] == "Yes":
advanced_fields[in_prod[0]] = True
else:
advanced_fields[in_prod[0]] = False
if targeted_to[1] != "empty":
advanced_fields[targeted_to[0]] = targeted_to[1]
if queue_id[1] != "empty":
advanced_fields[queue_id[0]] = int(queue_id[1])
if mdb_group_name[1] != "empty":
advanced_fields[mdb_group_name[0]] = mdb_group_name[1]
if ops_owner[1] != "empty":
advanced_fields[ops_owner[0]] = ops_owner[1]
if gvo_owner[1] != "empty":
advanced_fields[gvo_owner[0]] = gvo_owner[1]
if tech_owner[1] != "empty":
advanced_fields[tech_owner[0]] = tech_owner[1]
if is_dashboard_queue[1] != "empty":
if is_dashboard_queue[1] == "true":
advanced_fields[is_dashboard_queue[0]] = True
else:
advanced_fields[is_dashboard_queue[0]] = False
if reviews_per_item[1] != "empty":
advanced_fields[reviews_per_item[0]] = int(reviews_per_item[1])
if fragment_name[1] != "empty":
advanced_fields[fragment_name[0]] = fragment_name[1]
if item_expiry_sec[1] != "empty":
advanced_fields[item_expiry_sec[0]] = int(item_expiry_sec[1])
if is_experimental_review_enabled[1] != "empty":
if is_dashboard_queue[1] == "true":
advanced_fields[is_experimental_review_enabled[0]] = True
else:
advanced_fields[is_experimental_review_enabled[0]] = False
if experimental_probability[1] != "empty":
advanced_fields[experimental_probability[0]] = int(experimental_probability[1])
self._message_parsing_util.publish_buganizer_fields(advanced_fields) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape_routing_targets(self, advanced_fields):\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n try:\n show_all = self.driver.find_element_by_id(\"bv2-issue-metadata-list-4-more\")\n show_all.click()\n show_all = self.driver.find_element_by_id(\"bv2-issue-metadata-list-5-more\")\n show_all.click()\n except common.exceptions.NoSuchElementException:\n pass\n\n severity_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-severity\")\n severity = severity_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n found_in_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-foundInVersion\")\n found_in = found_in_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n in_prod_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-inProd\")\n in_prod = in_prod_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n verifier_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-verifier\")\n verifier = verifier_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n targeted_to_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-targetedToVersion\")\n targeted_to = targeted_to_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n queue_id_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField688193\")\n queue_id = queue_id_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n\n queues_to_add = []\n queues_to_remove = []\n\n for tag in soup.find_all(\"button\", id=lambda value: value and value.startswith(\n \"bv2-issue-metadata-list\")):\n queue_method_string = tag[\"aria-label\"]\n if \"Add Queues to Route To\" in queue_method_string:\n queue_method_string = queue_method_string.replace(\"Remove \", \"\")\n queue_method_string = queue_method_string.replace(\" from Add Queues to Route To\", \"\")\n queues_to_add.append(int(queue_method_string))\n elif \"Remove Queues to Route To\" in queue_method_string:\n queue_method_string = queue_method_string.replace(\"Remove \", \"\")\n queue_method_string = queue_method_string.replace(\" from Queues to Route To\", \"\")\n queues_to_remove.append(int(queue_method_string))\n\n advanced_fields[\"Add Queues to Route To\"] = queues_to_add\n advanced_fields[\"Remove Queues to Route To\"] = queues_to_remove\n\n advanced_fields[severity[0]] = severity[1]\n if verifier[1] != \"empty\":\n advanced_fields[verifier[0]] = verifier[1]\n if found_in[1] != \"empty\":\n advanced_fields[found_in[0]] = found_in[1]\n if in_prod[1] != \"empty\":\n if in_prod[1] == \"Yes\":\n advanced_fields[in_prod[0]] = True\n else:\n advanced_fields[in_prod[0]] = False\n if targeted_to[1] != \"empty\":\n advanced_fields[targeted_to[0]] = targeted_to[1]\n if queue_id[1] != \"empty\":\n advanced_fields[queue_id[0]] = int(queue_id[1])\n\n self._message_parsing_util.publish_buganizer_fields(advanced_fields)",
"def extract_details(df):\n df_RSinfo = df[['pentamer', 'Step details', 'RouteScore details',\n 'Isolated', 'RouteScore', 'log(RouteScore)']]\n\n last3_rxns = ['Buchwald_deprotection', 'Buchwald', 'SNAr']\n for rxn in last3_rxns:\n df_RSinfo[rxn] = [next(step for step in row[-3:] if step['reaction'] == rxn) for row in df['Step details']]\n\n for key in df_RSinfo['RouteScore details'][0].keys():\n df_RSinfo[key] = [row[key] for row in df['RouteScore details']]\n\n return df_RSinfo",
"def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)",
"def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response",
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def get_details(self):",
"def parse(self, response):\n\t\ttoday = self.date.strftime('%d/%m/%Y')\n\n\t\tselector = Selector(response)\n\t\tprado = selector.xpath('//input[@name=\"PRADO_PAGESTATE\"]/@value').extract()\n\n\t\tyield FormRequest(\"https://www.marches-publics.gouv.fr/index.php5?page=entreprise.EntrepriseAdvancedSearch&searchAnnCons\",\n\t\t\t#formname='main_form',\n\t\t\tformdata={\n\t\t\t'PRADO_PAGESTATE':prado[0],\n\t\t\t'PRADO_POSTBACK_TARGET':'ctl0$CONTENU_PAGE$AdvancedSearch$lancerRecherche',\n\t\t\t'PRADO_POSTBACK_PARAMETER':'undefined',\n\t\t\t'ctl0$menuGaucheEntreprise$quickSearch':'Recherche rapide',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$orgNameAM':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$reference':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$procedureType':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$categorie':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$clauseSociales':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$ateliersProteges':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$clauseEnvironnementale':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idsSelectedGeoN2':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$UrlRef':'/atexo.referentiels/referentiel.jsp?&clef=ctl0_CONTENU_PAGE_AdvancedSearch_idAtexoRef&locale=fr&cheminFichierConfigXML=/ressources/referentiels-new/cpv-config.xml&urlBase=https://mpe3-docs.local-trust.com&styleCSS=https://www.marches-publics.gouv.fr/themes/cpv/css/cpv.css',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$casRef':'cas6',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$codeRefPrinc':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$codesRefSec':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$defineCodePrincipal':'(Code principal)',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneStart':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneEnd':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneCalculeStart': today,\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneCalculeEnd': today,\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$keywordSearch':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$rechercheFloue':'ctl0$CONTENU_PAGE$AdvancedSearch$floue',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$orgNamesRestreinteSearch':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$refRestreinteSearch':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$accesRestreinteSearch':''\n\t\t\t},\n\t\t\tcallback=self.parse_page_resultat)",
"def parse_details_more_cves(self, content):\n result = {}\n detail = ''\n header_appeared = False\n vuln_headers = content.xpath('.//*[self::strong or self::h3]/text()')\n details_list = content.xpath('.//div[@id=\"detailfield\"]/span//text()')\n for item in details_list:\n item = normalize_string(item)\n if item == '':\n continue\n if item in vuln_headers:\n header_appeared = True\n detail = ''\n elif header_appeared:\n cve_match = self.cve_match(item)\n if cve_match == '':\n detail += item\n else:\n result[cve_match] = detail\n detail = ''\n return result",
"def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)",
"def get_route_details(agency, route_tag):\n\n xml_query_string = 'http://webservices.nextbus.com/service/publicXMLFeed?command=routeConfig&a=' \\\n + agency + '&r=' + route_tag\n xml_request = requests.get(xml_query_string)\n route_directions = {}\n root = ET.fromstring(xml_request.text)\n \n return root",
"def _scrape(self):",
"def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')",
"def test_parse_hit_details(self):\n for query in self.result:\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"SUBJECT_ID\"], \"gi|148670104|gb|EDL02051.1|\")\n self.assertEqual(\n first_hsp[\"HIT_DEF\"],\n \"insulin-like growth factor 2 receptor, isoform CRA_c [Mus musculus]\",\n )\n self.assertEqual(first_hsp[\"HIT_ACCESSION\"], \"2001\")\n self.assertEqual(first_hsp[\"HIT_LENGTH\"], 707)",
"def pull_fields(self, org):\n pass",
"def extract_way(element, way_attr_fields = WAY_FIELDS, problem_chars=PROBLEMCHARS, default_tag_type='regular') :\r\n attribs = {}\r\n nodes = []\r\n tags =[]\r\n\r\n for key in way_attr_fields:\r\n attribs[key] = element.attrib[key]\r\n for tag in element.iter(\"tag\"):\r\n way_tag = {}\r\n way_tag[\"type\"] = default_tag_type\r\n way_tag[\"id\"] = attribs[\"id\"]\r\n way_tag[\"value\"] = tag.attrib[\"v\"]\r\n\r\n k = tag.attrib[\"k\"]\r\n if PROBLEMCHARS.search(k):\r\n continue\r\n elif \":\" in k:\r\n way_tag[\"key\"] = k.split(\":\", 1)[1]\r\n way_tag[\"type\"] = k.split(\":\", 1)[0]\r\n else:\r\n way_tag[\"key\"] = k\r\n\r\n # Audit city name , if any, before appending the dictionary in list\r\n\r\n if way_tag[\"key\"] == \"city\":\r\n way_tag[\"value\"] = update_city_name(way_tag[\"value\"])\r\n\r\n # Audit street name, if any , as per mapping\r\n\r\n if way_tag[\"key\"] == \"street\" or \"street:name\":\r\n way_tag[\"value\"] = update_street_name(way_tag[\"value\"], mapping)\r\n\r\n # Check if postcode is valid, if invalid prefix the postcode value with 'fixme:'\r\n\r\n if way_tag[\"key\"] == \"postcode\":\r\n invalid, way_tag[\"value\"] = update_postcode(way_tag[\"value\"])\r\n if invalid:\r\n way_tag[\"value\"]='fixme:'+ way_tag[\"value\"]\r\n\r\n tags.append(way_tag)\r\n\r\n for counter, nd in enumerate(element.iter(\"nd\")):\r\n nd_tags = {}\r\n nd_tags[\"id\"] = attribs[\"id\"]\r\n nd_tags[\"node_id\"] = nd.attrib[\"ref\"]\r\n nd_tags[\"position\"] = counter\r\n\r\n nodes.append(nd_tags)\r\n\r\n return {'way': attribs, 'way_nodes': nodes, 'way_tags': tags}",
"def get_reply_fields(self): \n def alter_request_edges(self, jdata):\n \"\"\"\n From the jsonified request template, converts\n \"edges\" : { \"node\" : { \"key1\" : value1, ... } }\n to something resembling a reply message body:\n \"edges\" : [ { \"key1\" : value, ... } }\n so that flatten_json can be run against it to extract\n valid field names.\n \"\"\"\n if isinstance(jdata, list):\n for entry in jdata:\n self._alter_request_edges(entry)\n if isinstance(jdata, dict):\n for key in jdata:\n if key == \"edges\":\n edge_dict = jdata[key]\n jdata[key] = []\n for subkey in edge_dict:\n jdata[key].append(edge_dict[subkey]) \n self._alter_request_edges(jdata[key]) \n\n json1 = re.sub(r'([z-zA-z0-9_-]+)(?:\\(.*?\\))*\\s*([\\[\\{])', r'\"\\1\" : \\2', self.template_text)\n json2 = re.sub(r'\\.*([a-zA-Z0-9]+)\\s*\\n', r'\"\\1\" : true,\\n', json1)\n json3 = re.sub(r'(\"[a-zA-Z0-9_-]+\"\\s*:[^,]+),(\\s*\\n\\s*[\\}\\]].*)', r'\\1\\2', json2)\n jdata = json.loads(json3)\n alter_request_edges(jdata)\n jreply = self.flatten_json(jdata, self.flatpath)\n self.reply_fields = [ key for key in jdata[0] ]\n return self._reply_fields",
"def get_explain_details(match_explanation):\n expl = []\n for x in match_explanation[\"details\"]:\n if len(re.findall(\"attrib=([a-zA-Z0-9_\\-\\s]+)\", str(x))) > 1:\n expl.extend(get_explain_details(x))\n elif len(re.findall(\"attrib=([a-zA-Z0-9_\\-\\s]+)\", str(x))) == 1:\n expl.append({\"field\": re.search(\"attrib=([a-zA-Z0-9_\\-\\s]+)\", str(x)).group(1), \"similarity\":x['value']})\n return expl",
"def test_gethints_other(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'hints'})\r\n out = view.get_hints(post, self.course_id, 'hints')\r\n print out\r\n self.assertTrue(out['other_field'] == 'mod_queue')\r\n expected = {self.problem_id: [('1.0', {'1': ['Hint 1', 2],\r\n '3': ['Hint 3', 12]}),\r\n ('2.0', {'4': ['Hint 4', 3]})\r\n ]}\r\n self.assertTrue(out['all_hints'] == expected)",
"def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content",
"def strategy_crawler(details):\n try:\n response = get(details['url'])\n soup = bs4.BeautifulSoup(response.text)\n # Regex any div with class 'tppjsc' for a stop_id number\n stop_divs = soup.select('div.tppjsc')\n stop_ids = [re.findall(r'(\\d+)', div.text) for div in stop_divs]\n stop_ids = reduce(lambda x, y: x + y, stop_ids)\n details['stop_ids'] = list(set(stop_ids)) # Remove duplicates\n if details['stop_ids'] != 'Unavailable':\n details['agency'] = details['agency'][0]\n return details\n except:\n details['stop_ids'] = \"Unavailable\"\n return details",
"def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()",
"def test_gethints(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'mod_queue'})\r\n out = view.get_hints(post, self.course_id, 'mod_queue')\r\n print out\r\n self.assertTrue(out['other_field'] == 'hints')\r\n expected = {self.problem_id: [(u'2.0', {u'2': [u'Hint 2', 1]})]}\r\n self.assertTrue(out['all_hints'] == expected)",
"def fields(self):",
"def test_get_rule_details(self):\n pass",
"def get_fields(self, resource):\n\n def _get_fields_key(resource):\n \"\"\"Returns the fields key from a resource dict\n\n \"\"\"\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None\n\n if isinstance(resource, dict) and 'resource' in resource:\n resource_id = resource['resource']\n elif (isinstance(resource, basestring) and (\n SOURCE_RE.match(resource) or DATASET_RE.match(resource) or\n MODEL_RE.match(resource) or PREDICTION_RE.match(resource))):\n resource_id = resource\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n else:\n LOGGER.error(\"Wrong resource id\")\n return\n # Tries to extract fields information from resource dict. If it fails,\n # a get remote call is used to retrieve the resource by id.\n fields = None\n try:\n fields = _get_fields_key(resource)\n except KeyError:\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n fields = _get_fields_key(resource)\n\n return fields",
"def hit_details(hit_id, sandbox, recruiter):\n prolific_check(recruiter, sandbox)\n rec = by_name(recruiter, skip_config_validation=True)\n details = rec.hit_details(hit_id, sandbox)\n print(json.dumps(details, indent=4, default=str))",
"def get_issue(self, context):",
"def fields(self):\n ...",
"def scrape_technical_data(self):\n\n page = requests.get(self.url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(\n \"section\", class_=\"titlereference-section-additional-details\"\n )\n results = results.find_all(\"tr\")\n\n data = []\n for res in results:\n data.append(res)\n\n runtime = data[1]\n runtime = re.sub(\"<.*?>\", \"\", str(runtime))\n runtime = (\n runtime.replace(\" \", \"\")\n .replace(\"Runtime\", \"\")\n .replace(\"\\n\", \"\")\n .replace(\"min\", \" min\")\n )\n\n country = \"\"\n languages = []\n color = \"\"\n social = {}\n if \"Country</td>\" in str(data[2]):\n country = data[2].find_all(\"a\")\n country = re.sub(\"<.*?>\", \"\", str(country))\n\n languages = data[3].find_all(\"a\")\n languages = re.sub(\"<.*?>\", \"\", str(languages))\n\n color = data[4].find_all(\"a\")\n color = re.sub(\"<.*?>\", \"\", str(color))\n elif \"Country</td>\" in str(data[3]):\n sites = data[2].find_all(\"a\")\n for site in sites:\n key = (\n re.sub(\"<.*?>\", \"\", str(site))\n .replace(\"\\n\", \"\")\n .replace(\" \", \"\")\n .replace(\" \", \"\")\n )\n social[key] = site[\"href\"]\n\n country = data[3].find_all(\"a\")\n country = re.sub(\"<.*?>\", \"\", str(country))\n\n languages = data[4].find_all(\"a\")\n languages = re.sub(\"<.*?>\", \"\", str(languages))\n\n color = data[5].find_all(\"a\")\n color = re.sub(\"<.*?>\", \"\", str(color))\n\n country = country.replace(\"[\", \"\").replace(\"]\", \"\")\n languages = languages.replace(\"[\", \"\").replace(\"]\", \"\")\n color = color.replace(\"[\", \"\").replace(\"]\", \"\")\n\n return (runtime, country, languages, color, social)",
"def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues"
]
| [
"0.7713654",
"0.54770523",
"0.54455316",
"0.5295992",
"0.52034366",
"0.51422",
"0.51173514",
"0.511575",
"0.51122963",
"0.50632423",
"0.50037414",
"0.49961174",
"0.49717402",
"0.49483138",
"0.49464095",
"0.4919574",
"0.48905483",
"0.48504505",
"0.48457265",
"0.48365772",
"0.4835398",
"0.4832522",
"0.48233706",
"0.48025426",
"0.4789322",
"0.47783667",
"0.47779757",
"0.4771945",
"0.4763828",
"0.47552416"
]
| 0.656218 | 1 |
Scrape all advanced fields from a RoutingTarget Buganizer Issue | def scrape_routing_targets(self, advanced_fields):
source_html = self.driver.page_source
soup = BeautifulSoup(source_html, "html.parser")
try:
show_all = self.driver.find_element_by_id("bv2-issue-metadata-list-4-more")
show_all.click()
show_all = self.driver.find_element_by_id("bv2-issue-metadata-list-5-more")
show_all.click()
except common.exceptions.NoSuchElementException:
pass
severity_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-severity")
severity = severity_tag["aria-label"].replace(
" value is ", "\n").split("\n")
found_in_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-foundInVersion")
found_in = found_in_tag["aria-label"].replace(
" value is ", "\n").split("\n")
in_prod_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-inProd")
in_prod = in_prod_tag["aria-label"].replace(
" value is ", "\n").split("\n")
verifier_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-verifier")
verifier = verifier_tag["aria-label"].replace(
" value is ", "\n").split("\n")
targeted_to_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-targetedToVersion")
targeted_to = targeted_to_tag["aria-label"].replace(
" value is ", "\n").split("\n")
queue_id_tag = soup.find("div", "bv2-issue-metadata-field-inner "\
"bv2-issue-metadata-field-customField688193")
queue_id = queue_id_tag["aria-label"].replace(
" value is ", "\n").split("\n")
queues_to_add = []
queues_to_remove = []
for tag in soup.find_all("button", id=lambda value: value and value.startswith(
"bv2-issue-metadata-list")):
queue_method_string = tag["aria-label"]
if "Add Queues to Route To" in queue_method_string:
queue_method_string = queue_method_string.replace("Remove ", "")
queue_method_string = queue_method_string.replace(" from Add Queues to Route To", "")
queues_to_add.append(int(queue_method_string))
elif "Remove Queues to Route To" in queue_method_string:
queue_method_string = queue_method_string.replace("Remove ", "")
queue_method_string = queue_method_string.replace(" from Queues to Route To", "")
queues_to_remove.append(int(queue_method_string))
advanced_fields["Add Queues to Route To"] = queues_to_add
advanced_fields["Remove Queues to Route To"] = queues_to_remove
advanced_fields[severity[0]] = severity[1]
if verifier[1] != "empty":
advanced_fields[verifier[0]] = verifier[1]
if found_in[1] != "empty":
advanced_fields[found_in[0]] = found_in[1]
if in_prod[1] != "empty":
if in_prod[1] == "Yes":
advanced_fields[in_prod[0]] = True
else:
advanced_fields[in_prod[0]] = False
if targeted_to[1] != "empty":
advanced_fields[targeted_to[0]] = targeted_to[1]
if queue_id[1] != "empty":
advanced_fields[queue_id[0]] = int(queue_id[1])
self._message_parsing_util.publish_buganizer_fields(advanced_fields) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape_queue_info(self, advanced_fields):\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n severity_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-severity\")\n severity = severity_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n found_in_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-foundInVersion\")\n found_in = found_in_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n in_prod_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-inProd\")\n in_prod = in_prod_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n verifier_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-verifier\")\n verifier = verifier_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n targeted_to_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-targetedToVersion\")\n targeted_to = targeted_to_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n queue_id_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField688197\")\n queue_id = queue_id_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n mdb_group_name_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686879\")\n mdb_group_name = mdb_group_name_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n ops_owner_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686850\")\n ops_owner = ops_owner_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n gvo_owner_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686358\")\n gvo_owner = gvo_owner_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n tech_owner_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686980\")\n tech_owner = tech_owner_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n is_dashboard_queue_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686718\")\n is_dashboard_queue = is_dashboard_queue_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n reviews_per_item_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField687560\")\n reviews_per_item = reviews_per_item_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n fragment_name_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686833\")\n fragment_name = fragment_name_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n item_expiry_sec_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686748\")\n item_expiry_sec = item_expiry_sec_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n is_experimental_review_enabled_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField688166\")\n is_experimental_review_enabled = is_experimental_review_enabled_tag[\"aria-label\"].replace(\n \" value is \", 
\"\\n\").split(\"\\n\")\n experimental_probability_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-customField686699\")\n experimental_probability = experimental_probability_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n\n advanced_fields[severity[0]] = severity[1]\n if verifier[1] != \"empty\":\n advanced_fields[verifier[0]] = verifier[1]\n if found_in[1] != \"empty\":\n advanced_fields[found_in[0]] = found_in[1]\n if in_prod[1] != \"empty\":\n if in_prod[1] == \"Yes\":\n advanced_fields[in_prod[0]] = True\n else:\n advanced_fields[in_prod[0]] = False\n if targeted_to[1] != \"empty\":\n advanced_fields[targeted_to[0]] = targeted_to[1]\n if queue_id[1] != \"empty\":\n advanced_fields[queue_id[0]] = int(queue_id[1])\n if mdb_group_name[1] != \"empty\":\n advanced_fields[mdb_group_name[0]] = mdb_group_name[1]\n if ops_owner[1] != \"empty\":\n advanced_fields[ops_owner[0]] = ops_owner[1]\n if gvo_owner[1] != \"empty\":\n advanced_fields[gvo_owner[0]] = gvo_owner[1]\n if tech_owner[1] != \"empty\":\n advanced_fields[tech_owner[0]] = tech_owner[1]\n if is_dashboard_queue[1] != \"empty\":\n if is_dashboard_queue[1] == \"true\":\n advanced_fields[is_dashboard_queue[0]] = True\n else:\n advanced_fields[is_dashboard_queue[0]] = False\n if reviews_per_item[1] != \"empty\":\n advanced_fields[reviews_per_item[0]] = int(reviews_per_item[1])\n if fragment_name[1] != \"empty\":\n advanced_fields[fragment_name[0]] = fragment_name[1]\n if item_expiry_sec[1] != \"empty\":\n advanced_fields[item_expiry_sec[0]] = int(item_expiry_sec[1])\n if is_experimental_review_enabled[1] != \"empty\":\n if is_dashboard_queue[1] == \"true\":\n advanced_fields[is_experimental_review_enabled[0]] = True\n else:\n advanced_fields[is_experimental_review_enabled[0]] = False\n if experimental_probability[1] != \"empty\":\n advanced_fields[experimental_probability[0]] = int(experimental_probability[1])\n\n self._message_parsing_util.publish_buganizer_fields(advanced_fields)",
"def extract_details(df):\n df_RSinfo = df[['pentamer', 'Step details', 'RouteScore details',\n 'Isolated', 'RouteScore', 'log(RouteScore)']]\n\n last3_rxns = ['Buchwald_deprotection', 'Buchwald', 'SNAr']\n for rxn in last3_rxns:\n df_RSinfo[rxn] = [next(step for step in row[-3:] if step['reaction'] == rxn) for row in df['Step details']]\n\n for key in df_RSinfo['RouteScore details'][0].keys():\n df_RSinfo[key] = [row[key] for row in df['RouteScore details']]\n\n return df_RSinfo",
"def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)",
"def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response",
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def get_details(self):",
"def parse(self, response):\n\t\ttoday = self.date.strftime('%d/%m/%Y')\n\n\t\tselector = Selector(response)\n\t\tprado = selector.xpath('//input[@name=\"PRADO_PAGESTATE\"]/@value').extract()\n\n\t\tyield FormRequest(\"https://www.marches-publics.gouv.fr/index.php5?page=entreprise.EntrepriseAdvancedSearch&searchAnnCons\",\n\t\t\t#formname='main_form',\n\t\t\tformdata={\n\t\t\t'PRADO_PAGESTATE':prado[0],\n\t\t\t'PRADO_POSTBACK_TARGET':'ctl0$CONTENU_PAGE$AdvancedSearch$lancerRecherche',\n\t\t\t'PRADO_POSTBACK_PARAMETER':'undefined',\n\t\t\t'ctl0$menuGaucheEntreprise$quickSearch':'Recherche rapide',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$orgNameAM':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$reference':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$procedureType':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$categorie':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$clauseSociales':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$ateliersProteges':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$clauseEnvironnementale':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idsSelectedGeoN2':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$UrlRef':'/atexo.referentiels/referentiel.jsp?&clef=ctl0_CONTENU_PAGE_AdvancedSearch_idAtexoRef&locale=fr&cheminFichierConfigXML=/ressources/referentiels-new/cpv-config.xml&urlBase=https://mpe3-docs.local-trust.com&styleCSS=https://www.marches-publics.gouv.fr/themes/cpv/css/cpv.css',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$casRef':'cas6',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$codeRefPrinc':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$codesRefSec':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$idAtexoRef$defineCodePrincipal':'(Code principal)',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneStart':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneEnd':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneCalculeStart': today,\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$dateMiseEnLigneCalculeEnd': today,\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$keywordSearch':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$rechercheFloue':'ctl0$CONTENU_PAGE$AdvancedSearch$floue',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$orgNamesRestreinteSearch':'0',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$refRestreinteSearch':'',\n\t\t\t'ctl0$CONTENU_PAGE$AdvancedSearch$accesRestreinteSearch':''\n\t\t\t},\n\t\t\tcallback=self.parse_page_resultat)",
"def parse_details_more_cves(self, content):\n result = {}\n detail = ''\n header_appeared = False\n vuln_headers = content.xpath('.//*[self::strong or self::h3]/text()')\n details_list = content.xpath('.//div[@id=\"detailfield\"]/span//text()')\n for item in details_list:\n item = normalize_string(item)\n if item == '':\n continue\n if item in vuln_headers:\n header_appeared = True\n detail = ''\n elif header_appeared:\n cve_match = self.cve_match(item)\n if cve_match == '':\n detail += item\n else:\n result[cve_match] = detail\n detail = ''\n return result",
"def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)",
"def get_route_details(agency, route_tag):\n\n xml_query_string = 'http://webservices.nextbus.com/service/publicXMLFeed?command=routeConfig&a=' \\\n + agency + '&r=' + route_tag\n xml_request = requests.get(xml_query_string)\n route_directions = {}\n root = ET.fromstring(xml_request.text)\n \n return root",
"def _scrape(self):",
"def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')",
"def test_parse_hit_details(self):\n for query in self.result:\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"SUBJECT_ID\"], \"gi|148670104|gb|EDL02051.1|\")\n self.assertEqual(\n first_hsp[\"HIT_DEF\"],\n \"insulin-like growth factor 2 receptor, isoform CRA_c [Mus musculus]\",\n )\n self.assertEqual(first_hsp[\"HIT_ACCESSION\"], \"2001\")\n self.assertEqual(first_hsp[\"HIT_LENGTH\"], 707)",
"def pull_fields(self, org):\n pass",
"def extract_way(element, way_attr_fields = WAY_FIELDS, problem_chars=PROBLEMCHARS, default_tag_type='regular') :\r\n attribs = {}\r\n nodes = []\r\n tags =[]\r\n\r\n for key in way_attr_fields:\r\n attribs[key] = element.attrib[key]\r\n for tag in element.iter(\"tag\"):\r\n way_tag = {}\r\n way_tag[\"type\"] = default_tag_type\r\n way_tag[\"id\"] = attribs[\"id\"]\r\n way_tag[\"value\"] = tag.attrib[\"v\"]\r\n\r\n k = tag.attrib[\"k\"]\r\n if PROBLEMCHARS.search(k):\r\n continue\r\n elif \":\" in k:\r\n way_tag[\"key\"] = k.split(\":\", 1)[1]\r\n way_tag[\"type\"] = k.split(\":\", 1)[0]\r\n else:\r\n way_tag[\"key\"] = k\r\n\r\n # Audit city name , if any, before appending the dictionary in list\r\n\r\n if way_tag[\"key\"] == \"city\":\r\n way_tag[\"value\"] = update_city_name(way_tag[\"value\"])\r\n\r\n # Audit street name, if any , as per mapping\r\n\r\n if way_tag[\"key\"] == \"street\" or \"street:name\":\r\n way_tag[\"value\"] = update_street_name(way_tag[\"value\"], mapping)\r\n\r\n # Check if postcode is valid, if invalid prefix the postcode value with 'fixme:'\r\n\r\n if way_tag[\"key\"] == \"postcode\":\r\n invalid, way_tag[\"value\"] = update_postcode(way_tag[\"value\"])\r\n if invalid:\r\n way_tag[\"value\"]='fixme:'+ way_tag[\"value\"]\r\n\r\n tags.append(way_tag)\r\n\r\n for counter, nd in enumerate(element.iter(\"nd\")):\r\n nd_tags = {}\r\n nd_tags[\"id\"] = attribs[\"id\"]\r\n nd_tags[\"node_id\"] = nd.attrib[\"ref\"]\r\n nd_tags[\"position\"] = counter\r\n\r\n nodes.append(nd_tags)\r\n\r\n return {'way': attribs, 'way_nodes': nodes, 'way_tags': tags}",
"def get_reply_fields(self): \n def alter_request_edges(self, jdata):\n \"\"\"\n From the jsonified request template, converts\n \"edges\" : { \"node\" : { \"key1\" : value1, ... } }\n to something resembling a reply message body:\n \"edges\" : [ { \"key1\" : value, ... } }\n so that flatten_json can be run against it to extract\n valid field names.\n \"\"\"\n if isinstance(jdata, list):\n for entry in jdata:\n self._alter_request_edges(entry)\n if isinstance(jdata, dict):\n for key in jdata:\n if key == \"edges\":\n edge_dict = jdata[key]\n jdata[key] = []\n for subkey in edge_dict:\n jdata[key].append(edge_dict[subkey]) \n self._alter_request_edges(jdata[key]) \n\n json1 = re.sub(r'([z-zA-z0-9_-]+)(?:\\(.*?\\))*\\s*([\\[\\{])', r'\"\\1\" : \\2', self.template_text)\n json2 = re.sub(r'\\.*([a-zA-Z0-9]+)\\s*\\n', r'\"\\1\" : true,\\n', json1)\n json3 = re.sub(r'(\"[a-zA-Z0-9_-]+\"\\s*:[^,]+),(\\s*\\n\\s*[\\}\\]].*)', r'\\1\\2', json2)\n jdata = json.loads(json3)\n alter_request_edges(jdata)\n jreply = self.flatten_json(jdata, self.flatpath)\n self.reply_fields = [ key for key in jdata[0] ]\n return self._reply_fields",
"def get_explain_details(match_explanation):\n expl = []\n for x in match_explanation[\"details\"]:\n if len(re.findall(\"attrib=([a-zA-Z0-9_\\-\\s]+)\", str(x))) > 1:\n expl.extend(get_explain_details(x))\n elif len(re.findall(\"attrib=([a-zA-Z0-9_\\-\\s]+)\", str(x))) == 1:\n expl.append({\"field\": re.search(\"attrib=([a-zA-Z0-9_\\-\\s]+)\", str(x)).group(1), \"similarity\":x['value']})\n return expl",
"def test_gethints_other(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'hints'})\r\n out = view.get_hints(post, self.course_id, 'hints')\r\n print out\r\n self.assertTrue(out['other_field'] == 'mod_queue')\r\n expected = {self.problem_id: [('1.0', {'1': ['Hint 1', 2],\r\n '3': ['Hint 3', 12]}),\r\n ('2.0', {'4': ['Hint 4', 3]})\r\n ]}\r\n self.assertTrue(out['all_hints'] == expected)",
"def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content",
"def strategy_crawler(details):\n try:\n response = get(details['url'])\n soup = bs4.BeautifulSoup(response.text)\n # Regex any div with class 'tppjsc' for a stop_id number\n stop_divs = soup.select('div.tppjsc')\n stop_ids = [re.findall(r'(\\d+)', div.text) for div in stop_divs]\n stop_ids = reduce(lambda x, y: x + y, stop_ids)\n details['stop_ids'] = list(set(stop_ids)) # Remove duplicates\n if details['stop_ids'] != 'Unavailable':\n details['agency'] = details['agency'][0]\n return details\n except:\n details['stop_ids'] = \"Unavailable\"\n return details",
"def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()",
"def test_gethints(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'mod_queue'})\r\n out = view.get_hints(post, self.course_id, 'mod_queue')\r\n print out\r\n self.assertTrue(out['other_field'] == 'hints')\r\n expected = {self.problem_id: [(u'2.0', {u'2': [u'Hint 2', 1]})]}\r\n self.assertTrue(out['all_hints'] == expected)",
"def fields(self):",
"def test_get_rule_details(self):\n pass",
"def get_fields(self, resource):\n\n def _get_fields_key(resource):\n \"\"\"Returns the fields key from a resource dict\n\n \"\"\"\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None\n\n if isinstance(resource, dict) and 'resource' in resource:\n resource_id = resource['resource']\n elif (isinstance(resource, basestring) and (\n SOURCE_RE.match(resource) or DATASET_RE.match(resource) or\n MODEL_RE.match(resource) or PREDICTION_RE.match(resource))):\n resource_id = resource\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n else:\n LOGGER.error(\"Wrong resource id\")\n return\n # Tries to extract fields information from resource dict. If it fails,\n # a get remote call is used to retrieve the resource by id.\n fields = None\n try:\n fields = _get_fields_key(resource)\n except KeyError:\n resource = self._get(\"%s%s\" % (self.url, resource_id))\n fields = _get_fields_key(resource)\n\n return fields",
"def hit_details(hit_id, sandbox, recruiter):\n prolific_check(recruiter, sandbox)\n rec = by_name(recruiter, skip_config_validation=True)\n details = rec.hit_details(hit_id, sandbox)\n print(json.dumps(details, indent=4, default=str))",
"def get_issue(self, context):",
"def fields(self):\n ...",
"def scrape_technical_data(self):\n\n page = requests.get(self.url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(\n \"section\", class_=\"titlereference-section-additional-details\"\n )\n results = results.find_all(\"tr\")\n\n data = []\n for res in results:\n data.append(res)\n\n runtime = data[1]\n runtime = re.sub(\"<.*?>\", \"\", str(runtime))\n runtime = (\n runtime.replace(\" \", \"\")\n .replace(\"Runtime\", \"\")\n .replace(\"\\n\", \"\")\n .replace(\"min\", \" min\")\n )\n\n country = \"\"\n languages = []\n color = \"\"\n social = {}\n if \"Country</td>\" in str(data[2]):\n country = data[2].find_all(\"a\")\n country = re.sub(\"<.*?>\", \"\", str(country))\n\n languages = data[3].find_all(\"a\")\n languages = re.sub(\"<.*?>\", \"\", str(languages))\n\n color = data[4].find_all(\"a\")\n color = re.sub(\"<.*?>\", \"\", str(color))\n elif \"Country</td>\" in str(data[3]):\n sites = data[2].find_all(\"a\")\n for site in sites:\n key = (\n re.sub(\"<.*?>\", \"\", str(site))\n .replace(\"\\n\", \"\")\n .replace(\" \", \"\")\n .replace(\" \", \"\")\n )\n social[key] = site[\"href\"]\n\n country = data[3].find_all(\"a\")\n country = re.sub(\"<.*?>\", \"\", str(country))\n\n languages = data[4].find_all(\"a\")\n languages = re.sub(\"<.*?>\", \"\", str(languages))\n\n color = data[5].find_all(\"a\")\n color = re.sub(\"<.*?>\", \"\", str(color))\n\n country = country.replace(\"[\", \"\").replace(\"]\", \"\")\n languages = languages.replace(\"[\", \"\").replace(\"]\", \"\")\n color = color.replace(\"[\", \"\").replace(\"]\", \"\")\n\n return (runtime, country, languages, color, social)",
"def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues"
]
| [
"0.656218",
"0.54770523",
"0.54455316",
"0.5295992",
"0.52034366",
"0.51422",
"0.51173514",
"0.511575",
"0.51122963",
"0.50632423",
"0.50037414",
"0.49961174",
"0.49717402",
"0.49483138",
"0.49464095",
"0.4919574",
"0.48905483",
"0.48504505",
"0.48457265",
"0.48365772",
"0.4835398",
"0.4832522",
"0.48233706",
"0.48025426",
"0.4789322",
"0.47783667",
"0.47779757",
"0.4771945",
"0.4763828",
"0.47552416"
]
| 0.7713654 | 0 |
Return a total of `num` random samples and labels. Mimics the mnist.train.next_batch() function | def next_batch(num, data, labels):
idx = np.arange(0 , len(data))
np.random.shuffle(idx)
idx = idx[:num]
data_shuffle = [data[i] for i in idx]
labels_shuffle = [labels[i] for i in idx]
return np.asarray(data_shuffle), np.asarray(labels_shuffle) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n \n return np.asarray(data_shuffle), np.asarray(labels_shuffle)",
"def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)",
"def train(self, num_batches: int):",
"def next_batch(self, batch_size, shuffle=True):",
"def num_training_examples(self):",
"def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n #fake_image = [1] * 784\r\n fake_image = [1]*6400\r\n if self.one_hot:\r\n #fake_label = [1] + [0] * 9\r\n fake_label = [1]+[0]*(people-1)\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]",
"def generator(numbers, number_labels, batch_size=32):\n while True: # Loop forever so the generator never terminates\n\n images = []\n labels = []\n\n for batch_sample in range(batch_size):\n img, label = create_numbers(numbers, number_labels, return_label=True)\n\n # Here we will convert the label to a format that Keras API can process:\n n_label = np.zeros((5, 11), dtype='int')\n for i, digit in enumerate(label):\n if digit == \".\":\n n_digit = 10\n else:\n n_digit = int(digit)\n\n n_label[i][n_digit] = 1\n\n images.append(img)\n # labels.append(label)\n labels.append(n_label)\n\n X_train = np.array(images)\n if len(X_train.shape) == 3:\n X_train = np.expand_dims(X_train, -1)\n\n y_temp = np.array(labels)\n\n y1 = y_temp[:, 0, :]\n y2 = y_temp[:, 1, :]\n y3 = y_temp[:, 2, :]\n y4 = y_temp[:, 3, :]\n y5 = y_temp[:, 4, :]\n\n yield X_train, [y1, y2, y3, y4, y5]",
"def train_next_batch(self, batch_size=None):",
"def batch_size(features, labels):\n return extract_batch_length(features)",
"def generator(features, labels, batch_size):\n \n # Create empty arrays to contain batch of features and labels#\n batch_features = np.zeros((batch_size, 160, 320, 3))\n batch_labels = np.zeros((batch_size, 1))\n while True:\n for i in range(batch_size):\n # choose random index in features\n index = random.choice(range(len(features)))\n batch_features[i] = features[index]\n batch_labels[i] = labels[index]\n yield batch_features, batch_labels",
"def generate_next_batch(self, data): \n \n batch_words = np.array(data[self.batch_lookup[self.batch_index]][0])\n batch_labels = np.array(data[self.batch_lookup[self.batch_index]][1])\n self.batch_index += 1\n if self.batch_index == len(data) - 1:\n self.epoch += 1\n return batch_words, batch_labels",
"def random_labels(size, num_classes):\n return torch.randint(high=num_classes, size=(size,)).int().tolist()",
"def next_batch(self, batch_size, fake_data=False):\r\n if fake_data:\r\n fake_image = [1.0 for _ in range(784)]\r\n fake_label = 0\r\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n #print (0)\r\n #print(self._index_in_epoch,self._num_examples)\r\n #若当前训练读取的index>总体的images数时,则读取读取开始的batch_size大小的数据\r\n if self._index_in_epoch > self._num_examples:\r\n #print (0)\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Shuffle the data\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self._images[perm]\r\n self._labels = self._labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n #print (\"start is:%d,end is:%d\"%(start,end))\r\n return self._images[start:end], self._labels[start:end]",
"def train(batch_size, num_sample=128):\n return paddle.batch(_read_creater(num_sample=num_sample), batch_size)",
"def next_batch(self, batch_size):\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n\r\n if self._index_in_epoch > self._num_examples:\r\n # After each epoch we update this\r\n self._epochs_done += 1\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n #print(\"numexamples \",self._num_examples)\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n\r\n return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]",
"def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n fake_image = [1] * 784\r\n if self.one_hot:\r\n fake_label = [1] + [0] * 9\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]",
"def next_batch(self, batch_size):\n start = self._index_in_epoch\n end = min(start + batch_size, self._num_examples)\n batch_data = self._data[start:end]\n if self._label_used:\n batch_labels = self._labels[start:end]\n\n if end == self._num_examples:\n self._epochs_completed += 1\n self._index_in_epoch = 0\n if self._shuffled:\n perm = np.arange(self._num_examples)\n random.shuffle(perm)\n self._data = self._data[perm]\n if self._label_used:\n self._labels = self._labels[perm]\n else:\n self._index_in_epoch = end\n\n if self._label_used:\n return batch_data,batch_labels\n else:\n return batch_data",
"def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]",
"def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n self._epochs_completed += 1\n start = 0\n self._index_in_epoch = batch_size\n end = self._index_in_epoch\n return self._samples[start:end], self._labels[start:end]",
"def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]",
"def next(self, batch_size=np.inf):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n # shuffle the data each pass over it.\n rng_state = np.random.get_state()\n np.random.shuffle(self.data)\n np.random.set_state(rng_state)\n np.random.shuffle(self.labels)\n \n end_idx = min(self.batch_id + batch_size, len(self.data))\n batch_data = (self.data[self.batch_id:end_idx])\n batch_labels = self.labels[self.batch_id:end_idx]\n batch_seqlen = (self.seqlen[self.batch_id:end_idx])\n self.batch_id = end_idx\n return batch_data, batch_labels, batch_seqlen",
"def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle the data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]",
"def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n\n end = self._index_in_epoch\n\n return self._images[start:end], self._labels[start:end]",
"def next_batch(self,batch_size):\r\n end_indicator = self._indicator + batch_size\r\n if end_indicator > self._num_examples:\r\n if self._need_shuffle:\r\n self._shuffle_data()\r\n end_indicator = batch_size\r\n else:\r\n raise Exception(\"have no more examples.\")\r\n\r\n if end_indicator > self._num_examples:\r\n raise Exception(\"batch size is larger than all examples.\")\r\n batch_data = self._data[self._indicator: end_indicator]\r\n batch_labels = self._labels[self._indicator: end_indicator]\r\n self._indicator = end_indicator\r\n return batch_data,batch_labels",
"def next_batch(index,feature,label,batch_size):\n epochs_completed = 0\n examples = feature.shape[0]\n start = index*batch_size\n index_in_epoch =index*batch_size+batch_size-1\n if index_in_epoch > examples:\n # Finished epoch\n epochs_completed += 1\n # Shuffle the data\n perm = np.arange(examples)\n np.random.shuffle(perm)\n feature = feature[perm]\n label = label[perm]\n # Start next epoch\n start = 0\n index_in_epoch = batch_size\n assert batch_size <= examples\n end = index_in_epoch\n return feature[start:end], label[start:end]",
"def sample_train_batch(self):\r\n batch = []\r\n labels =[]\r\n num_groups = self.batch_size // self.batch_k\r\n sampleed_classes = np.random.choice(self.train_class_ids,num_groups,replace=False)\r\n for class_id in sampleed_classes:\r\n img_fname = np.random.choice(self.train_image_files[class_id],self.batch_k,replace=False)\r\n batch += img_fname.tolist()\r\n labels += [class_id]*self.batch_k\r\n return batch,labels",
"def total_predict_batches(self) -> int:\n return sum(self.trainer.num_predict_batches)",
"def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n #fake_image = [1.0 for _ in xrange(784)]\n fake_image = [1.0 for _ in range(784)]\n fake_label = 0\n #return [fake_image for _ in xrange(batch_size)], [\n # fake_label for _ in xrange(batch_size)]\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]",
"def next_batch(self,batch_size):\r\n end_indicator = self._indicator + batch_size\r\n if end_indicator > self._num_examples:\r\n if self._need_shuffle:\r\n self._shuffle_data()\r\n self._indicator = 0\r\n end_indicator = batch_size\r\n else:\r\n raise Exception(\"have no more examples.\")\r\n if end_indicator > self._num_examples:\r\n raise Exception(\"too lager batch size than examples.\")\r\n batch_data = self._data[self._indicator: end_indicator]\r\n batch_label = self._label[self._indicator: end_indicator]\r\n self._indicator = end_indicator\r\n return batch_data, batch_label",
"def get_num_train_samples(self):\n raise NotImplementedError"
]
| [
"0.6891144",
"0.6830833",
"0.63632464",
"0.6281471",
"0.6275252",
"0.6201188",
"0.6174942",
"0.6142057",
"0.6138394",
"0.61206514",
"0.6116791",
"0.6075215",
"0.6054932",
"0.6029934",
"0.6014392",
"0.598679",
"0.5984626",
"0.59697586",
"0.59603626",
"0.59552246",
"0.5938416",
"0.5923951",
"0.59206456",
"0.59187704",
"0.5916189",
"0.59134895",
"0.5880388",
"0.58770716",
"0.58686465",
"0.58331174"
]
| 0.6933328 | 0 |
POST request to create a valid team in the database | def test_post_team(self):
response = self.client.post(url_for('teams'),
data={
'name': 'test team',
'capacity': 11,
'number_players': 6,
'pitch_postcode': 'E1 6LT',
'time': '2019-01-01 13:00'
})
self.assertEqual(response.status_code, 201)
self.assertIn(b'Team created successfully', response.data)
self.assertEqual(db.session.query(Team).count(), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n req = team_req.parse_args(strict=True)\n curr_user = api.user.get_user()\n if curr_user[\"teacher\"]:\n raise PicoException(\"Teachers may not create teams\", 403)\n req[\"team_name\"] = req[\"team_name\"].strip()\n if not all(\n [\n c in string.digits + string.ascii_lowercase + \" ()+-,#'&!?\"\n for c in req[\"team_name\"].lower()\n ]\n ):\n raise PicoException(\n \"Team names cannot contain special characters other than \"\n + \"()+-,#'&!?\",\n status_code=400,\n )\n\n if req[\"team_name\"] == curr_user[\"username\"]:\n raise PicoException(\"Invalid team name\", status_code=409)\n\n new_tid = api.team.create_and_join_new_team(\n req[\"team_name\"], req[\"team_password\"], curr_user\n )\n res = jsonify({\"success\": True, \"tid\": new_tid})\n res.status_code = 201\n return res",
"def test_create_new_team(self):\n default_user = AnotherUserFactory(email_confirmed=True)\n token = Token.objects.get(user=default_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Team.objects.filter(name=data['name']).exists())",
"def create_team():\n # Get the user's id from access token\n uid = get_jwt_identity()\n\n # If no user id, return error\n if not uid:\n return make_response(\n jsonify({'error': 'Could not verify!'}),\n 401,\n {'WWW-Authentication': 'Basic realm=\"Login required!\"'})\n\n # Try to get user from database\n query = User.query.filter_by(public_id=uid)\n\n try:\n user = query.one()\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 401\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Get team data from request\n data = request.get_json()\n\n # Verify that all required team data was sent\n if not data['name'] or not data['group']:\n return make_response(jsonify({'error': 'Missing data!'}), 400)\n\n # Create team object\n team = Team(\n name=data['name'],\n iso_2=data['iso_2'],\n group=data['group'])\n\n # Try to add team to database\n try:\n db.session.add(team)\n db.session.commit()\n\n # If team name already in database, return error\n except IntegrityError:\n return jsonify({\n 'error': 'Team with name already exists'\n }), 400\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialze the team object and return json response\n team_schema = TeamSchema()\n output = team_schema.dump(team).data\n\n return jsonify({\n 'success': 'Successfully retrieved team.',\n 'team': output\n }), 200",
"def test_posting_a_teammate(self):\n response = self.client.post(\n '/team/all/', {'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'},\n format='json')\n self.assertEqual(response.data, {'status': 201,\n \"data\": {'id': 1, 'name': 'New Name',\n 'email': '[email protected]',\n 'slackhandle': '@NewTeam'}})",
"def create_team(request):\n if request.method == 'POST':\n email = request.session.get('email', None)\n team_name = request.POST.get('team_name', None)\n team = Team(name=team_name)\n team.save()\n\n message = \"Team created, please use the cool search feature and assign yourself to the team\"\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams')\n else:\n raise Http404('Not allowed')",
"def create(self, request):\n serializer = data_serializers.CreateTeamSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_team_entity = self.controller.create_team(request_data=request_data)\n serializer = data_serializers.PresentTeamSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except domain_exceptions.TeamHasALeader as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def create_team_action(request):\n # Create the team.\n now = datetime.utcnow()\n user_id = request.context.user_id\n user = load_user(request.db, user_id)\n # Select a round based on the user's badges.\n round_ids = find_round_ids_with_badges(request.db, user['badges'], now)\n if len(round_ids) == 0:\n # The user does not have access to any open round.\n raise ApiError('not qualified for any open round')\n if len(round_ids) > 1:\n # XXX The case where a user has badges for multiple open rounds\n # is currently handled by picking the first one, which is the\n # one that has the greatest id. This is unsatisfactory.\n pass\n round_id = round_ids[0]\n round_ = load_round(request.db, round_id, now)\n if not round_['is_registration_open']:\n raise ApiError('registration is closed')\n # Create the team.\n team_id = create_user_team(request.db, user_id, now)\n # Create a participation.\n create_participation(request.db, team_id, round_id, now=now)\n # Ensure the user gets team credentials.\n reset_user_principals(request)\n return {'success': True}",
"def test_create_team_creates_survey(self):\n user = User.create(name='User Foo', email='[email protected]')\n user.put()\n\n code = 'trout viper'\n\n team_response = self.testapp.post_json(\n '/api/teams',\n {\n 'name': 'Team Foo',\n 'code': code,\n 'program_id': self.ep_program.uid,\n },\n headers=self.login_headers(user),\n )\n team_dict = json.loads(team_response.body)\n\n survey_result = Survey.get(team_id=team_dict['uid'])\n self.assertEqual(len(survey_result), 1)\n survey = survey_result[0]\n\n return user, team_dict",
"def post(self):\n player = PlayerSchema().load(request.get_json())\n\n try:\n db.session.add(player)\n db.session.commit()\n except IntegrityError as e:\n logger.warning(\n f\"Integrity Error, this team is already in the database. Error: {e}\"\n )\n\n abort(500, message=\"Unexpected Error!\")\n else:\n return player.player_id, 201",
"def test_create_team(self):\n pass",
"async def create_team(new_team: BaseTeam, db_handler: DBHandler = Depends(database_dependency)):\n try:\n inserted_record = await db_handler.insert_team(new_team=new_team)\n inserted_record = init_BaseTeam(inserted_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return inserted_record",
"def test_register_team_already_team(self):\n result = self.client.post(\"/teams\", data={\"already_team\": \"Killers\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n # self.assertIn(b\"Sorry! That team name is already in use!\", result.data) #error:not in /teams, but should be in createTeam",
"def test_teams_create(self):\n pass",
"def test_cannot_create_new_team(self):\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_teams_save_team_v1(self):\n pass",
"def addTeam(request):\n registered = False\n if request.method == 'POST':\n team_form = TeamForm(data=request.POST)\n if team_form.is_valid():\n team = team_form.save()\n registered = True\n else:\n print(team_form.errors)\n else:\n team_form = TeamForm()\n return render(request,'footBallApp/team.html',\n {'team_form':team_form,\n 'registered':registered})",
"def team_add(token_user):\n if not json_param_exists('name') or \\\n not json_param_exists('type'):\n abort(400, \"one or more required parameter is missing\")\n name = request.json['name']\n team_type = TeamType.query.filter_by(name=request.json['type']).first()\n if not team_type:\n abort(400, \"invalid team type\")\n\n if team_type.name == 'other_team':\n if not token_user.has_permission('team.create') and \\\n not token_user.has_permission('team.create.elevated'):\n abort(403, 'team creation is not permitted')\n else: # creating any team other than 'other_team' requires elevated\n if not token_user.has_permission('team.create.elevated'):\n abort(403, 'insufficient permissions to create a team of this type')\n\n team = Team(name=name)\n team.team_type = team_type\n\n try:\n get_db().add(team)\n get_db().commit()\n except IntegrityError:\n abort(409, 'team name is already in use')\n\n return '', 201",
"def create_team_if_needed():\n try:\n # get client data from request\n client_data = request.get_json()\n print(f\"recived: {client_data}\")\n\n # set a gender code\n gender = 'girls' if client_data['gender_code'] == 1 else 'boys'\n\n # if the team does not yet exist, create it\n team = Team.query.filter_by(school_id = client_data['school_id'],\n year=client_data['year'],\n gender=gender).first()\n print(team)\n if not team:\n team = Team(gender=gender,\n year=client_data['year'],\n school_id=client_data['school_id'])\n db.session.add(team)\n db.session.commit()\n\n # Pass JSON_received to the frontend\n JSON_received = {'Status':'Received race'}\n return jsonify(JSON_received)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)",
"def create_challenge_team(request, challenge_pk):\n\tif request.method == \"POST\":\n\t\tteam_name = request.POST[\"team-name\"]\n\t\t\n\t\tnew_team = ChallengeTeam()\n\t\tnew_team.team_name = team_name\n\t\t\n\t\tselected_challenge = Challenge.objects.get(pk = challenge_pk)\n\t\tnew_team.challenge = selected_challenge\n\t\t\n\t\tnew_team.save()\n\t\t\n\t\treturn redirect(\"/challenge/view/\" + str(challenge_pk))\n\t\t\n\telse:\n\t\tselected_challenge = Challenge.objects.get(pk = challenge_pk)\n\t\t\n\t\tcontext = RequestContext(request, {\"challenge_name\" : selected_challenge.name})\n\t\treturn render_to_response(\"encourage/create_team.html\", context)",
"def new(request):\n template = loader.get_template('team/new.html')\n\n if request.method == 'POST':\n form = TeamForm(request.user, request.POST)\n if form.is_valid():\n team = form.save(commit=False)\n team.year = datetime.datetime.now().year\n if 'logo_image' in request.FILES:\n team.logo = request.FILES['logo_image']\n if request.POST.get('team_info'):\n team.information = request.POST.get('team_info')\n team.save()\n\n # assign team to all members\n request.user.profile.team = team\n request.user.save()\n if form.cleaned_data['member2'] is not '':\n member2 = User.objects.get(pk=form.cleaned_data['member2'])\n member2.profile.team = team\n member2.save()\n if form.cleaned_data['member3'] is not '':\n member3 = User.objects.get(pk=form.cleaned_data['member3'])\n member3.profile.team = team\n member3.save()\n if form.cleaned_data['member4'] is not '':\n member4 = User.objects.get(pk=form.cleaned_data['member4'])\n member4.profile.team = team\n member4.save()\n\n messages.success(request, _('Your team has been created.'))\n\n else:\n if request.user.profile.team is not None:\n return redirect('/team/my-team')\n form = TeamForm(request.user)\n\n context = {'form': form}\n return CustomHttpResponse.send(template, context, request)",
"def test_post_request_for_team(self):\n\n usual_user = UserFactory(\n username='Usual User',\n email='[email protected]',\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n data = {'team': self.team.id}\n response = self.client.post(reverse('api:user-team-requests-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n notification = UserNotification.objects.last()\n notification_message = UserNotification.get_notification_text(\n UserNotification.TEAM_REQUEST_WAS_SENT_WITH_DEACTIVATED_EMAIL, username=usual_user.username\n )\n self.assertEqual(notification.message, notification_message)",
"def add_team():\n if request.method == 'POST':\n result = request.form\n teamImage = request.files['teamImage'].read()\n team = Team.query.filter_by(team_name=result['team_name']).first()\n if not team:\n team1 = Team(team_name=result['team_name'], team_image=teamImage)\n db.session.add(team1)\n db.session.commit()\n flash(result['team_name'] + ' is added successfully')\n teams = get_team()\n return render_template('team-players.html', teams=teams)\n else:\n flash(result['team_name'] + ' is already present')\n return render_template('addteam.html')",
"def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)",
"def test_teams_save_team_member_v1(self):\n pass",
"def perform_create(self, serializer):\n team = get_object_or_404(models.Team, pk=self.kwargs.get('pk'))\n\n return serializer.save(team=team)",
"def post(self, team_id, project_id):\n if not TeamService.is_user_team_manager(team_id, token_auth.current_user()):\n return {\n \"Error\": \"User is not an admin or a manager for the team\",\n \"SubCode\": \"UserPermissionError\",\n }, 401\n\n try:\n role = request.get_json(force=True)[\"role\"]\n except DataError as e:\n current_app.logger.error(f\"Error validating request: {str(e)}\")\n return {\"Error\": str(e), \"SubCode\": \"InvalidData\"}, 400\n\n try:\n if not ProjectAdminService.is_user_action_permitted_on_project(\n token_auth.current_user, project_id\n ):\n raise ValueError()\n TeamService.add_team_project(team_id, project_id, role)\n return (\n {\n \"Success\": \"Team {} assigned to project {} with role {}\".format(\n team_id, project_id, role\n )\n },\n 201,\n )\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403",
"def post( ):\n # get data from json.\n input_data = TournamentRegister.register_parser.parse_args( )\n # corresponding Tournament exist in database.\n if TournamentModel.get_tournament(input_data['tournament_id']) is None:\n # create Tournament.\n tournament = TournamentModel(tournament_id=input_data['tournament_id'], tournament_start_date=input_data[\n 'tournament_start_date'],tournament_end_date=input_data['tournament_end_date'])\n # save user.\n tournament.save_data()\n return {'message': 'Tournament created ', 'Tournament_details': tournament.json(), 'Success_Code': 1}, 201\n else:\n return {'message': 'Tournament already present with the Tournament id', 'Success_Code': 0}, 400",
"def test_name_must_be_present(self):\n response = self.client.post(url_for('teams'),\n data={\n 'capacity': 10,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 400)",
"def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)",
"def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_team_entity = self.controller.assign_team_leader(request_data=request_data)\n serializer = data_serializers.TeamLeaderPresenterSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.EmployeeDoesNotExist\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)"
]
| [
"0.79959697",
"0.7724525",
"0.76015633",
"0.7561775",
"0.7447651",
"0.743541",
"0.7377837",
"0.7367278",
"0.7318445",
"0.72648257",
"0.722233",
"0.7212253",
"0.7125656",
"0.71181744",
"0.70906293",
"0.708581",
"0.7075323",
"0.6973152",
"0.68631893",
"0.6804662",
"0.67360556",
"0.6692681",
"0.66882",
"0.66638964",
"0.6532225",
"0.6476496",
"0.6471119",
"0.64649856",
"0.643951",
"0.6424673"
]
| 0.80973095 | 0 |
player number cannot be empty | def test_player_number_cannot_be_empty(self):
with self.assertRaises(Exception) as context:
self.client.post(
url_for('teams'),
data={
'name': 'team',
'capacity': '5',
'number_players': 'hello',
'pitch_postcode': 'E1 6LT',
'time': '2019-01-01 13:00'
}
)
self.assertTrue('Number players must be a number' in context.exception)
self.assertEqual(db.session.query(Team).count(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_num_players(num_players):\n \n try:\n\tnum_players = int(num_players)\n except ValueError:\n\treturn None\n if num_players < 2 or num_players > 4:\n return None\n\n return num_players",
"def enough_players():\n return True",
"def SelectPlayer(self):\n\n player = input(data['player'])\n if player == \"1\":\n return 0\n elif player == \"2\":\n return 1\n else:\n return 'invalid'",
"def string_u_broj(self):\n if self.player_input == \"rock\":\n self.player_number = 0\n elif self.player_input == \"spock\":\n self.player_number = 1\n elif self.player_input == \"paper\":\n self.player_number = 2\n elif self.player_input == \"lizard\":\n self.player_number = 3\n elif self.player_input == \"scissors\":\n self.player_number = 4\n else:\n self.player_number = -1\n raise RpslsError(102)\n return self.player_number",
"def get_player(self, number):\n num = int(number)\n assert (num in [1, 2])\n return self.player_1 if num == 1 else self.player_2",
"def player_choice(board):\n position = -1\n while True:\n try:\n position = int(input(\"Choose your position: \"))\n\n if 0 < position <= 9:\n is_empty_position = space_check(board, position)\n if is_empty_position:\n break\n else:\n print('Position is not empty, choose again!')\n continue\n except ValueError:\n print('Invalid position, choose again!')\n return position",
"def set_players():\n \n while True:\n players = eval(input(\"Geben Sie die Anzahl Spieler an oder tippe '0' zum Abbruch: \"))\n if int(players) > 0:\n break\n elif int(players) == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine positive Ganzzahl eingeben!\")\n print()\n print()\n return players",
"def player(board):\n count = 0\n rows = 3\n columns = 3\n for i in range(rows):\n for j in range(columns):\n if board[i][j] != EMPTY:\n count += 1\n if count % 2 == 0:\n player = X\n else:\n player = O\n return player\n\n #raise NotImplementedError",
"def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass",
"def is_valid_player(user_input):\n \n i = user_input.upper()\n if i in Board.player_decoder:\n return True\n elif i == 'Q':\n exit(\"\\nExiting program. Thanks for using Clue Detective!\\n\")\n else:\n return False",
"def insert_number_player(client):\r\n global num_player\r\n global player_insert\r\n if player_insert == False:\r\n msg_client('Inserire il numero di giocatori: ', client)\r\n msg = client.recv(BUFSIZ)\r\n check_quit(msg, client)\r\n num_player = check_number(client, msg, 1, nPlayer)\r\n \r\n player_insert = True",
"def get_number_of_players():\n number_of_players = None\n while not(type(number_of_players)) == int:\n try:\n number_of_players = int(input(\"How many players are there? \"))\n if number_of_players == 0:\n raise zeroPlayersError\n elif number_of_players > 6:\n raise tooManyPlayersError\n except zeroPlayersError:\n print(\"The game needs at least 1 player\")\n number_of_players = None\n except tooManyPlayersError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_players = None\n except:\n number_of_players = None\n return number_of_players",
"def player(board):\n if not terminal(board):\n cnt_x = 0\n cnt_o = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n continue\n elif board[i][j] == X:\n cnt_x += 1\n else:\n cnt_o += 1\n\n if cnt_x <= cnt_o:\n return X\n else:\n return O\n else:\n return None",
"def player(self):\n legal = self.board.legal_move(self.black)\n if(len(legal) == 0):\n self.p_no_move = 1\n print(\"No legal move for player!\")\n self.computer_turn = True\n self.player_turn = False",
"def check_player_id(self):\n if self.player_id == 'me':\n profile = self.profile\n self.player_id = profile['id']",
"def test_not_found_pair(self):\n self.assertTrue(\"No matches found\", show_players_sumheigh_is_input(100))",
"def test_not_is_the_same(self):\n self.assertFalse(show_players_sumheigh_is_input(140), \"-Mike Wilks Mike Wilks\")",
"def player(s):\r\n\r\n slot = int(input('\\n' + s + \"'s move: \"))\r\n if board[slot] in '012345678':\r\n board[slot] = s\r\n else:\r\n print('That space is filled!')",
"def create_player(id_player: str):\n id_player = str(id_player)\n last_name = input(\"Last name of the player : \")\n first_name = input(\"First name of the player : \")\n birthday = input(\"Birthday of the player : \")\n sex = input(\"Sex of the player : \")\n elo = int(input(\"Elo of the player: \"))\n\n if not Player.get(id_player):\n Player(id_player, last_name, first_name, birthday, sex, elo)\n else:\n raise Exception(f\"The ID {id_player} already exists : {Player.get(id_player)}\")",
"def _checkPlayer(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.owner == self.player):\r\n message = (\"Player (%r) is not allowed to move that pawn (%r)\" %\r\n (self.player, pawn))\r\n raise IllegalMoveException(message)",
"def test_malformed_player(self):\n board = Board()\n player1 = MalformedDataPlayer()\n player_guard1 = PlayerGuard(player1, timeout=3)\n\n p1id = uuid.uuid4()\n\n player_guard1.set_id(uuid.uuid4())\n\n self.assertRaises(PlayerMalformedData, player_guard1.place_worker, board)\n self.assertRaises(PlayerMalformedData, player_guard1.play_turn, board)",
"def test_create_player_invalid(self):\n payload = {'name': ''}\n res = self.client.post(PLAYERS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def input_player_elo(self):\r\n try:\r\n elo = input('Entrez le classement ELO du joueur: ')\r\n if int(elo):\r\n try:\r\n if int(elo) < 1000 or int(elo) > 2900:\r\n raise ValueError\r\n else:\r\n return int(elo)\r\n except ValueError:\r\n print(\"Le classement ELO doit être un entier compris entre 1000 et 2900!\")\r\n return self.input_player_elo()\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n print(\"Vous devez entrer un nombre! (entier supérieur compris entre 1000 et 2900)\")\r\n return self.input_player_elo()",
"def user_pick(self):\n player_taking = True\n while player_taking:\n play_take = int(input(\"How many dots would you like to remove?(1-4)\"))\n if not 1 <= play_take <= 4:\n print(\"You may only take between 1 and 4 balls\")\n else:\n player_taking = False\n return play_take",
"def min_players(self):\n return 2",
"def get_player_num(self):\r\n return self.player_control.get_player_num()",
"def ask_move(player: int) -> int:\n\n while True:\n\n try:\n pawn_number = int(input(f\"Player {player}: Choose a piece to move (0-3): \"))\n except ValueError:\n continue\n else:\n if 0 <= pawn_number <= 3:\n break\n\n return pawn_number",
"def generate_player_id() -> string:\n while True:\n # code will have uppercase letters and numbers\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id",
"def do_nothing(self, player):\n return '%s spins \\'nun\\' and does nothing.' % (player,)",
"def test_get_player(self):\n pass"
]
| [
"0.66977656",
"0.6385544",
"0.6348341",
"0.63268924",
"0.61899865",
"0.6139229",
"0.61374456",
"0.61094064",
"0.60811144",
"0.6046342",
"0.6008706",
"0.5914257",
"0.5887272",
"0.57951015",
"0.5778498",
"0.5777042",
"0.5770641",
"0.57653236",
"0.5763992",
"0.5750957",
"0.5707508",
"0.5697078",
"0.56775725",
"0.56670827",
"0.5666378",
"0.5663749",
"0.5660651",
"0.5655901",
"0.5650888",
"0.5650364"
]
| 0.6929696 | 0 |
pitch_postcode cannot be empty | def test_pitch_postcode_cannot_be_empty(self):
with self.assertRaises(Exception) as context:
self.client.post(
url_for('teams'),
data={
'name': 'team',
'capacity': '11',
'number_players': '1',
'pitch_postcode': '',
'time': '2019-01-01 13:00'
}
)
self.assertTrue('Postcode must be present' in context.exception)
self.assertEqual(db.session.query(Team).count(), 0)
def test_pitch_postcode_length(self):
"""pitch_postcode must be fewer than 8 character"""
with self.assertRaises(Exception) as context:
self.client.post(
url_for('teams'),
data={
'name': 'team',
'capacity': '11',
'number_players': '1',
'pitch_postcode': 'somewhereE1 6LT',
'time': '2019-01-01 13:00'
}
)
self.assertTrue('Postcode must be fewer than 8 characters' in context.exception)
self.assertEqual(db.session.query(Team).count(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pitch_postcode_length(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': '11',\n 'number_players': '1',\n 'pitch_postcode': 'somewhereE1 6LT',\n 'time': '2019-01-01 13:00'\n }\n )\n self.assertTrue('Postcode must be fewer than 8 characters' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)",
"def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response",
"def update_postcode(postcode, invalid = True):\r\n m = postcode_format_re.search(postcode)\r\n if m:\r\n invalid = False\r\n postcode= postcode[:5]\r\n return (invalid, postcode)",
"def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None",
"def clean_postal_code(self):\n return self.cleaned_data['postal_code'].strip()",
"def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )",
"def is_pj_bag(value):\n return is_list_with_len(value, 2) and (value[0] == BAG_STR) and is_natural(value[1])",
"def validate_notes_input(notes):\n if len(notes) == 0:\n notes = 'None'\n clear()\n return notes",
"def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")",
"def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")",
"def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")",
"def test_170417_empty(self):\n spc = parser(get_file('PTSD48_empty.txt'))\n # spc.draw_outlooks()\n spc.sql(self.txn)\n jabber = spc.get_jabbers('')\n self.assertEquals(jabber[0][0],\n (\"The Storm Prediction Center issues Days 4-8 \"\n \"Convective Outlook at Dec 25, 9:41z \"\n \"http://www.spc.noaa.gov/products/exper/day4-8/\"\n \"archive/2008/day4-8_20081225.html\"))",
"def normalise_postcode(postcode):\n\n postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n postcode = postcode[:-3] + \" \" + postcode[-3:]\n if POSTCODE_RE.match(postcode):\n return postcode\n return None",
"def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"postal_codes\")",
"def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"postal_codes\")",
"def check_empty_fields_before_bounds(header,\r\n mapping_data,\r\n warnings):\r\n\r\n desc_field = \"Description\"\r\n correction = 1\r\n primer_field = \"LinkerPrimerSequence\"\r\n\r\n try:\r\n desc_field_ix = header.index(desc_field) + correction\r\n primer_field_ix = header.index(primer_field) + correction\r\n except ValueError:\r\n # Skip if Description field not present, already get header error\r\n return warnings\r\n\r\n for curr_row in range(len(mapping_data)):\r\n for curr_col in range(primer_field_ix, desc_field_ix):\r\n curr_field = mapping_data[curr_row][curr_col].replace('\\n', '')\r\n if not curr_field:\r\n warnings.append('Empty data field ' +\r\n '%s found\\t%d,%d' %\r\n (mapping_data[\r\n curr_row][curr_col].replace('\\n', ''),\r\n curr_row + correction, curr_col))\r\n\r\n return warnings",
"def test_30_phonenumbers_empty(self):\n number_phone = self.samples[3]\n res = self.pn._symbol_set_char(number_phone)\n self.assertEqual(res, None, 'e164 phone formatting failed')\n res = self.pn._symbol_get(number_phone)\n self.assertEqual(res, None, 'International phone formatting failed')",
"def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)",
"def test_issue_74():\n patient = Patient(active=True, address=[])\n assert \"address\" not in patient.dict()\n assert patient.dict(exclude_none=False)[\"address\"] == []",
"def test_postal_code(self):\n self.assertIsInstance(self.address.postal_code, str)\n self.assertEqual(self.address.postal_code, \"75000\")",
"def validate_pnumac(pnumac):\n if not re.match(pnumac_pattern, pnumac):\n raise ValidationError(u'%s is not a valid area code'%pnumac)",
"def test_validate_party_info_hqaddress_is_none(self):\n self.party_test_data[\"hqAddress\"] = None\n response = validate_party_info(self.party_test_data)\n self.assertDictEqual(\n response,\n {\"message\": \"hqAddress is required\", \"code\": 400})",
"def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False",
"def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]",
"def test_170411_jabber_error(self):\n spc = parser(get_file('PFWF38_empty.txt'))\n j = spc.get_jabbers('')\n self.assertEquals(j[0][0],\n (\"The Storm Prediction Center issues Day 3-8 Fire \"\n \"Weather Outlook at Apr 11, 19:54z \"\n \"http://www.spc.noaa.gov/products/fire_wx/\"\n \"2017/20170413.html\"))",
"def clean_year_suciedad(palabra):\n\n try:\n if len(palabra) < 4:\n return None\n else:\n return palabra\n except:\n pass",
"def is_code_has_unknown_digit(processed_code):\n return True if list(processed_code).count(\"?\") == 0 else False",
"def _check_for_incomplete_input(self):\n pass",
"def test_validate_pincode(self):\n schema = vol.Schema(valid_pin)\n\n for value in ('', '123-456-78', 'a23-45-678', '12345678', 1234):\n with self.assertRaises(vol.MultipleInvalid):\n schema(value)\n\n for value in ('123-45-678', '234-56-789'):\n self.assertTrue(schema(value))",
"def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid"
]
| [
"0.65764123",
"0.60340387",
"0.5626135",
"0.5612466",
"0.55946106",
"0.5583271",
"0.53661996",
"0.529294",
"0.5222145",
"0.5222145",
"0.5222145",
"0.5163425",
"0.5148325",
"0.50922865",
"0.50922865",
"0.50837094",
"0.5075694",
"0.50661093",
"0.50603104",
"0.5041943",
"0.503111",
"0.5026323",
"0.50147986",
"0.5006004",
"0.5000394",
"0.49993277",
"0.49885798",
"0.49859875",
"0.49592394",
"0.49493495"
]
| 0.7474278 | 0 |
pitch_postcode must be fewer than 8 characters | def test_pitch_postcode_length(self):
with self.assertRaises(Exception) as context:
self.client.post(
url_for('teams'),
data={
'name': 'team',
'capacity': '11',
'number_players': '1',
'pitch_postcode': 'somewhereE1 6LT',
'time': '2019-01-01 13:00'
}
)
self.assertTrue('Postcode must be fewer than 8 characters' in context.exception)
self.assertEqual(db.session.query(Team).count(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pitch_postcode_cannot_be_empty(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': '11',\n 'number_players': '1',\n 'pitch_postcode': '',\n 'time': '2019-01-01 13:00'\n }\n )\n self.assertTrue('Postcode must be present' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)\n\n def test_pitch_postcode_length(self):\n \"\"\"pitch_postcode must be fewer than 8 character\"\"\"\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': '11',\n 'number_players': '1',\n 'pitch_postcode': 'somewhereE1 6LT',\n 'time': '2019-01-01 13:00'\n }\n )\n self.assertTrue('Postcode must be fewer than 8 characters' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)",
"def validate_pitch_name(name):\n msg = 'invalid pitch name \"{}\"'.format(name)\n if name[0] not in ['A', 'B', 'C', 'D', 'E', 'F', 'G']:\n raise ValueError(msg)\n if (name.count('#') > 2) or (name.count('b')) > 2:\n raise ValueError('accepts a maximum of two accidentals')\n try:\n int(name.replace('#', '').replace('b', '')[1:])\n except ValueError:\n raise ValueError(msg)",
"def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)",
"def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )",
"def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response",
"def update_postcode(postcode, invalid = True):\r\n m = postcode_format_re.search(postcode)\r\n if m:\r\n invalid = False\r\n postcode= postcode[:5]\r\n return (invalid, postcode)",
"def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None",
"def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]",
"def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True",
"def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True",
"def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))",
"def check_chars_data_fields(header,\r\n mapping_data,\r\n warnings):\r\n\r\n allowed_data_field_chars = \"+-%./ :,;_\" + digits + letters\r\n allowed_sampleid_chars = \".\" + digits + letters\r\n correction = 1\r\n\r\n sample_id_field = \"SampleID\"\r\n fields_to_skip = [\"BarcodeSequence\", \"LinkerPrimerSequence\",\r\n \"ReversePrimer\"]\r\n\r\n for curr_field in range(len(header)):\r\n if header[curr_field] in fields_to_skip:\r\n continue\r\n if header[curr_field] == sample_id_field:\r\n valid_chars = allowed_sampleid_chars\r\n else:\r\n valid_chars = allowed_data_field_chars\r\n for curr_data in range(len(mapping_data)):\r\n # Need to skip newline characters\r\n curr_cell = mapping_data[curr_data][curr_field].replace('\\n', '')\r\n for curr_char in curr_cell:\r\n if curr_char not in valid_chars:\r\n warnings.append(\"Invalid characters found in %s\\t%d,%d\" %\r\n (mapping_data[\r\n curr_data][curr_field].replace(\r\n '\\n', ''),\r\n curr_data + correction, curr_field))\r\n break\r\n\r\n return warnings",
"def test_is_valid_fujita_rating_f_too_many_letters(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(F_SCALE_RATING_TOO_MANY_LETTERS)\n )",
"def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())",
"def validate_pnumac(pnumac):\n if not re.match(pnumac_pattern, pnumac):\n raise ValidationError(u'%s is not a valid area code'%pnumac)",
"def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)",
"def len12(self, len): # -> None:\n ...",
"def is_19_pandigital(n):\n return len(str(n)) == 9 and set(list(str(n))) == pan",
"def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True",
"def validate(input):\n regex = re.compile(r'(UL)?\\d{1,' + re.escape(str(barcode_digit_length)) + '}$', flags=re.IGNORECASE)\n if regex.match(input):\n is_valid = True\n else:\n is_valid = False\n return is_valid",
"def test_missing_bwipp(self):\n seq = 21\n week = 46\n header = 'MSTAR 2016-11-14 MON 1.0'\n with self.assertRaisesRegex(ValueError, 'BWIPP'):\n star_barcode.construct_postscript(\n bwipp_location=Path('/fake-path/not-here.ps'),\n issn=self.issn,\n sequence=seq,\n week=week,\n header_line=header\n )",
"def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False",
"def validate(code):\n if not code.isdigit():\n raise IllegalCharacterError('[0-9]{%d}' % UPCA.digits)\n\n if len(code) != UPCA.digits:\n raise ValueError('Bar code %s requires %d digits' % (code, UPCA.digits))\n\n checksum = UPCA.calculate_checksum(code)\n if checksum != int(code[-1]):\n raise ValueError('Checksum character mismatch %s != %s' % (checksum, code[-1]))",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def clean_year_suciedad(palabra):\n\n try:\n if len(palabra) < 4:\n return None\n else:\n return palabra\n except:\n pass",
"def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False",
"def test_is_valid_fujita_rating_ef_too_many_letters(self):\n\n self.assertFalse(tornado_io._is_valid_fujita_rating(\n EF_SCALE_RATING_TOO_MANY_LETTERS\n ))",
"def convert_admin1_pcode_length(\n self, countryiso3: str, pcode: str, logname: Optional[str] = None\n ) -> Optional[str]:\n pcode_length = len(pcode)\n country_pcodelength = self.pcode_lengths.get(countryiso3)\n if not country_pcodelength:\n return None\n if (\n pcode_length == country_pcodelength\n or pcode_length < 4\n or pcode_length > 6\n ):\n return None\n if country_pcodelength == 4:\n pcode = f\"{Country.get_iso2_from_iso3(pcode[:3])}{pcode[-2:]}\"\n elif country_pcodelength == 5:\n if pcode_length == 4:\n pcode = f\"{pcode[:2]}0{pcode[-2:]}\"\n else:\n pcode = f\"{Country.get_iso2_from_iso3(pcode[:3])}{pcode[-3:]}\"\n elif country_pcodelength == 6:\n if pcode_length == 4:\n pcode = f\"{Country.get_iso3_from_iso2(pcode[:2])}0{pcode[-2:]}\"\n else:\n pcode = f\"{Country.get_iso3_from_iso2(pcode[:2])}{pcode[-3:]}\"\n else:\n pcode = None\n if pcode in self.pcodes:\n if logname:\n self.matches.add(\n (\n logname,\n countryiso3,\n pcode,\n self.pcode_to_name[pcode],\n \"pcode length conversion\",\n )\n )\n return pcode\n return None",
"def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))",
"def clean_code(code, lengte):\n return code.zfill(lengte)"
]
| [
"0.68165445",
"0.5974245",
"0.5805453",
"0.5583695",
"0.5577443",
"0.5540056",
"0.5485245",
"0.5436021",
"0.54209876",
"0.53664035",
"0.53635174",
"0.5358223",
"0.5351246",
"0.5350252",
"0.5310552",
"0.5294924",
"0.52869827",
"0.5275066",
"0.5266998",
"0.5253421",
"0.52335024",
"0.5232907",
"0.5227254",
"0.5214371",
"0.5212995",
"0.52094805",
"0.5208871",
"0.5207499",
"0.519956",
"0.51715463"
]
| 0.74989283 | 0 |
To set `payroll_id` and set `state = 'done'` | def action_done(self, payroll):
self.payroll_id = payroll
self.date_done = payroll.date_payroll
self.state = 'done' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True",
"def action_payslip_done(self):\n for recd in self.late_check_in_ids:\n recd.state = 'deducted'\n return super(PayslipLateCheckIn, self).action_payslip_done()",
"def settle_self(self):\n self.state = 'completed'\n self.save()\n self.safe_post()",
"def set_state(self, state):\r\n update_story_url =\"https://www.pivotaltracker.com/services/v3/projects/{}/stories/{}?story[current_state]={}\".format(self.project_id, self.story_id, state)\r\n response = _perform_pivotal_put(update_story_url)\r\n return response",
"def payee_state(self, payee_state):\n\n self._payee_state = payee_state",
"def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()",
"def set_dues(net_id, paid):\n if paid not in(0, 1):\n raise AttributeError(\"Paid must be either 0 for false or 1 for true\")\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET dues_paid=\"+str(paid)+\" WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()",
"def completed(payment_id):\n epay = PaymentProcessor.epay\n EpayPayment = apps.get_model('epay', 'EpayPayment')\n with transaction.atomic():\n epay_payment = EpayPayment.objects.select_related('payment').get(payment_id=payment_id)\n payment = epay_payment.payment\n epay.capture(\n payment_id, payment.amount, epay_payment.approval_code,\n epay_payment.reference, currency=payment.currency)\n\n # epay_payment.change_status(\"caputred\")\n epay_payment.update_from_kkb()\n return epay_payment",
"def mark_as_completed(conn, pk):\n cursor = conn.cursor()\n statement = \"update aws_files set action_completed_at=? where id=?\"\n now = datetime.utcnow()\n values = (now, pk,)\n print(\" {0}\").format(now.isoformat())\n cursor.execute(statement, values)\n conn.commit()\n cursor.close()",
"def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'",
"def action_payslip_done(self):\n for recd in self.overtime_ids:\n recd.payslip_paid = True\n return super(PayslipOverTime, self).action_payslip_done()",
"async def async_set_state(self, data: dict):\n field = f\"{self.deconz_id}/state\"\n await self.async_set(field, data)",
"def test_update_status_period(self):\n prev_status = self.test_period.status\n self.test_period.status = 'FINALIZED'\n self.test_period.save()\n employee_payments = EmployeePayment.objects.filter(employer=self.test_user_employer.profile.employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_user_employer.profile.employer).count(),\n employee_payments)\n self.test_period.status = prev_status\n self.test_period.save()",
"def update_status(payment_id):\n EpayPayment = apps.get_model('epay', 'EpayPayment')\n with transaction.atomic():\n response = PaymentProcessor.get_status(payment_id)\n epay_payment = EpayPayment.import_or_update(response, payment_id=payment_id)\n return epay_payment",
"def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)",
"def update(self, id):\n loan = self._model.query.get(id)\n loan.original_due_date = loan.due_date\n loan.due_date = loan.due_date + 1 * TimeUnits.MONTH_IN_SEC\n\n db.session.add(loan)\n\n try:\n db.session.commit()\n except Exception as exc:\n print(f'Something went wrong: {exc}')\n db.session.rollback()",
"def update_task_state(mapper, conn, target):\r\n sql_query = ('select count(id) from task_run \\\r\n where task_run.task_id=%s') % target.task_id\r\n n_answers = conn.scalar(sql_query)\r\n sql_query = ('select n_answers from task \\\r\n where task.id=%s') % target.task_id\r\n task_n_answers = conn.scalar(sql_query)\r\n if (n_answers) >= task_n_answers:\r\n sql_query = (\"UPDATE task SET state=\\'completed\\' \\\r\n where id=%s\") % target.task_id\r\n conn.execute(sql_query)",
"def step_impl_the_ru_is_set_to(context, business_id):\n context.bdd_helper.message_data[\"business_id\"] = business_id",
"def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return",
"def set_final(self, setup_id):\n setup = Setup.objects.get(id=setup_id)\n if setup.status == \"draft\":\n try:\n self.save_setup_info(setup)\n except:\n return \"failed: failed building gridpoints. (Check your subspaces values)\"\n success_msg = SetupService.check_setup(setup)\n if success_msg == \"success\":\n setup.status = 'final'\n setup.save()\n return success_msg",
"def set_state(self, state: int):",
"def update(approval_id=None,state=None):\n\n if approval_id is None or state is None: return\n \n client = Client('http://labss2.fiit.stuba.sk/pis/ws/Students/Team071approval?WSDL')\n approval = get(int(approval_id))\n approval.state = int(state)\n approval.name = \"\"\n\n client.service.update('071', 'Vreqif', approval.id, approval)\n\n # check if applicaiton is now approved or canceled, then notify employee\n a.check_state_and_notify(approval.application_id)\n\n return approval",
"def update_payment_status(self):\n payments = Payment.query.filter_by(invoice_id=self.id).all()\n total = 0.0\n for payment in payments:\n total += float(payment.amount)\n\n if total >= self.total:\n self.payment_status = u'paid'\n else:\n self.payment_status = u'unpaid'\n\n db.session.add(self)\n db.session.commit()\n\n return False",
"def test_finalize_and_open_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period2.id})\n self.client.force_login(self.test_user_employer)\n # change from OPEN to FINALIZE\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 1)\n # change from FINALIZE to OPEN\n response = self.client.put(url, data={'status': 'OPEN'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'OPEN', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty)",
"async def _pin_state_response(self, data):\n self.query_reply_data[PrivateConstants.PIN_STATE_RESPONSE] = data[1:-1]",
"async def _pin_state_response(self, data):\n self.query_reply_data[PrivateConstants.PIN_STATE_RESPONSE] = data[1:-1]",
"def _update_payment_status(payment: Payment, yk_payment: PaymentResponse):\n _assert_payment_is_yandex_kassa(payment)\n\n mapped_status = _YK_STATUS_MAPPING.get(yk_payment.status)\n assert mapped_status, \\\n f'Cannot map status {yk_payment.status} for {yk_payment}'\n\n # float because of https://github.com/yandex-money/yandex-checkout-sdk-python/pull/64\n yk_amount = float(yk_payment.amount.value)\n yk_currency = yk_payment.amount.currency\n payment_amount = float(payment.amount.amount)\n payment_currency = payment.amount.currency.code\n\n # Let's find out if there are some updates\n need_save = False\n if yk_amount != payment_amount:\n logger.error(f'Incorrect amount {payment_amount} in Payment. Correcting it to {yk_amount}')\n payment.amount = Money(yk_amount, yk_currency)\n need_save = True\n if yk_currency != payment_currency:\n logger.error(f'Incorrect currency {payment_currency} in Payment. Correcting it to {yk_currency}')\n payment.amount = Money(yk_amount, yk_currency)\n need_save = True\n\n if payment.state != mapped_status:\n payment.state = mapped_status\n need_save = True\n\n if need_save:\n logger.info(f'Updating payment {payment}')\n payment.save()\n else:\n logger.debug(f'No need to update {payment}')",
"def _change_job_state(self, state, result=None):\n session = ConnectorSession(self.env.cr,\n self.env.uid,\n context=self.env.context)\n storage = OpenERPJobStorage(session)\n for job in self:\n job = storage.load(job.uuid)\n if state == DONE:\n job.set_done(result=result)\n elif state == PENDING:\n job.set_pending(result=result)\n else:\n raise ValueError('State not supported: %s' % state)\n storage.store(job)",
"def status_update(request, id=None):\n #obj = Todo.objects.all()\n user = request.user if request.user.is_authenticated else None\n Todo.objects.filter(id=id).update(mark_done=True, answered_by= user)\n return redirect('lists:alllist')",
"def update_paypal(sender, **kwargs):\n ipn_obj = sender\n try:\n payment = json.loads(ipn_obj.custom)\n\n # try to get payment. if not exist, exception will be catched\n p = Payment.objects.filter(id=payment.get('id'), token=payment.get('token')).get()\n\n # update payment\n p.method = constants.PAYPAL\n p.ipn = ipn_obj\n p.save()\n\n # if payment is completed, so valid\n if ipn_obj.payment_status == ST_PP_COMPLETED:\n # check correct price , currency and mail\n if int(ipn_obj.mc_gross) == int(p.price.price) and \\\n ipn_obj.mc_currency == 'EUR' and \\\n ipn_obj.business == settings.PAYPAL_RECEIVER_EMAIL:\n # all is OK, update state\n p.state = True\n p.save()\n sendmail_payment_success(p)\n else:\n # TODO: send alert / mail\n return\n except Payment.DoesNotExist:\n # TODO: send alert / mail\n pass\n except:\n # TODO: send alert / mail\n pass"
]
| [
"0.6185564",
"0.60995096",
"0.5869406",
"0.575969",
"0.57338566",
"0.57096726",
"0.57083595",
"0.55526006",
"0.55448234",
"0.55183035",
"0.5485711",
"0.54563105",
"0.544745",
"0.5410483",
"0.53973025",
"0.538477",
"0.5375823",
"0.53601676",
"0.53398156",
"0.525954",
"0.5216349",
"0.5201486",
"0.5189318",
"0.5182981",
"0.5124663",
"0.5124663",
"0.51246566",
"0.51194197",
"0.51105857",
"0.5109379"
]
| 0.75810933 | 0 |
To get a recordset of rapel based on employee and pay period. | def get_employee_rapel_pay(self, employee_id, year_pay, month_pay):
return self.search([
('year_pay', '=', year_pay),
('month_pay', '=', month_pay),
('employee_id', '=', employee_id),
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_results(self):\n self.ensure_one()\n Result = self.env['sla.employee.view']\n dom = []\n if self.supplier_category_name:\n if self.supplier_category_name == 'employee':\n dom += [('pay_to', '=', 'employee')]\n elif self.supplier_category_name == 'supplier':\n dom += [('pay_to', '!=', 'employee'),('invoice_id.partner_id.category_id.name', '!=', 'ต่างประเทศ')]\n elif self.supplier_category_name == 'foreign':\n dom += [('pay_to', '!=', 'employee'),('invoice_id.partner_id.category_id.name', '=', 'ต่างประเทศ')]\n if self.user_ids:\n dom += [('voucher_id.validate_user_id', 'in', self.user_ids.ids)]\n if self.source_document_type:\n dom += [('invoice_id.source_document_type', '=',\n self.source_document_type)]\n if self.fiscalyear_start_id:\n dom += [('voucher_id.date', '>=',\n self.fiscalyear_start_id.date_start)]\n if self.fiscalyear_end_id:\n dom += [('voucher_id.date', '<=',\n self.fiscalyear_end_id.date_stop)]\n if self.period_start_id:\n dom += [('voucher_id.date', '>=',\n self.period_start_id.date_start)]\n if self.period_end_id:\n dom += [('voucher_id.date', '<=',\n self.period_end_id.date_stop)]\n if self.date_start:\n dom += [('voucher_id.date', '>=', self.date_start)]\n if self.date_end:\n dom += [('voucher_id.date', '<=', self.date_end)]\n self.results = Result.search(\n dom, order=\"fiscalyear,voucher_number,invoice_number\")",
"def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list",
"def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list",
"def get_approved_rapel(self, year_pay, month_pay, status_id, company_payroll_id):\n return self.search([\n ('year_pay', '=', year_pay),\n ('month_pay', '=', month_pay),\n ('status_id', 'child_of', status_id),\n ('state', '=', 'approved'),\n ('company_payroll_id', '=', company_payroll_id),\n ])",
"def available_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n available_employees_list = []\r\n total_sets = set()\r\n set_list = []\r\n\r\n for i in range(len(work_trips_by_date)):\r\n set_list.append(set(work_trips_by_date[i])) \r\n \r\n total_sets = set_list[0]\r\n \r\n if len(work_trips_by_date) != 1: \r\n for i in range(1,len(set_list)):\r\n total_sets.update(set_list[i])\r\n\r\n for line in employee_list:\r\n if line[0] not in total_sets:\r\n available_employees_list.append(line)\r\n\r\n row_names = ['id', 'name' ,'role' ,'rank'] #return columns\r\n employee_index_list = self.find_index_from_header('employee', row_names)\r\n filtered_available_employees = self.filter_by_header_index(employee_index_list, available_employees_list)\r\n\r\n available_employees_list.pop(0)\r\n\r\n return filtered_available_employees",
"def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees",
"def get_records(self, zone_id, ttl=None, data=None, host=None,\r\n record_type=None):\r\n _filter = NestedDict()\r\n\r\n if ttl:\r\n _filter['resourceRecords']['ttl'] = query_filter(ttl)\r\n\r\n if host:\r\n _filter['resourceRecords']['host'] = query_filter(host)\r\n\r\n if data:\r\n _filter['resourceRecords']['data'] = query_filter(data)\r\n\r\n if record_type:\r\n _filter['resourceRecords']['type'] = query_filter(\r\n record_type.lower())\r\n\r\n results = self.service.getResourceRecords(\r\n id=zone_id,\r\n mask='id,expire,domainId,host,minimum,refresh,retry,'\r\n 'mxPriority,ttl,type,data,responsiblePerson',\r\n filter=_filter.to_dict(),\r\n )\r\n\r\n return results",
"def query_records(self, context, rrs):\n records = self.dns_manager.query_records(context, rrs)\n return records",
"def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees",
"def get_all_records(self, data: dict, execution_context: dict):",
"def _query_get(self, cr, uid, obj='l', context=None):\n \n fiscalyear_obj = self.pool.get('account.fiscalyear')\n fiscalperiod_obj = self.pool.get('account.period')\n account_obj = self.pool.get('account.account')\n journal_obj = self.pool.get('account.journal')\n initial_bal = context.get('initial_bal', False)\n fiscalyear_ids = []\n if context is None:\n context = {}\n #Only Valid Move Lines (BALANCE MOVES)\n query = obj+\".state <> 'draft' \"\n #Filter by Company\n if context.get('company_id', False):\n query += \" AND \" +obj+\".company_id = %s\" % context['company_id']\n #Filter by Move State\n if context.get('state', False):\n if type(context['state']) in (list,tuple) :\n query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE state !='reversed') \" \n # query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE state IN (\"+st+\")) \"\n elif context['state'].lower() != 'all':\n query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE account_move.state != '\"+context['state']+\"') \"\n #Get Selected FiscalYear\n if not context.get('fiscalyear', False):\n if context.get('all_fiscalyear', False):\n fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n else:\n if context.get('date_from', False):\n #fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n date_from=context.get('date_from', False)\n date_from2 = datetime.strptime( date_from, '%Y-%m-%d')\n f_code=date_from2.year \n fiscalyear_ids = fiscalyear_obj.search(cr,uid, [ ('code', '=', f_code)])\n else:\n fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n \n else:\n #make the context['fiscalyear'] in one dimention list or ids\n fiscalyear_ids = type(context['fiscalyear']) is list and context['fiscalyear'] or [context['fiscalyear']]\n fiscalyear_clause = (','.join(map(str, fiscalyear_ids)))\n #Duration Filters\n\n if context.get('date_from', False) and context.get('date_to', False):\n \n if initial_bal:\n \n init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])\n date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start\n \n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) ) \" % (fiscalyear_clause,)\n\n date_from=context['date_from']\n if context.get('date_from', False)==date_start:\n date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)\n date_from= date_1+timedelta(days=1)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date <='%s') \" %(context['date_from'],)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date <'%s') \" %(date_from,)\n\n else:\n if context['type']=='statement':\n \n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') \"%(context['date_from'],context['date_to']) \n elif context['type']=='balance':\n init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])\n\n date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start\n date_from=context['date_from']\n if context.get('date_from', False)==date_start:\n date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)\n date_from= date_1+timedelta(days=1)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date > '%s' AND date <= '%s') \"%(date_from,context['date_to']) \n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date >= '%s' 
AND date <= '%s') \"%(context['date_from'],context['date_to']) \n if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False) and context.get('type', False)!='statement':\n if initial_bal:\n period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id\n first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id), ('fiscalyear_id', 'in', fiscalyear_ids)], order='date_start')\n context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period[0], first_period[first_period.index(context['period_from'])-1])\n else:\n context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])\n\n if context.get('periods', False) and context.get('type', False)!='statement':\n period_ids = ','.join(map(str, context['periods']))\n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) \" % (fiscalyear_clause, period_ids)\n else:\n sub_query = \"\"\n if not context.get('date_from', False) or context.get('period_from', False):\n special = initial_bal and (not context.get('date_from', False))\n sub_query = \"AND special = %s\"%(special,)\n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) %s) \" % (fiscalyear_clause, sub_query)\n\n #Filter by Journal\n #situation_journal = set(journal_obj.search(cr, uid, [('type', '=', 'situation')], context=context))\n #selected_journals = set(context.get('journal_ids', False) or journal_obj.search(cr, uid, [], context=context))\n #TEST: situation journal when opening balance & not\n #journal_ids = context.get('selected_journals', False) and selected_journals or \\\n # (initial_bal and list(selected_journals | situation_journal) or list(selected_journals-situation_journal))\n # if journal_ids:\n # query += ' AND '+obj+'.journal_id IN (%s) ' % ','.join(map(str, journal_ids))\n #if not context.get('selected_journals', False) and not initial_bal and situation_journal:\n #query += ' AND '+obj+'.journal_id NOT IN (%s) ' % ','.join(map(str, situation_journal))\n #Filter by chart of Account\n if context.get('chart_account_id', False):\n child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)\n query += ' AND '+obj+'.account_id IN (%s) ' % ','.join(map(str, child_ids))\n #Filter by Move Line Statement\n if 'statement_id' in context:\n if context.get('statement_id', False):\n query += ' AND '+obj+'.statement_id IN (%s) ' % ','.join(map(str, context['statement_id']))\n else:\n query += ' AND '+obj+'.statement_id IS NULL '\n #Filter by Move Line\n if context.get('move_line_ids', False):\n query += ' AND '+obj+'.id IN (%s) ' % ','.join(map(str, context['move_line_ids']))\n #Filter by Analytic Account Type\n if context.get('analytic_display', False):\n query += ' AND '+obj+\".analytic_account_id IN (SELECT id FROM account_analytic_account WHERE analytic_type=%s) \" % (context.get('analytic_display', False).id,)\n\n return query",
"def getEmployeeRecord(self, employees, records, name, reader):\n # pull the first and last name of the employee\n last, first = name.split(',')\n # clean up, if necessary\n last = last.strip()\n first = first.split()[0].strip()\n # get the next line with the social security number\n text = next(reader)[0]\n # check that it is the right line\n assert text.startswith('SSN: ')\n # extract the obfuscated number\n ssn = text[5:].strip().replace('x', '?')\n\n # go through the employee index looking for a name match\n for eid, fullname in employees.items():\n # if this is the match\n if fullname == (last, first):\n # bail\n break\n # if we get this far, there is no match\n else:\n # complain\n # print('could not match {} {}'.format(first, last))\n # make one up\n eid = ((last, first), ssn)\n\n # attempt to\n try:\n # look up the employee\n employee = records[eid]\n # if that fails\n except KeyError:\n # build the employee record\n employee = Employee(first=first, last=last, ssn=ssn)\n # and attach it\n records[eid] = employee\n\n # grab the next line\n line = next(reader)\n # start parsing paycheck info\n while line:\n # have we reached the summary section?\n if line[0].startswith('Employee Totals:'):\n # swallow this section\n for line in reader:\n # bail if the zeroth field isn't empty; it's the end of the section\n if line[0]: return line\n # ran out of input\n break\n # otherwise, this is a paycheck section; extract\n line = self.getEmployeePaycheck(employee=employee, header=line, reader=reader)\n\n # if we get this far, the input was exhausted and we are all done\n return",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def filter_by_employee(table, employee_id):\n operations = []\n employee_id_index = 1\n for record in table:\n id = record[employee_id_index]\n if id == employee_id:\n operations.append(record)\n return operations",
"def get_ride_report(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Report).filter(Report.date_created>=startDate, Report.date_created<=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200",
"def generate_payslip_data(employee_data):\n payslip_data = []\n\n for employee in employee_data:\n gross_income = monthly_gross_income(employee['annual_salary'])\n income_tax = monthly_income_tax(\n employee['annual_salary'], tax_brackets)\n net_income = monthly_net_income(\n gross_income, income_tax)\n super_amount = monthly_super_amount(\n gross_income, employee['super_rate'])\n\n payslip_data.append({\n 'full_name': employee['first_name'] + ' ' + employee['last_name'],\n 'payment_period': employee['payment_period'],\n 'gross_income': gross_income,\n 'income_tax': income_tax,\n 'net_income': net_income,\n 'super_amount': super_amount\n })\n\n return payslip_data",
"def get_payees(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get payees from database\n cur.execute(\"SELECT * FROM payees\")\n payees_data = cur.fetchall()\n\n # convert into a list of payee dictionaries\n payees_list = []\n [payees_list.append({'payee_id': payee[0],\n 'payee_name': payee[1]})\n for payee in payees_data]\n\n # close the cursor\n self.close_cursor()\n\n return payees_list",
"def query_table(self, expression = ''):\n response = self.table.scan(FilterExpression = Attr(\"Employeeid\").gt(int(expression)))\n df = pd.DataFrame(response['Items'])\n print(df.head(20))\n return df",
"def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado",
"def generate_rapel(self):\n config = self.env['ka_hr_payroll.config'].default_config()\n last_period = self.get_last_period(self.status_id.id, self.company_payroll_id.id, config=config)\n if last_period:\n date_done = datetime.strptime(self.date_done, DATETIME_FORMAT)\n\n if date_done.day > config.date_end:\n date_pay = date_done + relativedelta(months=1)\n else:\n date_pay = date_done\n\n data_rapel = {\n 'new_period_id': self.id,\n 'old_period_id': last_period.id,\n 'date_start': get_utc_timezone(self.date_start + ' 00:00:00'),\n 'date_end': self.date_done,\n 'year_pay': str(date_pay.year),\n 'month_pay': date_pay.month,\n 'status_id': self.status_id.id,\n 'company_payroll_id': self.company_payroll_id.id,\n }\n\n rapel_period = self.env['ka_hr_payroll.rapel.tunjangan.khusus.period'].create(data_rapel)\n self.rapel_id = rapel_period\n\n for line in self.line_ids:\n line.generate_rapel(last_period.id, rapel_period.id)\n\n self.state_rapel = '2'\n self.env.user.notify_info(\"{0}, berhasil dibuat!\".format(rapel_period.name))\n else:\n raise ValidationError(\n \"Tunjangan khusus periode sebelumnya tidak ditemukan! Anda tidak bisa melanjutkan aksi ini.\")",
"def list_periods(self, workspace_unique_id=None, subset_unique_id=None, request=None):\n workspace_object = self._get_workspace_object(unique_id=workspace_unique_id) \n subset_object = workspace_object.get_subset_object(subset_unique_id) \n \n # Check request\n if request:\n for per in request:\n \n if per[\"selected\"]:\n print('per', per)\n from_year, to_year = map(int, per[\"value\"].split('-'))\n year_list = map(str, list(range(from_year, to_year+1)))\n print('subset_object.alias', subset_object.alias)\n subset_object.set_data_filter(step='step_1', filter_type='include_list', filter_name='MYEAR', data=year_list)\n break\n print('request'.upper(), request)\n return request\n\n return [{\"label\": \"2007-2012\",\n \t\t\t\t\"status\": \"selectable\",\n \t\t\t\t\"selected\": False,\n \t\t\t\t\"value\": \"2007-2012\"}, \n \n {\"label\": \"2013-2018\",\n \t\t\t\t\"status\": \"selectable\",\n \t\t\t\t\"selected\": True,\n \t\t\t\t\"value\": \"2013-2018\"}]",
"def get_patients(self, active_records_only=True, fetch_ehr_records=True,\n fetch_hidden_ehr=False):\n drf = self._get_drivers_factory(self.patients_repository)\n with drf.get_driver() as driver:\n if not active_records_only:\n patient_records = driver.get_all_records()\n else:\n patient_records = self._get_active_records(driver)\n return [self._fetch_patient_data_full(r, fetch_ehr_records,\n fetch_hidden_ehr) for r in patient_records]",
"def execQ7():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n filtered_frame = frame.sort_values(by='Price', ascending=True).drop_duplicates(subset='Product').head(10)\n return filtered_frame",
"def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's 
total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines",
"def get_records(module):\n if True:\n classes = Attendance.objects.filter(module_id=module).order_by('-time')\n\n data = []\n for c in classes:\n attend = {\"time_id\": c.time, \"lt\": c.lecture_or_tutorial, \"owner\": c.owner,\n \"students\": [p.person_id for p in Attend_Recodes.objects.filter(attendance=c)],\n \"images\": [{\"url\": settings.MEDIA_URL + IMG_FOLDER_NAME + img.path.name,\n \"data\": json.loads(img.data)} for img in Images.objects.filter(attendance=c)]}\n data.append(attend)\n\n return data\n else:\n return None",
"def getEmployeePaycheck(self, employee, header, reader):\n # extract the paycheck date and normalize it\n date = datetime.datetime.strptime(header[0], '%m/%d/%y').date()\n # make a paycheck\n paycheck = Paycheck(date=date)\n # save it\n employee.paychecks[paycheck.date] = paycheck\n\n # the gross pay\n paycheck.gross = float(header[5].strip())\n # the net pay\n paycheck.net = float(header[12].strip())\n\n # extract the paycheck info\n self.getIncomeAndDeductions(paycheck=paycheck, record=header)\n # process the remaining lines\n for record in reader:\n # if the zeroth field isn't empty\n if record[0]:\n # we are done with this paycheck\n return record\n # otherwise, get more\n self.getIncomeAndDeductions(paycheck=paycheck, record=record)\n\n # all done\n return",
"def test_get_records(self):\n pass",
"def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]",
"def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()"
]
| [
"0.57123214",
"0.5697924",
"0.5684732",
"0.5339572",
"0.53176624",
"0.5301057",
"0.52452046",
"0.522237",
"0.5207651",
"0.51459855",
"0.5109143",
"0.5063053",
"0.50590104",
"0.5021293",
"0.50212413",
"0.5010067",
"0.50082755",
"0.49951124",
"0.49939287",
"0.4986779",
"0.49504742",
"0.49409625",
"0.49370643",
"0.49270278",
"0.4919533",
"0.49042955",
"0.49042556",
"0.48410088",
"0.48295885",
"0.4821178"
]
| 0.7033032 | 0 |
gets the price of an item based on the item code provided returns "Get price" currently | def get_price(item_code):
output = "Get price for item {}.".format(item_code)
print(output)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price",
"def get_price(item):\n return float(item[1])",
"def _get_product_price(self, product_code: str) -> float:\n return float(self._product_prices[product_code])",
"def get_price(self):\r\n return self.price",
"def get_item(item_code):\n u = models.Items.query.filter_by(code=item_code).first()\n u_dict = u.__dict__\n item = dict(\n item_code = u_dict['item_code'],\n item_name = u_dict['item_name'],\n size_code = get_size_code( u_dict['size_code']),\n color_code = get_color_code( u_dict['color_code']),\n quality_code = get_quality_code( u_dict['quality_code']),\n cost_price = u_dict['variants']['cost_price'],\n selling_price = u_dict['variants']['selling_price'],\n quantity = u_dict['variants']['quantity']\n )\n return make_response(jsonify(item))",
"def get_item_price(self, soup: BeautifulSoup) -> None:\n try:\n price = soup.find(\"span\", class_=\"_olc9rf0\").get_text()\n price = re.findall(\"\\d+(?:\\.\\d+)?\", price)[0]\n except (AttributeError, IndexError):\n price = None\n self.__collected_dic[\"price\"].append(price)",
"def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)",
"def get_item_current_price(item_id, realm_index) -> int:\n\n # get item json and direct to price\n item_price = __get_item_json__(item_id, realm_index)[\n \"stats\"][0][\"price\"]\n\n return item_price",
"def get_price(self, request, pk):\n return Response('20$')",
"def get_price(self, request, pk):\n return Response('20$')",
"def _get_price(input_: List) -> int:\n price = input_[1][\"price\"]\n return price",
"def get_price(self):\n return self.price",
"def get_price(self):\n return self.price",
"def get_price(self):\n return self.price",
"def get_price(self):\n\n if self.price: return self.price\n # retrieve from args and return if exists\n price = Settings.get_price() or None\n if price: \n self.price = price\n return price\n if not Settings.prompt(\"price\"): return \"\"\n question = {\n 'type': 'input',\n 'name': 'price',\n 'message': 'Price',\n 'validate': PriceValidator,\n 'filter': lambda val: int(val)\n }\n price = prompt(question)[\"price\"]\n if not Settings.confirm(price): return self.get_price()\n self.price = price\n return self.price",
"def price(self, irc, msg, args, optlist, typeName):\n\n try:\n typeID = self._get_typeID(typeName)\n itemType = self._get_type(typeID)\n except:\n irc.error('Unknown type')\n return\n\n if len(optlist) == 1:\n location = optlist[0][1]\n else:\n location = 'Jita'\n\n try:\n locationID = self._get_locationID(location)\n location = self._get_location(locationID)\n except:\n irc.error('Unknown location')\n return\n\n market = self._sql(\"\"\"\n SELECT * FROM evecentral_market\n WHERE \"locationID\"=%s\"\"\", [locationID])\n if not market:\n irc.reply('No data for that market location')\n return\n\n marketitem = self._sql(\"\"\"\n SELECT * FROM evecentral_marketitem\n WHERE \"locationID\"=%s AND \"typeID\"=%s\"\"\", [locationID, typeID])\n if marketitem:\n irc.reply('{0} in {1}: buy max: {2} (volume: {3:,d}). sell min: {4} (volume: {5:,d}).'.format(\n ircutils.bold(itemType['typeName']),\n self._colorize_system(location),\n ircutils.mircColor(\n '{:,.2f}'.format(marketitem['buy_max']),\n fg='green'),\n int(marketitem['buy_volume']),\n ircutils.mircColor(\n '{:,.2f}'.format(marketitem['sell_min']),\n fg='green'),\n int(marketitem['sell_volume']),\n ), prefixNick=False)\n else:\n irc.reply(\"Prices for {0} in {1} isn't updated yet.\".format(\n itemType['typeName'],\n location['itemName']\n ))",
"def _find_item_prices(self, size, categorycode=''):\r\n item_prices = self.client['Product_Package'].getItems(\r\n id=0,\r\n mask='id,capacity,prices[id]',\r\n filter={\r\n 'items': {\r\n 'capacity': {'operation': int(size)},\r\n 'categories': {\r\n 'categoryCode': {'operation': categorycode}\r\n }}})\r\n item_price = item_prices[0]['prices'][0]['id']\r\n return item_price",
"def get_discount(self, price):\r\n pass",
"def get_price(self):\n return self.sale_price if self.sale_price else self.price",
"def getProductPrice(productID):\n return \"http://api.tcgplayer.com/pricing/product/\" + str(productID)",
"def get_price(self, field_name='PRICES'):\n price_data = self.get_price_data()\n return price_data.get('price') or self.find_price(self.get_default(field_name))",
"def getPrice(self):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36\"}\n response = requests.get(self.__product_URL, headers=headers)\n #print(response.status_code)\n soup = BeautifulSoup(response.content, \"html.parser\")\n file = open(\"testproduct.html\", \"wb\")\n file.write(soup.prettify(\"utf-8\"))\n file.close()\n title = soup.find(\"span\", attrs={\"id\": \"productTitle\", \"class\": \"a-size-large\"}).string.strip()\n self.__product_title = title\n temp = soup.find_all(\"a\", attrs={\"class\": \"a-accordion-row a-declarative accordion-header\"})[1]\n price = temp.find(\"span\", attrs={\"class\": \"a-color-price\"}).text.strip()\n lst = list(price)\n lst.remove(\",\")\n price = int(float(\"\".join(lst)))\n self.__product_price = price\n #print(self.__product_price)",
"def get_product_price(self, url):\n self.driver.get(url)\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_ourprice\").text\n except:\n pass\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_dealprice\").text\n except:\n pass\n\n if price is None:\n price = \"Not available\"\n\n else:\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return price",
"def get_price(self):\n if self.price is None:\n price = self.data['pizza'][self.item_type][self.size]\n for topping in self.toppings:\n price += self.data['topping'][topping]\n return price\n return self.price",
"def getPrice(self):\n return self.price",
"def _get_low(item_id, infobox_data):\n\n sub_id = _get_subitem_id(item_id, infobox_data)\n multi = infobox_data.get('alchmultiplier', infobox_data.get('alchmultiplier%s' % sub_id, 0.6))\n\n # Checking if alchable\n if not _is_alchable(item_id, infobox_data):\n return -1\n\n # Checking deprecated attributes\n price = infobox_data.get('low', None)\n if price:\n return price\n\n # Checking deprecated attribute with sub id\n price = infobox_data.get('low%s' % sub_id, None)\n if price:\n return price\n\n # Checking if value is known\n value = _get_value(item_id, infobox_data)\n if not value:\n return -2\n\n # Calculating\n return int(_kill_rounding_error(value * multi * (2 / 3)))",
"def get_price(self):\n return self._price",
"def get_product_price(con, product_id: int) -> str:\n with con.cursor() as cursor:\n cursor.execute(\"\"\"select price from products\n where id = {0}\"\"\".format(product_id))\n try:\n return cursor.fetchone()[0]\n except TypeError:\n raise errors.StoreError",
"def price(self):\n return self.price_",
"def parse_item_page_price(self, id, body):\n info = {}\n end = self.__re_search_item_pos(body, *self.regx['combo'])\n if end:\n body = body[:end[1]]\n info['original'] = self.__re_search(body, *self.regx['original'])\n info['save'] = self.__re_search(body, *self.regx['save'])\n info['price'] = self.__re_search(body, *self.regx['price'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n return info"
]
| [
"0.7892862",
"0.7391328",
"0.66978556",
"0.65503925",
"0.65408695",
"0.6529588",
"0.65179396",
"0.645546",
"0.6454885",
"0.6454885",
"0.63570976",
"0.6352167",
"0.6352167",
"0.6352167",
"0.63217175",
"0.63206077",
"0.6276482",
"0.62730235",
"0.6268507",
"0.6217629",
"0.6186063",
"0.6173656",
"0.61165696",
"0.61005706",
"0.60871124",
"0.6070596",
"0.60685986",
"0.6033551",
"0.599581",
"0.5990852"
]
| 0.82407415 | 0 |