query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
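Per the `metadata` column, each row is meant to be consumed as a (query, document, negatives) triplet: `query` is the anchor, `document` the positive, and the 30 `negatives` are mined hard negatives paired with their `negative_scores`. A minimal sketch (plain Python; the `row` variable and the `max_negatives` cap are illustrative, not part of the dataset) of expanding one row into training triplets:

```python
def row_to_triplets(row, max_negatives=None):
    """Expand one dataset row into (anchor, positive, negative) triplets.

    `row` is a dict with the schema above: `query` anchors the triplet,
    `document` is the positive, and each entry of `negatives` supplies
    one negative. `max_negatives` optionally caps how many are used.
    """
    negatives = row["negatives"]
    if max_negatives is not None:
        negatives = negatives[:max_negatives]
    return [(row["query"], row["document"], neg) for neg in negatives]

# Illustrative usage on a loaded row:
# triplets = row_to_triplets(row, max_negatives=5)
```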
For time-dependent runs, plot maximum displacements versus time | def plot_maxdisp_time(pointsh5, xscale=1e3, yscale=1e-2, tscale=3.1536e7,
adjustRadial=False):
coords,data,number,times = pu.load_h5_visco(pointsh5)
x = coords[:,0]
ur = np.hypot(data[:,:,0], data[:,:,1])
uz = data[:,:,2]
# Convert units & extract maximums for each timestep
x = x / xscale
ur = np.max(ur,1) / yscale # max radial displacement at each timestep
uz = np.max(uz,1) / yscale # max vertical displacement at each timestep (e.g. cm for yscale=1e-2)
times = times / tscale # e.g. tscale=8.64e4 for days, 3.1536e7 for years
plt.figure()
line, = plt.plot(times, uz, 'b.-', lw=2, label='Uz')
plt.plot(times, ur, ls='dashed', lw=2, marker='.', color=line.get_color(), label='Ur')
plt.title('Maximum displacements')
plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))
plt.xlabel('Time [{}]'.format(get_unit(tscale)))
plt.legend(loc='best')
plt.grid()
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_displacements(ds):\n # Se obtienen una matriz de datos con los desplazamientos promedios de cada imagen\n t = ds['t']\n t = t[:n_im-1]\n t = mplt.dates.date2num(t)\n d = ds['d_t']\n \n # Se grafica la curva Desplazamientos promedios vs Tiempo\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d)):\n # Hallando el valor promedio final x zona\n mean_bp = d[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)+'.png'\n\n fig, ax= plt.subplots(figsize=(10,7))\n ax.plot_date(t,d[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000/(4*fc*5),c*1000/(4*fc*5)]) # En (mm)\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Displacement_BP/\"+direction,orientation='landscape')\n \n return 'Ok'",
"def comp_time_plot(p1=database['K+'], p2=database['pi+'], pmax=80, plot=True):\r\n dt = []\r\n p_range = np.linspace(10, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n for p in p_range:\r\n t1_per_m = 76.273/(beta(p, m1)*gamma(p, m1)*c)\r\n t2_per_m = 76.273/(beta(p, m2)*gamma(p, m2)*c)\r\n dt.append(abs(t1_per_m - t2_per_m)*1e12)\r\n dt_12_5 = dt[np.argmin(abs(p_range-12.5))]\r\n dt_75 = dt[np.argmin(abs(p_range-75))]\r\n ratio = dt_12_5/dt_75\r\n if plot==True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(p_range, dt, 'b', label=r'$\\Delta t$')\r\n ax.axvline(12.5, color='r', label='p=12.5 GeV')\r\n ax.axvline(75, color='g', label='p=75 GeV')\r\n ax.set_xlim(10, pmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n# ax.set_yscale('log')\r\n ax.set_ylabel(r'$\\Delta t$ / ps', fontsize=20)\r\n title = f'{p1.name} to {p2.name} '\r\n title += r'$\\Delta t$ dependancy on particle momenta'\r\n ax.set_title(title, fontsize=20)\r\n ax.legend(fontsize=20)\r\n text = 'dt(12.5) = {0:.2f} ps, '.format(dt_12_5)\r\n text += 'dt(75) = {0:.2f} ps, '.format(dt_75)\r\n text += 'ratio = {0:.3f}'.format(ratio)\r\n plt.show()\r\n print(text)\r\n return [dt_12_5, dt_75, ratio]",
"def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin",
"def plotEvolutionMaximum(self):\n evolutionMaximum = self.getEvolutionMax();\n plt.plot(evolutionMaximum)\n plt.xlabel('Time')\n plt.ylabel('Maximum realizations')\n plt.show()",
"def plotTimeDelta(data, type_plot, device):\n mean = data.mean()\n std = data.std()\n max_data = data.max()\n min_data = data.min()\n max_indx = np.argmax(data) # max value index\n min_indx = np.argmin(data) # min value index\n x = np.arange(min_data, max_data, 0.1)\n y = normfun(x, mean, std)\n res_quantile = quantileValues(data, device)\n if type_plot == 0:\n plt.plot(x, y, color='blue')\n annot_max_min(x, y)\n # plt.hist(data.dropna(), bins=500, rwidth=0.9, normed=True)\n plt.title('Time Delta distribution')\n plt.xlabel('Time Delta')\n plt.ylabel('Probability')\n sns.distplot(tmp.deltaSeconds.dropna(),\n kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n # ax.set(xlabel='Vibration Intensity', ylabel='Probability')\n elif type_plot == 1: # plot the max and min point\n plt.plot(data)\n plt.plot(max_indx, data[max_indx], 'ks')\n show_max = '['+str(max_indx)+' '+str(data[max_indx])+']'\n plt.annotate(show_max,\n xytext=(max_indx, data[max_indx]),\n xy=(max_indx, data[max_indx]))\n plt.plot(min_indx, data[min_indx], 'gs')\n show_min = '['+str(min_indx)+' '+str(data[min_indx])+']'\n plt.annotate(show_min,\n xytext=(min_indx, data[min_indx]),\n xy=(min_indx, data[min_indx]))\n plt.title('Time Delta')\n plt.xlabel('Index')\n plt.ylabel('Vibration Intensity Value')\n elif type_plot == 2: # boxplot\n boxplot(data.dropna())\n return res_quantile",
"def task_2():\n\n # To store the list of speeds to plot\n list_of_speeds = []\n list_of_times = []\n list_of_time_difference = [0]\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1, 5):\n list_of_speeds.append(i)\n list_of_times.append(((DISTANCE / i) * 60))\n\n for i in range(1, len(list_of_times)):\n list_of_time_difference.append(list_of_times[i-1] - list_of_times[i])\n\n plt.plot(list_of_speeds, list_of_time_difference)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time saved (in minutes)\")\n plt.show()",
"def one_period_plot():\n file = \"Data/matfiles/20131221.mat\"\n object = MatReader(file)\n\n NeA = object.NeA\n latA = object.latA\n times = object.secondsA\n mlt = object.mltA\n ind1 = 2606 #lat inds\n ind2 = 13940 #lat inds\n \n ind1 = 3197 #mlat inds\n ind2 = 14390 #mlat inds\n \n T = ind2 - ind1\n ind1 += int(T/2)\n ind2 += int(T/2)\n\n latA = latA[ind1:ind2]\n NeA = NeA[ind1:ind2]\n # NeA = object.meanie(NeA, 5)\n times = times[ind1:ind2]\n mlt = mlt[ind1:ind2]\n mlt = hour_round(mlt)\n\n lats = np.zeros_like(latA)\n lats[0] = latA[0]\n for i in range(len(latA)-1):\n dlat = latA[i+1] - latA[i]\n if dlat < 0:\n lats[i+1] = lats[i] - dlat\n else:\n lats[i+1] = lats[i] + dlat\n\n lats += 90\n\n xticks = np.array([-90, -70, -30, 30, 70, 110, 150, 210, 250, 270]) + 90\n gridticks = np.array([-90, -70, -30, 30, 70, 77, 103, 110, 150, 210, 250, 270]) + 90\n # plt.plot(lats, NeA, \".\", markersize = 1)\n # plt.plot([0, 0], [0, np.max(NeA)], \"k\")\n # plt.plot([30, 30], [0, np.max(NeA)], \"k\")\n # plt.plot([60, 60], [0, np.max(NeA)], \"k\")\n # plt.plot([120, 120],[0, np.max(NeA)], \"k\")\n # plt.plot([150, 150], [0, np.max(NeA)], \"k\")\n # plt.plot([167, 167], [0, np.max(NeA)], \"k\")\n # plt.plot([193, 193], [0, np.max(NeA)], \"k\")\n # plt.plot([210, 210], [0, np.max(NeA)], \"k\")\n # plt.plot([240, 244], [0, np.max(NeA)], \"k\")\n # plt.plot([300, 300], [0, np.max(NeA)], \"k\")\n # plt.plot([330, 330], [0, np.max(NeA)], \"k\")\n # plt.plot([360, 360], [0, np.max(NeA)], \"k\")\n # plt.xticks(xticks)\n # plt.xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n # plt.ylabel(\"Electron density [cm$^{-1}$]\")\n # plt.title(\"One SWARM satellite period\")\n # plt.grid(\"on\", axis = \"x\", xdata = gridticks)\n #adding letters\n x = (gridticks[:-1] + gridticks[1:])/2 - 3\n y = np.zeros_like(x) - np.max(NeA)/40\n s = [\"S\", \"B\", \"A\", \"B\", \"C\", \"D\", \"C\", \"B\", \"A\", \"B\", \"S\"]\n # for i in range(len(x)):\n # plt.text(x[i], y[i], s[i], fontsize = 10)\n # plt.savefig(\"Figures/swarm_period.pdf\")\n # plt.show()\n\n # plt.plot(times, latA)\n # plt.plot(times, mlt)\n # plt.show()\n print(lats[0])\n print(lats[-1])\n \n fig, ax = plt.subplots()\n ax.plot(lats, NeA, \".\", markersize = 1)\n ax.set_xticks(xticks, minor=False)\n ax.set_xticks([167, 193], minor=True)\n ax.xaxis.grid(True, which = \"major\")\n ax.xaxis.grid(True, which = \"minor\")\n for i in range(len(x)):\n ax.text(x[i], y[i], s[i], fontsize = 10)\n ax.set_xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n ax.set_ylabel(\"Electron density [cm$^{-1}$]\")\n ax.set_title(\"One Swarm satellite period\")\n # plt.savefig(\"Figures/swarm_period.pdf\")\n plt.show()\n plt.plot(mlt, NeA)\n plt.show()\n plt.plot(mlt, lats)\n plt.show()",
"def parameter_forecast_plot(model_obj,time_index,start,end,num_samples = 100,cached_samples=None,col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']):\n \n f = plt.figure(figsize = (8,10))\n num_components = len(col_labels)\n gs = gridspec.GridSpec(8+2*num_components,6)\n ax0 = plt.subplot(gs[-8:-6,:])\n ax1 = plt.subplot(gs[-6::,:])\n col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']\n ffbs = model_obj # 120 is French Broad River at Blantyre, NC\n if cached_samples is None:\n samples = ffbs.backward_sample(num_samples=num_samples)\n else: \n samples = cached_samples\n for i in range(7):\n ax_new = plt.subplot(gs[2*i:2*i+2,:])\n\n upper = np.percentile(samples[start:end,i,:],75,axis = 1)\n mid = np.percentile(samples[start:end,i,:],50,axis = 1)\n lower = np.percentile(samples[start:end,i,:],25,axis = 1)\n\n ax_new.plot(time_index[start:end],mid,color='k')\n ax_new.fill_between(time_index[start:end],upper,lower,color='0.8')\n ax_new.tick_params(labelbottom=False,direction='in')\n ax_new.text(0.02, 0.82,col_labels[i],\n horizontalalignment='left',\n verticalalignment='center',transform=ax_new.transAxes)\n\n ax1.plot(time_index[start:end],ffbs.f[start:end],color='k',label='1-step forecast')\n ax1.plot(time_index[start:end],ffbs.Y[start:end],color='k',linestyle='',marker='+',\n markersize = 10,label='Observed streamflow')\n\n ax1.fill_between(time_index[start:end],\n np.squeeze(ffbs.f[start:end] + 2*ffbs.Q[start:end,0]),\n np.squeeze(ffbs.f[start:end] - 2*ffbs.Q[start:end,0]),color='0.8',\n label = 'Forecast $\\pm 2V_t$')\n ax1.tick_params(direction='in')\n ax1.legend(loc='upper right',ncol=1,frameon=True)\n #ax1.set_ylabel('Standardized streamflow')\n ax1.set_xlabel('Date',fontsize=16)\n ax1.get_yaxis().set_label_coords(-0.1,0.5)\n ax1.text(0.02, 0.92,'Standardized streamflow',\n horizontalalignment='left',\n verticalalignment='center',transform=ax1.transAxes,)\n ax0.plot(time_index[start:end],ffbs.s[start:end],color='k')\n ax0.text(0.02, 0.82,'$E[V_t]$',\n horizontalalignment='left',\n verticalalignment='center',transform=ax0.transAxes,)\n ax0.get_yaxis().set_label_coords(-0.1,0.5)\n return f,samples",
"def plotCurrentTimeAnticipatory(s, ne,nc, gs):\n fig, ax = plt.subplots()\n ax.set_title('time: {0}'.format(s.time))\n for c in range(nc):\n carTemp = s.cars.getObject(c)\n ax.scatter(carTemp.position[0], carTemp.position[1], c='k', alpha=0.5)\n ax.scatter([], [], c='b', marker='*', label='Opened not commited')\n ax.scatter([], [], c='b', label='Opened commited')\n ax.scatter([], [], c='r', label='Canceled')\n ax.scatter([], [], c='g', label='Closed')\n for i in range(ne):\n eventTemp = s.events.getObject(i)\n if eventTemp.status == Status.OPENED_COMMITED:\n ax.scatter(eventTemp.position[0], eventTemp.position[1], c='b', alpha=0.7)\n elif eventTemp.status == Status.OPENED_NOT_COMMITED:\n ax.scatter(eventTemp.position[0], eventTemp.position[1], c='b', marker='*', alpha=0.7)\n elif (eventTemp.status == Status.CLOSED):\n ax.scatter(eventTemp.position[0], eventTemp.position[1], c='g', alpha=0.2)\n elif (eventTemp.status == Status.CANCELED):\n ax.scatter(eventTemp.position[0], eventTemp.position[1], c='r', alpha=0.2)\n else:\n ax.scatter(eventTemp.position[0], eventTemp.position[1], c='y', alpha=0.2)\n ax.set_xlim([-1, gs + 1])\n ax.set_ylim([-1, gs + 1])\n ax.grid(True)\n plt.legend()\n # Used to return the plot as an image rray\n fig.canvas.draw() # draw the canvas, cache the renderer\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n return image",
"def plot_running_time():\n global counter\n counter += 1\n running_time_targeted = []\n running_time_fast_targeted = []\n \n for node_number in range(10, 1000, 10):\n synthetic_undirected_graph = make_synthetic_undirected_graph(node_number, 5)\n\n start_time = time.time()\n attack_order = targeted_order(synthetic_undirected_graph)\n stop_time = time.time()\n running_time_targeted.append(stop_time - start_time)\n \n start_time = time.time()\n attack_order = fast_targeted_order(synthetic_undirected_graph)\n stop_time = time.time()\n running_time_fast_targeted.append(stop_time - start_time)\n \n plt.plot(range(10, 1000, 10), running_time_targeted, '-b', label = 'targeted_order')\n plt.plot(range(10, 1000, 10), running_time_fast_targeted, '-r', label = 'fast_targeted_order')\n \n plt.legend(loc='upper right')\n\n\n plt.title(\" plot of running time of desktop Python\")\n plt.xlabel(\"the number of nodes\")\n plt.ylabel(\"running times\")\n plt.savefig(\"running_time_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping",
"def compare_displacements(ds1,ds2):\n # Obteniendo los datos para BP\n t1 = ds1['t']\n t1 = t1[:n_im-1]\n t1 = mplt.dates.date2num(t1)\n d1 = ds1['d_t']\n # Obteniendo los datos para RMA\n t2 = ds2['t']\n t2 = t2[:n_im-1]\n t2 = mplt.dates.date2num(t2)\n d2 = ds2['d_t']\n\n # Graficando las 2 curvas juntas\n formatter = DateFormatter(\"%d/%m - %H:%M\")\n for i in range(len(d1)):\n # Hallando el valor promedio final x zona\n mean_bp = d1[i].mean()\n mean_rma = d2[i].mean()\n print(\"Valor promedio BP_zona\"+str(i)+\": \",mean_bp)\n print(\"Valor promedio RMA_zona\"+str(i)+\": \",mean_rma)\n print(\"\")\n # Graficando\n direction = 'desplazamientosPromedios_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'_zona'+str(i)\n\n fig, ax= plt.subplots(figsize=(10,7))\n ax.plot_date(t1,d1[i],'b',marker='',markerfacecolor='b',markeredgecolor='b',label='Back Projection')\n ax.plot_date(t2,d2[i],'r',marker='',markerfacecolor='r',markeredgecolor='r',label='RMA')\n ax.set(xlabel='Tiempo',ylabel='Desplazamiento(mm)',title=\"Desplazamientos promedios\\n(Zona \"+str(i)+')')\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=20)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*1000*4/(4*fc),c*1000*4/(4*fc)])\n ax.grid(linestyle='dashed')\n ax.legend()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n\n return 'Ok'",
"def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()",
"def plot_A_time_space(\n data,\n is_deg=True,\n is_elecorder=False,\n is_spaceorder=False,\n freq_max=20000,\n r_max=100,\n z_max=None,\n is_norm=False,\n unit=\"SI\",\n save_path=None,\n is_auto_ticks=True,\n is_show_fig=None,\n fig=None,\n color_list=[],\n):\n\n if is_show_fig is None:\n is_show_fig = True if fig is None else False\n\n # Set plot\n fig, axs = plt.subplots(3, 2, tight_layout=True, figsize=(20, 10))\n title = data.name + \" over time and space\"\n\n # pcolorplot\n if is_deg:\n angle_str = \"angle{°}\"\n else:\n angle_str = \"angle{rad}\"\n\n plot_3D_Data(\n data,\n \"time\",\n angle_str,\n is_norm=is_norm,\n unit=unit,\n z_max=z_max,\n fig=fig,\n ax=axs[0, 0],\n is_auto_ticks=is_auto_ticks,\n is_show_fig=False,\n is_2D_view=True,\n )\n\n # 2D plots\n # time\n plot_2D_Data(\n data,\n \"time\",\n fig=fig,\n ax=axs[1, 0],\n color_list=color_list,\n is_auto_ticks=is_auto_ticks,\n is_show_fig=False,\n )\n\n # angle\n plot_2D_Data(\n data,\n angle_str,\n fig=fig,\n ax=axs[2, 0],\n color_list=color_list,\n is_auto_ticks=is_auto_ticks,\n is_show_fig=False,\n )\n\n # fft time\n if is_elecorder:\n elec_max = None\n for ax in data.axes:\n if ax.name == \"time\":\n try:\n elec_max = freq_max / ax.normalizations[\"elec_order\"]\n except:\n pass\n\n if elec_max is None:\n freq_str = \"freqs=[0,\" + str(freq_max) + \"]\"\n else:\n freq_str = \"freqs->elec_order[0,\" + str(elec_max) + \"]\"\n else:\n freq_str = \"freqs=[0,\" + str(freq_max) + \"]\"\n\n plot_2D_Data(\n data,\n freq_str,\n fig=fig,\n ax=axs[1, 1],\n unit=unit,\n color_list=color_list,\n is_auto_ticks=is_auto_ticks,\n is_show_fig=False,\n )\n\n # fft space\n if is_spaceorder:\n order_max = None\n for ax in data.axes:\n if ax.name == \"angle\":\n try:\n order_max = r_max / ax.normalizations[\"space_order\"]\n except:\n pass\n\n if order_max is None:\n wavenb_str = \"wavenumber=[0,\" + str(r_max) + \"]\"\n else:\n wavenb_str = \"wavenumber->space_order[0,\" + str(order_max) + \"]\"\n else:\n wavenb_str = \"wavenumber=[0,\" + str(r_max) + \"]\"\n\n plot_2D_Data(\n data,\n wavenb_str,\n fig=fig,\n ax=axs[2, 1],\n unit=unit,\n color_list=color_list,\n is_auto_ticks=is_auto_ticks,\n is_show_fig=False,\n )\n\n axs[0, 1].axis(\"off\")\n axs[0, 1].set_title(\"\")\n\n fig.canvas.set_window_title(title)\n fig.suptitle(title, x=0.65, fontsize=24, fontname=FONT_NAME)\n fig.tight_layout()\n\n if save_path is not None:\n fig.savefig(save_path)\n plt.close()\n\n if is_show_fig:\n fig.show()",
"def plot_multiLyapunov(systems, mode=2, savefig=True, figname=None):\n if mode == 2:\n print(systems)\n# divnorm = colors.DivergingNorm(vmin=max([np.nanmin(np.nanmax(np.nanmax(system.lyapunov_2, axis=0), axis=0)) for system in systems]), vcenter=0, vmax=max[np.nanmax(system.lyapunov_2) for system in systems])\n if figname == None:\n figname = 'sum_of_first_2_lyapunov'\n \n fig, ax = plt.subplots()\n for system in systems:\n\n lyapunov_2 = system.lyapunov_2\n x = system.x\n y = system.y\n l = system.l\n a = system.a\n\n\n\n plt.contourf(a[0,0,:,:],l[0,0,:,:],np.nanmax(np.nanmax(lyapunov_2, axis=0), axis=0), levels = 100, cmap = 'RdBu_r')\n# , norm=divnorm)\n for i in range(lyapunov_2.shape[0]):\n for j in range(lyapunov_2.shape[1]):\n plt.contour(a[0,0,:,:],l[0,0,:,:],lyapunov_2[i,j], levels = [0,], colors=('k',),alpha=0.1)\n lyap_sum = plt.contour(a[0,0,:,:],l[0,0,:,:],lyapunov_2.max(axis=0).max(axis=0), levels = [0,], colors=('blue',),alpha=1)\n\n# cbar = plt.colorbar()\n plt.plot(wild_chaos[:,0],wild_chaos[:,1],'--r',lw=3)\n plt.title('Sum of the first 2 Lyapunov exponents ')\n plt.ylabel('$\\lambda$')\n plt.xlabel('a')\n# cbar.ax.set_ylabel('Sum of the first 2 Lyapunov exponents')\n\n ax.set_ylim([l.min(),l.max()])\n ax.set_xlim([a.min(),a.max()])\n if savefig:\n plt.savefig(f'images/{figname}.pdf')\n plt.show()",
"def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")",
"def plot_a_run(run, ax):\n npz = np.load(run['npz_fname'])\n ckg = npz['nums']\n y_var = run['y_variable']\n full_y = ckg[y_var]\n x_var = run['x_variable']\n full_x = ckg[x_var]\n ### each run is a plot, but it could have multiple lines.\n # this requires some magic, in seperating our data by the second var.\n ## I ASSUME, and this is important, that only two variables change\n x_to_plot = full_x\n x_to_calc = full_x\n y_to_plot = full_y\n ckg_fc = ckg\n if 'second_var' in run: \n ckg_fc = ckg[:,0]\n x_to_calc = full_x[:,0]\n elif ('average_over' in run):#### always do log average\n #y_to_plot = np.average(full_y, axis=1)\n y_to_plot = np.exp(np.average(np.log(full_y), axis=1))\n \n ckg_fc = ckg[:,0]\n x_to_plot = x_to_calc = full_x[:,0]\n #pdb.set_trace()\n ax.plot(x_to_plot, y_to_plot,\".\")\n plot_localization_length(ax, ckg_fc['c'],ckg_fc['k'], ckg_fc['dis_param'], ckg_fc['number_of_points'] , x_to_calc)\n ax.set_xlabel(x_var)\n ax.set_ylabel(y_var)",
"def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();",
"def plot_running_time(num_clusters):\n slow_running = []\n fast_running = []\n for dummy_i in range(2, num_clusters):\n cluster_list = gen_random_clusters(dummy_i)\n start = timer()\n fast_closest_pair(cluster_list)\n end = timer()\n fast_running.append((end - start))\n \n start = timer()\n slow_closest_pair(cluster_list)\n end = timer()\n slow_running.append((end - start))\n #\n plt.plot(range(2, num_clusters), fast_running)\n plt.plot(range(2, num_clusters), slow_running)\n plt.xlabel(\"num clusters\")\n plt.ylabel(\"running time in seconds\")\n plt.title(\"Running time slow closest pair vs fast closest pair.\")\n plt.legend([\"fast closest pair\", \"slow closest pair\"])\n plt.show()",
"def task_1():\n\n # To store the list of speeds to plot\n list_of_speeds = []\n list_of_times = []\n\n # To go from 1 through 80\n for i in range(LOW_SPEED, HIGH_SPEED + 1):\n list_of_speeds.append(i)\n time = (DISTANCE/i) * 60 * 60\n list_of_times.append(time)\n\n plt.plot(list_of_speeds, list_of_times)\n plt.xlabel(\"Speed (in mph)\")\n plt.ylabel(\"Time (in s)\")\n plt.show()",
"def gentoplot(time):\n \n toplot = {}\n\n # Generates a list of movie paths in the data folder.\n files = dftf.batch_s('.') \n\n # Generates dft traces and plots for each roi in each movie.\n for file in files:\n os.chdir(file)\n print(os.path.basename(file))\n\n for col in COLS:\n \n if os.path.exists('params') == True:\n rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE, \n corrparamsfile=CORRPARAMS_FILE, colname=col)\n td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)\n moviename = os.path.basename(os.path.abspath('.'))\n \n # Selects the area of the raw trace to plot.\n frames = time * td['fps']\n #print(frames)\n plottime = td['seltrace'][:frames]/10\n #print(len(plottime))\n ms = plottime-np.mean(plottime)\n xsec = np.linspace(0, len(plottime)/td['fps'], len(plottime))\n #print(xsec)\n condition = td['condition']\n toplot[moviename] = [xsec, ms, condition]\n print(np.max(ms), np.min(ms))\n \n return(toplot)",
"def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time",
"def callback_time_cut(val):\n global plot_mode\n global idx_time\n last_plot_mode = plot_mode\n plot_mode = 'time_cut'\n idx_time = int(val)\n update_num_shadow(int(sld['neighbors'].val))\n # plot 121\n lcuttime.set_xdata( [val, val] )\n lcuttime.set_alpha( alpha_hm )\n lcutfreq.set_alpha( 0.0 )\n # plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_time ) # [True/False, True/False]\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True ] )\n replot_light()\n reform_axis()\n\n fig.canvas.draw_idle()",
"def plot_max_cdv_data(stc_mft, lhmrinds, rhmrinds):\n print(\"##### Attempting to plot max. cdv data:\")\n fig = plt.figure()\n stcdata = stc_mft.data\n plt.plot(1e3 * stc_mft.times, np.max(stcdata[lhmrinds[0], :], axis=0), 'r', label='lh')\n plt.plot(1e3 * stc_mft.times, np.max(stcdata[rhmrinds[0], :], axis=0), 'g', label='rh')\n plt.plot(1e3 * stc_mft.times, np.max(stcdata, axis=0), 'b', label='all')\n plt.xlabel('time (ms)')\n plt.ylabel('max(|cdv|) value')\n plt.legend(loc='upper right', fontsize=10)\n plt.savefig('testfig_cdvmax')\n plt.close()",
"def updatefig(*args):\n p1.set_array(turn(grid))\n p2.set_data(tally['time'], tally['sickos'])\n p3.set_data(tally['time'], tally['immune'])\n p4.set_data(tally['time'], tally['dead'])\n ax2.set_xlim(0, max(tally['time']))\n # ax2.set_ylim(0, max(max(sickos), max(immune)))\n # End sim if the disease is gone\n if tally['sickos'][-1] == 0:\n ani.event_source.stop()\n end_time = time.process_time()\n show_summary()\n print(\"Process time:\", end_time - start_time)\n return p1, p2, p3, p4,",
"def plot_timeline(self, param_name, start_time=None, stop_time=None, \n provider=None, calib=False, max_pts=None):\n\n meta = self.get_param_info(param_name, mode='simple')\n data = self.get_data(param_name, start_time, stop_time, provider, calib, max_pts)\n data = data.squeeze()\n\n if data is None:\n return None\n\n fig, ax = plt.subplots()\n \n # get a list of changes in the data\n # changes = data[data.diff()!=0].index.tolist() # <-- does not work with strings\n # changes = data[1:][data[1:].ne(data[1:].shift())].index.tolist()\n changes = data[data.ne(data.shift())].index.tolist()\n\n # add the end of the last period\n changes.append(data.index.max())\n\n # trying to do gap detection here to NOT fully shade areas where we have no data\n # first finding the mean periodicity of the data\n mean = data.index.to_series().diff().mean()\n\n # now flag periods where the gaps are 2x this\n gap_ends = data[data.index.to_series().diff()>2*mean].index.tolist()\n\n # get the durations\n durations = np.diff(changes)\n\n\n\n # make a colour index with the correct number of colors, spanning the colourmap\n colours = cm.get_cmap('viridis')\n\n # get the list of unique values and create a colour list with this many entries\n num_unique = len(data.unique())\n colour_list = [colours(1.*i/num_unique) for i in range(num_unique)]\n \n # make a dictionary mapping unique values to colours\n unique = data.unique().tolist()\n colors = data[changes].map(dict(zip(unique, colour_list)))\n\n \n\n # now define the x and y ranges \n xranges = [(stop, end) for stop, end in zip(changes, durations)]\n yranges = (1, 0.5)\n\n # plot it using the broken horizontal bar function\n ax.broken_barh(xranges, yranges, facecolors=colors, zorder=2)\n \n ax.set_title(meta['Description'])\n ax.set_xlabel('Date (UTC)')\n # if 'Unit' in meta.index:\n # ax.set_ylabel(meta['Unit'])\n # else:\n # ax.set_ylabel('Calibrated') if calib else ax.set_ylabel('Raw')\n fig.autofmt_xdate()\n xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')\n ax.xaxis.set_major_formatter(xfmt)\n\n return ax",
"def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()",
"def update_highest(csevo):\n tmax = [t[np.argmax(N)] for (t, N) in figure_to_data(csevo)]\n\n data = [{\n \"x\": list(range(len(tmax))), \"y\":tmax, \"type\":\"bar\"\n }]\n\n layout = {\n \"title\":'Time of largest abundance',\n \"template\":\"plotly_dark\",\n \"xaxis\":{\"title\":\"Charge state\", \"range\":[0, len(tmax)]},\n \"yaxis\":{\"title\":\"Time (s)\", \"type\":\"log\"}\n }\n\n return {\"data\":data, \"layout\":layout}",
"def figure9():\n\n g_h_bars = np.linspace(0.005, 0.05, 10)\n plot_settings = {'y_limits': [70, 115],\n 'x_limits': [0.005, 0.05],\n 'y_ticks': [70, 80, 90, 100, 110],\n 'locator_size': 5,\n 'y_label': 'mAHP Duration (ms)',\n 'x_ticks': g_h_bars[0::2],\n 'scale_size': 0,\n 'x_label': '$\\\\bar{g_H }$ ($\\mu S$)',\n 'scale_loc': 4,\n 'figure_name': 'figure_9',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True,\n 'x_on': True}\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n t_start = 3000 # long to allow V to stabilize to new rest location\n length_after = np.zeros((len(g_h_bars),))\n\n for ix, g_h_bar in enumerate(g_h_bars):\n t, y = solver(5000, t_start=t_start, g_h_bar=g_h_bar)\n v = y[:, 0]\n\n pk = np.where(v == np.max(v))[0][0]\n v_clipped = v[pk:]\n v_rest = v[np.where(t <= t_start)[0][-1]] # v_rest is v immediately before the stimulus turns on\n v_clipped -= v_rest\n crossings = np.where((v_clipped[:-1] * v_clipped[1:]) < 0)[0]\n ix_after = np.where(t[crossings] < 200)[0][-1]\n length_after[ix] = t[crossings[ix_after] - crossings[ix_after - 1]]\n plt.plot(g_h_bars, length_after, c='k', marker='o', fillstyle='none')\n \"\"\"\n x is digitized from figure 9 in the original manuscript\n \"\"\"\n # x = [108.44444444444443, 97.03703703703704, 89.7037037037037, 85.55555555555556, 82.22222222222223,\n # 80.2962962962963, 78.22222222222223, 77.18518518518519, 76.81481481481481, 74.07407407407408]\n # plt.plot(g_h_bars, x)\n\n \"\"\"\n Annotate plot\n \"\"\"\n ellipse = patches.Ellipse(xy=(0.017, 105), width=0.01, height=4, angle=0)\n plt.gca().add_artist(ellipse)\n ellipse.set_facecolor((1, 1, 1))\n plt.gca().annotate(\"Neonatal\", (0.017, 105), fontsize=8, ha=\"center\", va=\"center\")\n\n ellipse = patches.Ellipse(xy=(0.04, 72), width=0.005, height=4, angle=0)\n plt.gca().add_artist(ellipse)\n ellipse.set_facecolor((1, 1, 1))\n plt.gca().annotate(\"Adult\", (0.04, 72), fontsize=8, ha=\"center\", va=\"center\")\n alter_figure(plot_settings, close=True)",
"def integrate_discrete_time_stochastic(self, y):\n self.compartments = self.convert_list_to_compartments(y)\n for label in self.compartments:\n self.compartments[label] = int(self.compartments[label])\n\n n_compartment = len(y)\n n_time = len(self.target_times)\n self.soln_array = numpy.zeros((n_time, n_compartment))\n\n time = self.target_times[0]\n self.soln_array[0, :] = y\n\n for i_time, new_time in enumerate(self.target_times):\n\n if i_time == 0:\n continue\n\n dt = new_time - self.target_times[i_time - 1]\n\n self.time = time\n self.calculate_vars()\n self.calculate_events()\n\n for event in self.events:\n from_label, to_label, rate = event\n\n mean = rate * dt\n delta_population = numpy.random.poisson(mean, 1)[0]\n\n if from_label and to_label:\n if delta_population > self.compartments[from_label]:\n delta_population = self.compartments[from_label]\n self.compartments[from_label] -= delta_population\n self.compartments[to_label] += delta_population\n elif to_label is None:\n # death\n if delta_population > self.compartments[from_label]:\n delta_population = self.compartments[from_label]\n self.compartments[from_label] -= delta_population\n elif from_label is None:\n # birth\n self.compartments[to_label] += delta_population\n\n self.checks()\n\n time += dt\n\n if i_time < n_time:\n y = self.convert_compartments_to_list(self.compartments)\n self.soln_array[i_time, :] = y",
"def visualize(model, x_val, PLOT_DIR, TIME_OF_RUN, args, ode_model=True, latent=False, epoch=0, is_mdn=False):\n x_val = x_val.reshape(2, -1, 2)\n dt = 0.01\n t = tf.linspace(0., 10., int(10./dt)+1)\n # Compute the predicted trajectories\n if ode_model:\n x0_extrap = tf.stack([x_val[0, 0]])\n x_t_extrap = odeint(model, x0_extrap, t, rtol=1e-5, atol=1e-5).numpy()[:, 0]\n x0_interp = tf.stack([x_val[1, 0]])\n x_t_interp = odeint(model, x0_interp, t, rtol=1e-5, atol=1e-5).numpy()[:, 0]\n else: # LSTM model\n x_t_extrap = np.zeros_like(x_val[0])\n x_t_extrap[0] = x_val[0, 0]\n x_t_interp = np.zeros_like(x_val[1])\n x_t_interp[0] = x_val[1, 0]\n # Always injects the entire time series because keras is slow when using\n # varying series lengths and the future timesteps don't affect the predictions\n # before it anyways.\n if is_mdn:\n import mdn\n for i in range(1, len(t)):\n pred_extrap = model(0., np.expand_dims(x_t_extrap, axis=0))[0, i-1:i]\n x_t_extrap[i:i+1] = mdn.sample_from_output(pred_extrap.numpy()[0], 2, 5, temp=1.)\n pred_interp = model(0., np.expand_dims(x_t_interp, axis=0))[0, i-1:i]\n x_t_interp[i:i+1] = mdn.sample_from_output(pred_interp.numpy()[0], 2, 5, temp=1.)\n else:\n for i in range(1, len(t)):\n x_t_extrap[i:i+1] = model(0., np.expand_dims(x_t_extrap, axis=0))[0, i-1:i]\n x_t_interp[i:i+1] = model(0., np.expand_dims(x_t_interp, axis=0))[0, i-1:i]\n\n x_t = np.stack([x_t_extrap, x_t_interp], axis=0)\n # Plot the generated trajectories\n fig = plt.figure(figsize=(12, 8), facecolor='white')\n ax_traj = fig.add_subplot(231, frameon=False)\n ax_phase = fig.add_subplot(232, frameon=False)\n ax_vecfield = fig.add_subplot(233, frameon=False)\n ax_vec_error_abs = fig.add_subplot(234, frameon=False)\n ax_vec_error_rel = fig.add_subplot(235, frameon=False)\n ax_energy = fig.add_subplot(236, frameon=False)\n ax_traj.cla()\n ax_traj.set_title('Trajectories')\n ax_traj.set_xlabel('t')\n ax_traj.set_ylabel('x,y')\n ax_traj.plot(t.numpy(), x_val[0, :, 0], t.numpy(), x_val[0, :, 1], 'g-')\n ax_traj.plot(t.numpy(), x_t[0, :, 0], '--', t.numpy(), x_t[0, :, 1], 'b--')\n ax_traj.set_xlim(min(t.numpy()), max(t.numpy()))\n ax_traj.set_ylim(-6, 6)\n ax_traj.legend()\n\n ax_phase.cla()\n ax_phase.set_title('Phase Portrait')\n ax_phase.set_xlabel('x')\n ax_phase.set_ylabel('x_dt')\n ax_phase.plot(x_val[0, :, 0], x_val[0, :, 1], 'g--')\n ax_phase.plot(x_t[0, :, 0], x_t[0, :, 1], 'b--')\n ax_phase.plot(x_val[1, :, 0], x_val[1, :, 1], 'g--')\n ax_phase.plot(x_t[1, :, 0], x_t[1, :, 1], 'b--')\n ax_phase.set_xlim(-6, 6)\n ax_phase.set_ylim(-6, 6)\n\n ax_vecfield.cla()\n ax_vecfield.set_title('Learned Vector Field')\n ax_vecfield.set_xlabel('x')\n ax_vecfield.set_ylabel('x_dt')\n\n steps = 61\n y, x = np.mgrid[-6:6:complex(0, steps), -6:6:complex(0, steps)]\n ref_func = Lambda()\n dydt_ref = ref_func(0., np.stack([x, y], -1).reshape(steps * steps, 2)).numpy()\n mag_ref = 1e-8+np.linalg.norm(dydt_ref, axis=-1).reshape(steps, steps)\n dydt_ref = dydt_ref.reshape(steps, steps, 2)\n\n if ode_model: # is Dense-Net or NODE-Net or NODE-e2e\n dydt = model(0., np.stack([x, y], -1).reshape(steps * steps, 2)).numpy()\n else: # is LSTM\n # Compute artificial x_dot by numerically diffentiating:\n # x_dot \\approx (x_{t+1}-x_t)/dt\n yt_1 = model(0., np.stack([x, y], -1).reshape(steps * steps, 1, 2))[:, 0]\n if is_mdn: # have to sample from output Gaussians\n yt_1 = np.apply_along_axis(mdn.sample_from_output, 1, yt_1.numpy(), 2, 5, temp=.1)[:,0]\n dydt = (np.array(yt_1)-np.stack([x, y], -1).reshape(steps * steps, 2)) 
/ dt\n\n dydt_abs = dydt.reshape(steps, steps, 2)\n dydt_unit = dydt_abs / np.linalg.norm(dydt_abs, axis=-1, keepdims=True) # make unit vector\n\n ax_vecfield.streamplot(x, y, dydt_unit[:, :, 0], dydt_unit[:, :, 1], color=\"black\")\n ax_vecfield.set_xlim(-6, 6)\n ax_vecfield.set_ylim(-6, 6)\n\n ax_vec_error_abs.cla()\n ax_vec_error_abs.set_title('Abs. error of xdot')\n ax_vec_error_abs.set_xlabel('x')\n ax_vec_error_abs.set_ylabel('x_dt')\n\n abs_dif = np.clip(np.linalg.norm(dydt_abs-dydt_ref, axis=-1), 0., 3.)\n c1 = ax_vec_error_abs.contourf(x, y, abs_dif, 100)\n plt.colorbar(c1, ax=ax_vec_error_abs)\n\n ax_vec_error_abs.set_xlim(-6, 6)\n ax_vec_error_abs.set_ylim(-6, 6)\n\n ax_vec_error_rel.cla()\n ax_vec_error_rel.set_title('Rel. error of xdot')\n ax_vec_error_rel.set_xlabel('x')\n ax_vec_error_rel.set_ylabel('x_dt')\n\n rel_dif = np.clip(abs_dif / mag_ref, 0., 1.)\n c2 = ax_vec_error_rel.contourf(x, y, rel_dif, 100)\n plt.colorbar(c2, ax=ax_vec_error_rel)\n\n ax_vec_error_rel.set_xlim(-6, 6)\n ax_vec_error_rel.set_ylim(-6, 6)\n\n ax_energy.cla()\n ax_energy.set_title('Total Energy')\n ax_energy.set_xlabel('t')\n ax_energy.plot(np.arange(1001)/100.1, np.array([total_energy(x_) for x_ in x_t_interp]))\n\n fig.tight_layout()\n plt.savefig(PLOT_DIR + '/{:03d}'.format(epoch))\n plt.close()\n\n # Compute Metrics\n energy_drift_extrap = relative_energy_drift(x_t[0], x_val[0])\n phase_error_extrap = relative_phase_error(x_t[0], x_val[0])\n traj_error_extrap = trajectory_error(x_t[0], x_val[0])\n\n energy_drift_interp = relative_energy_drift(x_t[1], x_val[1])\n phase_error_interp = relative_phase_error(x_t[1], x_val[1])\n traj_error_interp = trajectory_error(x_t[1], x_val[1])\n\n\n wall_time = (datetime.datetime.now()\n - datetime.datetime.strptime(TIME_OF_RUN, \"%Y%m%d-%H%M%S\")).total_seconds()\n string = \"{},{},{},{},{},{},{},{}\\n\".format(wall_time, epoch,\n energy_drift_interp, energy_drift_extrap,\n phase_error_interp, phase_error_extrap,\n traj_error_interp, traj_error_extrap)\n file_path = (PLOT_DIR + TIME_OF_RUN + \"results\"\n + str(args.lr) + str(args.dataset_size) + str(args.batch_size)\n + \".csv\")\n if not os.path.isfile(file_path):\n title_string = (\"wall_time,epoch,energy_drift_interp,energy_drift_extrap, phase_error_interp,\"\n + \"phase_error_extrap, traj_err_interp, traj_err_extrap\\n\")\n fd = open(file_path, 'a')\n fd.write(title_string)\n fd.close()\n fd = open(file_path, 'a')\n fd.write(string)\n fd.close()\n\n # Print Jacobian\n if ode_model:\n np.set_printoptions(suppress=True, precision=4, linewidth=150)\n # The first Jacobian is averaged over 100 randomly sampled points from U(-1, 1)\n jac = tf.zeros((2, 2))\n for i in range(100):\n with tf.GradientTape(persistent=True) as g:\n x = (2 * tf.random.uniform((1, 2)) - 1)\n g.watch(x)\n y = model(0, x)\n jac = jac + g.jacobian(y, x)[0, :, 0]\n print(jac.numpy()/100)\n\n with tf.GradientTape(persistent=True) as g:\n x = tf.zeros([1, 2])\n g.watch(x)\n y = model(0, x)\n print(g.jacobian(y, x)[0, :, 0])"
] | [
"0.5977297",
"0.59203607",
"0.591389",
"0.5904414",
"0.58950406",
"0.5851301",
"0.583456",
"0.5795047",
"0.5763769",
"0.57332605",
"0.569345",
"0.567947",
"0.56562036",
"0.56409806",
"0.5510336",
"0.54855245",
"0.54765123",
"0.54744595",
"0.54681593",
"0.5465856",
"0.546511",
"0.5446847",
"0.54413056",
"0.54407257",
"0.541972",
"0.54073316",
"0.5383507",
"0.53669083",
"0.5356758",
"0.5356479"
] | 0.6763449 | 0 |
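The `plot_maxdisp_time` document above leans on a helper module `pu` (here `pu.load_h5_visco`) that is not included in the row. The sketch below shows one way such a loader could be written with h5py; the dataset paths (`geometry/vertices`, `vertex_fields/displacement`, `time`) and the meaning of the returned `number` are assumptions based on PyLith-style points output, not something the row confirms:

```python
import h5py

def load_h5_visco(pointsh5):
    """Sketch of the loader assumed by plot_maxdisp_time().

    Assumed HDF5 layout (PyLith-style points output):
      geometry/vertices           -> (npoints, 3) coordinates
      vertex_fields/displacement  -> (ntimes, npoints, 3) displacements
      time                        -> (ntimes,) output times in seconds
    Adjust the paths to whatever points.h5 actually contains.
    """
    with h5py.File(pointsh5, 'r') as f:
        coords = f['geometry/vertices'][:]
        data = f['vertex_fields/displacement'][:]
        times = f['time'][:].ravel()
    number = coords.shape[0]  # assumed: number of output points
    return coords, data, number, times
```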
Convert scale term to unit label | def get_unit(scale):
scale2unit = { 1e-9: 'nm',
1e-6: u'\N{MICRO SIGN}m', #or hex id (lookup): u'\u00B5'
1e-3: 'mm',
0.01: 'cm',
0.1:'dm',
1:'m',
1000:'km',
# time
8.6400e4:'day',
3.1536e7:'yr',
3.1536e10:'ka',
3.1536e13:'Ma',
#Pressure
1e9: 'GPa',
1e6: 'MPa',
}
return scale2unit[scale] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def point_scale_name(self):",
"def to_axis_units(self, label, vals):\n if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar', 'P']:\n return vals / 1000\n elif label in ['T']:\n return vals\n else:\n raise ValueError(label)",
"def labels_x(x_unit, latex = True, verbose = 0): \n \n if verbose > 1:\n print(\"SpectraTools.Resources.UnitConversion.labels_x()\") \n \n if x_unit in nm_labels:\n return \"Wavelength (nm)\"\n elif x_unit in um_labels:\n if latex:\n return r\"Wavelength ($\\mu$m)\"\n else:\n return \"Wavelength (micron)\"\n elif x_unit in cm_labels:\n if latex:\n return r\"Energy (cm$^{-1}$)\"\n else:\n return \"Energy (cm-1)\"\n elif x_unit in ev_labels:\n return \"Energy (eV)\" \n else:\n return x_unit",
"def make_label(self, label, units):\n nice_label = self.tex_axis_label(label)\n if not (units == 'dimensionless') and \\\n (units is not None) and (not units == []):\n nice_label += ' (%s)'%self.tex_axis_label(units)\n return nice_label",
"def get_scale():\r\n\r\n \r\n return 0.5",
"def test_replace_namespaced_scale_scale(self):\n pass",
"def scaled_to_name(a):\r\n wt_list = ['Rx','Sc']\r\n return wt_list[a]",
"def unit(self,unit_str,unit_scale):\n self.units[unit_str] = unit_scale\n return self",
"def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)",
"def GetScale(self):\n ...",
"def tex_axis_label(self, label, smalllabel=False):\n if isinstance(label, list):\n label = label[0]\n if not isinstance(label, str):\n raise ValueError(\"Label must be a string. Got %s of \"\n \"type %s\"%(label, type(label)))\n label = label.lower()\n pretty_labels = {}\n pretty_labels[\"atm_muon_scale\"] = r\"Muon Background Scale\"\n pretty_labels[\"nue_numu_ratio\"] = r\"$\\nu_e/\\nu_{\\mu}$ Ratio\"\n pretty_labels[\"nu_nc_norm\"] = r\"$\\nu$ NC Scale\"\n pretty_labels[\"nu_nubar_ratio\"] = r\"$\\nu/\\bar{\\nu}$ Ratio\"\n pretty_labels[\"barr_uphor_ratio\"] = r\"Barr Up/Horizontal Ratio\"\n pretty_labels[\"barr_nu_nubar_ratio\"] = r\"Barr $\\nu/\\bar{\\nu}$ Ratio\"\n pretty_labels[\"barr_uphor\"] = r\"Barr Up/Horizontal Ratio\"\n pretty_labels[\"barr_nu_nubar\"] = r\"Barr $\\nu/\\bar{\\nu}$ Ratio\"\n pretty_labels[\"delta_index\"] = r\"Atmospheric Index Change\"\n pretty_labels[\"theta13\"] = r\"$\\theta_{13}$\"\n pretty_labels[\"theta23\"] = r\"$\\theta_{23}$\"\n pretty_labels[\"deltacp\"] = r\"$\\delta_{\\mathrm{CP}}$\"\n pretty_labels[\"gamma\"] = r\"$\\Gamma$\"\n pretty_labels[\"sin2theta23\"] = r\"$\\sin^2\\theta_{23}$\"\n pretty_labels[\"deltam31\"] = r\"$\\Delta m^2_{31}$\"\n pretty_labels[\"deltam32\"] = r\"$\\Delta m^2_{32}$\"\n pretty_labels[\"deltam3l\"] = r\"$\\Delta m^2_{3l}$\"\n pretty_labels[\"aeff_scale\"] = r\"$A_{\\mathrm{eff}}$ Scale\"\n pretty_labels[\"energy_scale\"] = r\"Energy Scale\"\n pretty_labels[\"genie_ma_qe\"] = r\"GENIE $M_{A}^{QE}$\"\n pretty_labels[\"genie_ma_res\"] = r\"GENIE $M_{A}^{Res}$\"\n pretty_labels[\"dom_eff\"] = r\"DOM Efficiency\"\n pretty_labels[\"hole_ice\"] = r\"Hole Ice\"\n pretty_labels[\"hole_ice_fwd\"] = r\"Hole Ice Forward\"\n pretty_labels[\"degree\"] = r\"$^\\circ$\"\n pretty_labels[\"radians\"] = r\"rads\"\n pretty_labels[\"radian\"] = r\"rads\"\n pretty_labels[\"electron_volt ** 2\"] = r\"$\\mathrm{eV}^2$\"\n pretty_labels[\"electron_volt\"] = r\"$\\mathrm{eV}^2$\"\n pretty_labels[\"gigaelectron_volt\"] = r\"$\\mathrm{GeV}$\"\n pretty_labels[\"llh\"] = r\"Likelihood\"\n pretty_labels[\"conv_llh\"] = r\"Convoluted Likelihood\"\n pretty_labels[\"chi2\"] = r\"$\\chi^2$\"\n pretty_labels[\"mod_chi2\"] = r\"Modified $\\chi^2$\"\n pretty_labels[\"delta_llh\"] = r\"$\\Delta$ Likelihood\"\n pretty_labels[\"delta_conv_llh\"] = r\"$\\Delta$ Convoluted Likelihood\"\n pretty_labels[\"delta_chi2\"] = r\"$\\Delta\\chi^2$\"\n pretty_labels[\"delta_mod_chi2\"] = r\"$\\Delta$ $\\chi^2_{\\mathrm{mod}}$\"\n if smalllabel:\n pretty_labels[\"no\"] = r\"NO\"\n pretty_labels[\"io\"] = r\"IO\"\n else:\n pretty_labels[\"no\"] = r\"Normal Ordering\"\n pretty_labels[\"io\"] = r\"Inverted Ordering\"\n pretty_labels[\"nomsw\"] = r\"Normal Ordering, Matter Oscillations\"\n pretty_labels[\"iomsw\"] = r\"Inverted Ordering, Matter Oscillations\"\n pretty_labels[\"novacuum\"] = r\"Normal Ordering, Vacuum Oscillations\"\n pretty_labels[\"iovacuum\"] = r\"Inverted Ordering, Vacuum Oscillations\"\n pretty_labels[\"msw\"] = r\"Matter Oscillations\"\n pretty_labels[\"vacuum\"] = r\"Vacuum Oscillations\"\n pretty_labels[\"no,llr\"] = r\"LLR Method\"\n pretty_labels[\"no,llr,nufitpriors\"] = r\"LLR Method, Nu-Fit Priors\"\n pretty_labels[\"io,llr\"] = r\"llr Method\"\n pretty_labels[\"io,llr,nufitpriors\"] = r\"LLR Method, Nu-Fit Priors\"\n pretty_labels[\"nue\"] = r\"$\\nu_e$\"\n pretty_labels[\"nuebar\"] = r\"$\\bar{\\nu}_e$\"\n pretty_labels[\"numu\"] = r\"$\\nu_{\\mu}$\"\n pretty_labels[\"numubar\"] = r\"$\\bar{\\nu}_{\\mu}$\"\n pretty_labels[\"second\"] = r\"s\"\n 
pretty_labels[\"seconds\"] = r\"s\"\n pretty_labels[\"atm_delta_index\"] = r\"Atmospheric Index Change\"\n pretty_labels[\"pve\"] = r\"Positive\"\n pretty_labels[\"nve\"] = r\"Negative\"\n pretty_labels[\"fitwrong\"] = r\"Sensitivity Stability\"\n pretty_labels[\"fixwrong\"] = r\"Fitting Relevance\"\n pretty_labels[\"nminusone\"] = r\"Hidden Potential\"\n pretty_labels[\"minimiser_times\"] = r\"Minimiser Time (seconds)\"\n pretty_labels[\"minimiser_iterations\"] = r\"Minimiser Iterations\"\n pretty_labels[\"minimiser_funcevals\"] = r\"Minimiser Function Evaluations\"\n pretty_labels[\"minimiser_status\"] = r\"Minimiser Status\"\n pretty_labels[\"correlation_coefficients\"] = r\"Correlation Coefficients\"\n pretty_labels[\"true no, llr\"] = r\"True Normal Ordering, LLR\"\n pretty_labels[\"true io, llr\"] = r\"True Inverted Ordering, LLR\"\n pretty_labels[\"e_res_scale\"] = r\"Energy Resolution Scale\"\n pretty_labels[\"cz_res_scale\"] = r\"$\\cos\\theta_Z$ Resolution Scale\"\n pretty_labels[\"livetime\"] = r\"Livetime\"\n pretty_labels[\"julian_year\"] = r\"Years\"\n if label not in pretty_labels.keys():\n logging.warning(\"I have no nice label for %s. Returning as is.\"%label)\n return label\n return pretty_labels[label]",
"def units_to_fits(unit):\n if unit is None:\n unit = Unit('')\n return unit.to_string(\"fits\").upper()",
"def convert_scaling_to_form_factors(qz, scale):\n apply_absorption_correction(qz, scale)\n apply_Lorentz_correction(qz, scale)\n for i in xrange(len(scale)):\n scale[i] = np.sign(scale[i]) * math.sqrt(abs(scale[i]))",
"def get_scale_op(self):\n\t\treturn self.variables.get('scale')",
"def Unit_removeScale(*args):\n return _libsbml.Unit_removeScale(*args)",
"def _get_scaled_odr_label(odr_fit, order, action_unit, acd_correction, magnitude_exponent=3):\n str_acd_scale = \"$^*$\" if acd_correction != 1 else \"\"\n\n scale = acd_correction * (10 ** -magnitude_exponent) / (UNIT_IN_METERS[action_unit] ** order)\n str_val, str_std = _get_scaled_labels(odr_fit.beta[order], odr_fit.sd_beta[order], scale)\n str_mag = ''\n if magnitude_exponent != 0:\n str_mag = fr'$\\cdot$ 10$^{{{magnitude_exponent}}}$'\n return fr'({str_val} $\\pm$ {str_std}){str_acd_scale} {str_mag} m$^{{-{order}}}$'",
"def scale(self):",
"def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == \"PHQ\":\n return arr*2.5e6/1000.\n return arr",
"def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6",
"def petab_scale_to_amici_scale(scale_str):\n\n if scale_str == 'lin':\n return amici.ParameterScaling_none\n if scale_str == 'log':\n return amici.ParameterScaling_ln\n if scale_str == 'log10':\n return amici.ParameterScaling_log10\n raise ValueError(\"Invalid pscale \" + scale_str)",
"def unicode_of_unit(quant):\n return quant.dimensionality.unicode",
"def removeScale(*args):\n return _libsbml.Unit_removeScale(*args)",
"def __str__(self) -> str:\n lengthscale = self.covar_module.base_kernel.lengthscale.detach()\n outputscale = self.covar_module.outputscale.detach()\n return \"\\toutputscale: {}\\n \\tlengthscale: {}\".format(outputscale, lengthscale)",
"def convert(self, value, unit, axis):\n scaled = getattr(value, self.scale)\n if self.format in YMDHMS_FORMATS:\n return scaled.mjd\n elif self.format == \"byear_str\":\n return scaled.byear\n elif self.format == \"jyear_str\":\n return scaled.jyear\n else:\n return getattr(scaled, self.format)",
"def label(self):\r\n if isinstance(self.Lbeta, str):\r\n result = self.Lbeta\r\n else:\r\n result = 'T%.2d' % int(round(self.Lbeta))\r\n result += 'E%.2d' % int(round(self.E))\r\n result += 'G%.2d' % int(round(self.minTauG))\r\n result += self.insulation\r\n return result",
"def unit_scale(x, eps=1e-8):\n\tx = x.copy()\n\tx -= x.min()\n\tx *= 1.0 / (x.max() + eps)\n\treturn x",
"def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)",
"def test_read_namespaced_scale_scale(self):\n pass",
"def numerify_iso_label(lab):\n from sage.databases.cremona import class_to_int\n if 'CM' in lab:\n return -1 - class_to_int(lab[2:])\n else:\n return class_to_int(lab.lower())",
"def any_scale(scale):\n return scale"
] | [
"0.66115385",
"0.64944685",
"0.6276687",
"0.62619853",
"0.6258747",
"0.6168202",
"0.6106348",
"0.6104452",
"0.6032182",
"0.59474814",
"0.59472513",
"0.593621",
"0.5931593",
"0.591483",
"0.59085834",
"0.5896219",
"0.58899754",
"0.588468",
"0.58570886",
"0.5837348",
"0.5832844",
"0.58197284",
"0.58173925",
"0.57987094",
"0.5795785",
"0.5791935",
"0.57719576",
"0.5750965",
"0.5746385",
"0.5744292"
] | 0.7126686 | 0 |
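For reference, this `get_unit` lookup is exactly what the plotting documents in the other rows feed with their `xscale`/`yscale`/`tscale` arguments when building axis labels; a short usage sketch:

```python
import matplotlib.pyplot as plt

yscale, tscale = 1e-2, 3.1536e7  # cm and years, per the lookup table above
plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))  # -> 'Displacement [cm]'
plt.xlabel('Time [{}]'.format(get_unit(tscale)))          # -> 'Time [yr]'
```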
Plot profiles for each output/step0X folder on the same figure | def plot_directory_profiles(path, outname=None, show=True, xscale=1, yscale=1,
xval='x', adjustRadial=True):
outdirs = np.sort(os.listdir(path))
plt.figure()
#labels=['homogeneous','1D layering', '3D tomography'] #xscale=1e-3, yscale=1e2
for i,outdir in enumerate(outdirs):
pointsFile = os.path.join(path, outdir, 'points.h5')
#print(pointsFile)
#x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile, output='cyl',adjustRadial=adjustRadial)
#x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile)
#Load data
x,y,z,ux,uy,uz = pu.extract_points(pointsFile)
#Y = uz / yscale
if xval == 'x':
X = x / xscale
Y1 = ux / yscale
elif xval == 'r':
X = np.hypot(x,y) / xscale
ur_fem = np.hypot(ux,uy)
Y1 = ur_fem / yscale
if adjustRadial: # fix sign flipped by the hypot square root
Y1 = pu.radial2negative(Y1)
x_fem = X # already scaled above (avoid double scaling)
ur_fem = Y1 # already scaled above
uz_fem = uz / yscale
#print(pointsFile)
print(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max())
l, = plt.plot(x_fem,uz_fem,'.-',lw=3,label=outdir)
#l, = plt.plot(x_fem,uz_fem,'.-',lw=2,label=labels[i]) #for 3d heterogeneity example
plt.plot(x_fem,ur_fem,'.--',lw=3, mfc='w',color=l.get_color()) #mfc='none' transparent
# Annotate
plt.axhline(color='k',lw=0.5)
plt.xlabel('Distance [{}]'.format(get_unit(xscale)))
plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))
plt.legend()
#NOTE: custom annotations for 3d heterogeneity
#plt.title('Elastic Heterogeneity Effects')
#plt.legend([l1,l2,l3],['homogeneous','1D layering', '3D tomography'])
if outname: plt.savefig(outname)
if show: plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''",
"def plot(profiler, outdir):\n # go through the outputs of all morphisms\n for cur_name, cur_output in profiler.profile.items():\n cur_outdir = os.path.join(outdir, cur_name)\n if not os.path.exists(cur_outdir):\n os.makedirs(cur_outdir)\n\n ProfilePlotter.plot_morphism_output(cur_output, cur_outdir)",
"def run_plots(self):\n # load the files\n self.pre_dark_file = os.path.join(self.input_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.input_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.input_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.input_dir, 'step_rate.fits')\n self.ramp_file = glob.glob(os.path.join(self.input_dir, '*.fits'))[0]\n\n # plots\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])",
"def plot_morphism_output(data, outdir):\n\n # show the distributions for each variable separately\n for col in data.columns:\n ProfilePlotter._plot_1d(data[col], outfile = os.path.join(outdir, col + \".pdf\"))\n\n # later, maybe also show 2d plots etc.",
"def plot_profiles(self, fig=0, title=''):\n plot_input.plot_profiles(self, fig, title)",
"def plot_directory_numex(path, vals, param='density', outname=None, show=True,\n xscale=1e-3,yscale=1e2):\n #vals = arange(2300.0, 2800.0, 50.0)\n outdirs = np.sort(os.listdir(path))\n plt.figure()\n\n # Plot surface profiles for each parameter\n for val,outdir in zip(vals,outdirs):\n pointsFile = os.path.join(path, outdir, 'points.h5')\n print(pointsFile)\n x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile, output=True, adjustRadial=True)\n x_fem = x_fem / xscale\n ur_fem = ur_fem / yscale\n uz_fem = uz_fem / yscale\n l, = plt.plot(x_fem,uz_fem,'.-',label=str(val))\n plt.plot(x_fem,ur_fem,'.-',color=l.get_color())\n\n # Annotate\n plt.axhline(color='k') #zero displacement line\n plt.title(param)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n plt.legend()\n\n if outname: plt.savefig(outname)\n if show: plt.show()",
"def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()",
"def make_plots(self):\n import os\n os.system('rm schelling*.png')\n file_name = 'schelling_000.png'\n title = 'Initial Population'\n self.plot(title=title, file_name=file_name)\n n_unhappy = 999\n counter = 0\n while n_unhappy > 0 and counter < 100:\n counter += 1\n print counter\n n_unhappy = self.move_unhappy()\n file_name = 'schelling_%03g.png'%(counter)\n title = 'Step %03g'%(counter)\n self.plot(title=title, file_name=file_name)",
"def plot_one_directory(args, dirnames, figname):\n logdir = args.logdir\n num = len(ATTRIBUTES)\n fig, axes = subplots(num, figsize=(13,4*num))\n\n for (dd, cc) in zip(dirnames, COLORS):\n A = np.genfromtxt(join(logdir, dd, 'log.txt'), \n delimiter='\\t', \n dtype=None, \n names=True)\n x = A['Iterations']\n\n for (i,attr) in enumerate(ATTRIBUTES):\n axes[i].plot(x, A[attr], '-', lw=lw, color=cc, label=dd)\n axes[i].set_ylabel(attr, fontsize=ysize)\n axes[i].tick_params(axis='x', labelsize=tick_size)\n axes[i].tick_params(axis='y', labelsize=tick_size)\n axes[i].legend(loc='best', ncol=2, prop={'size':legend_size})\n\n axes[0].set_ylim([-10,10])\n axes[1].set_ylim([-10,10])\n axes[2].set_ylim([0,10])\n axes[3].set_ylim([0,10])\n\n plt.tight_layout()\n plt.savefig(figname)",
"def ex1_plots(instance, destination, prefix, save, animate):\n \n plts = ukf_plots(instance, destination, prefix, save, animate)\n\n truths = truth_parser(instance)\n nan_array= nan_array_parser(instance, truths, instance.base_model)\n #obs, obs_key = obs_parser(instance, True)\n obs_key = obs_key_parser(instance, True)\n preds = preds_parser(instance, True)\n #forecasts = forecasts_parser(instance, True)\n \n ukf_params = instance.ukf_params\n index2 = ukf_params[\"index2\"]\n \n \"remove agents not in model to avoid wierd plots\"\n #obs *= nan_array\n truths *= nan_array\n preds *= nan_array\n #forecasts*= nan_array\n \n \"indices for unobserved agents\"\n not_index2 = np.array([i for i in np.arange(truths.shape[1]) if i not in index2])\n plts.pair_frame(truths, preds, obs_key, 10, destination)\n plts.error_hist(truths[::instance.sample_rate,index2], \n preds[::instance.sample_rate,index2],\"Observed Errors\")\n if len(not_index2)>0:\n plts.error_hist(truths[::instance.sample_rate, not_index2], \n preds[::instance.sample_rate, not_index2],\"Unobserved Errors\")\n \n #plts.path_plots(obs[::instance.sample_rate] , \"Observed\")\n plts.path_plots(preds[::instance.sample_rate], \"Predicted\")\n plts.path_plots(truths, \"True\")\n #plts.path_plots(forecasts[::instance.sample_rate], \"Forecasts\")\n\n if animate:\n #plts.trajectories(truths, \"plots/\")\n plts.pair_frames(truths, preds, obs_key,\n truths.shape[0], \"../../plots/\")",
"def initial_plots(runs):\n for run in runs.keys():\n meta = runs[run]\n plot_pdfs(meta)\n plot_priorsamps(meta)\n plot_ivals(meta)\n# if meta.truNz is not None:\n# plot_true(meta)\n timesaver(meta,'iplot',meta.key)",
"def plot(model, results, filename):\n\n # c = model.compartments.get_one(id='c')\n #\n # rna_1 = model.species_types.get_one(id='rna_1').species.get_one(compartment=c)\n # rna_2 = model.species_types.get_one(id='rna_2').species.get_one(compartment=c)\n # rna_3 = model.species_types.get_one(id='rna_3').species.get_one(compartment=c)\n #\n pops = results.get('populations')\n time = pops.index\n pop_rna_1 = pops['rna_1[c]']\n pop_rna_2 = pops['rna_2[c]']\n pop_rna_3 = pops['rna_3[c]']\n\n pop_atp = pops['atp[c]']\n pop_gtp = pops['gtp[c]']\n pop_utp = pops['ctp[c]']\n pop_ctp = pops['utp[c]']\n\n pop_amp = pops['amp[c]']\n pop_gmp = pops['gmp[c]']\n pop_ump = pops['cmp[c]']\n pop_cmp = pops['ump[c]']\n\n print(pop_rna_1, pop_atp, pop_gtp, pop_utp, pop_ctp)\n\n fig1, axes1 = pyplot.subplots(nrows=3, ncols=1)\n\n axes1[0].plot(time / 3600, pop_rna_1)\n axes1[0].plot(time / 3600, pop_rna_2)\n axes1[0].plot(time / 3600, pop_rna_3)\n axes1[0].set_xlim((time[0] / 3600, time[-1] / 3600))\n axes1[0].set_ylim((0., 10.0))\n axes1[0].legend(loc='upper right')\n\n axes1[1].plot(time / 3600, pop_atp)\n axes1[1].plot(time / 3600, pop_gtp)\n axes1[1].plot(time / 3600, pop_utp)\n axes1[1].plot(time / 3600, pop_ctp)\n axes1[1].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[1].set_ylim((0., 10.0))\n axes1[1].legend(loc='upper right')\n\n axes1[2].plot(time / 3600, pop_amp)\n axes1[2].plot(time / 3600, pop_gmp)\n axes1[2].plot(time / 3600, pop_ump)\n axes1[2].plot(time / 3600, pop_cmp)\n axes1[2].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[2].set_ylim((0., 10.0))\n axes1[2].legend(loc='upper right')\n\n fig1.savefig(filename.format('species'))\n pyplot.close(fig1)",
"def plot_parcles_run(k, v):\n\n L.info(f'Received Input: {v}')\n\n L.info(f\"Plotting results from: {v['filepath']}\")\n\n plotpath = Path(__file__).parent.parent / 'plots'\n filename = v['id'] + '.png'\n plotfile = str(plotpath / filename) \n\n plotTrajectoriesFile(v['filepath'], mode='2d', show_plt=False)\n f = plt.gcf()\n f.savefig(plotfile)\n\n L.info(f'Saved plot to: {plotfile}')",
"def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')",
"def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')",
"def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot",
"def plot(data, filenames, game, destination, num_interplt):\n if isinstance(filenames, str):\n plt.plot(data[:,0], data[:,1], label=filenames)\n ax = plt.gca()\n ax.xaxis.get_major_formatter().set_powerlimits((0,1))\n plt.xlabel('# of steps')\n plt.ylabel('Cumulative reward')\n plt.title('{}'.format(game))\n plt.legend()\n figname = '{}_{}.png'.format(game, datetime_random_str())\n plt.savefig('{}/{}'.format(destination, figname))\n print('Images saved as {}/{}'.format(destination, figname))\n plt.close()\n else:\n print('{} results have been found.'.format(len(filenames)))\n agents = []\n for i in range(len(filenames)):\n # :-24 is to remove the previous datetime string\n # :-30 is to remove the train label\n agents.append(filenames[i][:-30])\n uni_agents = np.unique(np.array(agents))\n print('Game result from agent: {}'.format(uni_agents))\n fig = plt.figure()\n for agent in uni_agents:\n i_data = []\n for i in range(len(agents)):\n if agent == agents[i]:\n i_data.append(data[i])\n i_data = calculate_interpolation(np.array(i_data), num_interplt)\n plot_mean_standard_error(i_data, agent)\n plt.legend()\n ax = plt.gca()\n ax.xaxis.get_major_formatter().set_powerlimits((0,1))\n plt.xlabel('# of steps')\n plt.ylabel('Cumulative reward')\n plt.title('{}'.format(game))\n figname = '{}_{}.png'.format(game, datetime_random_str())\n plt.savefig('{}/{}'.format(destination, figname))\n plt.close()\n print('Images saved as {}/{}'.format(destination, figname))",
"def plot_pipeline(differences_dir, plots_dir_intonation, num_quantiles = 31):\n perf_list = ['54363310_1939750539', '540791114_1793842568']\n difference_path_list = [os.path.join(differences_dir, perf_list[i] + \".npy\") for i in range(len(perf_list))]\n comparisons_list = [np.load(path) for _, path in enumerate(difference_path_list)]\n num_samples = 10000\n # quantile indices\n q_indices = (np.linspace(0, 1, num_quantiles)*(num_samples-1)).astype(np.int32)\n plt.style.use('ggplot')\n labels = ['perf. A', 'perf. B']\n colors = ['blue', 'red']\n linestyles = ['dotted', 'dashed']\n grid = plt.GridSpec(2, 2)\n ax1 = plt.subplot(grid[1, 0])\n ax2 = plt.subplot(grid[1, 1])\n ax4 = plt.subplot(grid[0, :])\n ax4.plot(comparisons_list[0], color=colors[0], label=labels[0], linestyle=linestyles[0])\n ax4.plot(comparisons_list[1], color=colors[1], label=labels[1], linestyle=linestyles[1])\n ax4.set_title(\"Difference between MIDI and pYIN, two performances\")\n ax4.set_ylabel(\"Cents\")\n ax4.set_xlabel(\"Frames\")\n ax4.axhline(y=200, linestyle=\"solid\", linewidth=0.7, c=\"black\", zorder=2, label=\"thresh.\")\n ax4.axhline(y=-200, linestyle=\"solid\", linewidth=0.7, c=\"black\", zorder=2)\n ax4.legend(loc=\"upper right\")\n ax1.set_title(\"10k random sample of distances\")\n ax1.set_ylabel(r\"$|$Cents$|$\")\n ax1.set_xlabel(\"Frames sorted by distance\")\n ax2.set_title(\"Sample quantiles\")\n ax2.set_xlabel(\"Quantile indices\")\n # run analysis song by song\n for i, arr in enumerate(comparisons_list):\n # random sample so all arrays have the same size\n samples = np.random.choice(arr, num_samples, replace=True)\n # sort\n samples = np.sort(np.abs(samples))\n # discard the high values (might be due to misalignment, etc...)\n samples = samples[samples <= 200]\n samples = np.random.choice(samples, num_samples, replace=True)\n samples = np.sort(np.abs(samples))\n ax1.plot(samples, color=colors[i], linestyle=linestyles[i], label=labels[i])\n # get the quantiles\n samples = samples[q_indices]\n ax2.plot(samples, color=colors[i], linestyle=linestyles[i], label=labels[i])\n ax1.legend()\n ax2.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(plots_dir_intonation, \"data processing pipeline.eps\"), format=\"eps\")\n plt.show()",
"def plot(path, subjects):\n transformToXYZmm = np.array([[-3.125, 0, 0, 81.250], [0, 3.125, 0, -115.625], [0, 0, 6, -54.000], [0, 0, 0, 1.000]])\n data = data_load.load_data(path, subjects)\n dimx = int(data[0][\"meta\"][\"dimx\"][0])\n dimy = int(data[0][\"meta\"][\"dimy\"][0])\n dimz = int(data[0][\"meta\"][\"dimz\"][0])\n coordToCol = data[0][\"meta\"][\"coordToCol\"][0][0]\n images = {}\n max_val = 0\n voxels = np.load(\"data/general_selected_500_1.npy\")\n directory = os.listdir(\"data/input/\")\n bar = pyprind.ProgBar(len(directory), title='Info extraction and Image Building')\n bar2 = pyprind.ProgBar(len(images.keys()), title='Saving Pictures')\n for file in directory:\n file_name = \"data/input/{}\".format(file)\n fh = open(file_name)\n activation_values = np.asarray(list(map(lambda x: float(x), filter(lambda x: x != '', fh.read().split(\",\")))))\n fh.close()\n plot_matrix = np.zeros((dimx, dimy, dimz))\n for x in range(dimx):\n for y in range(dimy):\n for z in range(dimz):\n indice = coordToCol[x][y][z]\n if indice != 0:\n if indice in list(voxels):\n voxel_indice = list(voxels).index(indice)\n value = activation_values[voxel_indice]\n if abs(value) > max_val:\n max_val = abs(value)\n plot_matrix[x][y][z] = value\n image = nib.Nifti1Image(plot_matrix, transformToXYZmm)\n images[file_name] = image\n bar.update(force_flush=True)\n print(bar)\n for image in images:\n plotting.plot_glass_brain(images[image], display_mode='ortho', vmax=max_val, plot_abs=False, threshold=None, colorbar=True, output_file=\"{}-wom1.png\".format(image))\n bar2.update(force_flush=True)\n print(bar2)",
"def app_SN_plot_all_active_sample(self, sample_list: List, phases):\n\n for sample in sample_list:\n sample_name = sample['sample_name']\n print(f'processing: {sample_name}')\n dfs, phase_dirs, sizer_val, styler_val = self.preprocessing_before_plotting(sample, phases=phases)\n\n self.app_SN_combined_master(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_combined_temperature_series(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_combined_temperature_gradient(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_combined_moisture_series(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_combined_moisture_gradient(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_combined_series_moist_vs_temp(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_separate_temperature_series(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_separate_temperature_gradient(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_separate_moisture_series(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_separate_moisture_gradient(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_separate_series_moist_vs_temp(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_last_24h_plots(dfs, phase_dirs, sizer_val, styler_val)\n self.app_SN_hot_end_temperature(dfs, phase_dirs, sizer_val, styler_val)",
"def save_plots(self):\n pdir = os.path.splitext(self.filename)[0] + '_plots'\n if not os.path.exists(pdir):\n os.mkdir(pdir)\n\n for ii in range(self.uv.n_ant):\n fig, ax = self.plot_single_baseline_dual_pol(ii+1, ii+1)\n print \"Saving ant %i\"%ii\n plt.savefig(os.path.join(pdir, 'ant-%i.png'%ii))\n plt.clf()",
"def compare_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2013/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\tlabels = ['no APMB', 'APMB']\n\tdeep = {}\n\tuzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795 # Why?\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''",
"def plot_results(dir_path):\n results, labels, dx = get_results_labels(dir_path)\n\n colors = plt.cm.get_cmap('tab10', len(labels)+1)\n fig = plt.figure(figsize=(16, 9))\n for i, (label, result) in enumerate(zip(labels, results)):\n plt.plot(np.arange(result.shape[1]) * dx, np.mean(result, 0),\n color=colors(i), linewidth=2, label=label)\n plt.fill_between(np.arange(len(result[0])) * dx,\n np.mean(result, 0) - np.std(result, 0),\n np.mean(result, 0) + np.std(result, 0),\n alpha=0.25, color=colors(i))\n plt.title('Training Performance of Different Algorithms', fontsize=25)\n plt.ylabel('Cumulative return', fontsize=20)\n plt.xlabel('Steps', fontsize=20)\n plt.xlim([0, results[0].shape[1] * dx])\n plt.tick_params(labelsize=15)\n plt.ticklabel_format(axis='both', style='sci', scilimits=(-2, 2))\n plt.legend(fontsize=20)\n plt.grid(linestyle=':')\n plt.show()\n\n return fig",
"def generate_images(self, model, test_input, step, dst_dir):\n prediction = model(test_input)\n\n plt.figure(figsize=(12, 12))\n display_list = [test_input[0], prediction[0]]\n title = ['Input Image', 'Predicted Image']\n\n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.title(title[i])\n # getting the pixel values between [0, 1] to plot it.\n plt.imshow(display_list[i] * 0.5 + 0.5)\n plt.axis('off')\n filename = os.path.join(dst_dir, 'generated_imgs_at_step_{:06d}.png'.format(step))\n plt.savefig(filename)",
"def make_plots(self):\n n_rounds = self.run.n_rounds\n\n log.info('Making %d frames', n_rounds)\n args = [self._get_for_parallel(i) for i in range(n_rounds)]\n self.lbv.map(_plot_helper, args)",
"def _step_plot_closure(self, step_number):\n for image_name, image, clean_net_output in zip(self.images_names, self.images, self.clean_nets_outputs):\n plot_image_grid(image_name + \"_watermark_clean_{}\".format(step_number),\n [np.clip(torch_to_np(self.watermark_net_output), 0, 1),\n np.clip(torch_to_np(clean_net_output), 0, 1)])\n plot_image_grid(image_name + \"_learned_image_{}\".format(step_number),\n [np.clip(torch_to_np(self.watermark_net_output) * torch_to_np(self.mask_net_output) +\n (1 - torch_to_np(self.mask_net_output)) * torch_to_np(clean_net_output),\n 0, 1), image])",
"def visualize(self, paths, instance, during_analysis):\r\n xvalues = np.arange(self.data.shape[0])\r\n\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n\r\n \"\"\"The visualizer now outputs images of the best-fit results to hard-disk (checkout `visualizer.py`).\"\"\"\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=self.data,\r\n title=\"Data\",\r\n ylabel=\"Data Values\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"data\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=model_data,\r\n title=\"Model Data\",\r\n ylabel=\"Model Data Values\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"model_data\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=residual_map,\r\n title=\"Residual Map\",\r\n ylabel=\"Residuals\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"residual_map\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=chi_squared_map,\r\n title=\"Chi-Squared Map\",\r\n ylabel=\"Chi-Squareds\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"chi_squared_map\",\r\n )",
"def walkplot(cons):\n # Call always after cons.runn()!\n if not hasattr(cons, \"outplot\"):\n print(\"Consortium must run before analyzing the output of the run!\")\n return\n global plt\n if not plt:\n import matplotlib.pyplot as plt\n\n # keep original inputs\n original_metplots = dcp(cons.mets_to_plot)\n original_outplot = dcp(cons.outplot)\n cons.outplot = \"tmp.png\"\n mets = [k for k in cons.media]\n # loop over metabolites, plot them and let the user close the window\n for m in range(0, len(mets), 4):\n cons.mets_to_plot = [mets[m], mets[m + 1], mets[m + 2], mets[m + 3]]\n plot_comm(cons)\n plt.show()\n print(\"Image number\", m / 4)\n\n # clean temporary files, return to orginal parameters\n plt.close(\"all\")\n os.remove(cons.outplot)\n cons.mets_to_plot = original_metplots\n cons.outplot = original_outplot\n return",
"def save_plot(epoch_num, step, rewards, filepath):\n fig, ax = plt.subplots() \n ax.plot(range(0, epoch_num + 1, step), rewards)#,'.')\n ax.plot(range(0, epoch_num + 1, step), np.ones(len(range(0, epoch_num + 1, step)))*rewards[0], 'r')\n fig.savefig(filepath)\n plt.close(fig) # close the figure ",
"def main():\n import argparse\n import os\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])\n parser.add_argument('--num_timesteps', type=int, default=int(10e6))\n parser.add_argument('--xaxis', help='Varible on X-axis', default=X_TIMESTEPS)\n parser.add_argument('--task_name', help='Title of plot', default='Breakout')\n args = parser.parse_args()\n args.dirs = [os.path.abspath(folder) for folder in args.dirs]\n plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)\n plt.show()"
] | [
"0.68558604",
"0.6830801",
"0.682716",
"0.6536897",
"0.6412557",
"0.6166356",
"0.61190796",
"0.60816085",
"0.6074869",
"0.60712504",
"0.598796",
"0.59763676",
"0.5945832",
"0.59333724",
"0.5919523",
"0.589908",
"0.58856714",
"0.58749056",
"0.5861554",
"0.5851671",
"0.58485967",
"0.5831483",
"0.58244705",
"0.5820081",
"0.58078563",
"0.577831",
"0.5776586",
"0.57755756",
"0.57658094",
"0.5759535"
] | 0.7218871 | 0 |
Plot grid of surface displacement maps from each output/step folder if normalize=True, use step01 colorbar for all images | def plot_directory_surface(path,figsize=(17,11), comp=2, nrow=1, norm=None,
cbar='each', cloc='top', outname=None, labels='1', show=True):
outdirs = np.sort(os.listdir(path))
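    # each entry of outdirs (e.g. step01, step02, ...) is expected to hold a surface.h5 file, read in the loop below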
nplots = len(outdirs)
    ncol = np.ceil(nplots/nrow).astype(int)  # np.int was removed from NumPy; the builtin int works the same here
fig = plt.figure(figsize=figsize)
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols = (nrow, ncol),
direction="row",
axes_pad = 0.25,
add_all=True,
label_mode = labels, #'all', 'L', '1'
share_all = True,
cbar_location=cloc, #top,right
cbar_mode=cbar, #each,single,None
cbar_size=0.1,#"7%",
cbar_pad=0.0#,"1%",
)
#NOTE: if cbar='single',cloc='right', a way to automatically normalize by grid[0]
#if normalize:
# verts,data,tris = load_h5(os.path.join(path,'step01/surface.h5'))
# if comp==3: #radial displacements
# z = np.hypot(data[:,:,0], data[:,:,1]).flatten()
# else:
# z = data[:,:,comp].flatten()
# norm = Normalize(vmin=np.nanmin(z), vmax=np.nanmax(z))
#else:
# norm=None
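    # draw each step's surface.h5 into its own cell of the image grid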
for i,outdir in enumerate(outdirs):
ax = grid[i]
print(outdir)
im = plotSurface(os.path.join(path,outdir,'surface.h5'), comp=comp, ax=ax,
points=False, tris=False, profile=False, annotate=False, norm=norm,
xscale=1e-3, yscale=1e-3)
# colorbar settings, not sure what's up with set_xticks...
ax.cax.colorbar(im)
#cmin = np.nanmin(data)
#cmax = np.nanmax(data)
#ax.cax.set_xticks([cmin,0,cmax])
# label upper left
ax.text(0.05,0.95,outdir,
weight='bold',
ha='left',
va='top',
bbox=dict(facecolor='white'),
transform=ax.transAxes)
#ax.set_ylabel(outdir)
#ax.tick_params(labelbottom=0,labeltop=0,labelleft=0,labelright=0,
# bottom=0,top=0,left=0,right=0)
#if cbar=='single':
# grid[0].cax.colorbar(im)
# Annotate Plot
# don't show grid frames without data...
Nextra = grid.ngrids - nplots
if Nextra > 0:
for ax in grid[-Nextra:]:
#print(ax)
ax.set_visible(False)
ax.cax.set_visible(False)
fig.suptitle(path, fontsize=14, fontweight='bold')
if outname: plt.savefig(outname)
if show: plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def debugplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n Z0=data[0].real\n Z1=data[1].real\n Z2=data[2].real\n Z3=data[3].real\n Z4=data[4].real\n Z5=data[5].real\n \n Z=[Z0,Z1,Z2,Z3,Z4,Z5]\n \n for i in range(6):\n grid[i].set_title(r\"$t=%u\\Delta t$\"%(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z[i], extent=(-2, 2, -2, 2), interpolation=\"Nearest\",origin=\"lower\",cmap='seismic',vmin=-1,vmax=1)\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n fig.colorbar(im, ax=grid[2],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])",
"def show_colormaps():\n maps = sorted(cmlist)\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(5, 10))\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n\n plt.show()",
"def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(8, 6))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/DCGAN.png')\n plt.show()",
"def ListColorMaps(self):\n p.rc('text', usetex=False)\n a=p.outerproduct(numpy.arange(0,1,0.01),numpy.ones(10))\n p.figure(figsize=(10,5))\n p.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)\n maps=[m for m in p.cm.datad.keys() if not m.endswith(\"_r\")]\n maps.sort()\n l=len(maps)+1\n i=1\n for m in maps:\n p.subplot(1,l,i)\n p.axis(\"off\")\n p.imshow(a,aspect='auto',cmap=p.get_cmap(m),origin=\"lower\")\n p.title(m,rotation=90,fontsize=10)\n i=i+1\n #savefig(\"colormaps.png\",dpi=100,facecolor='gray')\n p.show()",
"def visualize_M_gridworld(self, state=0):\n\n\t\tplt.subplot(221); plt.imshow(self.M[12,0,:].reshape(5,5)), plt.colorbar()\n\t\tplt.subplot(222); plt.imshow(self.M[12,1,:].reshape(5,5)), plt.colorbar()\n\t\tplt.subplot(223); plt.imshow(self.M[12,2,:].reshape(5,5)), plt.colorbar()\n\t\tplt.subplot(224); plt.imshow(self.M[12,3,:].reshape(5,5)), plt.colorbar()\n\t\tplt.show()",
"def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/WGAN.png')\n plt.show()",
"def debugmyplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n \n for i in range(6):\n Z = np.real(data[i])\n grid[i].set_title(r\"$t={:03.1f}$\".format(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z, extent=(-2, 2, -2, 2), interpolation=\"Gaussian\",origin=\"lower\",cmap='seismic',vmin=-1,vmax=1)\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n #plt.colorbar(im, cax = grid.cbar_axes[0])\n #ticks = np.logspace(1e-6,1,7)\n #lf = LogFormatter(10, labelOnlyBase=False)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n ticks=np.logspace(1e-6,1e-1,6)\n fig.colorbar(im, ax=grid[5],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])",
"def plot_figure11():\n height_ceilings = [200., 300., 400.]\n height_ceiling_ids = [list(height_range_ceilings).index(height_ceiling) for height_ceiling in height_ceilings]\n\n baseline_height_ceiling = 500.\n baseline_height_ceiling_id = list(height_range_ceilings).index(baseline_height_ceiling)\n\n plot_item00 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[0], :, :],\n 'contour_fill_levels': np.linspace(50, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(50, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item01 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[1], :, :],\n 'contour_fill_levels': np.linspace(70, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(70, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item02 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[2], :, :],\n 'contour_fill_levels': np.linspace(80, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(80, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n\n column_titles = [\"200 m\", \"300 m\", \"400 m\"]\n plot_items = [plot_item00, plot_item01, plot_item02]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)\n\n linspace10 = np.linspace(0., 11., 21)\n plot_item10 = {\n 'data': -(100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[0], :, :])+\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace10,\n 'contour_line_levels': sorted([1.1]+list(linspace10[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace10[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability decrease [%]',\n }\n linspace11 = np.linspace(0., 23., 21)\n plot_item11 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[1], :, :])-\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace11,\n 'contour_line_levels': sorted([2.3]+list(linspace11[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace11[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n linspace12 = np.linspace(0., 38., 21)\n plot_item12 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[2], :, :])-\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace12,\n 'contour_line_levels': sorted([3.8]+list(linspace12[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace12[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n\n column_titles = None\n plot_items = [plot_item10, plot_item11, plot_item12]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)",
"def plotmap(self):\n if self.plotfigure is None: return\n\n self.plotfigure.clf()\n collist = [\"#%.2x%.2x%.2x\" % (i, i, i) for i in self.currentshades]\n cmap = colors.ListedColormap(collist)\n if self.gs.isfixed:\n crange = [self.minvalue] + self.currentvalues\n elif self.gs.isperc:\n crange = np.percentile(self.imagearray, [0.0] + self.currentpercents)\n else:\n crange = np.array([self.minstdd] + self.currentnsigs) * self.stdvalue + self.meanvalue\n norm = colors.BoundaryNorm(crange, cmap.N)\n img = plt.imshow(self.imagearray, cmap=cmap, norm=norm, origin='lower')\n plt.colorbar(img, norm=norm, cmap=cmap, boundaries=crange, ticks=crange)\n if self.imagetitle is not None:\n plt.title(self.imagetitle)",
"def show_channels(chmaps, n_cols=8, normalize=None, ofpath=None):\n n_rows = (chmaps.shape[0] - 1) // n_cols + 1\n\n if n_rows == 1:\n n_cols = chmaps.shape[0]\n\n if normalize is None:\n vmin, vmax = None, None\n else:\n vmin, vmax = normalize\n\n fig = plt.figure()\n\n grid = AxesGrid(fig, 111,\n nrows_ncols=(n_rows, n_cols),\n axes_pad=0.0,\n share_all=True)\n\n for i, chmap in enumerate(chmaps):\n grid[i].imshow(chmap, vmin=vmin, vmax=vmax)\n\n grid.axes_llc.get_xaxis().set_ticks([])\n grid.axes_llc.get_yaxis().set_ticks([])\n\n if ofpath is None:\n plt.get_current_fig_manager().window.showMaximized()\n plt.show()\n else:\n fig.savefig(ofpath)\n plt.close(fig)",
"def plot_locations_np(self, c_img, locations, file_name_prefix=''):\n other_axes = [[1, 2], [2, 0], [1, 0]]\n shape_str = str(c_img.shape)\n create_folder(self._output_folder)\n for id_loc, c_loc in enumerate(locations):\n plt.subplots(1, 3, squeeze=True, figsize=(8 * 3, 8))\n for id_plane, plane in enumerate([PlaneTypes.AXIAL, PlaneTypes.SAGITTAL, PlaneTypes.CORONAL]):\n c_loc = np.array(c_loc)\n c_dim = get_axis_idx(plane)\n c_slice = c_loc[c_dim]\n\n if c_slice < c_img.shape[c_dim]:\n pt = c_loc[other_axes[c_dim]]\n ax = plt.subplot(1, 3, id_plane + 1)\n ax.imshow(get_proper_plane(c_img, plane, c_slice), cmap='gray')\n ax.scatter(pt[0],pt[1], color=self._COLORS[id_loc])\n c_title = F'{c_slice} {pt} -- {plane.value} -- {shape_str} '\n plt.title(c_title, fontsize=20)\n else:\n raise Exception(\n F'The slice {c_slice} is larger than the shape of the array {c_img.shape[c_dim]}')\n\n pylab.savefig(join(self._output_folder, F'{file_name_prefix}_Loc_{id_loc}.jpg'), bbox_inches='tight')\n self._close_figure()",
"def testing():\n\n from matplotlib import pyplot as plt\n\n dem_fn = \"../../tests/testdata/ca_dem_30s/ca_dem_30s/\"\n dir_fn = \"../../tests/testdata/ca_dir_30s/ca_dir_30s/\"\n\n fv = -32768\n\n grid = Grid.from_raster(dir_fn, \"dir\", nodata=fv)\n grid.read_raster(dem_fn, \"dem\", nodata=fv)\n\n lon, lat = -99.0619, 20.933\n\n fig, (ax1, ax2) = plt.subplots(1, 2)\n\n idem = ax1.imshow(\n grid.view(\"dem\"), extent=grid.extent, cmap=\"cubehelix\", zorder=1, vmin=0\n )\n plt.colorbar(idem, ax=ax1, label=\"Elevation (m)\")\n\n idir = ax2.imshow(\n grid.view(\"dir\"), extent=grid.extent, cmap=\"viridis\", zorder=2, vmin=0\n )\n boundaries = [0] + sorted(list(dirmap))\n plt.colorbar(idir, ax=ax2, boundaries=boundaries, values=sorted(dirmap))\n\n grid.catchment(\n data=\"dir\",\n x=lon,\n y=lat,\n dirmap=dirmap,\n out_name=\"catch\",\n xytype=\"label\",\n nodata_in=nodata,\n )\n catch = grid.polygonize(grid.catch.astype(\"int32\"), connectivity=8)\n grid.clip_to(\"catch\")\n\n for (p, v) in catch:\n poly = geometry.asShape(p)\n ax1.plot(*poly.exterior.xy, color=\"white\")\n plt.show()",
"def gridPlot12(img_stack):\r\n F = plt.figure(figsize = (30,30))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (3,4), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:12]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot12.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return",
"def myplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n \n for i in range(6):\n Z = absolute(data[i])**2\n grid[i].set_title(r\"$t={:03.1f}$\".format(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z, extent=(-2, 2, -2, 2), interpolation=\"gaussian\",origin=\"lower\",cmap=cmap,norm=LogNorm(vmin=1e-5,vmax=1))\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n #plt.colorbar(im, cax = grid.cbar_axes[0])\n #ticks = np.logspace(1e-6,1,7)\n #lf = LogFormatter(10, labelOnlyBase=False)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n ticks=np.logspace(1e-6,1e-1,6)\n fig.colorbar(im, ax=grid[5],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])",
"def map_plot(self, iter_no):\n \n m = self._m\n n = self._n\n plt.figure()\n label=np.zeros(m*n)\n self._trained = True\n mapped = self.map_vects(datanorm)\n mapped=tuple(map(tuple, mapped))\n c=Counter(mapped)\n \n c= sorted(c.items(), key=itemgetter(1))\n a=[m*n]\n for i in range(0,len(c)):\n x=(((c[i])[0])[0])\n y=(((c[i])[0])[1])\n z=((c[i])[1])\n plt.plot(x, y, 'ro', markersize= z/(2*m*n)) \n plt.savefig('exoplanet{}.png'.format(iter_no))\n p=plt.imread('exoplanet{}.png'.format(iter_no))\n imgs.append(p)\n plt.show()\n plt.close()\n print(c)\n self._trained = False",
"def data_model_residual(surface, dem, unw, incidence):\n los,fem_los,residual = pu.los2pylith(surface,dem,unw,incidence)\n\n # Using image_grid\n fig = plt.figure()\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols = (1, 3),\n direction=\"row\",\n axes_pad = 0.05,\n add_all=True,\n label_mode = \"1\",\n share_all = True,\n cbar_location=\"top\",\n cbar_mode=\"each\", #\"single\"\n cbar_size=\"5%\",\n cbar_pad=0.05,\n )\n #grid[0].set_xlabel(\"X\")\n #grid[0].set_ylabel(\"Y\")\n #grid2[0].set_xticks([-2, 0])\n #grid2[0].set_yticks([-2, 0, 2])\n\n #NOTE: could find global min/max from three arrays here\n norm = Normalize(vmin=np.nanmin(los), vmax=np.nanmax(los))\n #for ax,data in zip(grid,[los,fem_los,residual]):\n im = grid[0].imshow(los,origin='upper',norm=norm,cmap=plt.cm.jet)\n grid[0].axhline(100,color='m') #show profile\n cax = grid.cbar_axes[0]\n cax.colorbar(im)\n grid[1].axhline(100,color='k') #show profile\n im1 = grid[1].imshow(fem_los,origin='upper',norm=norm,cmap=plt.cm.jet)\n\n cax = grid.cbar_axes[1]\n cax.colorbar(im1)\n\n im2 = grid[2].imshow(residual,origin='upper',cmap=plt.cm.jet)\n cax = grid.cbar_axes[2]\n cax.colorbar(im2)\n\n # Add letter labels\n for ax, label in zip(grid,['A', 'B', 'C']):\n ax.text(0.05, 0.95, label, transform=ax.transAxes,\n fontsize=16, fontweight='bold', va='top')\n\n # Annotate\n # NOTE: way too high!\n #plt.suptitle('FEM Results')\n\n # Add profile\n # NOTE: for now EW, but would be easy to do arbitrary line, and convert to km\n fig = plt.figure()\n #x = arange(los.shape[0])\n plt.axhline(color='k',ls='--')\n plt.plot(los[100],'m.',label='data')\n plt.plot(fem_los[100],'k-',lw=2,label='model')\n plt.xlabel('Distance [km]')\n plt.ylabel('Distance [km]')\n plt.legend(loc='upper left')\n\n plt.show()",
"def plot_figure10():\n height_ceiling = 500.\n height_ceiling_id = list(height_range_ceilings).index(height_ceiling)\n\n plot_item00 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_id, :, :],\n 'contour_fill_levels': np.linspace(50, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(50, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item01 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank300\"][height_ceiling_id, :, :],\n 'contour_fill_levels': np.linspace(0, 80, 21),\n 'contour_line_levels': np.linspace(0, 80, 21)[::4][2:],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(0, 80, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n }\n plot_item02 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank1600\"][height_ceiling_id, :, :],\n 'contour_fill_levels': np.linspace(0, 45, 21),\n 'contour_line_levels': np.linspace(0, 45, 21)[::4][2:],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(0, 45, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n }\n\n column_titles = [\"40 $W/m^2$\", \"300 $W/m^2$\", \"1600 $W/m^2$\"]\n plot_items = [plot_item00, plot_item01, plot_item02]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)\n\n plot_item10 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_id, :, :])-\n (100.-nc.variables[\"p_fixed_rank40\"][0, :, :]),\n 'contour_fill_levels': np.linspace(0., 22., 21),\n 'contour_line_levels': sorted([1.1, 2.2]+list(np.linspace(0., 22., 21)[::4][:-2])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(0., 22., 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n plot_item11 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank300\"][height_ceiling_id, :, :])-\n (100.-nc.variables[\"p_fixed_rank300\"][0, :, :]),\n 'contour_fill_levels': np.linspace(0., 31., 21),\n 'contour_line_levels': np.linspace(0., 31., 21)[::4][:-2],\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(0., 31., 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n plot_item12 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank1600\"][height_ceiling_id, :, :])-\n (100.-nc.variables[\"p_fixed_rank1600\"][0, :, :]),\n 'contour_fill_levels': np.linspace(0., 26., 21),\n 'contour_line_levels': np.linspace(0., 26., 21)[::4][:-2],\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(0., 26., 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n\n column_titles = None\n plot_items = [plot_item10, plot_item11, plot_item12]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)",
"def plot_all(self, cmap='Greys', size=(10,10)):\n\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2,\n ncols=2,\n sharex=True,\n sharey=True)\n\n ax0.imshow(self.I, cmap=cmap)\n ax0.set_title(f'Original {self.I.shape}',\n fontsize=15)\n ax1.imshow(self.W, cmap=cmap)\n ax1.set_title(f'W Loadings {self.W.shape}',\n fontsize=15)\n ax2.imshow(self.H, cmap=cmap)\n ax2.set_title(f'H Loadings {self.H.shape}',\n fontsize=15)\n ax3.imshow(self.E, cmap=cmap)\n ax3.set_title(f'W * H with n={self._n_components} {self.E.shape}',\n fontsize=15)\n\n fig.set_figheight(size[0])\n fig.set_figwidth(size[1])\n fig.tight_layout()\n plt.show()",
"def SH_surface_plots(n_max=6,figsize=(15,15),fs=15,saveA=True,show=False,dpi=400,vis_type='real'):\n\n N = 100j\n\n for n in range(n_max+1):\n for m in range(n+1):\n plt.close('all')\n print(\"working on Y_%s^%s\" % (n,m) )\n\n PHI,THETA = np.mgrid[0:2*np.pi:N*2, 0:np.pi:N]\n if vis_type == 'real':\n R = sp.sph_harm(m,n,PHI,THETA).real\n if vis_type == 'modulus':\n r = sp.sph_harm(m,n,PHI,THETA)\n R = r * r.conjugate()\n if vis_type == 'unit':\n R = sp.sph_harm(m,n,PHI,THETA).real + 1\n\n X = np.abs(R) * np.sin(THETA) * np.cos(PHI)\n Y = np.abs(R) * np.sin(THETA) * np.sin(PHI)\n Z = np.abs(R) * np.cos(THETA)\n\n norm = colors.Normalize()\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(14,10))\n sm = cm.ScalarMappable(cmap=cm.seismic)\n ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=cm.seismic(norm(R)))\n ax.set_title('real$(Y^%s_%s)$' % (m,n), fontsize=fs)\n ax.set_aspect(1)\n sm.set_array(R)\n fig.colorbar(sm, shrink=0.8)\n\n if saveA:\n fig.savefig('images/%s/%s_%s.png' % (vis_type,n,m), dpi=dpi)\n if show:\n plt.show()\n\n # print(\"\\n only +m values are used.\")\n # for n in range(n_max+1):\n # for m in range(n+1):\n # plt.close('all')\n # print(\"\\n n,m = %s,%s\" % (n,m) )\n #\n # R,X,Y,Z = harmonics(m,n)\n #\n # fig = plt.figure(figsize=figsize)\n # ax = plt.subplot(projection='3d')\n # ax.set_aspect(1)\n # ax.set_title(\"n: %s m: %s\" % (n,m), fontsize=fs+2)\n # ax.plot_surface(X,Y,Z,\\\n # cmap = cm.seismic,\n # norm = colors.Normalize( vmin=np.min(R),vmax=np.max(R) )\\\n # )\n #\n # if saveA:\n # fig.savefig('images/%s_%s.png' % (n,m), dpi=dpi)\n # if show:\n # plt.show()",
"def three_mass_FUV_maps(gal_indices,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig = plt.figure(figsize=(15,14.5),constrained_layout=False)\n gs1 = fig.add_gridspec(nrows=3, ncols=3, wspace=0.0, hspace=0.0)\n\n rotate = False\n for row_i,gal_index in enumerate(gal_indices):\n\n R_max = p.R_max[row_i]\n\n ax1 = fig.add_subplot(gs1[row_i, 0])\n m = map_sim_property(add=True,ax=ax1,gal_index=gal_index, \\\n prop='m',R_max=R_max,vmin=0,vmax=7,\\\n pix_size_kpc=0.5,sim_type='simgas',cmap='viridis',log=True,colorbar=False,rotate=rotate,text=p.text)\n frame = plt.gca()\n #if row_i != 2: frame.axes.get_xaxis().set_visible(False)\n if row_i != 2: ax1.set_xlabel('')\n if row_i == 0:\n cbaxes = fig.add_axes([0.05, 0.93, 0.25, 0.01]) \n cb = plt.colorbar(m, orientation='horizontal', cax = cbaxes)\n cbaxes.xaxis.set_ticks_position('top')\n cb.ax.set_title(\"log $\\Sigma_{\\mathrm{gas}}$ [M$_{\\odot}$ kpc$^{-2}$]\")\n # Make a size indicator\n #ax1.plot([p.R_max-15,p.R_max-5],[-p.R_max+5,-p.R_max+5],lw=4,color='white')\n #ax1.text(p.R_max-16,-p.R_max+7,'10 kpc',color='white',fontsize=12)\n # Remove axes ticks\n ax1.tick_params(axis='x',which='both',labelbottom=False)\n ax1.tick_params(axis='y',which='both',labelleft=False)\n\n ax1 = fig.add_subplot(gs1[row_i, 1])\n m = star_map(add=True,ax=ax1,R_max=R_max,vmin=6,vmax=9,\\\n gal_index=gal_index,colorbar=False,rotate=rotate)\n frame = plt.gca()\n #if row_i != 2: frame.axes.get_xaxis().set_visible(False)\n if row_i != 2: ax1.set_xlabel('')\n frame.axes.get_yaxis().set_visible(False)\n if row_i == 0:\n cbaxes = fig.add_axes([0.375, 0.93, 0.25, 0.01]) \n cb = plt.colorbar(m, orientation='horizontal', cax = cbaxes) \n cbaxes.xaxis.set_ticks_position('top')\n cb.ax.set_title(\"log stellar age [yr]\")\n # Make a size indicator\n #ax1.plot([p.R_max-15,p.R_max-5],[-p.R_max+5,-p.R_max+5],lw=4,color='k')\n #ax1.text(p.R_max-16,-p.R_max+7,'10 kpc',color='k',fontsize=12)\n # Remove axes ticks\n ax1.tick_params(axis='x',which='both',labelbottom=False)\n ax1.tick_params(axis='y',which='both',labelleft=False)\n\n ax1 = fig.add_subplot(gs1[row_i, 2])\n m = FUV_map(add=True,ax=ax1,gal_index=gal_index,R_max=R_max,vmin=-10,vmax=3,select=p.select,cmap='twilight',colorbar=False,rotate=rotate)\n frame = plt.gca()\n #if row_i != 2: frame.axes.get_xaxis().set_visible(False)\n if row_i != 2: ax1.set_xlabel('')\n frame.axes.get_yaxis().set_visible(False)\n if row_i == 0:\n cbaxes = fig.add_axes([0.69, 0.93, 0.25, 0.01]) \n cb = plt.colorbar(m, orientation='horizontal', cax = cbaxes) \n cbaxes.xaxis.set_ticks_position('top')\n cb.ax.set_title('FUV flux [W/m$^2$/arcsec$^2$]')\n # Make a size indicator\n # if row_i == 2:\n # print('Adding size indicator')\n #ax1.text(p.R_max-16,-p.R_max+7,'10 kpc',color='w',fontsize=12)\n #ax1.plot([p.R_max-15,p.R_max-5],[-p.R_max+5,-p.R_max+5],lw=4,color='w')\n # else:\n ax1.text(R_max-16,-R_max+7,'10 kpc',color='k',fontsize=12)\n ax1.plot([R_max-15,R_max-5],[-R_max+5,-R_max+5],lw=4,color='k')\n # Remove axes ticks\n ax1.tick_params(axis='x',which='both',labelbottom=False)\n ax1.tick_params(axis='y',which='both',labelleft=False)\n\n # s = segs\n gs1.update(top=0.92,bottom=0.02,left=0.02,right=0.98)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'pretty/'): os.mkdir(p.d_plot + 'pretty/')\n plt.savefig('plots/pretty/mass_FUV_maps_%s%s.png' % (p.sim_name,p.sim_run),format='png',dpi=200)",
"def setup_figure(self):\n # How many data plots are we dealing with in each dimension?\n plots_x = self._dims[0] # Number of columns\n plots_y = self._dims[1] if len(self._dims) > 1 else 1 # Number of rows\n\n # Set up our base row count\n num_rows = plots_y + 1 # Add one more row for the update number\n height_ratios = [1] * plots_y + [0.25]\n num_cols = plots_x + 1 # Add one more column for the colorbar\n width_ratios = [1] * plots_x + [0.10]\n\n if self._is_multi:\n # If we have multiple resources, add another row for the resource legend\n num_rows += 1\n height_ratios.append(0.1)\n\n has_descr = True if len(self._env_str + self._event_str) > 0 else False\n if has_descr:\n # if we need to print some descriptive text, add another at the bottom\n # change this height ratio to make it larger\n num_rows += 1\n height_ratios.append(0.35)\n\n # Create our grid layout\n gs = mpl.gridspec.GridSpec(num_rows, num_cols,\n height_ratios=height_ratios,\n\n width_ratios=width_ratios)\n\n # Plot our category heatmaps\n ndx = 0 # Index into our experiment\n plots = [] # Plots from our experiment\n for col in range(plots_x):\n for row in range(plots_y):\n ax = plt.subplot(gs[row,col])\n base_cmap = self._cmap if not self._is_multi else ColorMaps.gray\n plot = plt.imshow(np.zeros(self._grid_shape), cmap=base_cmap,\n origin='upper', interpolation='nearest',\n vmin=self._vmin, vmax=self._vmax)\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n if self._is_left_edge(ndx):\n ax.set_ylabel(self._fact2label(ndx,1))\n if self._is_bottom_edge(ndx):\n ax.set_xlabel(self._fact2label(ndx,0))\n plots.append(plot)\n pa = []\n for pp in self._post_plot:\n pa.append(pp.blit_build(ax, ax_ndx=ndx))\n ndx = ndx+1\n\n # Plot the colorbar\n norm = mpl.colors.Normalize(self._vmin, self._vmax)\n cax = plt.subplot( gs[0:plots_y,-1] ) # Across data rows, last column\n if not self._is_multi:\n cbar = mpl.colorbar.ColorbarBase(cax, cmap=self._cmap, norm=norm, orientation='vertical')\n else:\n cbar = mpl.colorbar.ColorbarBase(cax, cmap=ColorMaps.gray, norm=norm, orientation='vertical')\n cbar.set_label('Abundance')\n\n # Plot the update\n ax = plt.subplot(gs[plots_y,0:plots_x]) # The row after the data plots, across all data plot columns\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n ax.set_ylim(0,1)\n ax.set_xlim(0,1)\n update = ax.text(0.5,0.25,'Update n/a', ha='center', va='bottom')\n\n # Plot the category legend if needed\n if self._is_multi:\n ax = plt.subplot(gs[plots_y+1,:-1]) # The row after the update axis, acros all data plot columns\n legend_handles = []\n for ndx,cat_name in enumerate(self._categories):\n legend_handles.append(mpl.patches.Patch(color=self._colors[ndx], label=cat_name))\n plt.legend(handles=legend_handles, loc='center', frameon=False, ncol=len(legend_handles))\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n\n # If we have an environment and event strings, plot them in the final row across all columns\n if has_descr:\n ax = plt.subplot(gs[-1,:])\n desc = self._env_str + '\\n\\n' + self._event_str + '\\n\\n' + f'World: {self._world_size[0]} x {self._world_size[1]}'\n env = ax.text(0.05, 1, desc, ha='left', va='top', fontsize=7)\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n\n # Title the figure\n plt.suptitle(self._title)\n\n # Store what we 
need to redraw each frame for blitting.\n # The values in this dictionary may be either a single element\n # or an iterable.\n self._to_draw = {'plots':plots, 'update':update, 'post_plot':pa}",
"def channel_maps_figure(source):\n # Load data cube\n data_filename = data_rebinned_filepaths[source]\n cube = SpectralCube.read(data_filename)\n # Switch units to km/s (optional)\n cube = cube.with_spectral_unit(u.km/u.s)\n # Get WCS object for 2D spatial image\n wcs_flat = cube[0, :, :].wcs\n # Calculate pixel scale for the spatial image\n pixel_scale = wcs_flat.proj_plane_pixel_scales()[0] # gives RA, Dec scales; pick one. They're almost certainly equal, so doesn't matter\n # Get velocity limits\n # For now, use all the channels in the saved FITS file; can limit them in the future if necessary\n v_lo, v_hi = cube.spectral_axis[0], cube.spectral_axis[-1]\n first_channel_idx = 0 # Using all channels\n last_channel_idx = cube.shape[0] - 1 # Using all channels\n print(\"First and last available channels \", v_lo, v_hi, \" at indices \", first_channel_idx, last_channel_idx)\n\n grid_shape = {\n 'ngc1977': (3, 4), # 12 channels\n 'rcw120': (4, 6), # 21 channels\n 'rcw49': (4, 8), # 31 channels\n }\n figsize = {\n # in inches; fine tune these to remove gaps between the channel maps\n 'ngc1977': (12, 8),\n 'rcw120': (16, 10),\n 'rcw49': (17, 11.5)\n }\n fig = plt.figure(figsize=figsize[source])\n # Matplotlib gridspec setup so that we can have a big colorbar on the side\n # mega_gridspec will contain all the channel maps and the Axes created within it serves as an anchor for the colorbar\n mega_gridspec = fig.add_gridspec(right=0.9, left=0.06, top=0.98, bottom=(0.08 if source=='rcw49' else 0.06))\n # Create a single Axes object from mega_gridspec; this will anchor the colorbar\n mega_axis = mega_gridspec.subplots()\n # Hide the bounding box for this large Axes object\n mega_axis.set_axis_off()\n # Create the channel map gridspec within the large gridspec\n gs = mega_gridspec[0,0].subgridspec(*grid_shape[source], hspace=0, wspace=0)\n # Memoize axes\n axes = {}\n def get_axis(index):\n # Index is 1D index of channel counting from first_channel_idx as 0.\n # In other words, index of the panel in the figure.\n # (if first_channel_idx == 0 then axis index == channel index)\n if index not in axes:\n axes[index] = fig.add_subplot(gs[np.unravel_index(index-first_channel_idx, grid_shape[source])], projection=wcs_flat)\n return axes[index]\n\n # Text defaults\n text_x = 0.05 if source=='rcw49' else 0.5\n text_y = 0.94\n # ha/va are horizontal and vertical alignment\n ha = 'left' if source=='rcw49' else 'center'\n # the color I use there is from Marc's collection of colorblind-friendly colors and works well against \"plasma\"\n default_text_kwargs = dict(fontsize=14, color='#ff7f00', ha=ha, va='center')\n tick_labelsize = 14\n tick_labelrotation = 50 if source=='rcw49' else 25\n tick_labelpad = 26 if source=='rcw49' else 13\n # Colors\n cmap = \"plasma\" # Image colormap\n beam_patch_ec = \"grey\" # edge color\n beam_patch_fc = \"white\" # face color\n # vlims for images (min and max for image colorscales in data units)\n vlims = {\n 'ngc1977': dict(vmin=0, vmax=40),\n 'rcw120': dict(vmin=0, vmax=25),\n 'rcw49': dict(vmin=0, vmax=17)\n }\n\n # Loop through channels and plot\n for channel_idx in range(first_channel_idx, last_channel_idx+1):\n velocity = cube.spectral_axis[channel_idx]\n channel_data = cube[channel_idx].to_value()\n\n print(first_channel_idx, channel_idx, last_channel_idx)\n ### print the [min, mean, median, max] for each panel so that we can find the best vlims (min, max) for all of them\n # print([f(channel_data) for f in (np.nanmin, np.nanmean, np.nanmedian, np.nanmax)])\n\n\n # Setup 
Axes\n ax = get_axis(channel_idx)\n # Remove x and y labels on individual panels (use the \"super\" titles)\n ax.set_xlabel(\" \")\n ax.set_ylabel(\" \")\n ss = ax.get_subplotspec()\n # Coordinate labels\n if ss.is_last_row() and ss.is_first_col():\n # Coordinates only on bottom left corner panel\n # Mess around with the rotation, position, and size of coordinate labels\n ax.coords[0].set_ticklabel(rotation=tick_labelrotation, rotation_mode='anchor', pad=tick_labelpad, fontsize=tick_labelsize, ha='right', va='top')\n ax.coords[1].set_ticklabel(fontsize=tick_labelsize)\n else:\n # If not the bottom left panel, no coordinates (panels have no space in between)\n # Hide coordinates\n ax.tick_params(axis='x', labelbottom=False)\n ax.tick_params(axis='y', labelleft=False)\n # Plot\n im = ax.imshow(channel_data, origin='lower', cmap=cmap, **vlims[source])\n # Label velocity on each panel\n ax.text(text_x, text_y, f\"{velocity.to_value():.0f} {velocity.unit.to_string('latex_inline')}\", transform=ax.transAxes, **default_text_kwargs)\n # Beam on every panel\n beam_patch = cube.beam.ellipse_to_plot(*(ax.transAxes + ax.transData.inverted()).transform([0.9, 0.1]), pixel_scale)\n beam_patch.set(alpha=0.9, facecolor=beam_patch_fc, edgecolor=beam_patch_ec)\n ax.add_artist(beam_patch)\n\n # Colorbar\n # Create a space to the right of the panels using the height/location of the mega_axis as an anchor\n cbar_ax = mega_axis.inset_axes([1.03, 0, 0.03, 1])\n cbar = fig.colorbar(im, cax=cbar_ax, label='T$_{\\\\rm MB}$ (K)')\n ticks = {\n # 'rcw120'\n }\n # cbar.set_ticks(ticks[source])\n # Titles\n fig.supxlabel(\"Right Ascension\")\n fig.supylabel(\"Declination\")\n\n dpi = 100\n dpi_stub = \"\" if dpi==100 else f\"_dpi{dpi}\"\n\n fig_save_name = f\"channel_maps_{source}{dpi_stub}.png\"\n fig.savefig(\n os.path.join(figure_save_path, fig_save_name),\n dpi=dpi\n )\n print(f\"Figure saved to {os.path.join(figure_save_path, fig_save_name)}\")",
"def animate(frames):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n for i in range(len(env_list)):\n ax.imshow(env_list[i],cmap='binary')\n plt.pause(0.05)",
"def visualizeW1(images, vis_patch_side, hid_patch_side, iter, file_name=\"trained_\"):\n\n figure, axes = matplotlib.pyplot.subplots(nrows=hid_patch_side, ncols=hid_patch_side)\n index = 0\n\n for axis in axes.flat:\n \"\"\" Add row of weights as an image to the plot \"\"\"\n\n image = axis.imshow(images[index, :].reshape(vis_patch_side, vis_patch_side),\n cmap=matplotlib.pyplot.cm.gray, interpolation='nearest')\n axis.set_frame_on(False)\n axis.set_axis_off()\n index += 1\n\n \"\"\" Show the obtained plot \"\"\"\n file=file_name+str(iter)+\".png\"\n matplotlib.pyplot.savefig(file)\n print(\"Written into \"+ file)\n matplotlib.pyplot.close()",
"def plot_1d_systems(model):\n\n # Construct grid to evaluate the Green's function\n X_G, Y_G = np.meshgrid(model.x_G, model.y_G)\n x_G_star = X_G.flatten()[:, None]\n y_G_star = Y_G.flatten()[:, None]\n\n # Create the figure\n n_plots = max(model.n_input + 1, model.n_output)\n scaling = n_plots / 2\n fig, ax = newfig(1.0 * scaling, 1.5)\n ax.axis('off')\n gs = gridspec.GridSpec(n_plots, n_plots)\n gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9,\n wspace=0.6 * scaling, hspace=0.6 * scaling)\n\n # Plot the Green's functions\n for i in range(model.n_output):\n for j in range(model.n_input):\n input_data = np.concatenate((x_G_star, y_G_star), 1)\n G_pred_identifier = model.sess.run(\n model.G_network[i][j].evaluate(input_data))\n G_pred = G_pred_identifier.reshape(X_G.shape)\n ax = plt.subplot(gs[i, j])\n h = ax.imshow(G_pred, interpolation='lanczos', cmap='jet',\n extent=[np.min(model.x_G), np.max(model.x_G), np.min(\n model.y_G), np.max(model.y_G)],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$', rotation=0, labelpad=12)\n ax.set_title('$G_{%d,%d}$' % (i + 1, j + 1), fontsize=10)\n\n # Plot the homogeneous solutions\n for i in range(model.n_output):\n N_pred = model.sess.run(model.idn_N_pred[i].evaluate(model.x))\n ax = plt.subplot(gs[i, model.n_input])\n ax.plot(model.x, model.U_hom[:, i], label='Exact')\n ax.plot(model.x, N_pred, dashes=[2, 2], label='Learned')\n divider = make_axes_locatable(ax)\n ymin = min([np.min(N_pred), np.min(model.U_hom[:, i])])\n ymax = max([np.max(N_pred), np.max(model.U_hom[:, i])])\n ax.set_xlim(np.min(model.x), np.max(model.x))\n ax.set_ylim(ymin, ymax)\n if ymax - ymin < 1e-2:\n ax.yaxis.set_major_formatter(MathTextSciFormatter(\"%1.1e\"))\n ax.set_xlabel('$x$')\n ax.set_title('Hom$_{%d}$' % (i + 1), fontsize=10)\n ax.legend()\n\n # Save the figure\n savefig(\"%s/%s_%s\" % (model.path_result, model.example_name,\n model.activation_name), crop=False)",
"def run_plots(self):\n # load the files\n self.pre_dark_file = os.path.join(self.input_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.input_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.input_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.input_dir, 'step_rate.fits')\n self.ramp_file = glob.glob(os.path.join(self.input_dir, '*.fits'))[0]\n\n # plots\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])",
"def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]",
"def grid_frame(self, steps, figure_size=(12, 12)):\r\n\r\n x = self.seed\r\n counts = []\r\n for n in np.arange(0, steps):\r\n x, stats = self.update_grid(x)\r\n counts.append(stats)\r\n\r\n counts = np.array(counts)\r\n\r\n fig, ax = plt.subplots(figsize=figure_size)\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n img = plt.imshow(x, interpolation='nearest', cmap=color_map)\r\n img.axes.grid(False)\r\n plt.title(self.title + ' | Step ' + str(steps))\r\n plt.show()\r\n\r\n return x, counts",
"def plot_maps(self, mode=0, target=1, gfilter=0):\r\n\r\n mpl.figure(1)\r\n\r\n mpl.imshow(self.avgimg, cmap=matplotlib.cm.gray, interpolation=None) # scipy.ndimage.gaussian_filter(ampmap, filter, order=0, mode='reflect'), cmap=matplotlib.cm.gray)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.title('Average image')\r\n\r\n print ('target, mode: ', target, mode)\r\n\r\n max1 = np.amax(self.amplitudeImage1)\r\n\r\n if target > 1:\r\n\r\n max1 = np.amax([max1, np.amax(self.amplitudeImage2)])\r\n\r\n max1 = 10.0*int(max1/10.0)\r\n\r\n mpl.figure(2)\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n ipy0, posl, coll = self.plot_averaged_amplitude()\r\n\r\n\r\n\r\n mpl.subplot(2,2,1)\r\n\r\n self.plot_amplitude_map(self.amplitudeImage1, max1, 'Amplitude Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,3)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n for i, px in enumerate(posl):\r\n\r\n mpl.plot(px, self.ipy+ipy0, 'o-', markersize=5.0, markerfacecolor = coll[i], markeredgecolor='w')\r\n\r\n if target > 1:\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,2)\r\n\r\n self.plot_fft()\r\n\r\n \r\n\r\n mpl.figure(3)\r\n\r\n mpl.title('Phase across center horizontally')\r\n\r\n # extract middle line\r\n\r\n sh = self.phaseImage1.shape\r\n\r\n iy0 = int(sh[1]/2)\r\n\r\n mpl.plot(self.phaseImage1[iy0, :], 'ko-')\r\n\r\n return\r\n\r\n \r\n\r\n if mode == 0:\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.n_times, D[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.DF[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')\r\n\r\n\r\n\r\n if mode == 1 and target > 1:\r\n\r\n \r\n\r\n mpl.subplot(2,3,2)\r\n\r\n mpl.title('Amplitude Map2')\r\n\r\n #scipy.ndimage.gaussian_filter(self.amplitudeImage2, 2, order=0, output=self.amplitudeImage2, mode='reflect')\r\n\r\n imga2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.amplitudeImage2, gfilter, order=0, mode='reflect'))\r\n\r\n imga2.set_clim = (0.0, max1)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.subplot(2,3,5)\r\n\r\n imgp2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect'), cmap=matplotlib.cm.hsv)\r\n\r\n mpl.colorbar()\r\n\r\n imgp2.set_clim=(-np.pi/2.0, np.pi/2.0)\r\n\r\n mpl.title('Phase Map2')\r\n\r\n # doubled phase map\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n #scipy.ndimage.gaussian_filter(self.phaseImage2, 2, order=0, output=self.phaseImage2, mode='reflect')\r\n\r\n np1 = scipy.ndimage.gaussian_filter(self.phaseImage1, gfilter, order=0, mode='reflect')\r\n\r\n np2 = scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect')\r\n\r\n dphase = np1 + np2\r\n\r\n #dphase = self.phaseImage1 - self.phaseImage2\r\n\r\n \r\n\r\n #scipy.ndimage.gaussian_filter(dphase, 2, order=0, output=dphase, mode='reflect')\r\n\r\n imgpdouble = mpl.imshow(dphase, cmap=matplotlib.cm.hsv)\r\n\r\n mpl.title('2x Phi map')\r\n\r\n mpl.colorbar()\r\n\r\n imgpdouble.set_clim=(-np.pi, np.pi)\r\n\r\n\r\n\r\n if mode == 2 or mode == 1:\r\n\r\n if self.phasex == []:\r\n\r\n self.phasex = np.random.randint(0, high=self.DF.shape[1], size=self.DF.shape[1])\r\n\r\n self.phasey = np.random.randint(0, high=self.DF.shape[2], size=self.DF.shape[2])\r\n\r\n\r\n\r\n 
mpl.subplot(2,3,3)\r\n\r\n sh = self.DF.shape\r\n\r\n spr = sh[2]/self.nPhases\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n Dm = self.avgimg[i*spr,i*spr] # diagonal run\r\n\r\n mpl.plot(self.n_times, 100.0*(self.DF[:,self.phasex[i], self.phasey[i]]/Dm))\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n if mode == 2:\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n sh = self.DF.shape\r\n\r\n x0 = int(sh[1]/2)\r\n\r\n y0 = int(sh[2]/2)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(self.DF[1:,x0,y0])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')",
"def render(self, mode = 'human'):\n if mode == 'human':\n\n if self.is_2d:\n fig = plt.figure()\n for index, value in np.ndenumerate(self.board):\n if value == 1:\n plt.scatter(*index, c='red', s=1000, alpha=0.2)\n elif value == -1:\n plt.scatter(*index, c='blue', s=1000, alpha=0.2)\n plt.xlim(-1, self.dim[0])\n plt.ylim(-1, self.dim[1])\n plt.xticks([])\n plt.yticks([])\n plt.grid(True)\n\n if self.is_3d:\n fig = plt.figure()\n ax = Axes3D(fig)\n for index, value in np.ndenumerate(self.board):\n if value == 1:\n ax.scatter(*index, c='red', s=1000, alpha=0.2)\n elif value == -1:\n ax.scatter(*index, c='blue', s=1000, alpha=0.2)\n ax.set_xlim(0, self.dim[0] - 1)\n ax.set_ylim(0, self.dim[1] - 1)\n ax.set_zlim(0, self.dim[2] - 1)\n\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n for index, value in np.ndenumerate(self.board):\n if value == 1:\n ax.scatter(*index, c='red', s=1000, alpha=0.2)\n elif value == -1:\n ax.scatter(*index, c='blue', s=1000, alpha=0.2)\n\n ax.set_xlim(0, self.dim[0] - 1)\n ax.set_ylim(0, self.dim[1] - 1)\n ax.set_zlim(0, self.dim[2] - 1)\n ax.set_title('Nr of steps: ' + str(self.steps))\n\n plt.show()\n return fig"
] | [
"0.64044726",
"0.63224024",
"0.6253856",
"0.61958474",
"0.6189722",
"0.60908437",
"0.60695",
"0.606345",
"0.6026868",
"0.6024125",
"0.5955344",
"0.5922822",
"0.5906817",
"0.58775115",
"0.58641946",
"0.58604985",
"0.5835713",
"0.5813257",
"0.5810878",
"0.580833",
"0.57819027",
"0.5766788",
"0.5760945",
"0.57604176",
"0.5756126",
"0.57543445",
"0.5740251",
"0.5739379",
"0.5737627",
"0.57358754"
] | 0.703744 | 0 |
Use gdal/osr to get latlon point location from georeferenced array indices | def ind2latlon(index, filePath):
# Load georeferencing
ds = gdal.Open(filePath)
proj = ds.GetProjection()
gt = ds.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(proj)
x0 = gt[0] #top left x coordinate (in the raster's projection)
y0 = gt[3] #top left y coordinate (in the raster's projection)
dx = gt[1] #pixel width
dy = gt[5] #pixel height
# Convert row,col of array to projected coords
row, col = index
x = x0 + (col * dx)
y = y0 + (row * dy)
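# (assumes a north-up raster: the rotation terms gt[2] and gt[4] are taken to be zero)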
# Convert projected coords to latlon
trs = osr.SpatialReference()
trs.ImportFromEPSG(4326)
ct = osr.CoordinateTransformation(srs, trs)
(lon, lat, height) = ct.TransformPoint(x, y) #note could add elevation
#gdal.DecToDMS(lat, 'Lat', 2)
return lon, lat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_coord_indices(lon_array, lat_array, lon_points, lat_points, proj_str):\n\n proj = Proj(proj_str)\n proj_lon, proj_lat = np.array(proj(lon_array, lat_array)) # transform to distances using specified projection\n lonlat = np.column_stack(\n (proj_lon.ravel(), proj_lat.ravel())) # Stack all coarse x, y distances for array shape (n, 2)\n ll = np.array(proj(lon_points, lat_points)).T # transform lists of fine grid x, y to match shape (n, 2)\n idx = cdist(lonlat, ll).argmin(0) # Calculate all distances and get index of minimum\n\n return np.column_stack((np.unravel_index(idx, lon_array.shape))).tolist()",
"def geo_idx(dd, dd_array):\r\n geo_idx = (np.abs(dd_array - np.float(dd))).argmin()\r\n return geo_idx",
"def ndarray_to_location(array: np.ndarray) -> carla.Location: # pylint: disable=no-member\n return carla.Location(*list(map(float, array))) # pylint: disable=no-member",
"def geo_idx(dd, dd_array):\n geo_idx = (np.abs(dd_array - dd)).argmin()\n return geo_idx",
"def find_lon_lat_of_indices(indices, dir, tile):\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n return (longitude[indices[1],indices[0]], latitude[indices[1],indices[0]])",
"def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index",
"def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index",
"def geo_idx(dd, dd_array):\r\n geo_idx = (np.abs(dd_array - dd)).argmin()\r\n return geo_idx",
"def _raster_index_to_coords(i, j, bounds = [[-100, -100], [100, 100]],\n dx = 1, dy = 1):\n x = (j+0.5)*dx + bounds[0][0]\n y = (i+0.5)*dy + bounds[0][1]\n return x, y",
"def get_coordinates_geo(self):\n if not self.rotated:\n lon_arr_geo = self.lon_arr\n lat_arr_geo = self.lat_arr\n else:\n lon_arr_geo, lat_arr_geo = self.transform().get_coordinates() \n\n return lon_arr_geo, lat_arr_geo",
"def find_stn_idx(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find the indices of the two closest grid points with distinct longitudes\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n Jlist = N.argsort(work)[:2]\n del work\n\n # find the indices of the two closest grid points with distinct latitudes\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n Ilist = N.argsort(work)[:2]\n del work\n\n return Ilist, Jlist",
"def point_coords(geom):\n # Return a tuple with the x/y point coordinate for a GeoDataFrame geometry\n return list(geom.coords)[0] # Just get first tuple in list, since it's a point",
"def get_idx(lons, lats, lon, lat):\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n return np.unravel_index(dist.argmin(), dist.shape)",
"def get_geo_coordinates(\n index_y: int,\n index_x: int,\n tile_coordinates: tuple,\n):\n # Predefining variables for calculation\n # Setting up r, radius of world with given pixel values.\n mollweide_center_width = 72000\n mollweide_center_height = 36000\n sqrt2 = math.sqrt(2)\n r = (__GLOBE_WIDTH / 2) / 2 / sqrt2\n\n # Get actual x, y coordinates using the indices\n y_2d = index_y + (4000 * tile_coordinates[1]) - mollweide_center_height\n x_2d = index_x + (4000 * tile_coordinates[0]) - mollweide_center_width\n\n # Start Mollweide projection calculations based on indices\n theta = math.asin(y_2d / r / sqrt2)\n latitude = math.asin((2 * theta + math.sin(2 * theta)) / math.pi)\n longitude = math.pi * x_2d / 2 / r / sqrt2 / math.cos(theta)\n return latitude, longitude",
"def _get_voronoi_centroid_array(lsm_lat_array, lsm_lon_array, extent):\n YMin = extent[2]\n YMax = extent[3]\n XMin = extent[0]\n XMax = extent[1]\n\n ptList = []\n if (lsm_lat_array.ndim == 2) and (lsm_lon_array.ndim == 2):\n # generate point list with 2D lat lon lists\n if extent:\n # exctract subset within extent\n lsm_dx = np.max(np.absolute(np.diff(lsm_lon_array)))\n lsm_dy = np.max(np.absolute(np.diff(lsm_lat_array, axis=0)))\n\n # remove values with NaN\n lsm_lat_array = np.ma.filled(lsm_lat_array, fill_value=-9999)\n lsm_lon_array = np.ma.filled(lsm_lon_array, fill_value=-9999)\n\n lsm_lat_indices_from_lat, lsm_lon_indices_from_lat = \\\n np.where((lsm_lat_array >= (YMin - 2*lsm_dy)) &\n (lsm_lat_array <= (YMax + 2*lsm_dy)))\n lsm_lat_indices_from_lon, lsm_lon_indices_from_lon = \\\n np.where((lsm_lon_array >= (XMin - 2*lsm_dx)) &\n (lsm_lon_array <= (XMax + 2*lsm_dx)))\n\n lsm_lat_indices = np.intersect1d(lsm_lat_indices_from_lat,\n lsm_lat_indices_from_lon)\n lsm_lon_indices = np.intersect1d(lsm_lon_indices_from_lat,\n lsm_lon_indices_from_lon)\n\n lsm_lat_list = \\\n lsm_lat_array[lsm_lat_indices, :][:, lsm_lon_indices]\n lsm_lon_list = \\\n lsm_lon_array[lsm_lat_indices, :][:, lsm_lon_indices]\n # Create a list of geographic coordinate pairs\n for i in range(len(lsm_lat_indices)):\n for j in range(len(lsm_lon_indices)):\n ptList.append([lsm_lon_list[i][j], lsm_lat_list[i][j]])\n\n elif lsm_lat_array.ndim == 1 and lsm_lon_array.ndim == 1:\n # generate point list with 1D lat lon lists\n if extent:\n Ybuffer = 2 * abs(lsm_lat_array[0]-lsm_lat_array[1])\n Xbuffer = 2 * abs(lsm_lon_array[0]-lsm_lon_array[1])\n # Extract the lat and lon within buffered extent\n # (buffer with 2* interval degree)\n lsm_lat_list = lsm_lat_array[(lsm_lat_array >= (YMin - Ybuffer)) &\n (lsm_lat_array <= (YMax + Ybuffer))]\n lsm_lon_list = lsm_lon_array[(lsm_lon_array >= (XMin - Xbuffer)) &\n (lsm_lon_array <= (XMax + Xbuffer))]\n\n # Create a list of geographic coordinate pairs\n for ptX in lsm_lon_list:\n for ptY in lsm_lat_list:\n ptList.append([ptX, ptY])\n else:\n raise IndexError(\"Lat/Lon lists have invalid dimensions. \"\n \"Only 1D or 2D arrays allowed ...\")\n\n if len(ptList) <= 0:\n raise IndexError(\"The watershed is outside of the bounds of the\"\n \" land surface model grid ...\")\n\n return np.array(ptList) # set-up for input to Delaunay",
"def read_gdal_coordinates(dataset, mode=\"center\"):\n coordinates_pixel = _pixel_coordinates(\n dataset.RasterXSize, dataset.RasterYSize, mode\n )\n\n geotransform = dataset.GetGeoTransform()\n coordinates = _pixel_to_map(coordinates_pixel, geotransform)\n\n return coordinates",
"def lat_lons(self):",
"def geo(self):\n return vec2geo_linear_signed(self)",
"def nearest_lat_lon_index(point, latitudes, longitudes):\n stacked = da.stack((latitudes.flatten(), longitudes.flatten())).transpose()\n closest = get_nearest(da.array(point).reshape(-1, 1).transpose(), stacked)\n idx = np.unravel_index(closest[0], latitudes.shape)\n return idx",
"def build_spatial_index(vector_path):\r\n vector = gdal.OpenEx(vector_path)\r\n layer = vector.GetLayer()\r\n geom_index = rtree.index.Index()\r\n geom_list = []\r\n for index in range(layer.GetFeatureCount()):\r\n feature = layer.GetFeature(index)\r\n geom = feature.GetGeometryRef()\r\n shapely_geom = shapely.wkb.loads(geom.ExportToWkb())\r\n shapely_prep_geom = shapely.prepared.prep(shapely_geom)\r\n geom_list.append(shapely_prep_geom)\r\n geom_index.insert(index, shapely_geom.bounds)\r\n\r\n return geom_index, geom_list",
"def location(self):\n return np.array((self.latitude, self.longitude))",
"def location(self):\n return np.array((self.latitude, self.longitude))",
"def spatial(self):",
"def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc",
"def geofind():\n return render_template('geo_find.html')",
"def Indexes(self, latitudes, longitudes):\n res = self._transform.TransformPoints(\n np.column_stack((longitudes, latitudes)))\n res = list(zip(*res))\n x, y = np.array(res[0]), np.array(res[1])\n idx_col = self._inv_txf[0] + self._inv_txf[1] * x + self._inv_txf[2] * y\n idx_row = self._inv_txf[3] + self._inv_txf[4] * x + self._inv_txf[5] * y\n return idx_row.astype(int), idx_col.astype(int)",
"def loc(self):\n return self._gev_bijector.loc",
"def obs_ijpos(gridfile,lons,lats,coor):\n\n gfh= netCDF4.Dataset(gridfile)\n cartesian=0\n if (coor=='r'):\n try:\n \n latr=gfh.variables['lat_rho'][:,:]\n lonr=gfh.variables['lon_rho'][:,:]\n except:\n latr=gfh.variables['latitude'][:,:]\n lonr=gfh.variables['longitude'][:,:]\n \n\n try:\n xr=gfh.variables['xi_rho'][:]\n yr=gfh.variables['eta_rho'][:]\n except:\n try:\n xr=gfh.variables['x_rho'][:]\n yr=gfh.variables['y_rho'][:]\n except:\n print('Neither xi_rho/eta_rho or x_rho/y_rho on file.')\n print('This might slow down the calculations')\n\n\n elif (coor=='u'):\n latr=gfh.variables['lat_u'][:,:]\n lonr=gfh.variables['lon_u'][:,:]\n try:\n xr=gfh.variables['xi_u'][:]\n yr=gfh.variables['eta_u'][:]\n except:\n xr=gfh.variables['x_u'][:]\n yr=gfh.variables['y_u'][:]\n elif (coor=='v'):\n latr=gfh.variables['lat_v'][:,:]\n lonr=gfh.variables['lon_v'][:,:]\n try:\n xr=gfh.variables['xi_v'][:]\n yr=gfh.variables['eta_v'][:]\n except:\n xr=gfh.variables['x_v'][:]\n yr=gfh.variables['y_v'][:]\n\n IN = point_in_polygon(lonr, latr, lons, lats)\n ind=np.where(IN)[0]\n \n if lats.size >1: \n lons=lons[ind]; lats=lats[ind]\n # If there's no lons, lats left at this stage, return oipos, ojpos with -999 everywhere\n if not len(lons):\n return np.ones_like(IN)*-999, np.ones_like(IN)*-999\n \n try:\n try:\n mapstr=str(gfh.variables['h'].getncattr('mapping'))\n except:\n try:\n mapstr=str(gfh.variables['h'].getncattr('grid_mapping'))\n except:\n pass\n try:\n projstring=(gfh.variables[mapstr]).getncattr('proj4')\n except:\n try:\n projstring=(gfh.variables[mapstr]).getncattr('proj4string')\n except:\n pass\n try:\n projstring=(gfh.variables['grid_mapping']).getncattr('proj4')\n except:\n try:\n projstring=(gfh.variables['grid_mapping']).getncattr('proj4string')\n except:\n pass\n\n gridproj=proj.Proj(str(projstring))\n hasproj=1\n except:\n hasproj=0\n\n # Check if lat, lon spacing is uniform\n dx1=np.abs(lonr[0,1]-lonr[0,0])\n dx2=np.abs(lonr[0,-1]-lonr[0,-2])\n n=int(np.round(lonr.shape[1]/2))\n dx3=np.abs(lonr[0,n]-lonr[0,n-1])\n\n dy1=np.abs(latr[1,0]-latr[0,0])\n dy2=np.abs(latr[-1,0]-latr[-2,0])\n n=int(np.round(latr.shape[0]/2))\n dy3=np.abs(latr[n,0]-latr[n-1,0])\n\n if ( (dx1 == dx2) & (dx1==dx3) & (dx2==dx3) & (dy1 == dy2) & (dy1==dy3) & (dy2==dy3) ):\n cartesian=1\n gridproj=proj.Proj(\"+proj=latlong +datum=WGS84\")\n \n\n \n if hasproj:\n dx=xr[1]-xr[0]\n dy=yr[1]-yr[0]\n [x,y]=gridproj(lons,lats)\n ipos=(x-xr[0])/dx\n jpos=(y-yr[0])/dy\n\n elif cartesian:\n [x1,y1]=gridproj(lonr[0,0],latr[0,0])\n [x2,y2]=gridproj(lonr[0,1],latr[0,1])\n dx=x2-x1\n [x2,y2]=gridproj(lonr[1,0],latr[1,0])\n dy=y2-y1\n [x,y]=gridproj(lons,lats)\n [x0,y0]=gridproj(lonr[0,0],latr[0,0])\n\n ipos=(x-x0)/dx\n jpos=(y-y0)/dy\n\n else:\n x=np.linspace(0,lonr.shape[1]-1,lonr.shape[1])\n y=np.linspace(0,lonr.shape[0]-1,lonr.shape[0])\n xi=np.zeros_like(lonr); yi=np.zeros([lonr.shape[1],lonr.shape[0]])\n xi[:,:]=x; yi[:,:]=y; yi=np.swapaxes(yi,1,0)\n zi=scipy.interpolate.griddata((lonr.flatten(),latr.flatten()),xi.flatten(),(lons,lats))\n ipos=zi\n zi=scipy.interpolate.griddata((lonr.flatten(),latr.flatten()),yi.flatten(),(lons,lats))\n jpos=zi\n \n if 'ind' in locals():\n oipos=np.ones(IN.shape)*-999.; ojpos=np.ones(IN.shape)*-999.\n oipos[ind]=ipos; ojpos[ind]=jpos\n else:\n oipos=ipos\n ojpos=jpos\n if not IN:\n oipos = np.array([-999.])\n ojpos = np.array([-999.])\n gfh.close()\n return oipos,ojpos",
"def feature_coords(features):\n coords_list = []\n for feature in features:\n coord_start = feature.location.nofuzzy_start\n coord_end = feature.location.nofuzzy_end\n coord_pair = (coord_start, coord_end)\n coords_list.append(coord_pair)\n ## consider adding some info to the log\n return coords_list",
"def get_signif_locs(da, lat, lon): \n y = lat[np.where(da.values > 0)[0]].values\n x = lon[np.where(da.values > 0)[1]].values\n return [(x[i], y[i]) for i in range(len(x))]"
] | [
"0.64533174",
"0.62730485",
"0.6266516",
"0.6246362",
"0.6216341",
"0.6202899",
"0.6187555",
"0.6148967",
"0.6142526",
"0.6036951",
"0.5968435",
"0.59425825",
"0.59397787",
"0.5923113",
"0.5836684",
"0.5829554",
"0.58170235",
"0.58012676",
"0.5789401",
"0.5754598",
"0.5750754",
"0.5750754",
"0.57456744",
"0.57349545",
"0.57268816",
"0.57253015",
"0.56754977",
"0.5673673",
"0.5631847",
"0.5627104"
] | 0.72259116 | 0 |
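The ind2latlon entry above maps array indices to geographic coordinates through the geotransform and an osr coordinate transformation. Below is a minimal, hedged usage sketch of the same pattern, not part of the dataset itself: the file path and pixel index are hypothetical, and the SetAxisMappingStrategy call is only relevant on GDAL 3+, where transforming to EPSG:4326 otherwise returns coordinates in latitude/longitude order.

from osgeo import gdal, osr

def pixel_to_lonlat(path, row, col):
    # Open the raster and read its geotransform (north-up assumed, so gt[2] == gt[4] == 0)
    ds = gdal.Open(path)
    gt = ds.GetGeoTransform()
    # Projected coordinates of the pixel's upper-left corner
    x = gt[0] + col * gt[1]
    y = gt[3] + row * gt[5]
    # Source SRS from the file, target SRS = WGS84 lat/lon
    src = osr.SpatialReference()
    src.ImportFromWkt(ds.GetProjection())
    dst = osr.SpatialReference()
    dst.ImportFromEPSG(4326)
    # GDAL 3+ only: keep x,y (lon,lat) ordering instead of the EPSG-mandated lat,lon
    dst.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    lon, lat, _ = osr.CoordinateTransformation(src, dst).TransformPoint(x, y)
    return lon, lat

# lon, lat = pixel_to_lonlat('dem.tif', row=120, col=340)  # hypothetical file and index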
Plot stress contours by extracting nearest stresses resolved on a point for 4 vertices per cell (tetrahedra) | def contour_stresses(matFile, infoFile, ax=0, esize=100):
# NOTE: some bug to work out here
vertices, cells, moduli, stress, strain = pu.load_h5_material(matFile, infoFile)
# NOTE: could get list of all elements that have a vertex on a particular surface
# or get list of all cells that have a centroid within a certain distance of the surface
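# Tetrahedron centroids: average of the four vertex coordinates of each cell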
centroids = 0.25 * (vertices[cells[:,0]] +
vertices[cells[:,1]] +
vertices[cells[:,2]] +
vertices[cells[:,3]] )
# get list of cell centroids that are within a certain distance to x-plane
ind = (np.abs(centroids[:,0]) <= esize) # X=0 plane
# yz location of centroids
a = centroids[ind,1] / 1e3 #y-axis points [km]
b = centroids[ind,2] / 1e3 #z-axis points
pointStresses = stress[ind] / 1e6 #report in MPa
sigma_mean = []
tau_max = []
for tensor in pointStresses:
#sm, tm = pu.stress_analysis(tensor)
sm, tm = pt.util.stress_analysis(tensor)
sigma_mean.append(sm)
tau_max.append(tm)
sigma_mean = np.array(sigma_mean)
tau_max = np.array(tau_max)
#z = sigma_mean
z = tau_max
# Figure after 7.7 in segall 2010
# NOTE: set axis is equal?
#f, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True) #not sure about this
plt.figure()
plt.gca().set_aspect('equal')
# contour
#A,B = np.meshgrid(a,b)
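# interpolate the scattered centroid values (a, b, z) onto a regular grid for pcolormesh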
ai = np.linspace(a.min(), a.max(), a.size)
bi = np.linspace(b.min(), b.max(), b.size)
zi = griddata(a,b,z,ai,bi)
plt.pcolormesh(ai,bi,zi)
plt.scatter(a,b,c=z) #show actual points, colored by the same field as the mesh
cb = plt.colorbar()
cb.set_label('MPa')
plt.xlabel('Y-axis')
plt.ylabel('Z-axis')
plt.title('Max Shear Stress Contours on X=0 plane') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def highlight_cells_on_tsne(tsne, cells, fig=None, ax=None):\n fig, ax = get_fig(fig=fig, ax=ax)\n ax.scatter(tsne['x'], tsne['y'], s=3, color='lightgrey')\n ax.scatter(tsne.loc[cells, 'x'], tsne.loc[cells, 'y'], s=3, color='b')\n ax.set_axis_off()\n return fig, ax",
"def _uniform_refine_tetrahedra(mesh):\n\n # get current cells and edges\n cells = mesh.cells\n edges = mesh.edges\n\n assert np.size(cells, axis=1) == 4\n\n # get number of current nodes\n n_nodes = np.size(mesh.nodes, axis=0)\n \n # first we create additional nodes as midpoints of the current edges\n midpoints = 0.5 * mesh.nodes.take(edges - 1, axis=0).sum(axis=1)\n\n # add them to the existing mesh nodes\n new_nodes = np.vstack([mesh.nodes, midpoints])\n \n # then we generate the indices of the newly created nodes\n #\n # their indices start at the current number of nodes (`n_nodes`) + 1\n # and end after additional `n_edges` nodes\n n_edges = np.size(edges, axis=0)\n new_node_indices = np.arange(n_nodes + 1, n_nodes + n_edges + 1, dtype=int)\n\n # refine elements\n #\n # for every element we need the indices of the edges at which the\n # corresponding new nodes are created\n indices_3_1 = mesh.topology.get_connectivity(3, 1, return_indices=True) - 1\n \n # next we augment the indices that define each element as if\n # they were defined by 10 nodes (including those of the edge midpoints)\n cells_ = np.hstack([cells, new_node_indices.take(indices_3_1, axis=0)])\n \n # now we can generate the eight new elements for each existing one\n new_cells_1 = np.vstack([cells_[:,0], cells_[:,4], cells_[:,5], cells_[:,6]]).T\n new_cells_2 = np.vstack([cells_[:,1], cells_[:,4], cells_[:,7], cells_[:,8]]).T\n new_cells_3 = np.vstack([cells_[:,2], cells_[:,5], cells_[:,7], cells_[:,9]]).T\n new_cells_4 = np.vstack([cells_[:,3], cells_[:,6], cells_[:,8], cells_[:,9]]).T\n new_cells_5 = np.vstack([cells_[:,4], cells_[:,5], cells_[:,6], cells_[:,9]]).T\n new_cells_6 = np.vstack([cells_[:,4], cells_[:,5], cells_[:,7], cells_[:,9]]).T\n new_cells_7 = np.vstack([cells_[:,4], cells_[:,6], cells_[:,8], cells_[:,9]]).T\n new_cells_8 = np.vstack([cells_[:,4], cells_[:,7], cells_[:,8], cells_[:,9]]).T\n \n new_cells = np.vstack([new_cells_1, new_cells_2, new_cells_3, new_cells_4,\n new_cells_5, new_cells_6, new_cells_7, new_cells_8])\n\n return new_nodes, new_cells",
"def visualize_routes(self):\n visualize_tsp.plotTSP([self.best_solution], self.coords)",
"def plotMesh(verts,tris):\n x = verts[:,0]\n y = verts[:,1]\n\n plt.figure()\n plt.gca().set_aspect('equal')\n plt.triplot(x, y, tris, 'k-')\n plt.title('Unstructured Mesh')\n plt.xlabel('distance (m)')\n plt.ylabel('distance (m)')",
"def watershed_segment(M,xM=None,yM=None):\n\n if xM != None and yM != None:\n sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening\n sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding\n sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion\n ma,mi =(44245.21*xM*yM),(316.037*xM*yM) \n else:\n selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])\n selD = np.where(selD!=0,selD,1)\n \n sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])\n sel2D = np.where(sel2D!=0,sel2D,1)\n\n sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])\n sel3D = np.where(sel3D!=0,sel3D,1)\n\n\n sel = np.ones(selD) # for opening\n sel2 = np.ones(sel2D) # for local thresholding\n sel3 = np.ones(sel3D) # for erosion\n ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)\n\n # get a few points in the center of each blob\n \n # threshold\n bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))\n #& (M>=stats.scoreatpercentile(M.flatten(),80)))\n\n # open and erode\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)\n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n #M2 = rescaled(M,0,65000).astype(np.uint16)\n #newlabels = ndi.watershed_ift(M2,labels)\n newlabels = labels\n \n # get rid of groups unless they have the right number of pixels\n\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts)) \n old2new[(counts < int(mi)) | (counts > int(ma))] = 0\n newlabels = old2new[newlabels]\n\n return newlabels",
"def plot_simplex(self, num_contours: int = 100, num_sub_div: int = 8,\n color_map: str = 'viridis', border: bool = True, ax: Axes = None) -> Axes:\n\n corners = array([[0, 0], [1, 0], [0.5, 0.75 ** 0.5]])\n triangle = Triangulation(corners[:, 0], corners[:, 1])\n mid_points = [\n (corners[(i + 1) % 3] + corners[(i + 2) % 3]) / 2\n for i in range(3)\n ]\n\n def to_barycentric(cartesian):\n \"\"\"\n Converts 2D Cartesian to barycentric coordinates.\n\n :param cartesian: A length-2 sequence containing the x and y value.\n \"\"\"\n s = [(corners[i] - mid_points[i]).dot(cartesian - mid_points[i]) / 0.75\n for i in range(3)]\n s_clipped = clip(a=s, a_min=0, a_max=1)\n return s_clipped / norm(s_clipped, ord=1)\n\n refiner = UniformTriRefiner(triangle)\n tri_mesh = refiner.refine_triangulation(subdiv=num_sub_div)\n f = [self._method(to_barycentric(xy))\n for xy in zip(tri_mesh.x, tri_mesh.y)]\n ax = ax or new_axes()\n ax.tricontourf(tri_mesh, f, num_contours, cmap=color_map)\n ax.set_aspect('equal')\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 0.75 ** 0.5)\n ax.set_axis_off()\n if border:\n ax.triplot(triangle, linewidth=1)\n\n return ax",
"def test_plot_cspad(geometry, fname_data, amp_range=(0,0.5)):\n #rad1 = 93\n #rad2 = 146\n rad1 = 655\n rad2 = 670\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 500, 500# None\n\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=None)\n rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n\n ixo, iyo = geometry.point_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n logger.info('Detector origin indexes ixo:%d iyo:%d' % (ixo, iyo))\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n arr.shape= (4,8,185,388)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n\n arr.shape = rows.shape\n img = img_from_pixel_arrays(rows, cols, W=arr)\n\n rcc_ring = (iyo, ixo)\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.drawCircle(axim, rcc_ring, rad1, linewidth=1, color='w', fill=False)\n gg.drawCircle(axim, rcc_ring, rad2, linewidth=1, color='w', fill=False)\n gg.drawCenter(axim, rcc_ring, rad1, linewidth=1, color='w')\n gg.move(500,10)\n gg.show()",
"def hexapodZernikeLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n\n M22realTrefoil2 = b[:,37] # for x decenter\n M22imagTrefoil1 = b[:,54] \n M22TrefoilXshift = 0.5*(M22realTrefoil2+M22imagTrefoil1)\n\n M22realTrefoil1 = b[:,34] # for y decenter\n M22imagTrefoil2 = b[:,57] \n M22TrefoilYshift = 0.5*(M22realTrefoil1 - M22imagTrefoil2)\n\n M20defocus = b[:,12] # for defocus\n\n M22realComa2 = b[:,36] # for x-tilt\n M22imagComa1 = b[:,55]\n M22ComaXtilt = 0.5*(M22realComa2+M22imagComa1)\n\n M22realComa1 = b[:,35] # for y-tilt\n M22imagComa2 = b[:,56]\n M22ComaYtilt = 0.5*(M22realComa1 - M22imagComa2)\n \n pl.figure(figsize=(21,12))\n pl.subplot(2,3,1)\n t=bp.bin_scatter(M22TrefoilXshift,xh,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilXshift,xh)\n pl.plot(M22TrefoilXshift,M22TrefoilXshift*res[1]+res[0],'r,')\n pl.ylabel('x-decenter [micron]')\n pl.xlabel('(M22realTrefoil2+M22imagTrefoil1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,2)\n t=bp.bin_scatter(M22TrefoilYshift,yh,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilYshift,yh)\n pl.plot(M22TrefoilYshift,M22TrefoilYshift*res[1]+res[0],'r,')\n pl.ylabel('y-decenter [micron]')\n pl.xlabel('(M22realTrefoil1 - M22imagTrefoil2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,3)\n t=bp.bin_scatter(M20defocus,zh,nbins=20,fmt='bo',scatter=True)\n res = linefit(M20defocus,zh)\n pl.plot(M20defocus,M20defocus*res[1]+res[0],'r,')\n pl.ylabel('z-defocus [micron]')\n pl.xlabel('M20defocus')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,4)\n t=bp.bin_scatter(M22ComaXtilt,ytilth,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaXtilt,ytilth)\n pl.plot(M22ComaXtilt,M22ComaXtilt*res[1]+res[0],'r,')\n pl.ylabel('y-tilt [arcsec]') # in hexapod coordiate, xtilt and y tilt is switched from the CRAY coordiante\n pl.xlabel('(M22realComa2+M22imagComa1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.subplot(2,3,5)\n t=bp.bin_scatter(M22ComaYtilt,xtilth,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaYtilt,xtilth)\n pl.plot(M22ComaYtilt,M22ComaYtilt*res[1]+res[0],'r,')\n pl.ylabel('x-tilt [arcsec]')\n pl.xlabel('(M22realComa1 - M22imagComa2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.grid()\n pl.savefig('linearModel_hexapod_coordinate.png')\n pl.close()",
"def quadrants (ax, T, neq, color='k', zorder=1):\n \n da = np.pi/180. # \n R = 6370. # Mean Earth Radius in km\n\n for n in range(len(T['lon'])):\n x = T['lon'][n]\n y = T['lat'][n]\n\n if T[neq][n] is not None:\n\n #Convert Quadrant values to km\n ne = T[neq][n][0]*1.852\n se = T[neq][n][1]*1.852\n sw = T[neq][n][2]*1.852\n nw = T[neq][n][3]*1.852\n\n label = neq\n if neq == 'rmax':\n label = str (int(T['vmax'][n])) \n \n xiso = []\n yiso = []\n for a in np.arange(0., 0.5*np.pi, da):\n \n dx = 180./(np.pi*R)*ne/np.cos( np.radians(y))\n dy = 180./(np.pi*R)*ne\n xiso.append( x + dx*np.cos(a) )\n yiso.append( y + dy*np.sin(a) )\n \n ax.text(x + dx + 0.05*dx, y, label, color=color, fontsize=7)\n \n for a in np.arange(0.5*np.pi, np.pi, da):\n dx = 180./(np.pi*R)*nw/np.cos( np.radians(y))\n dy = 180./(np.pi*R)*nw\n xiso.append( x + dx*np.cos(a) )\n yiso.append( y + dy*np.sin(a) ) \n\n for a in np.arange(np.pi, 1.5*np.pi, da):\n dx = 180./(np.pi*R)*sw/np.cos( np.radians(y))\n dy = 180./(np.pi*R)*sw\n xiso.append( x + dx*np.cos(a) )\n yiso.append( y + dy*np.sin(a) ) \n\n for a in np.arange(1.5*np.pi, 2.*np.pi, da):\n dx = 180./(np.pi*R)*se/np.cos( np.radians(y))\n dy = 180./(np.pi*R)*se\n xiso.append( x + dx*np.cos(a) )\n yiso.append( y + dy*np.sin(a) ) \n \n ax.plot(xiso, yiso, color=color, zorder=zorder)\n ax.plot((xiso[0],xiso[-1]), (yiso[0],yiso[-1]), color=color, zorder=zorder)\n\n return ax",
"def plot_neighbors(data, indices, ax):\n\n indices = [item.tolist() for item in indices]\n counter_removed = 0\n\n for j in range(data.shape[0]):\n def remove_repetitions(indices, j, counter_removed):\n for item in indices[j]:\n try:\n indices[item].remove(j)\n counter_removed = counter_removed + 1\n except ValueError:\n pass\n # print(\"removed\", counter_removed)\n return counter_removed, indices\n\n counter_removed, indices = remove_repetitions(indices, j, counter_removed)\n origin = data[j, :]\n targets = [data[i, :] for i in indices[j]]\n\n for target in targets:\n x = [origin[0], target[0]]\n y = [origin[1], target[1]]\n z = [origin[2], target[2]]\n ax.plot(x, y, z, 'ro-', linewidth='1', markersize=1)",
"def plot_contour(xynodes, stress, mytitle='contour plot'):\n\n stress = np.array(stress)\n fig2 = plt.figure()\n\n fig2 = plt.figure()\n sigma1 = stress[:, 0]\n sigma2 = stress[:, 1]\n sigma12 = stress[:, 2]\n vonmises = np.sqrt(sigma1 ** 2 - sigma1 * sigma2 + sigma2 ** 2 + 3 * sigma12 ** 2)\n prins1 = (sigma1 + sigma2) / 2 + np.sqrt(((sigma1 - sigma2) / 2) ** 2 + sigma12 ** 2)\n prins2 = (sigma1 + sigma2) / 2 - np.sqrt(((sigma1 - sigma2) / 2) ** 2 + sigma12 ** 2)\n\n sigma1norm = sigma1 / np.max(np.abs(sigma1))\n sigma2norm = sigma2 / np.max(np.abs(sigma2))\n sigma12norm = sigma12 / np.max(np.abs(sigma12))\n vonmisesnorm = vonmises / np.max(np.abs(vonmises))\n prins1norm = prins1 / np.max(np.abs(prins1))\n prins2norm = prins2 / np.max(np.abs(prins2))\n\n pltsigma = sigma1norm\n for i, xy in enumerate(xynodes):\n x = [k[0] for k in xy]\n y = [k[1] for k in xy]\n # red for tension, blue for compression\n pltcolor = 'r' if pltsigma[i] >= 0 else 'b'\n sigmavalnorm = pltsigma[i] if pltsigma[i] > 0 else pltsigma[i] * -1\n plt.fill(x, y, pltcolor, alpha=sigmavalnorm)\n plt.plot(x, y, 'k')\n\n title(mytitle)\n tmpx = [x[0] for k in xynodes for x in k]\n tmpy = [x[1] for k in xynodes for x in k]\n plt.xlim([np.min(tmpx) - 3, np.max(tmpx) + 3])\n plt.ylim([np.min(tmpy) - 3, np.max(tmpy) + 3])\n plt.show()",
"def voronoi_sub_mask_1d_index_to_pixeliztion_1d_index_from_grids_and_geometry(\n grid,\n mask_1d_index_to_nearest_pixelization_1d_index,\n sub_mask_1d_index_to_mask_1d_index,\n pixel_centres,\n pixel_neighbors,\n pixel_neighbors_size,\n):\n\n sub_mask_1d_index_to_pixeliztion_1d_index = np.zeros((grid.shape[0]))\n\n for sub_mask_1d_index in range(grid.shape[0]):\n\n nearest_pixelization_1d_index = mask_1d_index_to_nearest_pixelization_1d_index[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index]\n ]\n\n while True:\n\n nearest_pixelization_pixel_center = pixel_centres[\n nearest_pixelization_1d_index\n ]\n\n sub_pixel_to_nearest_pixelization_distance = (\n (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2\n + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])\n ** 2\n )\n\n closest_separation_from_pixelization_to_neighbor = 1.0e8\n\n for neighbor_pixelization_1d_index in range(\n pixel_neighbors_size[nearest_pixelization_1d_index]\n ):\n\n neighbor = pixel_neighbors[\n nearest_pixelization_1d_index, neighbor_pixelization_1d_index\n ]\n\n separation_from_neighbor = (\n grid[sub_mask_1d_index, 0] - pixel_centres[neighbor, 0]\n ) ** 2 + (grid[sub_mask_1d_index, 1] - pixel_centres[neighbor, 1]) ** 2\n\n if (\n separation_from_neighbor\n < closest_separation_from_pixelization_to_neighbor\n ):\n closest_separation_from_pixelization_to_neighbor = (\n separation_from_neighbor\n )\n closest_neighbor_pixelization_1d_index = (\n neighbor_pixelization_1d_index\n )\n\n neighboring_pixelization_1d_index = pixel_neighbors[\n nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index\n ]\n sub_pixel_to_neighboring_pixelization_distance = (\n closest_separation_from_pixelization_to_neighbor\n )\n\n if (\n sub_pixel_to_nearest_pixelization_distance\n <= sub_pixel_to_neighboring_pixelization_distance\n ):\n sub_mask_1d_index_to_pixeliztion_1d_index[\n sub_mask_1d_index\n ] = nearest_pixelization_1d_index\n break\n else:\n nearest_pixelization_1d_index = neighboring_pixelization_1d_index\n\n return sub_mask_1d_index_to_pixeliztion_1d_index",
"def trapezoid_decomposition_pl(polygons, bounds):\n polygons = Polygons(polygons)\n # print(bounds)\n point_locator = PointLocator(bounds)\n for edge in polygons.random_edge_sampler():\n point_locator.add_line(edge)\n return point_locator",
"def touching_hexagons(point: Point):\n # get all 6 touching points\n if point.y % 2 == 0:\n return [Point(point.x + 1, point.y), Point(point.x, point.y - 1), Point(point.x - 1, point.y - 1),\n Point(point.x - 1, point.y), Point(point.x - 1, point.y + 1), Point(point.x, point.y + 1)]\n else:\n return [Point(point.x + 1, point.y), Point(point.x + 1, point.y - 1), Point(point.x, point.y - 1),\n Point(point.x - 1, point.y), Point(point.x, point.y + 1), Point(point.x + 1, point.y + 1)]",
"def plotpy (tvec=tvec, ind=ind, synclines=True):\n plt.scatter(tvec,ind) \n if synclines:\n for spkt in np.array(tvec): plt.plot((spkt, spkt), (0, ncells), 'r-', linewidth=0.1)",
"def hexapodZernikeTrend(mnts='M20'):\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n if mnts == 'M20':\n idxBase = 9\n if mnts == 'M22real':\n idxBase = 29\n if mnts == 'M22imag':\n idxBase = 49\n idx = np.arange(14)\n zernikeName=('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20')\n for i in range(14):\n pl.figure(figsize=(21,10))\n pl.subplot(2,3,1)\n bp.bin_scatter(b[:,idxBase+idx[i]],x,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('x-decenter')\n pl.xlabel(zernikeName[i+1])\n pl.ylim(-0.1,0.1)\n pl.title(mnts)\n pl.subplot(2,3,2)\n bp.bin_scatter(b[:,idxBase+idx[i]],y,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('y-decenter')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-0.1,0.1)\n pl.subplot(2,3,3)\n bp.bin_scatter(b[:,idxBase+idx[i]],z,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('z-defocus')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-0.1,0.1)\n pl.subplot(2,3,4)\n bp.bin_scatter(b[:,idxBase+idx[i]],thetax,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('x-tilt')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-40,40)\n pl.subplot(2,3,5)\n bp.bin_scatter(b[:,idxBase+idx[i]],thetay,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('y-tilt')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-40,40)\n pl.savefig(zernikeName[i+1]+mnts+'_'+str(i+1)+'.png')\n pl.close()",
"def zernikeHexapodTrend(mnts='M20'):\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n if mnts == 'M20':\n idxBase = 9\n if mnts == 'M22real':\n idxBase = 29\n if mnts == 'M22imag':\n idxBase = 49\n idx = np.arange(14)\n zernikeName=('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20')\n for i in range(14):\n pl.figure(figsize=(21,10))\n pl.subplot(2,3,1)\n bp.bin_scatter(x,b[:,idxBase+idx[i]],binsize=0.01,fmt='bo',scatter=True)\n pl.xlabel('x decenter')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,2)\n bp.bin_scatter(y,b[:,idxBase+idx[i]],binsize=0.01,fmt='bo',scatter=True)\n pl.xlabel('y decenter')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,3)\n bp.bin_scatter(z,b[:,idxBase+idx[i]],binsize=0.01,fmt='bo',scatter=True)\n pl.xlabel('z-defocus')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,4)\n bp.bin_scatter(thetax,b[:,idxBase+idx[i]],binsize=5,fmt='bo',scatter=True)\n pl.xlabel('x-tilt')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,5)\n bp.bin_scatter(thetay,b[:,idxBase+idx[i]],binsize=5,fmt='bo',scatter=True)\n pl.xlabel('y-tilt')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.savefig(mnts+'_'+str(i+1)+'_'+zernikeName[i+1]+'.png')\n pl.close()",
"def integrate(coords,data,fault_pts,dshape_hex8,gll_weights,elmt):\n norm=0.0\n normx=0.0\n normy=0.0\n normz=0.0\n div=0.0 #normalizing factor to divide by\n divx=0.\n divy=0.\n divz=0.\n\n eps=1.0*g.mesh_spacing/(g.ngllx-1.)\n print 'eps=', eps\n f=open('eliminated_coords.vtk','w')\n\n #create integer versions of arrays to use in pulling out gll pts for each element\n data_round=np.rint(data)\n dati=data_round.astype(int)\n coord_round=np.rint(coords)\n coordi=coord_round.astype(int)\n\n #remove duplicates from data array\n dat_struc=np.ascontiguousarray(dati).view(np.dtype((np.void,dati.dtype.itemsize *dati.shape[1])))\n _,idx=np.unique(dat_struc,return_index=True)\n datu=dati[idx]\n data_unique=data[idx]\n\n for i_elmt in range(g.nelmt):\n #pull out geometric coordinates for this element\n elmt_coord_id=[j-1 for j in elmt[i_elmt]]\n elmt_coord=coordi[elmt_coord_id]\n\n #find corresponding gll pts for this element\n xmin=min(elmt_coord[:,0]);xmax=max(elmt_coord[:,0])\n ymin=min(elmt_coord[:,1]);ymax=max(elmt_coord[:,1])\n zmin=min(elmt_coord[:,2]);zmax=max(elmt_coord[:,2])\n gll_coord_id=np.nonzero((datu[:,0]>=xmin) & (datu[:,0]<=xmax) & (datu[:,1]>=ymin) & (datu[:,1]<=ymax) & (datu[:,2]>=zmin) & (datu[:,2]<=zmax))\n elmt_data=data_unique[gll_coord_id]\n if len(gll_coord_id[0]) != g.ngll:\n print \"elmt=\", elmt_coord_id\n print xmin,xmax,ymin,ymax,zmin,zmax\n print 'elmt_data=', elmt_data\n print \"gll pts found=\", len(gll_coord_id[0])\n raise ValueError(\"incorrect number of gll points found in element!\")\n exit\n\n #sort the gll coords so they correspond the order of the arrays giving the weights and shape function\n dat_sorted=elmt_data[npi.argsort((elmt_data[:,0], elmt_data[:,1],elmt_data[:,2]))]\n func=dat_sorted[:,3:]\n\n #if any gll pt is too close to fault, remove the element from the integration\n dist=distance.cdist(fault_pts,dat_sorted[:,0:3],'euclidean')\n if (dist<eps).any():\n print \"eliminated element #\", i_elmt\n np.savetxt(f,dat_sorted[:,0:3],fmt='%3.3f')\n continue\n\n for i_gll in range(g.ngll):\n\n #compute jacobian, its derivative and inverse\n jac=np.matmul(dshape_hex8[:,:,i_gll],elmt_coord)\n det_jac=np.linalg.det(jac)\n\n #perform the integration\n norm=norm+det_jac*gll_weights[i_gll]*np.dot((func[i_gll,3:6]-func[i_gll,0:3]),(func[i_gll,3:6]-func[i_gll,0:3]))\n div=div+det_jac*gll_weights[i_gll]*np.dot(func[i_gll,3:6],func[i_gll,3:6])\n normx=normx+det_jac*gll_weights[i_gll]*(func[i_gll,3]-func[i_gll,0])**2\n divx=divx+det_jac*gll_weights[i_gll]*(func[i_gll,3])**2\n normy=normy+det_jac*gll_weights[i_gll]*(func[i_gll,4]-func[i_gll,1])**2\n divy=divy+det_jac*gll_weights[i_gll]*(func[i_gll,4])**2\n normz=normz+det_jac*gll_weights[i_gll]*(func[i_gll,5]-func[i_gll,2])**2\n divz=divz+det_jac*gll_weights[i_gll]*(func[i_gll,5])**2\n\n norm_finalx=sqrt(normx/divx)\n norm_finaly=sqrt(normy/divy)\n norm_finalz=sqrt(normz/divz)\n norm_final=sqrt(norm/div)\n\n f.close()\n\n return norm_finalx, norm_finaly, norm_finalz,norm_final",
"def test_nearest_neighbour_regular_1d():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20:22, 10:12] = 7\n\n # the four nearest values for the second point\n data[17:19, 13:15] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lat, lon)\n data2 = np.zeros((10, 50, 100))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with only one neighbour or only one target point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data)\n np.testing.assert_array_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 13.2, 17.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(8, 18, 1).reshape(10, 1))",
"def plotSolitonIntersection():\n \n cells = np.loadtxt('cells.txt') #file with several cell [x0 y0 x1 y1 x2 y2 x3 y3], each cell in seperate row (at least 2 needed)\n cells = np.hstack([cells,cells[:,:2]])\n for cell in cells:\n plt.plot(cell[::2],cell[1::2])\n\n np.savetxt('X.txt',sorted(cells.flatten()[::2]))\n subprocess.call('solitonEta 0.3 15 0 X.txt',shell=True)\n soliton = np.loadtxt('eta.txt')\n plt.plot(soliton[:,0],soliton[:,1])\n\n #ipdb.set_trace()\n plt.savefig('boxes.png')\n plt.show()",
"def check_interp(self):\n\n points = np.loadtxt(\"skeleton_temp/\" + cell + \"_points.txt\", delimiter=',')\n\n self.initial_scatter = ax.scatter(points[:, 0],\n points[:, 1],\n points[:, 2], s=5, c='r')\n self.cell_points = self.get_cell_xyz()\n ax.scatter(self.cell_points[::5, 0],\n self.cell_points[::5, 1],\n self.cell_points[::5, 2], s=3, c='b', alpha=.03)\n ax.set_xlabel('X (um)')\n ax.set_ylabel('Y (um)')\n ax.set_zlabel('Z (um)')",
"def print_components_sizes(distance, points):\n SortedX = sorted([point for point in points], key = abscisse)\n\n result = prochesX(SortedX, distance)\n dernier_pointX_1 = result[len(result)-1]\n dernier_indice = SortedX.index(dernier_pointX_1)\n\n origine = Point([0.0, 0.0])\n segment_1 = Segment([Point([dernier_pointX_1.x, 0]), Point([dernier_pointX_1.x, 1])])\n\n SortedY = sorted([point for point in result], key = ordonnee)\n result_bis = prochesY(SortedY, distance)\n dernier_pointXbis_1 = result_bis[len(result_bis)-1]\n dernier_indice_bis = SortedX.index(dernier_pointXbis_1)\n\n segment_2 = Segment([Point([0, dernier_pointXbis_1.y]), Point([1, dernier_pointXbis_1.y])])\n tycat(origine, points, (segment_1, segment_2))\n \"\"\"\n affichage des tailles triees de chaque composante\n \"\"\"\n segments = []\n research_base = [point for point in points]\n origine = Point([0.0, 0.0])\n total = research_base.copy()\n s = 0\n enveloppe = []\n while len(research_base) > 0:\n current = research_base[0]\n research_base.pop(0)\n for point in research_base:\n if current.distance_to(point) < distance:\n s += 1\n segments.append(Segment([current, point]))\n enveloppe.append(s)\n tycat(origine, total, segments)",
"def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()",
"def plot_timeres(timeres):\n tint = 24 / timeres\n step = 255 / (tint + 1)\n\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n mxcont = np.max(cont)\n\n if distrib:\n cont = cont / mxcont\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=cont[i, j] * (circlesize / scale),\n line_color='#000000',\n fill_color='#110000', fill_opacity=0.3)\n # mymap.addradpoint(minLat+(((scale - i)-0.5)/normLat), minLon+((j+1.5)/normLon),\n # cont[i,j]*(circlesize/scale), \"#FF0000\")\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=30,\n line_color='#000000',\n fill_color='#110000', fill_opacity=0.3)\n # mymap.addradpoint(minLat+(((scale - i )-0.5)/normLat),\n # minLon+((j+1.5)/normLon), 30, \"#FF0000\")\n for t in range(tint):\n color = '#' + (str(hex((t + 1) * step))[2:]) + (\n str(hex((t + 1) * step))[2:]) + 'FF' # (str(hex((t+1)*step))[2:])\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n if (evtime / timeres) == t:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n if distrib:\n cont = cont / mxcont\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=cont[i, j] * (circlesize / scale),\n line_color=color,\n fill_color='#110000', fill_opacity=0.2)\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=30,\n line_color=color,\n fill_color='#110000', fill_opacity=0.2)",
"def svm_add_2d_hyperplane(model, ax, plotted_points):\n X_MIN = np.min(plotted_points[:, 0])\n X_MAX = np.max(plotted_points[:, 0])\n Y_MIN = np.min(plotted_points[:, 1])\n Y_MAX = np.max(plotted_points[:, 1])\n # plot the line, the points, and the nearest vectors to the plane\n xx, yy = np.mgrid[X_MIN:X_MAX:200j, Y_MIN:Y_MAX:200j]\n Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plot.contourf(xx, yy, Z, levels=np.linspace(\n Z.min(), 0, 7), cmap=plot.cm.PuBu)\n a = plot.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')\n plot.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')\n return a.collections[0]",
"def plotSurface(surfaceFile, comp=2, points=False, tris=False,\n profile=False, ax=None, annotate=True, norm=None,xscale=1, yscale=1):\n verts,data,tris = load_h5(surfaceFile)\n\n if comp==3: #radial displacements\n z = np.hypot(data[:,:,0], data[:,:,1]).flatten()\n else:\n z = data[:,:,comp].flatten()\n #z = data[:,:,comp].flatten()\n x = verts[:,0] / xscale\n y = verts[:,1] / yscale\n\n #NOTE: need to change grid for linear spacing to work properly\n xi = np.linspace(x.min(), x.max(), x.size)\n yi = np.linspace(y.min(), y.max(), y.size)\n zi = griddata(x,y,z, xi,yi, interp='nn') #'nn'\n\n #NOTE: getting error message here...\n # linear interpolation requires exactly the same limits\n #xi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #yi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #zi = griddata(x,y,z, xi,yi, interp='linear') #'nn'\n #ValueError: output grid must have constant spacing when using interp='linear'\n\n if ax==None:\n plt.figure()\n else:\n ax = plt.axes(ax)\n\n #plt.pcolor(xi, yi, zi, cmap=plt.cm.jet) #Very slow...\n x1, x2, y1, y2 = [x.min(), x.max(), y.min(), y.max()]\n im = plt.imshow(zi, cmap=plt.cm.jet, norm=norm, extent=[x1, x2, y1, y2])\n\n if annotate:\n compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}\n plt.title('{} Displacement'.format(compdict[comp]))\n plt.xlabel('Distance [m]')\n plt.ylabel('Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')\n\n if points:\n plt.plot(x,y,'k.')\n\n if type(tris) is np.ndarray:\n plt.triplot(x, y, tris, 'k-')\n\n # EW profile line through the x-axis\n if profile:\n plt.axhline(linewidth=2, color='r')\n Zi = zi[x.size/2,:]\n plt.figure()\n plt.plot(xi, Zi, 'b.-')\n plt.title('Profile')\n plt.xlabel('Distance [m]')\n plt.ylabel('{} Displacement [m]'.format(compdict[comp]))\n\n return im",
"def test_nearest_neighbour_unstructured():\n # create coordinates\n grid_lon = np.arange(100)\n grid_lat = np.ones(100)\n data = np.zeros(100)\n\n # the nearest 3 points\n data[10:13] = 7\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (11.2, 2.2), (11.2, 13.2), npoints=3, src_grid=\"unstructured\")(data)\n np.testing.assert_array_almost_equal(res, [7, 0])\n\n # same test, but with 2d-data (e.g., level, ncell)\n data2 = np.zeros((10, 100))\n for i in range(10):\n data2[i, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (11.2, 2.2), (11.2, 13.2), npoints=3, src_grid=\"unstructured\")(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(0, 10, 1)]).transpose())\n\n # only one point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=3, src_grid=\"unstructured\")(data)\n np.testing.assert_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=3, src_grid=\"unstructured\")(data2)\n np.testing.assert_array_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))\n\n # same test with one one neighbour point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=1, src_grid=\"unstructured\")(data)\n np.testing.assert_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=1, src_grid=\"unstructured\")(data2)\n np.testing.assert_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))",
"def getEps(data):\r\n X = pdist(data)\r\n\r\n Sq = squareform(X)\r\n\r\n FourthDist = []\r\n firstDist = []\r\n k = 10\r\n kNeighbors = []\r\n for idx in range(len(Sq)):\r\n Sq[idx] = np.sort(Sq[idx])\r\n\r\n for i in range(k):\r\n kNeighbors.append(Sq[:, i + 1])\r\n for i in range(k):\r\n kNeighbors[i] = np.sort(kNeighbors[i])\r\n\r\n for i in range(k):\r\n plt.plot(kNeighbors[i])\r\n\r\n plt.title('10 Nearest Point')\r\n plt.show()",
"def problem5(self, s):\n points = 0\n\n points = self.neighbor( 10, 10, s.nearest_neighbor)*3\n points += self.neighbor(100, 10, s.nearest_neighbor)*3\n points += self.neighbor( 10, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n\n _testDriver.get_code(s.nearest_neighbor)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n \n return points",
"def test_nearest_neighbour_regular_2d():\n # test with regular grid and 2d coords\n grid_lon, grid_lat = np.meshgrid(np.arange(100), np.arange(50), indexing=\"ij\")\n data = np.zeros((100, 50))\n\n # the four nearest values for the first point\n data[10:12, 20:22] = 7\n\n # the four nearest values for the second point\n data[13:15, 17:19] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lon, lat)\n data2 = np.zeros((10, 100, 50))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with one neighbour point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())"
] | [
"0.54688436",
"0.54646534",
"0.53946656",
"0.5362895",
"0.5319889",
"0.52629554",
"0.52503395",
"0.52421236",
"0.52283096",
"0.52271646",
"0.52166325",
"0.5207605",
"0.52060854",
"0.5204232",
"0.51916236",
"0.51884454",
"0.5186699",
"0.5146042",
"0.5143565",
"0.51160043",
"0.50919545",
"0.5090334",
"0.508701",
"0.50867134",
"0.50778216",
"0.5064369",
"0.5054932",
"0.5054675",
"0.5051651",
"0.5049823"
] | 0.6441977 | 0 |
Initialize the inference network, stream video to the network, and output stats and video. | def infer_on_stream(args, client):
# Initialise the class
infer_network = Network()
# Set Probability threshold for detections
prob_threshold = args.prob_threshold
### TODO: Load the model through `infer_network` ###
infer_network.load_model(args.model,args.cpu_extension,args.device)
input_shape = infer_network.get_input_shape()
input_image_width = input_shape[3]
input_image_height=input_shape[2]
### TODO: Handle the input stream ###
try: # Try opening the input file as an image; if it is not an image this block throws an exception and we fall back to opening it as a video.
frame=cv2.imread(args.input)
IS_IMAGE = True
hasFrame =True
out_image_file = os.path.splitext(args.input)[0] + "_inferred" + ".jpg"
#print("Successfully Opened Image")
fps=0
frame_height = frame.shape[0]
frame_width = frame.shape[1]
except :
try: # Try opening as a video; if this also throws an exception, the input is neither a valid video nor a valid image file.
if(args.input =='0'): # check if input is webcam
#print('input is webcam')
args.input =int(args.input)
video=cv2.VideoCapture(args.input) #Open video stream
if (video.isOpened()): # check that the video stream opened successfully
hasFrame,frame=video.read()
IS_IMAGE = False
fps=int(video.get(cv2.CAP_PROP_FPS))
#print ("FPS is {}".format(fps))
frame_height = frame.shape[0]
frame_width = frame.shape[1]
if(args.input):
out_video_file = os.path.splitext(args.input)[0] + "_inferred" + ".avi"
else: # for webcam input, use a fixed output filename
out_video_file = 'webcam_inferred.avi'
out_video=cv2.VideoWriter(out_video_file,cv2.CAP_OPENCV_MJPEG,cv2.VideoWriter_fourcc('M','J','P','G'), fps, (frame_width,frame_height))
else: # video stream failed to open
print('Video capture is not opened properly, Exiting')
video.release()
exit()
except: # both attempts (opening the input as a video or as an image) failed, exiting
print("Error opening input: the input is neither a valid image nor a valid video file. Please provide a valid input. Exiting!")
exit()
# initialize video stats variables
last_stat_person_in_frame =-1
last_stat_total_count =-1
last_stat_counter = 0 # frames since the current count was last published over MQTT
THREESHOLD_NO_OF_SECONDS_FOR_PERSON_LEFT_SCENE = 1.5
THREESHOLD_NO_OF_FRAMES_FOR_PERSON_LEFT_SCENE = int(THREESHOLD_NO_OF_SECONDS_FOR_PERSON_LEFT_SCENE*fps)
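# analyze_frame() (defined elsewhere) is assumed to count a person as having left only after this many
# consecutive frames without a detection, so brief detection dropouts are not treated as exits.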
frame_no =1
video_stats ={'video_state' : 'first_frame' , 'person_in_frame' : 0, 'person_time_spent_in_frame' :0 ,'no_person_in_consecutive_frames' :0 ,'total_count':0, 'person_exited_frame' : False,'Box_coordinate' :[None,None,None,None]} # video statistics dictionary, updated as frames get processed by the analyze_frame() function
decision_param = {'THRESHOLD_PROB' : prob_threshold , 'THREESHOLD_NO_OF_FRAMES_FOR_PERSON_LEFT_SCENE' :THREESHOLD_NO_OF_FRAMES_FOR_PERSON_LEFT_SCENE} # Decision threshold parameters
### TODO: Read from the video capture ###
while(hasFrame and cv2.waitKey(1)<0): #Read video frame by frame
### TODO: Pre-process the image as needed ###
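# The network expects an NCHW tensor of shape [1, 3, H, W]: resize to the model's spatial size,
# transpose HWC -> CHW, then add the batch dimension.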
input_image = cv2.resize(frame,(input_image_width, input_image_height))
input_image = input_image.transpose((2,0,1))
input_image = input_image.reshape(1, 3, input_image_height, input_image_width)
### TODO: Start asynchronous inference for specified request ###
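# A single in-flight request (id 0) is used; the wall-clock time around exec_net()/wait()
# is reported on the frame as the per-frame inference latency.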
t0=time.time()
async_infer_req_handle=infer_network.exec_net(input_image,0)
### TODO: Wait for the result ###
infer_network.wait(async_infer_req_handle)
t1=time.time()
infer_time =round((t1-t0)*1000)
#print("For frame no. {} , infer taken {} miliseconds".format(frame_no, infer_time))
### TODO: Get the results of the inference request ###
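# 'DetectionOutput' is assumed to follow the usual SSD-style layout [1, 1, N, 7], where each row is
# [image_id, label, conf, xmin, ymin, xmax, ymax] with normalized box corners; analyze_frame()
# is expected to filter these detections against prob_threshold and fill video_stats['Box_coordinate'].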
obj_det_out=infer_network.get_output(async_infer_req_handle)['DetectionOutput']
### TODO: Extract any desired stats from the results ###
# Analyze the frame and update the video statistics
person_detected = analyze_frame(obj_det_out,video_stats,decision_param)
# if a person was detected, draw its bounding box on the frame
if(person_detected):
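# Box_coordinate holds normalized [0, 1] corners, so scale by the frame width/height to get pixel coordinates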
x1 =int(video_stats['Box_coordinate'][0] *frame_width)
y1 = int(video_stats['Box_coordinate'][1]*frame_height)
x2 =int(video_stats['Box_coordinate'][2]*frame_width)
y2 = int(video_stats['Box_coordinate'][3]*frame_height)
frame=cv2.rectangle(frame, (x1,y1), (x2,y2), (0,0,255), int(round(frame_height/150)), 8)
cv2.putText(frame,'Person :' + str(video_stats['total_count']),(x2,y2+5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)
# overlay frame number, inference time, current count, and total count on the frame
cv2.putText(frame,'Frame No. ' + str(frame_no) +' Infer Time in ms: ' +str(infer_time),(10,20), cv2.FONT_HERSHEY_SIMPLEX,0.7, (0,255,0), 1, cv2.LINE_AA)
cv2.putText(frame,'Current Count:' + str(video_stats['person_in_frame']),(10,40), cv2.FONT_HERSHEY_SIMPLEX,0.7, (0,255,0), 1, cv2.LINE_AA)
cv2.putText(frame,'Total No. of Person:' + str(video_stats['total_count']),(10,60), cv2.FONT_HERSHEY_SIMPLEX,0.7, (0,255,0), 1, cv2.LINE_AA)
if(not IS_IMAGE): # if the input is a video, overlay the current person's duration on the frame
cv2.putText(frame,'Current person duration: ' + str(video_stats['person_time_spent_in_frame']/fps),(10,80), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0), 1, cv2.LINE_AA)
### TODO: Calculate and send relevant information on ###
### current_count, total_count and duration to the MQTT server ###
### Topic "person": keys of "count" and "total" ###
### Topic "person/duration": key of "duration" ###
# Statistics are published to the MQTT server here
# send the person duration to the MQTT server whenever a person exits the frame
if(video_stats['person_exited_frame'] and (not IS_IMAGE)): # if person exited frame and input is video then send last exited person duration to MQTT server.
json_last_person_time_spent =json.dumps({'duration': video_stats['person_time_spent_in_frame']/fps})
client.publish('person/duration',json_last_person_time_spent)
video_stats['person_exited_frame'] =False
#print('Person duration :{}'.format(json_last_person_time_spent))
#sending current count to MQTT server
if((last_stat_person_in_frame !=video_stats['person_in_frame']) or (last_stat_counter >9)): # publish the current count only when it changes or every 10th frame, to save network traffic
count_data = {'count' :video_stats['person_in_frame']}
json_count_data = json.dumps(count_data)
client.publish('person',json_count_data)
last_stat_person_in_frame = video_stats['person_in_frame']
#print('Current Count {}'.format(json_count_data))
last_stat_counter = -1
last_stat_counter+=1
#sending total count to MQTT server
if(last_stat_total_count !=video_stats['total_count']): # publish the total count only when it changes, to save network traffic
total_count_data = {'total':video_stats['total_count']}
json_total_count_data = json.dumps(total_count_data)
client.publish('person',json_total_count_data)
last_stat_total_count =video_stats['total_count']
# print('Total Count {}'.format(json_total_count_data))
### TODO: Send the frame to the FFMPEG server ###
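# The annotated frame is written as raw bytes to stdout, where a separately started ffmpeg process
# is expected to read and re-encode it for the web UI.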
if ( not IS_IMAGE):
sys.stdout.buffer.write(frame)
sys.stdout.flush()
#show frame (only for local pc)
#frame1 = cv2.resize(frame,(frame_width,frame_height))
#cv2.imshow('Inferred Image' ,frame1)
### TODO: Write an output image if `single_image_mode` ###
if (IS_IMAGE):
cv2.imwrite(out_image_file,frame)
cv2.waitKey(0)
break
else:
out_video.write(frame)
hasFrame,frame=video.read()
frame_no+=1
# Sending person duration if last frame ended in 'missing_person_in_frame' or 'person_in_frame' state
if((video_stats['video_state']=='missing_person_in_frame' or video_stats['video_state']=='person_in_frame' )and (not IS_IMAGE)):
json_person_time_spent =json.dumps({'duration': video_stats['person_time_spent_in_frame']/fps})
client.publish('person/duration',json_person_time_spent)
client.disconnect()
if (not IS_IMAGE):
video.release()
out_video.release()
cv2.destroyAllWindows() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def infer_on_stream(args, client):\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n infer_network.load_model(model=args.model,\n device=args.device,\n cpu_extension=args.cpu_extension)\n\n net_input_shape = infer_network.get_input_shape()\n\n ### TODO: Handle the input stream ###\n\n ### Handle image, video or webcam\n image_flag = False\n # Check if the input is a webcam\n if args.input == 'CAM':\n args.input = 0\n # Checks if the input is an image\n elif args.input.endswith('.jpg') or args.input.endswith('.bmp'):\n image_flag = True\n # else assume input is vedio file\n\n # Get and open video capture\n cap = cv2.VideoCapture(args.input)\n if args.input:\n cap.open(args.input)\n\n # Grab the shape of the input\n width = int(cap.get(3))\n height = int(cap.get(4))\n\n # iniatilize variables\n count_total = 0\n count_prev = 0\n count_curr = 0\n duration_curr = 0\n duration_prev = 0\n duration_total= 0\n frame_time = 0\n frame_count = 0\n timer_curr_start = 0\n request_id = 0\n\n ### TODO: Loop until stream is over ###\n while cap.isOpened():\n\n ### TODO: Read from the video capture ###\n flag, frame = cap.read()\n if not flag:\n break\n key_pressed = cv2.waitKey(60)\n\n\n ### TODO: Pre-process the image as needed ###\n p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))\n # Update layout\n p_frame = p_frame.transpose((2,0,1))\n p_frame = p_frame.reshape(1, *p_frame.shape)\n\n ### TODO: Start asynchronous inference for specified request ###\n timer_infer_start = time.time()\n infer_network.exec_net(p_frame, request_id)\n\n ### TODO: Wait for the result ###\n if infer_network.wait(request_id) == 0:\n\n ### TODO: Get the results of the inference request ###\n timer_infer_delay = time.time() - timer_infer_start\n result = infer_network.get_output(request_id)\n\n\n ### TODO: Extract any desired stats from the results ###\n\n # Draw bounding box\n conf = result[0, 0, :, 2]\n count_curr = 0\n for i, c in enumerate(conf):\n if c > prob_threshold:\n rect_box = result[0, 0, i, 3:]\n min_x = int(rect_box[0] * width)\n min_y = int(rect_box[1] * height)\n max_x = int(rect_box[2] * width)\n max_y = int(rect_box[3] * height)\n frame = cv2.rectangle(frame, (min_x, min_y), (max_x, max_y), (255,0, 0), 1)\n count_curr = count_curr + 1\n\n ### TODO: Calculate and send relevant information on ###\n\n ### current_count, total_count and duration to the MQTT server ###\n ### Topic \"person\": keys of \"count\" and \"total\" ###\n ### Topic \"person/duration\": key of \"duration\" ###\n\n # IF new person comes inside imapge\n if count_curr > count_prev:\n timer_curr_start = time.time()\n count_total = count_total + count_curr - count_prev\n client.publish('person', payload=json.dumps({'total': count_total}))\n\n # Calc Person Duration\n if count_curr < count_prev:\n timer_curr_delay = time.time() - timer_curr_start\n client.publish('person/duration', payload=json.dumps({'duration': timer_curr_delay}))\n\n # Write out information\n text_infer = \"Inference Delay: {:.3f}ms\".format(timer_infer_delay * 1000)\n text_counter = \"Current Counter: {}\".format(count_curr)\n cv2.putText(frame, text_infer, (10, 15),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0), 1)\n cv2.putText(frame, text_counter, (10, 30),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)\n\n if count_curr > 0:\n text_duration = \"Current Duration: {:.1f}s\".format(time.time() - 
timer_curr_start)\n cv2.putText(frame, text_duration, (10, 45),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)\n\n count_prev = count_curr\n client.publish(\"person\", json.dumps({\"count\": count_curr}))\n\n\n ### TODO: Send the frame to the FFMPEG server ###\n sys.stdout.buffer.write(frame)\n sys.stdout.flush()\n # Break if escape key pressed\n if key_pressed == 27:\n break\n\n ### TODO: Write an output image if `single_image_mode` ###\n\n # Release the capture and destroy any OpenCV windows\n cap.release()\n cv2.destroyAllWindows()\n client.disconnect()",
"def infer_on_stream(args, client):\n count_current = 0\n count_last = 0\n count_last_last = 0\n total_count = 0\n duration = 0\n avg_duration = 0\n total_duration = 0\n start_time = 0\n active_person = 0\n net_input_shape = []\n frame_count = 0\n\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n infer_network.load_model(model=args.model, device=args.device, cpu_extension=args.cpu_extension)\n\n ### TODO: Handle the input stream ###\n cap = cv2.VideoCapture(args.input)\n cap.open(args.input)\n\n # get the required shape for the network\n net_input_shape = infer_network.get_input_shape()\n\n # get the shape of the input image\n width = int(cap.get(3))\n height = int(cap.get(4))\n\n if net_input_shape != [1, 3, 600, 600]:\n #net_input_shape = [1, 3, 600, 600]\n #sometimes gives [1,3] and causes an error, so hard coded shape to match model\n sys.exit(\"Input shape error, forced exit. Please run again until this error does not appear.\")\n\n ### TODO: Loop until stream is over ###\n while cap.isOpened():\n\n ### TODO: Read from the video capture ###\n flag, frame = cap.read()\n frame_count += 1\n\n if not flag:\n #video stream ended, go to end and close out\n break\n\n ### TODO: Start asynchronous inference for specified request ###\n if frame_count%2 == 0: #check every other frame\n ### TODO: Pre-process the image as needed ###\n vid_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))\n #save a copy of the input frame to use on output\n vid_frame_copy = vid_frame\n vid_frame = vid_frame.transpose((2, 0, 1))\n vid_frame = vid_frame.reshape(1, *vid_frame.shape)\n\n infer_network.exec_net(vid_frame)\n\n ### TODO: Wait for the result ###\n if infer_network.wait() == 0:\n\n ### TODO: Get the results of the inference request ###\n results = infer_network.get_output()\n\n # for this model, results should be shape [1, 1, N, 7]\n # N is number of hits, last is a 7 item list [image_id, label, conf, x_min,\n # y_min, x_max, y_max] where label is the predicted class\n\n ### TODO: Extract any desired stats from the results ###\n out_frame, count_current, box = draw_boxes(vid_frame_copy, results, args, net_input_shape[3], net_input_shape[2])\n #out_frame = cv2.putText(out_frame, \"Last Frame Analyzed = \"+str(frame_count), (10, 420), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n\n ### TODO: Calculate and send relevant information on ###\n ### count_current, total_count and duration to the MQTT server ###\n ### Topic \"person\": keys of \"count\" and \"total\" ###\n ### Topic \"person/duration\": key of \"duration\" ###\n\n # This block of code from Mentor Help question 129845, some modifications by me\n # If both last and last_last are equal, positive ID for two frames.\n if count_current > count_last and count_last_last == count_last:\n start_time = time.time()\n total_count = total_count + count_current - count_last\n\n #client.publish(\"person\", json.dumps({\"total\": total_count}))\n client.publish(\"person\", json.dumps({\"count\": count_current}))\n\n #out_frame = cv2.putText(out_frame, \"Current Time = \"+str('% 6.2f' % time.time()), (10, 450), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Person Entered Frame = \"+str(count_current), (10, 510), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Total Counted = 
\"+str(total_count), (10, 540), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n\n # Person duration in the video is calculated if two frames of no detect to account for skipped frame\n if count_current < count_last_last and count_last < count_last_last:\n duration = int(time.time() - start_time)\n total_duration += duration / 11 #frames per second and evaluating only every other frame\n avg_duration = int(total_duration / total_count)\n client.publish(\"person/duration\", json.dumps({\"duration\": avg_duration}))\n\n #out_frame = cv2.putText(out_frame, \"Duration = \"+str('% 6.2f' % duration), (10, 540), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Average Duration = \" + str('% 4.2f' % avg_duration) + \" seconds.\", (10, 570), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n\n # Set a double counter to review two frames at a time\n count_last_last = count_last\n count_last = count_current\n #End block of code from Mentor Help question 129845\n\n\n ### TODO: Send the frame to the FFMPEG server ###\n out_frame = out_frame.copy(order='C')\n out_frame = cv2.resize(out_frame, (width, height))\n np.ascontiguousarray(out_frame, dtype=np.float32)\n sys.stdout.buffer.write(out_frame)\n sys.stdout.flush()\n\n ### TODO: Write an output image if `single_image_mode` ###\n\n #Release the capture and destroy any OpenCV windows\n cap.release()\n cv2.destroyAllWindows()\n\n #Disconnect from MQTT\n client.disconnect()\n\n #Print final numbers for reference\n print(\"Video stream ended.\")\n print(\"Final count was \" + str(total_count))\n print(\"Average Duration was \" + str(avg_duration) + \" seconds.\")",
"def infer_on_stream(args, client):\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n infer_network.exec_network = infer_network.load_model\\\n (args.model, args.device, args.cpu_extension)\n # extract information about model input layer\n (b, c, input_height, input_width) = infer_network.get_input_shape()\n\n ### TODO: Handle the input stream ###\n # extenstion of input file\n input_extension = os.path.splitext(args.input)[1].lower()\n supported_vid_exts = ['.mp4', '.mpeg', '.avi', '.mkv']\n supported_img_exts = [\".bmp\",\".dib\", \".jpeg\", \".jp2\", \".jpg\", \".jpe\",\\\n \".png\", \".pbm\", \".pgm\", \".ppm\", \".sr\", \".ras\", \".tiff\", \".tif\"]\n single_image_mode = False\n # if input is camera\n if args.input.upper() == 'CAM':\n capture = cv2.VideoCapture(0)\n \n # if input is video\n elif input_extension in supported_vid_exts:\n capture = cv2.VideoCapture(args.input)\n \n # if input is image\n elif input_extension in supported_img_exts:\n single_image_mode = True\n capture = cv2.VideoCapture(args.input) \n capture.open(args.input)\n else:\n sys.exit(\"FATAL ERROR : The format of your input file is not supported\" \\\n \"\\nsupported extensions are : \" + \", \".join(supported_exts))\n prev_count = 0\n total_persons = 0\n ### TODO: Loop until stream is over ###\n while (capture.isOpened()):\n ### TODO: Read from the video capture ###\n ret, frame = capture.read()\n if not ret:\n break\n ### TODO: Pre-process the image as needed ###\n image = preprocessing(frame, input_width, input_height)\n ### TODO: Start asynchronous inference for specified request ###\n start_time = time.time()\n # run inference\n infer_network.exec_net(image)\n ### TODO: Wait for the result ###\n if infer_network.wait() == 0:\n infer_time = time.time() - start_time\n ### TODO: Get the results of the inference request ###\n outputs = infer_network.get_output()[0][0]\n ### Take model output and extract number of detections with confidence exceeding threshold\n ### and draw bounding boxes around detections\n out_image, current_count = apply_threshold(outputs, frame, prob_threshold)\n \n # show inference time on image\n cv2.putText(out_image, \"inference time: {:.5f} ms\".format(infer_time), (30, 30),\\\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)\n \n ### TODO: Extract any desired stats from the results ###\n # when any person exit\n if current_count < prev_count:\n ### Topic \"person/duration\": key of \"duration\" ###\n # send duration to mqtt server client\n client.publish(\"person/duration\", json.dumps({\"duration\": time.time() - p_start}))\n\n # when new person enters\n if current_count > prev_count:\n total_persons += current_count - prev_count\n p_start = time.time()\n \n prev_count = current_count\n \n ### TODO: Calculate and send relevant information on ###\n ### current_count, total_count and duration to the MQTT server ###\n ### Topic \"person\": keys of \"count\" and \"total\" ###\n client.publish(\"person\", json.dumps({\"count\": current_count,\"total\": total_persons}))\n ### TODO: Send the frame to the FFMPEG server ###\n sys.stdout.buffer.write(out_image)\n sys.stdout.buffer.flush()\n ### TODO: Write an output image if `single_image_mode` ###\n if single_image_mode:\n cv2.imwrite(\"output_frame.png\", out_image)\n # release resources\n capture.release()\n cv2.destroyAllWindows()\n client.disconnect()\n del infer_network",
"def infer_on_stream(args, client):\n # Initialize the network\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n # Load the model through `infer_network`\n infer_network.load_model(args.model, args.device, args.cpu_extension)\n\n # Handle the input stream\n # expand the tilde\n input_fpath = os.path.expanduser(args.input)\n f_name, f_extn = os.path.splitext(input_fpath)\n\n is_image_input = False\n\n # add the file extensions as you like\n if f_extn in ['.mp4', '.avi', '.mpeg']:\n pass\n elif f_extn in ['.png', '.jpg', 'jpeg']:\n is_image_input = True\n else:\n assert False, f'unsupported input data extension: {f_extn}'\n\n # Get and open video capture\n cap = cv2.VideoCapture(input_fpath)\n cap.open(input_fpath)\n # [1, 3, 320, 544] (BCHW)\n net_input_dims = infer_network.get_input_shape()\n logger.debug('* DNN input dims: {}'.format(net_input_dims))\n\n width = int(cap.get(3))\n height = int(cap.get(4))\n # * Video dims: [height:432, width:768]\n logger.debug('* Video dims: [height:{}, width:{}]'.format(height, width))\n\n logger.debug('platform: {}'.format(platform))\n out_video = cv2.VideoWriter('out_result.mp4', CODEC, 30, (width, height))\n\n # Loop until stream is over\n frame_num = 0\n last_valid_pers_num = 0\n total_valid_pers_num = 0\n duration_time_sec = 0\n miss_detect_cnt = 0\n start_tracking = False\n all_infer_time = []\n\n while cap.isOpened():\n # Read the next frame\n flag, frame = cap.read()\n if not flag:\n break\n key_pressed = cv2.waitKey(60)\n\n # for debug\n if frame[0].size > 0:\n logger.debug(frame)\n logger.debug(f'saved the frame into img_{frame_num}.png !!!')\n cv2.imwrite(f'resources/images/img_{frame_num}.png', frame)\n cv2.imshow('win', frame)\n\n # Pre-processing the image\n # cv2.resize(src, dsize=(width, height))\n p_frame = cv2.resize(frame, (net_input_dims[3], net_input_dims[2]))\n p_frame = p_frame.transpose((2,0,1))\n # reshape (3, 320, 544) to (1, 3, 320, 544)\n p_frame = p_frame.reshape(1, *p_frame.shape)\n logger.debug('+ frame %d' % (frame_num))\n logger.debug(' - shape: {}'.format(p_frame.shape))\n\n # Start asynchronous inference for specified request\n infer_start = time.time()\n infer_network.exec_net(p_frame)\n\n # Wait for the result\n if infer_network.wait() == 0: # when the inference per frame finishes\n infer_stop = time.time()\n infer_time_ms = (infer_stop-infer_start) * 1e3\n\n # Get the results of the inference request\n infer_result = infer_network.get_output()\n\n # Filter the valid object\n valid_object = extract_valid_object(infer_result)\n\n # draw bounding box of detected person on the image\n out_frame, valid_pers_num = draw_boundingbox(frame, valid_object, width, height, prob_threshold)\n\n def add_text_on_image(image, insert_text=None, loc=(10,10), tsize=0.4, tcolr=(209, 130, 0, 255), tbold=1):\n # add a text\n cv2.putText(image, insert_text, loc, cv2.FONT_HERSHEY_SIMPLEX, tsize, tcolr, tbold)\n logger.debug(' - [add the text on image] %s' % (insert_text))\n return\n\n logger.debug(' - total number of people: %d' % total_valid_pers_num)\n\n logger.debug(f'[#check#] valid person number: {valid_pers_num}')\n logger.debug(f'[#check#] last valid person number: {last_valid_pers_num}')\n logger.debug(f'[#check#] total count ({total_valid_pers_num})')\n\n # p1: 0-0-0-0-0-0-0-0-0-0 (F)\n # p2: 0-0-1-1-1-0-0-0-0-1 (F)\n # p3: 0-0-1-1-1-0-0-0-0-0 (F)\n # p4: 0-0-1-0-1-1-1-1-1-1 (F)\n if start_tracking: # if a person disappears for a sec\n miss_detect_cnt += 1\n 
logger.debug(f'[#check#] miss count ({miss_detect_cnt})')\n if miss_detect_cnt == 5: # if miss detection continues for the consecutive 5 frames, we think a person disappeared\n duration_time_sec = time.time() - emerge_time\n total_valid_pers_num += 1\n\n # Topic \"person/duration\": key of \"duration\"\n client.publish(\"person/duration\", json.dumps({\"duration\": duration_time_sec}))\n # Topic \"person\": keys of \"count\" and \"total\"\n client.publish(\"person\", json.dumps({\"total\": total_valid_pers_num}))\n\n logger.debug(f'[#check#] a person is disappeared')\n logger.debug(f'[#check#] total count ({total_valid_pers_num})')\n logger.debug(f'[#check#] duration ({duration_time_sec})')\n # initialize\n start_tracking = False\n miss_detect_cnt = 0\n elif valid_pers_num > last_valid_pers_num:\n # initialize\n start_tracking = False\n miss_detect_cnt = 0\n\n else:\n if valid_pers_num > last_valid_pers_num: # 0->1\n emerge_time = time.time()\n elif valid_pers_num < last_valid_pers_num: # 1->0\n start_tracking = True\n else: #0->0\n pass\n\n # add duration time on the image\n insert_text = 'duration time: %d sec' % (duration_time_sec)\n add_text_on_image(out_frame, insert_text, (10,60))\n\n # add total count of people on the image\n insert_text = 'total count of people: %d' % (total_valid_pers_num)\n add_text_on_image(out_frame, insert_text, (10,40))\n\n # add inference time on the image\n insert_text = \"inference time(without post-process): %.2fms\" % (infer_time_ms)\n add_text_on_image(out_frame, insert_text, (10,20))\n all_infer_time.append(infer_time_ms)\n\n # save a current valid person number into the last valid person number\n last_valid_pers_num = valid_pers_num\n\n if is_image_input:\n path = '.'\n f_name = f'output_{frame_num}{f_extn}'\n cv2.imwrite(os.path.join(path, f_name), out_frame)\n else:\n # write into a movie\n out_video.write(out_frame)\n\n # Send current_count, total_count and duration to the MQTT server ###\n client.publish(\"person\", json.dumps({\"count\": valid_pers_num}))\n\n\n # Send the frame to the FFMPEG server\n sys.stdout.buffer.write(out_frame)\n sys.stdout.flush()\n\n # Break if escape key pressed\n if key_pressed == 27:\n break\n# if frame_num > 500:\n# break\n\n # count up the frame number\n frame_num += 1\n\n # Release the capture and destroy any OpenCV windows\n cap.release()\n out_video.release()\n cv2.destroyAllWindows()\n\n # close the MTQQ server connection\n client.disconnect()\n\n logger.info(f'* average inference time: {sum(all_infer_time)/frame_num} ms')\n logger.info(f'* total count of people: {total_valid_pers_num}')",
"def inference():\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n\n with tf.variable_scope(\"inference\"):\n vgg_net(weights)",
"def __init__(self, pn_output=\"./\"):\n # Initialize the video stream, then allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n self.vs = cv2.VideoCapture(0) # Capture video frames, 0 is default video camera\n time.sleep(2.0)\n\n # Load config\n config = configparser.ConfigParser()\n config.read(fn_config)\n self.pn_guest_images = config['DEFAULT']['pn_guest_images_archive']\n self.guest_archive = p7zip(self.pn_guest_images)\n self.camera_rot = int(config['DEFAULT']['camera_rot'])\n self.image_width = int(config['DEFAULT']['image_width'])\n self.max_capture_interval = float(config['DEFAULT']['capture_interval'])\n self.max_capture_length = int(config['DEFAULT']['max_capture_length'])\n self.max_images = int(config['DEFAULT']['max_images'])\n\n # Capture Vars\n self.curr_pic = None # Current image from the camera\n self.gst_capture = None\n self.start_time = time.time()\n self.save_time = time.time()\n self.pic_num = None\n self.pn_gstcap_out = None\n\n # Face Detection Model\n self.min_detec_conf = float(config['DEFAULT']['min_detec_conf'])\n self.min_face_px = make_tuple(config['DEFAULT']['min_face_px'])\n pn_detector_model = config['DEFAULT']['pn_detector_model']\n self.trainRBGavg = make_tuple(config['DEFAULT']['detector_trainrgbavg'])\n print(\"[INFO] loading face detector and embedding model...\")\n protoPath = os.path.sep.join([pn_detector_model, \"deploy.prototxt\"])\n modelPath = os.path.sep.join([pn_detector_model,\n \"res10_300x300_ssd_iter_140000.caffemodel\"])\n self.detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n self.detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n # Face Recognition (extract/recognize embeddings) Model\n self.min_recog_prob = float(config['DEFAULT']['min_recog_prob'])\n fn_embedding_model = config['DEFAULT']['fn_embedding_model']\n self.embedder = cv2.dnn.readNetFromTorch(fn_embedding_model)\n self.embedder.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n self.gst_identify = False\n self.guest_ids = {}\n\n # Guest Info (update outside of function)\n self.known_guest_meta = None",
"def inference(self):\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].send(\"inference\")\n \n ## wait for the finalization to be completed\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].recv()",
"def create_inference_session(self):\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n self.infer_session = onnxruntime.InferenceSession(\n self.augmented_model_path,\n sess_options=sess_options,\n providers=self.execution_providers,\n )",
"def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()",
"def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})",
"def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))",
"def run(self):\n self.sock = self.set_up_socket()\n if self.output_file is None:\n cv2.namedWindow('Video Preview')\n else:\n self.video_writer = cv2.VideoWriter(self.output_file, \n self.output_format, \n self.fps, \n self.output_size\n )\n start_time = time.time()\n self.loop()\n run_time = time.time() - start_time\n self.output_statistics(run_time)\n self.cleanup()",
"def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))",
"def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)",
"def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)",
"def test_run_inference(self, start_server_single_model_onnx):\n\n _, ports = start_server_single_model_onnx\n\n # Connect to grpc service\n stub = create_channel(port=ports[\"grpc_port\"])\n\n imgs_v1_224 = np.ones(ResnetONNX.input_shape, ResnetONNX.dtype)\n output = infer(imgs_v1_224, input_tensor=ResnetONNX.input_name, grpc_stub=stub,\n model_spec_name=ResnetONNX.name,\n model_spec_version=None,\n output_tensors=[ResnetONNX.output_name])\n logger.info(\"Output shape: {}\".format(output[ResnetONNX.output_name].shape))\n assert output[ResnetONNX.output_name].shape == ResnetONNX.output_shape, ERROR_SHAPE",
"def main():\n\t# create output file\n\tif not os.path.exists(OUTPUT_PATH):\n\t\tos.makedirs(OUTPUT_PATH)\n\n\t# init model with pre-trained weights\n\tmodel = create_model()\n\n\tmodel.load_state_dict(torch.load(PATH_PYTORCH_WEIGHTS)['state_dict'])\n\tmodel.eval()\n\n\n\t# if GPU is enabled\n\tif USE_GPU:\n\t\tmodel.cuda()\n\tvideos = os.listdir(INPUT_PATH)\n\t# load and preprocess images in folder\n\tfor y in videos[numTraining:(numTraining+numValidation)]:\n\t\tif not os.path.exists(os.path.join(OUTPUT_PATH,y)):\n\t\t\tos.makedirs(os.path.join(OUTPUT_PATH,y))\n\t\t\tfor i, name in enumerate(os.listdir(os.path.join(INPUT_PATH,y))):\n\t\t\t\tfilename = os.path.join(INPUT_PATH,y,'{:04d}.jpg'.format(i+1))\n\t\t\t\timage_tensor, image_size = load_image(filename)\n\n\t\t\t\tif USE_GPU:\n\t\t\t\t\timage_tensor = image_tensor.cuda()\n\n\t\t\t\t# run model inference\n\t\t\t\tprediction = model.forward(image_tensor[None, ...]) # add extra batch dimension\n\n\t\t\t\t# get result to cpu and squeeze dimensions\n\t\t\t\tif USE_GPU:\n\t\t\t\t\tprediction = prediction.squeeze().data.cpu().numpy()\n\t\t\t\telse:\n\t\t\t\t\tprediction = prediction.squeeze().data.numpy()\n\n\t\t\t\t# postprocess\n\t\t\t\t# first normalize [0,1]\n\t\t\t\tprediction = normalize_map(prediction)\n\t\t\t\tsaliency = postprocess_prediction(prediction, image_size)\n\t\t\t\tsaliency = normalize_map(saliency)\n\t\t\t\tsaliency *= 255\n\t\t\t\tsaliency = saliency.astype(np.uint8)\n\t\t\t\t# save saliency\n\n\t\t\t\tcv2.imwrite(os.path.join(OUTPUT_PATH,str(y),name), saliency)\n\t\t\t\tprint(\"Processed image {} from video {}\".format(i+1,y), end=\"\\r\")\n\t\t\t\tsys.stdout.flush()",
"def load_network_stream(self):\n\n if self.verify_network_stream(self.device):\n self.init_camera()\n else:\n print('Cannot connect to camera in the init thread')",
"def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()",
"def __init__(self, pretrain='vggface2'):\n self.device = torch.device('cuda:0' if torch.cuda.is_available()\n else 'cpu')\n\n self.mtcnn = MTCNN(\n image_size=160, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,\n device=self.device\n )\n self.resnet = InceptionResnetV1(pretrained=pretrain).eval()\\\n .to(self.device)\n self.resnet.classify = True\n\n self.names = self.vggface2_labels()",
"def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())",
"def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16",
"def run(model: str, label: str, max_results: int, num_threads: int,\n camera_id: int, width: int, height: int) -> None:\n # Initialize the video classification model\n options = VideoClassifierOptions(\n num_threads=num_threads, max_results=max_results)\n classifier = VideoClassifier(model, label, options)\n\n # Variables to calculate FPS\n counter, fps, last_inference_start_time, time_per_infer = 0, 0, 0, 0\n categories = []\n\n # Start capturing video input from the camera\n cap = cv2.VideoCapture(camera_id)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n\n # Continuously capture images from the camera and run inference\n while cap.isOpened():\n success, image = cap.read()\n if not success:\n sys.exit(\n 'ERROR: Unable to read from webcam. Please verify your webcam settings.'\n )\n counter += 1\n\n # Mirror the image\n image = cv2.flip(image, 1)\n\n # Ensure that frames are feed to the model at {_MODEL_FPS} frames per second\n # as required in the model specs.\n current_frame_start_time = time.time()\n diff = current_frame_start_time - last_inference_start_time\n if diff * _MODEL_FPS >= (1 - _MODEL_FPS_ERROR_RANGE):\n # Store the time when inference starts.\n last_inference_start_time = current_frame_start_time\n\n # Calculate the inference FPS\n fps = 1.0 / diff\n\n # Convert the frame to RGB as required by the TFLite model.\n frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Feed the frame to the video classification model.\n categories = classifier.classify(frame_rgb)\n\n # Calculate time required per inference.\n time_per_infer = time.time() - current_frame_start_time\n\n # Notes: Frames that aren't fed to the model are still displayed to make the\n # video look smooth. We'll show classification results from the latest\n # classification run on the screen.\n # Show the FPS .\n fps_text = 'Current FPS = {0:.1f}. Expect: {1}'.format(fps, _MODEL_FPS)\n text_location = (_LEFT_MARGIN, _ROW_SIZE)\n cv2.putText(image, fps_text, text_location, cv2.FONT_HERSHEY_PLAIN,\n _FONT_SIZE, _TEXT_COLOR, _FONT_THICKNESS)\n\n # Show the time per inference.\n time_per_infer_text = 'Time per inference: {0}ms'.format(\n int(time_per_infer * 1000))\n text_location = (_LEFT_MARGIN, _ROW_SIZE * 2)\n cv2.putText(image, time_per_infer_text, text_location,\n cv2.FONT_HERSHEY_PLAIN, _FONT_SIZE, _TEXT_COLOR,\n _FONT_THICKNESS)\n\n # Show classification results on the image.\n for idx, category in enumerate(categories):\n class_name = category.label\n probability = round(category.score, 2)\n result_text = class_name + ' (' + str(probability) + ')'\n # Skip the first 2 lines occupied by the fps and time per inference.\n text_location = (_LEFT_MARGIN, (idx + 3) * _ROW_SIZE)\n cv2.putText(image, result_text, text_location, cv2.FONT_HERSHEY_PLAIN,\n _FONT_SIZE, _TEXT_COLOR, _FONT_THICKNESS)\n\n # Stop the program if the ESC key is pressed.\n if cv2.waitKey(1) == 27:\n break\n cv2.imshow('video_classification', image)\n\n cap.release()\n cv2.destroyAllWindows()",
"def __init__(self, video_vis, task_queue, result_queue):\n self.video_vis = video_vis\n self.task_queue = task_queue\n self.result_queue = result_queue\n super().__init__()",
"def __init__(self) -> None:\n from cscore import CameraServer\n\n self.cs = CameraServer.getInstance()\n self.camera_configs = self.read_config_JSON()\n\n self.cameras = [\n self.start_camera(camera_config) for camera_config in self.camera_configs\n ]\n\n # In this, source and sink are inverted from the cscore documentation.\n # self.sink is a CvSource and self.sources are CvSinks. This is because it makes more sense for a reader.\n # We get images from a source, and put images to a sink.\n self.sources = [self.cs.getVideo(camera=camera) for camera in self.cameras]\n self.sink = self.cs.putVideo(\"Driver_Stream\", FRAME_WIDTH, FRAME_HEIGHT)\n # Width and Height are reversed here because the order of putVideo's width and height\n # parameters are the opposite of numpy's (technically it is an array, not an actual image).\n self.frame = np.zeros(shape=(FRAME_HEIGHT, FRAME_WIDTH, 3), dtype=np.uint8)",
"def run(self):\n # Get the checkpoint file\n print('loading checkpoint file ...')\n cp = torch.load(self.cfg.work_dir + '/latest.pth')\n print('done')\n\n print('loading state dictionary ...')\n # Initialize network first as separate modules so we can access WFCOS\n backbone = build_backbone(self.cfg.model.backbone).cuda()\n neck = build_neck(self.cfg.model.neck).cuda()\n head = build_head(self.cfg.model.bbox_head).cuda()\n\n # Load the state dicts\n backbone_state = OrderedDict()\n neck_state = OrderedDict()\n head_state = OrderedDict()\n\n for key in cp['state_dict'].keys():\n if 'backbone' in key:\n backbone_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n elif 'neck' in key:\n neck_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n elif 'bbox_head' in key:\n head_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n\n backbone.load_state_dict(backbone_state)\n neck.load_state_dict(neck_state)\n head.load_state_dict(head_state)\n\n # Set to eval mode\n backbone.eval()\n neck.eval()\n head.eval()\n\n print('done')\n\n print('starting inference validation run ...')\n for i, (img, cls) in enumerate(self.loader):\n out = backbone(img)\n out = neck(out)\n out = head(out)\n\n img_metas = [{'img_shape': (640, 800),\n 'scale_factor': 1}]\n bboxes = head.get_bboxes(out[0], out[1], out[2], img_metas,\n self.cfg.test_cfg)\n pass\n print('done')",
"def build_inference_graph(self):\n self.build_train_graph()",
"def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network",
"def __init__(self, output_file=None, output_size=None, output_format=None):\n super(StreamerClient, self).__init__()\n # Constants\n self.MCAST_GRP = '224.0.0.1'\n self.MCAST_PORT = 5007\n self.sync_start = '#!-START-!#'\n self.sync_kill = '#!-QUIT-!#'\n self.chunk_length = 4096 # or 1024\n self.sock = None\n # Will change throughout \n self.width = 0\n self.height = 0\n self.received_frames = 0\n self.halt = False\n self.output_file = output_file\n if output_size is None:\n self.output_size = (1280, 720)\n else:\n self.output_size = output_size\n if output_format is None:\n self.output_format = cv2.VideoWriter_fourcc(*'mpeg')\n else:\n self.output_format = cv2.VideoWriter_fourcc(*output_format)\n self.fps = 20\n self.video_writer = None",
"def initialize(self, inputs, targets, global_step=None, is_training=False, is_evaluating=False):\n if inputs is None:\n raise ValueError('no mel inputs were provided')\n if targets is None:\n raise ValueError('no targets were provided')\n if is_training and is_evaluating:\n raise RuntimeError('Model can not be in training and evaluation modes at the same time!')\n\n with tf.variable_scope('inference') as scope:\n self.batch_size = tf.shape(inputs)[0]\n hp = self._hparams\n\n self.pipenet_output, self.postnet_output, self.alpha, self.conv_list = inference(inputs, is_training=is_training, hparams=hp)\n\n _, self.soft_prediction = self.bdnn_prediction(tf.sigmoid(self.postnet_output))\n\n self.pipenet_prediction = tf.round(tf.sigmoid(self.pipenet_output))\n\n self.postnet_prediction = tf.round(tf.sigmoid(self.postnet_output))\n\n self.all_vars = tf.trainable_variables()\n\n self.inputs = inputs\n self.pipenet_targets = targets\n self.targets = tf.reduce_max(targets, axis=-1)\n\n raw_indx = int(np.floor(int(2 * (self._hparams.w - 1) / self._hparams.u + 3) / 2))\n raw_labels = self.pipenet_targets[:, raw_indx]\n raw_labels = tf.reshape(raw_labels, shape=(-1, 1))\n self.raw_labels = tf.identity(raw_labels, 'raw_labels')\n\n self.postnet_accuracy = tf.reduce_mean(tf.cast(tf.equal(self.postnet_prediction, self.pipenet_targets), tf.float32))\n self.pipenet_accuracy = tf.reduce_mean(tf.cast(tf.equal(self.pipenet_prediction, self.pipenet_targets), tf.float32))\n\n log('Initialized Proposed model. Dimensions (? = dynamic shape): ')\n log(' Train mode: {}'.format(is_training))\n log(' Eval mode: {}'.format(is_evaluating))\n log(' input: {}'.format(inputs.shape))\n log(' Parameters {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.all_vars]) / 1_000_000))"
] | [
"0.6657868",
"0.6654198",
"0.6620068",
"0.6584362",
"0.6474501",
"0.62960565",
"0.6276968",
"0.6220246",
"0.6160445",
"0.59733164",
"0.5964964",
"0.59312594",
"0.5894175",
"0.5887097",
"0.58308923",
"0.5786715",
"0.5768193",
"0.57668513",
"0.57543945",
"0.57250965",
"0.5708935",
"0.56773657",
"0.5665131",
"0.56609654",
"0.56480646",
"0.5615217",
"0.5599074",
"0.55833393",
"0.5558512",
"0.55583334"
] | 0.6824545 | 0 |
Workbook galleries supported by the template. | def galleries(self) -> pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]]:
return pulumi.get(self, "galleries") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def galleries(self) -> pulumi.Output[Sequence['outputs.WorkbookTemplateGalleryResponse']]:\n return pulumi.get(self, \"galleries\")",
"def get_galleries(self):\n data = self._get('get_gallery_list')\n return data['galleries']",
"def Gallery():\n return render_template(\n 'Gallery.html',\n title='The to 8 NBA players right now',\n year=datetime.now().year,\n message=''\n )",
"def get_gallery():\r\n to_segment = os.listdir(TO_SEGMENT)\r\n print(to_segment)\r\n return render_template(\"gallery.html\",\r\n image_names=to_segment,\r\n next_page_text=\"Segment Images! - (might take a couple mins)\",\r\n next_page=\"get_segmented_gallery\"\r\n )",
"def __init__(__self__, *,\n galleries: pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]],\n resource_group_name: pulumi.Input[str],\n template_data: Any,\n author: Optional[pulumi.Input[str]] = None,\n localized: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateLocalizedGalleryArgs']]]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"galleries\", galleries)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"template_data\", template_data)\n if author is not None:\n pulumi.set(__self__, \"author\", author)\n if localized is not None:\n pulumi.set(__self__, \"localized\", localized)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def get_gallery(self):\n return os.path.join(self.directory, GALLERY_DIR)",
"def gallery():\n return render('base.html')",
"def cmd_gallery_subreddit_gallery(client, args):\n subreddit_gallery = client.subreddit_gallery(args.subreddit, args.sort,\n args.window, args.page)\n data = [item.__dict__ for item in subreddit_gallery]\n generate_output({'subreddit_gallery': data}, args.output_file)",
"def populateGallery():\n\n # Set the UI parent to be the scroll layout\n global objectScroll\n cmds.setParent(objectScroll)\n\n # List all assets in the direcoty\n assetList = [directory for directory in os.listdir(AC.ASSETS_PATH) if os.path.isdir(os.path.join(AC.ASSETS_PATH, directory))]\n\n # Create a ButtonIcon for each asset\n for asset in assetList:\n addButtonIcon(asset)",
"def get_available_galleries(include_default=False):\n galleries = []\n\n for directory in Path(MEDIA_AVATARS).dirs():\n if include_default or directory[-8:] != '_default':\n gallery = {'name': directory.name, 'images': []}\n\n images = directory.files('*.gif')\n images += directory.files('*.jpg')\n images += directory.files('*.jpeg')\n images += directory.files('*.png')\n\n for image in images:\n image_path = image[len(settings.MEDIA_ROOT):]\n if image_path.startswith('/'):\n image_path = image_path[1:]\n gallery['images'].append(image_path)\n\n if gallery['images']:\n galleries.append(gallery)\n\n return galleries",
"def expand_gallery(generator, metadata):\n if \"gallery\" not in metadata or metadata[\"gallery\"] is None:\n return # If no gallery specified, we do nothing\n\n lines = []\n base_path = _image_path(generator)\n in_path = path.join(base_path, metadata[\"gallery\"])\n template = generator.settings.get(\"GALLERY_TEMPLATE\", DEFAULT_TEMPLATE)\n thumbnail_name = generator.settings.get(\"GALLERY_THUMBNAIL\", DEFAULT_GALLERY_THUMB)\n thumbnail_prefix = generator.settings.get(\"\")\n resizer = Resizer(thumbnail_name, \"?x?\", base_path)\n for dirpath, _, filenames in os.walk(in_path):\n for filename in filenames:\n if not filename.startswith(\".\"):\n url = path.join(dirpath, filename).replace(base_path, \"\")[1:]\n url = path.join(\n \"/static\",\n generator.settings.get(\"IMAGE_PATH\", DEFAULT_IMAGE_DIR),\n url,\n ).replace(\"\\\\\", \"/\")\n logger.debug(\"GALLERY: {0}\".format(url))\n thumbnail = resizer.get_thumbnail_name(filename)\n thumbnail = path.join(\n \"/\",\n generator.settings.get(\"THUMBNAIL_DIR\", DEFAULT_THUMBNAIL_DIR),\n thumbnail,\n ).replace(\"\\\\\", \"/\")\n lines.append(\n template.format(filename=filename, url=url, thumbnail=thumbnail,)\n )\n metadata[\"gallery_content\"] = \"\\n\".join(lines)",
"def has_gallery(self):\n if self.barcamp.gallery and self.barcamp.gallery == \"-1\":\n return False\n try:\n gallery = self.config.dbs.galleries.get(bson.ObjectId(self.barcamp.gallery))\n except:\n return False\n return True",
"def cmd_gallery_items(client, args):\n gallery = client.gallery(args.section, args.sort, args.page, args.window,\n args.show_viral)\n data = [item.__dict__ for item in gallery]\n generate_output({'gallery': data}, args.output_file)",
"def gallery_conf(tmpdir):\n # Skip if numpy not installed\n pytest.importorskip(\"numpy\")\n\n gallery_conf = _fill_gallery_conf_defaults({})\n gallery_conf.update(\n src_dir=str(tmpdir), examples_dir=str(tmpdir), gallery_dir=str(tmpdir)\n )\n\n return gallery_conf",
"def get_gallery(self, section='hot', sort='viral', window='day',\n show_viral=True, limit=None):\n url = (\"https://api.imgur.com/3/gallery/{}/{}/{}/{}?showViral=\"\n \"{}\".format(section, sort, window, '{}', show_viral))\n resp = self._send_request(url, limit=limit)\n return [_get_album_or_image(thing, self) for thing in resp]",
"def test_gallery(self):\n\t\tps = PushshiftAPI()\n\t\tpost, = ps.search_submissions(limit=1, ids=['t3_hrrh23'])\n\t\tre = RedditElement(post)\n\t\tself.assertEqual(len(re.get_urls()), 3, msg='Got incorrect image count from PSAW gallery submission!')\n\t\tfor url in re.get_urls():\n\t\t\tself.assertIn('https', url, msg='Failed to extract valid gallery URL: %s' % url)",
"def create_gallery(jekyll_site_path, gallery_name):\n\n gallery_path = os.path.join(jekyll_site_path, 'images', 'galleries', gallery_name)\n\n if not os.path.exists(gallery_path):\n os.makedirs(gallery_path)\n\n print(f\"Created gallery path {gallery_path}\")\n\n return gallery_path",
"def notebooks_group():\n pass",
"def create_gallery(galleryname, gpath, desc, tags, zipfile, ur, uw, gr, gw, dbfile):\n \n new_table('galleryindex', dbfile, 'gallery_db_schema.sql') # Create gallery index table if not existing\n if add_gallery(galleryname, gpath, desc, tags, zipfile, ur, uw, gr, gw, 'galleryindex', dbfile): # Add gallery to gallery index table\n new_table(galleryname, dbfile, 'image_db_schema.sql') # Create empty image gallery table if not existing",
"def get(self):\n return PhotoGalleryService().get_all(), 200",
"def save_figures(block, block_vars, gallery_conf):\n image_path_iterator = block_vars['image_path_iterator']\n all_rst = u''\n prev_count = len(image_path_iterator)\n for scraper in gallery_conf['image_scrapers']:\n rst = scraper(block, block_vars, gallery_conf)\n if not isinstance(rst, str):\n raise TypeError('rst from scraper %r was not a string, '\n 'got type %s:\\n%r'\n % (scraper, type(rst), rst))\n n_new = len(image_path_iterator) - prev_count\n for ii in range(n_new):\n current_path, _ = _find_image_ext(\n image_path_iterator.paths[prev_count + ii])\n if not os.path.isfile(current_path):\n raise RuntimeError('Scraper %s did not produce expected image:'\n '\\n%s' % (scraper, current_path))\n all_rst += rst\n return all_rst",
"def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n plt.show()\n plt.savefig(\"feature_{}.png\".format(50))",
"def cmd_gallery_memes_subgallery(client, args):\n memes_subgallery = client.memes_subgallery(args.sort, args.page, args.window)\n data = [item.__dict__ for item in memes_subgallery]\n generate_output({'memes_subgallery': data}, args.output_file)",
"def ministry_gallery_json(request, ministry_id):\n ministry = Ministry.objects.get(pk=ministry_id)\n\n return JsonResponse({'gallery': ministry_images(ministry)})",
"def produce_gallery_set(root_dir,\n gallery_sizes=[50, 100, 500, 1000, 2000, 4000]):\n for size in gallery_sizes:\n file_name = 'TestG' + str(size)\n test = sio.loadmat(osp.join(root_dir, 'test/train_test',\n file_name + '.mat'))\n test = test[file_name].squeeze()\n\n q_names = []\n queries_to_galleries = [[] for _ in range(len(test))]\n for index, item in enumerate(test):\n # query\n q_name = str(item['Query'][0, 0][0][0])\n q_names.append(q_name)\n # gallery\n gallery = item['Gallery'].squeeze()\n for im_name, _, __ in gallery:\n g_name = str(im_name[0])\n queries_to_galleries[index].append(g_name)\n\n queries_to_galleries = pd.DataFrame(queries_to_galleries,\n index=q_names)\n queries_to_galleries.to_csv(osp.join(root_dir,\n 'q_to_g' + str(size) + 'DF.csv'))",
"def plot_gallery2(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n im = images[i].reshape((h, w, 3))\n # Normalize image to 0..1 range for visualization\n if im.dtype.name == 'float64':\n m0 = np.min(im)\n m1 = np.max(im)\n im = (im - m0) / (m1 - m0)\n pl.imshow(im)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())",
"def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())",
"def source_list(self):\n return [g[\"name\"] for g in self._galleries]",
"def figure_links(self):\n dirpath = os.path.join(config[\"src_dir\"], config[\"figures_subdir\"])\n assert os.path.exists(dirpath), f\"- figures subdirectory {dirpath} was not found\"\n figures = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))\n and not f.startswith('.') and not f.endswith('.tex') and not f.endswith('.pdf')]\n figures = filter(lambda f: any([re.search(f, cell.source) for cell in self.content.cells]), figures)\n return [(os.path.join(config[\"figures_subdir\"], figure), f\"{config['github_pages_url']}/figures/{figure}\") for figure in figures]",
"def make_gallery(post_name, image_list, config={'gallery_dir': 'galleries'}):\n gallery_name = make_gallery_name_from_post_name(post_name)\n gallery_path = get_gallery_path(gallery_name)\n output_path = os.path.join(gallery_path, \"index.md\")\n with open(output_path, \"w\") as fd:\n fd.write(make_gallery_index(gallery_name, image_list))\n\n copy_images(gallery_path, image_list)\n #make_thumbs\n #make_image_pages"
] | [
"0.8052166",
"0.6197946",
"0.585732",
"0.57718176",
"0.5716009",
"0.5657963",
"0.5653665",
"0.56315845",
"0.5574098",
"0.5572743",
"0.5434536",
"0.5415715",
"0.530429",
"0.5217824",
"0.5189865",
"0.5185198",
"0.5181188",
"0.51711136",
"0.5099473",
"0.5087812",
"0.50609577",
"0.5057201",
"0.50493896",
"0.4998675",
"0.49947795",
"0.4986091",
"0.49655232",
"0.49328575",
"0.492334",
"0.49165034"
] | 0.82092285 | 0 |
Get an existing WorkbookTemplate resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WorkbookTemplate':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WorkbookTemplateArgs.__new__(WorkbookTemplateArgs)
__props__.__dict__["author"] = None
__props__.__dict__["galleries"] = None
__props__.__dict__["localized"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["priority"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["template_data"] = None
__props__.__dict__["type"] = None
return WorkbookTemplate(resource_name, opts=opts, __props__=__props__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(template_id):\n try:\n template = Template.objects.get(pk=template_id)\n except:\n raise CMException('template_get')\n\n if not template or template.state != template_states['active']:\n raise CMException('template_get')\n\n return template",
"def get_template_by_id(template_id):\n sql = \"\"\"\n select r.*, e.name event_name\n from runsheet_template r\n inner join event e on e.id=r.event_id\n where r.id=:template_id\n \"\"\"\n data = {'template_id': template_id}\n\n rows = db.session.execute(sql, data)\n if rows.rowcount == 0:\n return\n else:\n return dict(rows.fetchone())",
"def get_template(cls, template_id):\r\n dirname = cls.get_template_dir()\r\n if dirname is not None:\r\n path = os.path.join(dirname, template_id)\r\n for pkg in cls.template_packages:\r\n if resource_exists(pkg, path):\r\n template_content = resource_string(pkg, path)\r\n template = yaml.safe_load(template_content)\r\n template['template_id'] = template_id\r\n return template",
"def _get_template_by_id(self, template_id):\n raise NotImplementedError()",
"def get_template_by_name(name, event_id):\n sql = \"\"\"\n select r.*, e.name event_name\n from runsheet_template r\n inner join event e on e.id=r.event_id\n where r.name=:name and e.id=:event_id\n \"\"\"\n data = {'name': name, 'event_id': event_id}\n\n rows = db.session.execute(sql, data)\n if rows.rowcount == 0:\n return\n else:\n return rows.fetchone()",
"def state_by_id(id):\n states = storage.all('State').values()\n for state in states:\n if state.id == id:\n return render_template('9-states.html', states=state)\n return render_template('9-states.html')",
"def given_state(id):\n key = 'State.{}'.format(id)\n state = storage.all(State).get(key)\n return render_template('9-states.html', states=state)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ProvisioningTemplate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ProvisioningTemplateArgs.__new__(ProvisioningTemplateArgs)\n\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"enabled\"] = None\n __props__.__dict__[\"pre_provisioning_hook\"] = None\n __props__.__dict__[\"provisioning_role_arn\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"template_arn\"] = None\n __props__.__dict__[\"template_body\"] = None\n __props__.__dict__[\"template_name\"] = None\n __props__.__dict__[\"template_type\"] = None\n return ProvisioningTemplate(resource_name, opts=opts, __props__=__props__)",
"def get_template(self ,template_name):\n\n found = False\n for template in self.templates:\n if template['name'] == template_name:\n found = True\n return template\n if not found:\n return None",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'LaunchTemplate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = LaunchTemplateArgs.__new__(LaunchTemplateArgs)\n\n __props__.__dict__[\"default_version_number\"] = None\n __props__.__dict__[\"latest_version_number\"] = None\n __props__.__dict__[\"launch_template_data\"] = None\n __props__.__dict__[\"launch_template_id\"] = None\n __props__.__dict__[\"launch_template_name\"] = None\n __props__.__dict__[\"tag_specifications\"] = None\n __props__.__dict__[\"version_description\"] = None\n return LaunchTemplate(resource_name, opts=opts, __props__=__props__)",
"def get_template(self, name):\n return self.templates.get(name)",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def states_id(id=None):\n all_states = storage.all(State)\n foundstate = None\n for key, state in all_states.items():\n if state.id == id:\n foundstate = state\n break\n\n return render_template('9-states.html', States=all_states, ID=id,\n Stateobj=foundstate)",
"def get_template(self, name):\n for template in self.templates:\n if template.name == name:\n assert isinstance(template, Template)\n return template\n return None",
"def find_template_by_id(context, id):\n admin = context.getAdministrationService()\n enterprise = admin.getCurrentEnterprise()\n return enterprise.findTemplate(VirtualMachineTemplatePredicates.id(id))",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get_template(self, name=None, template_id=None):\n if template_id:\n return self.http_call(\n \"get\", url=f\"{self.base_url}/templates/{template_id}\"\n ).json()\n elif name:\n try:\n return next(t for t in self.get_templates() if t[\"name\"] == name)\n except StopIteration:\n # Template name not found\n return None\n else:\n raise ValueError(\"Must provide either a name or template_id\")",
"def _get_template(self, template_name):\n if template_name not in self.chached_templates:\n self.chached_templates[template_name] = self.env.get_template(template_name)\n return self.chached_templates[template_name]",
"def get_template(template_name: str, scope: Optional[str] = 'task') -> FlexGetTemplate:\n\n if not template_name.endswith('.template'):\n template_name += '.template'\n locations = []\n if scope:\n locations.append(scope + '/' + template_name)\n locations.append(template_name)\n for location in locations:\n if environment is not None:\n with suppress(TemplateNotFound):\n return cast(FlexGetTemplate, environment.get_template(location))\n else:\n err = f'Template not found in templates dir: {template_name}'\n if scope:\n err += f' ({scope})'\n raise ValueError(err)",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)",
"def get_worksheet(sheet_id, sheet_name):\n if (sheet_id, sheet_name) in WORKSHEET_CACHE:\n return WORKSHEET_CACHE[(sheet_id, sheet_name)]\n\n sheet = get_spreadsheet(sheet_id)\n worksheet = sheet.worksheet(sheet_name)\n\n WORKSHEET_CACHE[(sheet_id, sheet_name)] = worksheet\n return worksheet",
"def states_by_id(id):\n list_states = storage.all('State')\n state_id = 'State.{}'.format(id)\n if state_id in list_states:\n list_states = list_states[state_id]\n else:\n list_states = None\n return render_template('9-states.html', list_states=list_states)",
"def lookup_template(namespace, name):\r\n return LOOKUP[namespace].get_template(name)",
"def get(self, template_name):\n template = db.Template.find_one(template_name=template_name)\n\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n\n return self.make_response({'template': template})",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)"
] | [
"0.60059094",
"0.5583698",
"0.5580275",
"0.5527408",
"0.54685605",
"0.5425252",
"0.5421419",
"0.54145896",
"0.5405498",
"0.52019465",
"0.5158618",
"0.5141349",
"0.5131247",
"0.5115271",
"0.50936675",
"0.5086638",
"0.5047412",
"0.5034328",
"0.5018827",
"0.49963653",
"0.4993864",
"0.49557802",
"0.4953946",
"0.4940276",
"0.49193",
"0.48890224",
"0.4870409",
"0.48590016",
"0.48522118",
"0.4847733"
] | 0.66030896 | 0 |
Workbook galleries supported by the template. | def galleries(self) -> pulumi.Output[Sequence['outputs.WorkbookTemplateGalleryResponse']]:
return pulumi.get(self, "galleries") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def galleries(self) -> pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]]:\n return pulumi.get(self, \"galleries\")",
"def get_galleries(self):\n data = self._get('get_gallery_list')\n return data['galleries']",
"def Gallery():\n return render_template(\n 'Gallery.html',\n title='The to 8 NBA players right now',\n year=datetime.now().year,\n message=''\n )",
"def get_gallery():\r\n to_segment = os.listdir(TO_SEGMENT)\r\n print(to_segment)\r\n return render_template(\"gallery.html\",\r\n image_names=to_segment,\r\n next_page_text=\"Segment Images! - (might take a couple mins)\",\r\n next_page=\"get_segmented_gallery\"\r\n )",
"def __init__(__self__, *,\n galleries: pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]],\n resource_group_name: pulumi.Input[str],\n template_data: Any,\n author: Optional[pulumi.Input[str]] = None,\n localized: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateLocalizedGalleryArgs']]]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"galleries\", galleries)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"template_data\", template_data)\n if author is not None:\n pulumi.set(__self__, \"author\", author)\n if localized is not None:\n pulumi.set(__self__, \"localized\", localized)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def get_gallery(self):\n return os.path.join(self.directory, GALLERY_DIR)",
"def gallery():\n return render('base.html')",
"def cmd_gallery_subreddit_gallery(client, args):\n subreddit_gallery = client.subreddit_gallery(args.subreddit, args.sort,\n args.window, args.page)\n data = [item.__dict__ for item in subreddit_gallery]\n generate_output({'subreddit_gallery': data}, args.output_file)",
"def populateGallery():\n\n # Set the UI parent to be the scroll layout\n global objectScroll\n cmds.setParent(objectScroll)\n\n # List all assets in the direcoty\n assetList = [directory for directory in os.listdir(AC.ASSETS_PATH) if os.path.isdir(os.path.join(AC.ASSETS_PATH, directory))]\n\n # Create a ButtonIcon for each asset\n for asset in assetList:\n addButtonIcon(asset)",
"def get_available_galleries(include_default=False):\n galleries = []\n\n for directory in Path(MEDIA_AVATARS).dirs():\n if include_default or directory[-8:] != '_default':\n gallery = {'name': directory.name, 'images': []}\n\n images = directory.files('*.gif')\n images += directory.files('*.jpg')\n images += directory.files('*.jpeg')\n images += directory.files('*.png')\n\n for image in images:\n image_path = image[len(settings.MEDIA_ROOT):]\n if image_path.startswith('/'):\n image_path = image_path[1:]\n gallery['images'].append(image_path)\n\n if gallery['images']:\n galleries.append(gallery)\n\n return galleries",
"def expand_gallery(generator, metadata):\n if \"gallery\" not in metadata or metadata[\"gallery\"] is None:\n return # If no gallery specified, we do nothing\n\n lines = []\n base_path = _image_path(generator)\n in_path = path.join(base_path, metadata[\"gallery\"])\n template = generator.settings.get(\"GALLERY_TEMPLATE\", DEFAULT_TEMPLATE)\n thumbnail_name = generator.settings.get(\"GALLERY_THUMBNAIL\", DEFAULT_GALLERY_THUMB)\n thumbnail_prefix = generator.settings.get(\"\")\n resizer = Resizer(thumbnail_name, \"?x?\", base_path)\n for dirpath, _, filenames in os.walk(in_path):\n for filename in filenames:\n if not filename.startswith(\".\"):\n url = path.join(dirpath, filename).replace(base_path, \"\")[1:]\n url = path.join(\n \"/static\",\n generator.settings.get(\"IMAGE_PATH\", DEFAULT_IMAGE_DIR),\n url,\n ).replace(\"\\\\\", \"/\")\n logger.debug(\"GALLERY: {0}\".format(url))\n thumbnail = resizer.get_thumbnail_name(filename)\n thumbnail = path.join(\n \"/\",\n generator.settings.get(\"THUMBNAIL_DIR\", DEFAULT_THUMBNAIL_DIR),\n thumbnail,\n ).replace(\"\\\\\", \"/\")\n lines.append(\n template.format(filename=filename, url=url, thumbnail=thumbnail,)\n )\n metadata[\"gallery_content\"] = \"\\n\".join(lines)",
"def has_gallery(self):\n if self.barcamp.gallery and self.barcamp.gallery == \"-1\":\n return False\n try:\n gallery = self.config.dbs.galleries.get(bson.ObjectId(self.barcamp.gallery))\n except:\n return False\n return True",
"def cmd_gallery_items(client, args):\n gallery = client.gallery(args.section, args.sort, args.page, args.window,\n args.show_viral)\n data = [item.__dict__ for item in gallery]\n generate_output({'gallery': data}, args.output_file)",
"def gallery_conf(tmpdir):\n # Skip if numpy not installed\n pytest.importorskip(\"numpy\")\n\n gallery_conf = _fill_gallery_conf_defaults({})\n gallery_conf.update(\n src_dir=str(tmpdir), examples_dir=str(tmpdir), gallery_dir=str(tmpdir)\n )\n\n return gallery_conf",
"def get_gallery(self, section='hot', sort='viral', window='day',\n show_viral=True, limit=None):\n url = (\"https://api.imgur.com/3/gallery/{}/{}/{}/{}?showViral=\"\n \"{}\".format(section, sort, window, '{}', show_viral))\n resp = self._send_request(url, limit=limit)\n return [_get_album_or_image(thing, self) for thing in resp]",
"def test_gallery(self):\n\t\tps = PushshiftAPI()\n\t\tpost, = ps.search_submissions(limit=1, ids=['t3_hrrh23'])\n\t\tre = RedditElement(post)\n\t\tself.assertEqual(len(re.get_urls()), 3, msg='Got incorrect image count from PSAW gallery submission!')\n\t\tfor url in re.get_urls():\n\t\t\tself.assertIn('https', url, msg='Failed to extract valid gallery URL: %s' % url)",
"def create_gallery(jekyll_site_path, gallery_name):\n\n gallery_path = os.path.join(jekyll_site_path, 'images', 'galleries', gallery_name)\n\n if not os.path.exists(gallery_path):\n os.makedirs(gallery_path)\n\n print(f\"Created gallery path {gallery_path}\")\n\n return gallery_path",
"def notebooks_group():\n pass",
"def create_gallery(galleryname, gpath, desc, tags, zipfile, ur, uw, gr, gw, dbfile):\n \n new_table('galleryindex', dbfile, 'gallery_db_schema.sql') # Create gallery index table if not existing\n if add_gallery(galleryname, gpath, desc, tags, zipfile, ur, uw, gr, gw, 'galleryindex', dbfile): # Add gallery to gallery index table\n new_table(galleryname, dbfile, 'image_db_schema.sql') # Create empty image gallery table if not existing",
"def get(self):\n return PhotoGalleryService().get_all(), 200",
"def save_figures(block, block_vars, gallery_conf):\n image_path_iterator = block_vars['image_path_iterator']\n all_rst = u''\n prev_count = len(image_path_iterator)\n for scraper in gallery_conf['image_scrapers']:\n rst = scraper(block, block_vars, gallery_conf)\n if not isinstance(rst, str):\n raise TypeError('rst from scraper %r was not a string, '\n 'got type %s:\\n%r'\n % (scraper, type(rst), rst))\n n_new = len(image_path_iterator) - prev_count\n for ii in range(n_new):\n current_path, _ = _find_image_ext(\n image_path_iterator.paths[prev_count + ii])\n if not os.path.isfile(current_path):\n raise RuntimeError('Scraper %s did not produce expected image:'\n '\\n%s' % (scraper, current_path))\n all_rst += rst\n return all_rst",
"def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n plt.show()\n plt.savefig(\"feature_{}.png\".format(50))",
"def cmd_gallery_memes_subgallery(client, args):\n memes_subgallery = client.memes_subgallery(args.sort, args.page, args.window)\n data = [item.__dict__ for item in memes_subgallery]\n generate_output({'memes_subgallery': data}, args.output_file)",
"def ministry_gallery_json(request, ministry_id):\n ministry = Ministry.objects.get(pk=ministry_id)\n\n return JsonResponse({'gallery': ministry_images(ministry)})",
"def produce_gallery_set(root_dir,\n gallery_sizes=[50, 100, 500, 1000, 2000, 4000]):\n for size in gallery_sizes:\n file_name = 'TestG' + str(size)\n test = sio.loadmat(osp.join(root_dir, 'test/train_test',\n file_name + '.mat'))\n test = test[file_name].squeeze()\n\n q_names = []\n queries_to_galleries = [[] for _ in range(len(test))]\n for index, item in enumerate(test):\n # query\n q_name = str(item['Query'][0, 0][0][0])\n q_names.append(q_name)\n # gallery\n gallery = item['Gallery'].squeeze()\n for im_name, _, __ in gallery:\n g_name = str(im_name[0])\n queries_to_galleries[index].append(g_name)\n\n queries_to_galleries = pd.DataFrame(queries_to_galleries,\n index=q_names)\n queries_to_galleries.to_csv(osp.join(root_dir,\n 'q_to_g' + str(size) + 'DF.csv'))",
"def plot_gallery2(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n im = images[i].reshape((h, w, 3))\n # Normalize image to 0..1 range for visualization\n if im.dtype.name == 'float64':\n m0 = np.min(im)\n m1 = np.max(im)\n im = (im - m0) / (m1 - m0)\n pl.imshow(im)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())",
"def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())",
"def source_list(self):\n return [g[\"name\"] for g in self._galleries]",
"def figure_links(self):\n dirpath = os.path.join(config[\"src_dir\"], config[\"figures_subdir\"])\n assert os.path.exists(dirpath), f\"- figures subdirectory {dirpath} was not found\"\n figures = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))\n and not f.startswith('.') and not f.endswith('.tex') and not f.endswith('.pdf')]\n figures = filter(lambda f: any([re.search(f, cell.source) for cell in self.content.cells]), figures)\n return [(os.path.join(config[\"figures_subdir\"], figure), f\"{config['github_pages_url']}/figures/{figure}\") for figure in figures]",
"def make_gallery(post_name, image_list, config={'gallery_dir': 'galleries'}):\n gallery_name = make_gallery_name_from_post_name(post_name)\n gallery_path = get_gallery_path(gallery_name)\n output_path = os.path.join(gallery_path, \"index.md\")\n with open(output_path, \"w\") as fd:\n fd.write(make_gallery_index(gallery_name, image_list))\n\n copy_images(gallery_path, image_list)\n #make_thumbs\n #make_image_pages"
] | [
"0.82092285",
"0.6197946",
"0.585732",
"0.57718176",
"0.5716009",
"0.5657963",
"0.5653665",
"0.56315845",
"0.5574098",
"0.5572743",
"0.5434536",
"0.5415715",
"0.530429",
"0.5217824",
"0.5189865",
"0.5185198",
"0.5181188",
"0.51711136",
"0.5099473",
"0.5087812",
"0.50609577",
"0.5057201",
"0.50493896",
"0.4998675",
"0.49947795",
"0.4986091",
"0.49655232",
"0.49328575",
"0.492334",
"0.49165034"
] | 0.8052166 | 1 |
Key value pair of localized gallery. Each key is the locale code of languages supported by the Azure portal. | def localized(self) -> pulumi.Output[Optional[Mapping[str, Sequence['outputs.WorkbookTemplateLocalizedGalleryResponse']]]]:
return pulumi.get(self, "localized") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def photo_dict(phrase):\n switcher = {\n '병원 위치': 'https://maps.googleapis.com/maps/api/staticmap?center=37.507144,127.063737&zoom=16&size=640x480&markers=color:blue%7Clabel:S%7C37.507144,127.063737&key=AIzaSyCF-XXYf7IW1mkUZFeZF84BCcZdtC-z1M0',\n '병원 운영시간': 'http://gunn.pausd.org/sites/default/files/16-17-Bell-Schedule-Color---Compatible-Font.png',\n '프로모션 A': 'http://media.dontpayfull.com/media/deals/eurostar-promo-code.jpg',\n '프로모션 B': 'http://media.dontpayfull.com/media/deals/namebubbles-com-coupon-code.jpg',\n '프로모션 C': 'https://s-media-cache-ak0.pinimg.com/originals/79/79/31/79793174d230a27e9168bbccb33df62f.jpg',\n '의료진': 'https://s-media-cache-ak0.pinimg.com/736x/f4/89/ef/f489ef22363cf1e4c2a4fb5b1cd8aec5.jpg',\n '병원 사진': 'https://www.hpcimedia.com/images/website/ManChemNews/DIR_30/F_28071.jpg',\n '병원 진료과목': 'https://s-media-cache-ak0.pinimg.com/originals/d5/05/09/d505091a57d42d3ed1de8b6f9d906fdb.jpg'\n }\n default_url = 'http://autopartstoys.com/images/M127205243.jpg'\n return switcher.get(phrase, default_url)",
"def caption_languages(self, video_display_name=None):\r\n languages_selector = self.get_element_selector(video_display_name, CSS_CLASS_NAMES['captions_lang_list'])\r\n language_codes = self.q(css=languages_selector).attrs('data-lang-code')\r\n language_names = self.q(css=languages_selector).attrs('textContent')\r\n\r\n return dict(zip(language_codes, language_names))",
"def localized(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateLocalizedGalleryArgs']]]]]]:\n return pulumi.get(self, \"localized\")",
"def grepo(request):\n return {\n \"GREPO_LANGUAGES\": Language.objects.all().values_list(\"name\", flat=True)\n }",
"def gallery_key():\n return ndb.Key('Gallery', 'All')",
"def Languages(self, default=[\"en\"]):\n return self.data.get('metadata', {}).get('languages', default)",
"def globes(self, code):\n return {\n 'ariel': 'http://www.wikidata.org/entity/Q3343',\n 'bennu': 'http://www.wikidata.org/entity/Q11558',\n 'callisto': 'http://www.wikidata.org/entity/Q3134',\n 'ceres': 'http://www.wikidata.org/entity/Q596',\n 'deimos': 'http://www.wikidata.org/entity/Q7548',\n 'dione': 'http://www.wikidata.org/entity/Q15040',\n 'earth': 'http://www.wikidata.org/entity/Q2',\n 'enceladus': 'http://www.wikidata.org/entity/Q3303',\n 'eros': 'http://www.wikidata.org/entity/Q16711',\n 'europa': 'http://www.wikidata.org/entity/Q3143',\n 'ganymede': 'http://www.wikidata.org/entity/Q3169',\n 'gaspra': 'http://www.wikidata.org/entity/Q158244',\n 'hyperion': 'http://www.wikidata.org/entity/Q15037',\n 'iapetus': 'http://www.wikidata.org/entity/Q17958',\n 'io': 'http://www.wikidata.org/entity/Q3123',\n 'jupiter': 'http://www.wikidata.org/entity/Q319',\n 'lutetia': 'http://www.wikidata.org/entity/Q107556',\n 'mars': 'http://www.wikidata.org/entity/Q111',\n 'mercury': 'http://www.wikidata.org/entity/Q308',\n 'mimas': 'http://www.wikidata.org/entity/Q15034',\n 'miranda': 'http://www.wikidata.org/entity/Q3352',\n 'moon': 'http://www.wikidata.org/entity/Q405',\n 'oberon': 'http://www.wikidata.org/entity/Q3332',\n 'phobos': 'http://www.wikidata.org/entity/Q7547',\n 'phoebe': 'http://www.wikidata.org/entity/Q17975',\n 'pluto': 'http://www.wikidata.org/entity/Q339',\n 'rhea': 'http://www.wikidata.org/entity/Q15050',\n 'ryugu': 'http://www.wikidata.org/entity/Q1385178',\n 'steins': 'http://www.wikidata.org/entity/Q150249',\n 'tethys': 'http://www.wikidata.org/entity/Q15047',\n 'titan': 'http://www.wikidata.org/entity/Q2565',\n 'titania': 'http://www.wikidata.org/entity/Q3322',\n 'triton': 'http://www.wikidata.org/entity/Q3359',\n 'umbriel': 'http://www.wikidata.org/entity/Q3338',\n 'venus': 'http://www.wikidata.org/entity/Q313',\n 'vesta': 'http://www.wikidata.org/entity/Q3030',\n }",
"def return_dispense_media():\n media = {\"50_ug/ml_Kanamycin\": \"lb_miller_50ug_ml_kan\",\n \"100_ug/ml_Ampicillin\": \"lb_miller_100ug_ml_amp\",\n \"100_ug/mL_Spectinomycin\": \"lb_miller_100ug_ml_specto\",\n \"30_ug/ml_Kanamycin\": \"lb_miller_30ug_ml_kan\",\n \"15_ug/ml_Tetracycline\": \"lb_miller_15ug_ml_tet\",\n \"50_ug/ml_Kanamycin_25_ug/ml_Chloramphenicol\":\n \"lb_miller_50ug_ml_kan_25ug_ml_cm\",\n \"25_ug/ml_Chloramphenicol\": \"lb_miller_25ug_ml_cm\",\n \"LB_miller\": \"lb_miller_noAB\",\n \"TB_100_ug/ml_Ampicillin\": \"tb_100ug_ml_amp\",\n \"TB_50_ug/ml_Kanamycin\": \"tb_50ug_ml_kan\"}\n return (media)",
"async def get_multilingual(filename: str):\n query_result = {\"langList\": []}\n database = get_db()\n query_displayname = database.AQLQuery(\n query=main_queries.QUERY_MULTILINGUAL_LANGS,\n bindVars={\n \"filename\": filename\n },\n rawResults=True\n )\n query_result = {\"langList\": query_displayname.result[0]}\n return query_result",
"def get_language_list_gui():\n _ = get_gettext()\n language = {}\n language['connect'] = _(\"Connect\")\n language['ip'] = _(\"IP\")\n language['netmask'] = _(\"Netmask\")\n language['gateway'] = _('Gateway')\n language['dns'] = _('DNS')\n language['use_static_ip'] = _('Use Static IPs')\n language['use_static_dns'] = _('Use Static DNS')\n language['use_encryption'] = _('Use Encryption')\n language['advanced_settings'] = _('Advanced Settings')\n language['wired_network'] = _('Wired Network')\n language['wired_network_instructions'] = _('To connect to a wired network,'\n ' you must create a network profile. To create a network profile, type a'\n ' name that describes this network, and press Add.')\n language['automatic_connect'] = _('Automatically connect to this network')\n language['secured'] = _('Secured')\n language['unsecured'] = _('Unsecured')\n language['channel'] = _('Channel')\n language['preferences'] = _('Preferences')\n language['wpa_supplicant_driver'] = _('WPA Supplicant Driver')\n language['wireless_interface'] = _('Wireless Interface')\n language['wired_interface'] = _('Wired Interface')\n language['hidden_network'] = _('Hidden Network')\n language['hidden_network_essid'] = _('Hidden Network ESSID')\n language['connected_to_wireless'] = _('Connected to $A at $B (IP: $C)')\n language['connected_to_wired'] = _('Connected to wired network (IP: $A)')\n language['not_connected'] = _('Not connected')\n language['no_wireless_networks_found'] = _('No wireless networks found.')\n language['killswitch_enabled'] = _('Wireless Kill Switch Enabled')\n language['key'] = _('Key')\n language['username'] = _('Username')\n language['password'] = _('Password')\n language['anonymous_identity'] = _('Anonymous Identity')\n language['identity'] = _('Identity')\n language['authentication'] = _('Authentication')\n language['path_to_pac_file'] = _('Path to PAC File')\n language['select_a_network'] = _('Choose from the networks below:')\n language['connecting'] = _('Connecting...')\n language['wired_always_on'] = _('Always show wired interface')\n language['auto_reconnect'] = _('Automatically reconnect on connection loss')\n language['create_adhoc_network'] = _('Create an Ad-Hoc Network')\n language['essid'] = _('ESSID')\n language['use_wep_encryption'] = _('Use Encryption (WEP only)')\n language['before_script'] = _('Run script before connect')\n language['after_script'] = _('Run script after connect')\n language['disconnect_script'] = _('Run disconnect script')\n language['script_settings'] = _('Scripts')\n language['use_ics'] = _('Activate Internet Connection Sharing')\n language['madwifi_for_adhoc'] = _('Check if using madwifi/atheros drivers')\n language['default_wired'] = _('Use as default profile (overwrites any previous default)')\n language['use_debug_mode'] = _('Enable debug mode')\n language['use_global_dns'] = _('Use global DNS servers')\n language['use_default_profile'] = _('Use default profile on wired autoconnect')\n language['show_wired_list'] = _('Prompt for profile on wired autoconnect')\n language['use_last_used_profile'] = _('Use last used profile on wired autoconnect')\n language['choose_wired_profile'] = _('Select or create a wired profile to connect with')\n language['wired_network_found'] = _('Wired connection detected')\n language['stop_showing_chooser'] = _('Stop Showing Autoconnect pop-up temporarily')\n language['display_type_dialog'] = _('Use dBm to measure signal strength')\n language['scripts'] = _('Scripts')\n language['invalid_address'] = _('Invalid address in $A 
entry.')\n language['global_settings'] = _('Use these settings for all networks sharing this essid')\n language['encrypt_info_missing'] = _('Required encryption information is missing.')\n language['enable_encryption'] = _('This network requires encryption to be enabled.')\n language['wicd_auto_config'] = _('Automatic (recommended)')\n language[\"gen_settings\"] = _(\"General Settings\")\n language[\"ext_programs\"] = _(\"External Programs\")\n language[\"dhcp_client\"] = _(\"DHCP Client\")\n language[\"wired_detect\"] = _(\"Wired Link Detection\")\n language[\"route_flush\"] = _(\"Route Table Flushing\")\n language[\"backend\"] = _(\"Backend\")\n language[\"backend_alert\"] = _(\"Changes to your backend won't occur until the daemon is restarted.\")\n language['0'] = _('0')\n language['1'] = _('1')\n language['2'] = _('2')\n language['3'] = _('3')\n language['4'] = _('4')\n language['5'] = _('5')\n language['6'] = _('6')\n language['7'] = _('7')\n language['8'] = _('8')\n language['9'] = _('9')\n language['interface_down'] = _('Putting interface down...')\n language['resetting_ip_address'] = _('Resetting IP address...')\n language['interface_up'] = _('Putting interface up...')\n language['setting_encryption_info'] = _('Setting encryption info')\n language['removing_old_connection'] = _('Removing old connection...')\n language['generating_psk'] = _('Generating PSK...')\n language['generating_wpa_config'] = _('Generating WPA configuration file...')\n language['flushing_routing_table'] = _('Flushing the routing table...')\n language['configuring_interface'] = _('Configuring wireless interface...')\n language['validating_authentication'] = _('Validating authentication...')\n language['setting_broadcast_address'] = _('Setting broadcast address...')\n language['setting_static_dns'] = _('Setting static DNS servers...')\n language['setting_static_ip'] = _('Setting static IP addresses...')\n language['running_dhcp'] = _('Obtaining IP address...')\n language['dhcp_failed'] = _('Connection Failed: Unable to Get IP Address')\n language['aborted'] = _('Connection Cancelled')\n language['bad_pass'] = _('Connection Failed: Bad password')\n language['done'] = _('Done connecting...')\n return language",
"def gen_explanation():\n f = self.media['explanation'][0]\n k = self.__type_by_extension(os.path.sep.join(\n os.path.join([self.media_path, f])))\n v = [os.sep.join([self.web_root, expl]) \\\n for expl in self.media['explanation']]\n if v:\n v = v[0]\n else:\n v = \"\"\n return {'explanation': {k: v}}\n #): os.sep.join([self.web_root, f])\n\n #[os.sep.join([self.web_root, expl]) \\\n # for expl in self.media['explanation']]}",
"def get_translation(self, lang):\n if lang==\"it\":\n url=self.backstring+\"en/\"+self.name\n image=self.backstring+\"img/uk.png\"\n alttext='English version'\n elif lang==\"en\":\n url=self.backstring+\"it/\"+self.name\n image=self.backstring+\"img/it.png\"\n alttext='Italian version'\n img='<img src=\"%s\" height=\"15\" alt=\"%s\"><br>%s' % (image, alttext,alttext, )\n a=A(img, url, \"translation\")\n return str(a)",
"def getList(self):\n labelMap = {}\n imageMap = {}\n key = []\n index = 0\n\n for root, dirs, files in os.walk(self.path_data):\n for file in files:\n # If .png or .jpg file found then\n if file.endswith(tuple(config.imageFormat)):\n key.append(index)\n labelMap[index] = preprocessing.getLabel(file)\n imageMap[index] = os.path.join(root, file)\n\n index += 1\n\n else:\n continue\n\n return key, imageMap, labelMap",
"def wikiLanguages():\n return languages",
"def get_language_data(self, object, data):\n if \"lang_materials\" in object:\n if object.get(\"lang_materials\") in [\"\", [], {}]:\n data[\"lang_materials\"] = closest_parent_value(object, \"lang_materials\")\n else:\n data[\"language\"] = closest_parent_value(object, \"language\")\n return data",
"def growth_media(self, media=None):\n media_dict = {\n \"lb_miller_50ug_ml_kan\": self.lb_miller_50ug_ml_kan,\n \"lb_miller_100ug_ml_amp\": self.lb_miller_100ug_ml_amp,\n \"lb_miller_noAB\": self.lb_miller_noAB\n }\n return media_dict.get(media)",
"def genre_choices(request):\n choices = GENRES\n diction = {}\n li = []\n for data in choices:\n li.append(data[0])\n diction['GENRE_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)#, safe=False)",
"def create_image_caption_pairs(self):",
"def _special_public_data(rdict: Dict, lang: str) -> Dict[str, List]:\n rdict['labels1'] = [pretty_print_timestamp(date.today() - timedelta(days=dd), lang) for dd in range(7, -1, -1)]\n rdict['labels2'] = [pretty_print_timestamp(date.today() - timedelta(days=dd), lang) for dd in range(30, -1, -1)]\n rdict['labels3'] = rdict['labels2']\n rdict['labels4'] = rdict['labels2']\n rdict['data1'] = [9000.1] * 7\n rdict['data2'] = [9000.1] * 30\n rdict['data3'] = rdict['data2']\n rdict['data4'] = rdict['data2']\n return rdict",
"def language(self):\r\n return self._get('language', {})",
"def list(self):\n for key, value in self.languages.iteritems():\n print key, value",
"def lookups(self, request, model_admin):\n return (\n ('ASSETS', _('ASSETS')),\n ('CHI', _('CHI')),\n ('IMWUT', _('IMWUT')),\n ('TACCESS', _('TACCESS')),\n ('TEI', _('TEI')),\n ('UIST', _('UIST')),\n ('UbiComp', _('UbiComp'))\n )",
"def short_name(self):\n return {\n 0: \"null\",\n 1: \"eng-us\",\n 2: \"eng-gb\",\n 3: \"chi\",\n 4: \"fre\",\n 5: \"ger\",\n 6: \"rus\",\n 7: \"spa\"\n }[self.value]",
"def description(self, lang=0):\n if not lang in self.localized_strings: return \"\"\n return self.localized_strings[lang]",
"def extract_language_info(self, source: str) -> Dict[str, float]:\n languages = self.languages_compiled_exp.findall(source)\n language_info = {}\n for lang in languages:\n name = ' '.join(lang.split()[:-1])\n percent = float(lang.split()[-1]) # %\n language_info[name] = percent\n return language_info",
"def imageItems(self, context):\n prefs = getPreferences()\n\n images = [('NONE', \"––– Select –––\", \"\")]\n if prefs.path_value:\n for img in environmentImages(prefs.path_value):\n images.append((img, img, \"\"))\n\n return images",
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']",
"def _get_pages():\n pages = {}\n\n # Create the root pages.\n for path in _get_paths():\n pages[path] = {}\n\n # Create the intl pages.\n for locale in locales:\n for path in _get_paths():\n pages[('/intl/' + locale + path)] = {}\n\n return pages",
"def language_tags(self):\n return self.properties.get(\"languageTags\", StringCollection())"
] | [
"0.579176",
"0.5739075",
"0.5682472",
"0.5466263",
"0.5320878",
"0.5197395",
"0.515548",
"0.5117539",
"0.5035321",
"0.50232655",
"0.5018956",
"0.49866858",
"0.4972195",
"0.49670196",
"0.49650854",
"0.49545366",
"0.4943",
"0.49376488",
"0.48790962",
"0.48603863",
"0.48575422",
"0.48315465",
"0.48183674",
"0.48110464",
"0.47991538",
"0.47879636",
"0.47633198",
"0.47551358",
"0.4722544",
"0.4701379"
] | 0.59551746 | 0 |
Test SNMPv3 script utilizing Kirk's snmp_helper module | def main():
# Target device and SNMPv3 credentials
ip = '10.1.10.100'
a_user = 'cisco'
auth_key = 'cisco123'
encr_key = 'cisco123'
snmp_user = (a_user, auth_key, encr_key)
sw1 = (ip, 161)
sysDescr = '1.3.6.1.2.1.1.1.0'
sysObjectID = '1.3.6.1.2.1.1.2.0'
sysUpTime = '1.3.6.1.2.1.1.3.0'
sysContact = '1.3.6.1.2.1.1.4.0'
sysName = '1.3.6.1.2.1.1.5.0'
ifNumber = '1.3.6.1.2.1.2.1.0'
# Uptime when running config last changed
RunLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'
# Uptime when running config last saved (note any 'write' constitutes a save)
RunLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'
# Uptime when startup config last saved
StartLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'
ifAlias = '1.3.6.1.2.1.31.1.1.1.18.1'
ifName = '1.3.6.1.2.1.31.1.1.1.1.1'
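# Query ifName over SNMPv3 (SHA authentication, DES privacy) and extract the value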
snmp_data = snmp_helper.snmp_get_oid_v3(sw1, snmp_user, oid=ifName, auth_proto='sha', encrypt_proto='des')
#print(snmp_data)
# snmp_get_oid_v3(snmp_device, snmp_user, oid='.1.3.6.1.2.1.1.1.0', auth_proto='sha',
# encrypt_proto='aes128', display_errors=True):
#snmp_extract(snmp_data):
output = snmp_helper.snmp_extract(snmp_data)
print(output) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quickstart():\n snmp.quickstart()\n return 0",
"def handle_snmpconf():\n return 0",
"def main():\n # CREATE EACH OID STRING USING THE FIRST ARGUMENT PASSED\n cmd_uptime = \"snmpget -v 1 -c public \" + argv[1] + \" 1.3.6.1.2.1.1.3.0\"\n # CREATE LISTS OUT OF EACH COMMAND\n clUptime = cmd_uptime.split(' ')\n # RETRIEVE THE DATA FROM THE TARGET SERVER\n dUptime = Popen(clUptime, stdout = PIPE).communicate()[0].strip()\n # FILTER OUT THE RESULTS AS WELL AS DETECT ERRORS\n try:\n dTotal = findall(r'\\(.*\\)', dUptime)[0].strip('()')\n except:\n dTotal = 'FAILED'\n try:\n dUptime = split(r'Timeticks: \\(.*\\) ', dUptime)[1].strip(' ')\n except:\n dUptime = 'FAILED'\n # DETERMINE CRITICALITY LEVEL CODE OR UNKNOWN\n if not dUptime == 'FAILED':\n print 'System Uptime at ' + dUptime + ' | ' + \"'System Uptime'=\" + dTotal\n if search(r'days', dUptime) == None:\n code = 1\n else:\n code = 0\n else:\n print 'SNMP Retrieve Failed'\n code = 3\n leave_now(code)",
"def snmp_v2(device,\n ip,\n mib_name,\n index=0,\n value=None,\n timeout=10,\n retries=3,\n community=\"private\",\n walk_cmd=None,\n stype=None):\n\n if not getattr(device, \"pysnmp_installed\", False):\n install_pysnmp(device)\n setattr(device, \"pysnmp_installed\", True)\n\n try:\n ObjectIdentifier(mib_name)\n oid = mib_name\n except Exception:\n oid = get_mib_oid(mib_name)\n\n def _run_snmp(py_set=False):\n action = \"getCmd\"\n set_value = \"\"\n if py_set:\n action = \"setCmd\"\n if str(value).lower().startswith(\"0x\"):\n set_value = ', %s(hexValue=\"%s\")' % (stype, value[2:])\n else:\n set_value = ', %s(\"%s\")' % (stype, value)\n pysnmp_cmd = 'cmd = %s(SnmpEngine(), CommunityData(\"%s\"), UdpTransportTarget((\"%s\", 161), timeout=%s, retries=%s), ContextData(), ObjectType(ObjectIdentity(\"%s.%s\")%s))' % \\\n (action, community, ip, timeout, retries, oid, index, set_value)\n\n py_steps = [\n 'from pysnmp.hlapi import *', pysnmp_cmd,\n 'errorIndication, errorStatus, errorIndex, varBinds = next(cmd)',\n 'print(errorIndication == None)',\n 'if errorIndication: result=errorIndication; print(result)',\n 'else: result=varBinds[0][1]; print(result.prettyPrint())',\n 'print(result.__class__.__name__)'\n ]\n\n device.sendline(\"cat > snmp.py << EOF\\n%s\\nEOF\" % \"\\n\".join(py_steps))\n for e in py_steps:\n device.expect_exact(e[-40:])\n device.expect_exact('EOF')\n device.expect_prompt()\n device.sendline(\"python snmp.py\")\n device.expect_exact(\"python snmp.py\")\n tout = (timeout * retries) + 15\n device.expect_prompt(timeout=tout)\n if \"Traceback\" in device.before:\n data = False, \"Python file error :\\n%s\" % device.before.split(\n \"\\n\")[-1].strip(), None\n else:\n result = [\n i.strip() for i in device.before.split('\\n') if i.strip() != \"\"\n ]\n data = result[0] == \"True\", \"\\n\".join(result[1:-1]), result[-1]\n device.sendline(\"rm snmp.py\")\n device.expect_prompt()\n return data\n\n if walk_cmd:\n return snmp_asyncore_walk(device, ip, oid, read_cmd=walk_cmd)\n\n if not stype:\n status, result, stype = _run_snmp()\n assert status, \"SNMP GET Error:\\nMIB:%s\\nError:%s\" % (mib_name, result)\n\n for k, v in SnmpMibs.mib_type_map.items():\n if stype in v:\n stype = k\n break\n\n if value != None: # some operations require zero as a value\n status, result, stype = _run_snmp(True)\n assert status, \"SNMP SET Error:\\nMIB:%s\\nError:%s\" % (mib_name, result)\n\n return result",
"def common():\n snmp.common()\n return 0",
"def ibns_snmp(task):\n snmp_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS_snmp.j2\",\n path=\"templates/\",\n **task.host,\n )\n # return configuration\n return snmp_cfg.result",
"def test_snmpget_return_structure():\n result = snmpget(community='public', ipaddress=SNMP_SRV_ADDR,\n oid=SYSDESCR_OID, port=SNMP_SRV_PORT)\n assert 'Linux' in result\n assert isinstance(result, str)",
"def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)",
"def test_snmpcustom_get_sensordef(self):\n test_sensordef = {\n \"kind\": self.test_snmpcustom.get_kind(),\n \"name\": \"SNMP Custom\",\n \"description\": \"Monitors a numerical value returned by a specific OID using SNMP\",\n \"help\": \"Monitors a numerical value returned by a specific OID using SNMP\",\n \"tag\": \"mpsnmpcustomsensor\",\n \"groups\": [\n {\n \"name\": \"OID values\",\n \"caption\": \"OID values\",\n \"fields\": [\n {\n \"type\": \"edit\",\n \"name\": \"oid\",\n \"caption\": \"OID Value\",\n \"required\": \"1\",\n \"help\": \"Please enter the OID value.\"\n },\n {\n \"type\": \"edit\",\n \"name\": \"unit\",\n \"caption\": \"Unit String\",\n \"default\": \"#\",\n \"help\": \"Enter a 'unit' string, e.g. 'ms', 'Kbyte' (for display purposes only).\"\n },\n {\n \"type\": \"radio\",\n \"name\": \"value_type\",\n \"caption\": \"Value Type\",\n \"required\": \"1\",\n \"help\": \"Select 'Gauge' if you want to see absolute values (e.g. for temperature value) \"\n \"or 'Delta' for counter differences divided by time period \"\n \"(e.g. for bandwidth values)\",\n \"options\": {\n \"1\": \"Gauge\",\n \"2\": \"Delta\"\n },\n \"default\": 1\n },\n {\n \"type\": \"integer\",\n \"name\": \"multiplication\",\n \"caption\": \"Multiplication\",\n \"required\": \"1\",\n \"default\": 1,\n \"help\": \"Provide a value the raw SNMP value is to be multiplied by.\"\n },\n {\n \"type\": \"integer\",\n \"name\": \"division\",\n \"caption\": \"Division\",\n \"required\": \"1\",\n \"default\": 1,\n \"help\": \"Provide a value the raw SNMP value is divided by.\"\n },\n {\n \"type\": \"radio\",\n \"name\": \"snmp_version\",\n \"caption\": \"SNMP Version\",\n \"required\": \"1\",\n \"help\": \"Choose your SNMP Version\",\n \"options\": {\n \"1\": \"V1\",\n \"2\": \"V2c\",\n \"3\": \"V3\"\n },\n \"default\": 2\n },\n {\n \"type\": \"edit\",\n \"name\": \"community\",\n \"caption\": \"Community String\",\n \"required\": \"1\",\n \"help\": \"Please enter the community string.\"\n },\n {\n \"type\": \"integer\",\n \"name\": \"port\",\n \"caption\": \"Port\",\n \"required\": \"1\",\n \"default\": 161,\n \"help\": \"Provide the SNMP port\"\n }\n ]\n }\n ]\n }\n assert_equal(self.test_snmpcustom.get_sensordef(), test_sensordef)",
"def main():\n module = AnsibleModule(argument_spec=L3_interfaceArgs.argument_spec,\n supports_check_mode=True)\n\n result = L3_interface(module).execute_module()\n module.exit_json(**result)",
"def test_host_port_expression_props(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n ip = \"0.0.0.0\"\n port = 1611\n starting_signal = Signal({\n \"ip\": ip,\n \"port\": port\n })\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}],\n \"agent_host\": \"{{ $ip }}\",\n \"agent_port\": \"{{ $port }}\"\n })\n block.start()\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()",
"def test_snmpset_return_structure():\n result = snmpset(ipaddress=SNMP_SRV_ADDR, community='public',\n oid='SNMPv2-MIB::sysName.0', value_type='s',\n value='Test Description', port=SNMP_SRV_PORT)\n assert 'Test Description' in result",
"def test_snmpget_no_such_instance():\n result = snmpget(community='public', ipaddress=SNMP_SRV_ADDR,\n oid='SNMPv2-MIB::sysName', port=SNMP_SRV_PORT)\n assert result is None",
"def test_wrapper_output(self):\r\n tmpdir = None\r\n try:\r\n tmpdir = tempfile.mkdtemp()\r\n result = mib2pysnmp('conpot/tests/data/VOGON-POEM-MIB.mib')\r\n\r\n with open(os.path.join(tmpdir, 'VOGON-POEM-MIB' + '.py'), 'w') as output_file:\r\n output_file.write(result)\r\n\r\n cmd_responder = command_responder.CommandResponder('', 0, [tmpdir])\r\n cmd_responder.snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.loadModules('VOGON-POEM-MIB')\r\n result = cmd_responder._get_mibSymbol('VOGON-POEM-MIB', 'poemNumber')\r\n\r\n self.assertIsNotNone(result, 'The expected MIB (VOGON-POEM-MIB) could not be loaded.')\r\n finally:\r\n shutil.rmtree(tmpdir)",
"def test_invalid_host_expression_prop(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block.execute_request = MagicMock()\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n ip = \"0.0.0.0\"\n port = 1611\n starting_signal = Signal({\n \"ip\": ip,\n \"port\": port\n })\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}],\n \"agent_host\": \"{{ ip }}\",\n })\n block.start()\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n self.assertEqual(0, block.execute_request.call_count)\n block.stop()",
"def test_wrapper_processing(self):\r\n result = mib2pysnmp('conpot/tests/data/VOGON-POEM-MIB.mib')\r\n self.assertTrue('mibBuilder.exportSymbols(\"VOGON-POEM-MIB\"' in result,\r\n 'mib2pysnmp did not generate the expected output. Output: {0}'.format(result))",
"def send_trap(snmp_settings, message, uptime=0, test=False):\n log(\"Entering {0}.\".format(sys._getframe().f_code.co_name), level='DEBUG')\n\n log(\"Sending SNMPTRAP to {0}: {1}\".format(snmp_settings['traphost'],\n message))\n\n # NOTE: snmptrap caveat: Generates an error when run as unprivileged user.\n # Failed to create the persistent directory for\n # /var/net-snmp/snmpapp.conf\n # http://sourceforge.net/p/net-snmp/bugs/1706/\n #\n\n # Build the arguments to snmptrap\n trap_args = ['snmptrap']\n trap_args.append('-v')\n trap_args.append(snmp_settings['version'])\n\n if snmp_settings['version'] == '2c':\n trap_args.append('-c')\n trap_args.append(snmp_settings['community'])\n\n elif snmp_settings['version'] == '3':\n # Send v3 snmp-inform rathern than a trap\n trap_args.append('-Ci')\n\n trap_args.append('-l')\n trap_args.append(snmp_settings['seclevel'])\n trap_args.append('-u')\n trap_args.append(snmp_settings['secname'])\n\n if snmp_settings['seclevel'] in ['authNoPriv', 'authPriv']:\n trap_args.append('-a')\n trap_args.append(snmp_settings['authprotocol'])\n trap_args.append('-A')\n trap_args.append(snmp_settings['authpassword'])\n\n if snmp_settings['seclevel'] == 'authPriv':\n trap_args.append('-x')\n trap_args.append(snmp_settings['privprotocol'])\n trap_args.append('-X')\n trap_args.append(snmp_settings['privpassword'])\n else:\n log(\"Unknown snmp version '{0}' specified in the config file.\".\n format(snmp_settings['version']))\n trap_args.append(snmp_settings['traphost'])\n\n #.iso.org.dod.internet.private. .arista\n # enterprises.30065\n enterprise_oid = '.1.3.6.1.4.1.30065'\n # enterpriseSpecific = 6\n generic_trapnum = '6'\n trap_oid = '.'.join([enterprise_oid, generic_trapnum])\n\n trap_args.append(str(uptime))\n trap_args.append(enterprise_oid)\n trap_args.append(trap_oid)\n trap_args.append('s')\n\n if test == \"trap\":\n message = \"TRANSCEIVER_RX_POWER_CHANGE, Ethernet2 (XKE000000000) RX \"\\\n \"power level has changed by -2.6348 dBm from baseline \"\\\n \"-5.4035 dBm (2015-12-15 11:33:11) to -8.0382 dBm \"\\\n \"(2015-12-15 11:33:33)\"\n log(\"Sending SNMPTRAP to {0} with arguments: {1}\".\n format(snmp_settings['traphost'], trap_args), level='DEBUG')\n\n trap_args.append(message)\n\n if test == \"trap\":\n print \"snmptrap_args:\"\n pprint(trap_args)\n\n call(trap_args)",
"def test_verify_connection_to_a_device():",
"def main():\n\n PASS = raw_input('password> ')\n\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n look_for_keys=False, allow_agent=False) as m:\n\n # print all NETCONF capabilities\n with open('output/netconf_101_capability.txt', 'w') as file:\n for capability in m.server_capabilities:\n file.write(str(capability))\n file.write('\\n')\n\n result_xmllist = []\n # run commands on the remote device\n for key in xmlns_dic.keys():\n data = m.get(('subtree', xmlns_dic[key]))\n result_xmllist.append(data)\n\n with open('output/netconf_101_rpc.xml', 'w') as file:\n file.write(str(result_xmllist))\n\n result_jsonlist = []\n for data in result_xmllist:\n # print all in xml\n print(data)\n\n # print all in json\n result_xml_str = repr(data)\n result_json_parsed_str = json.dumps(xmltodict.parse(result_xml_str))\n result_json_parsed_dict = json.loads(result_json_parsed_str)\n\n print(json.dumps(result_json_parsed_dict, indent=4, sort_keys=True))\n result_jsonlist.append(result_json_parsed_dict)\n\n with open('output/netconf_101_rpc.json', 'w') as file:\n json.dump(result_jsonlist, file, indent=4, sort_keys=True)\n\n\n # xml_doc = xml.dom.minidom.parseString(result.xml)\n # mac_address = xml_doc.getElementsByTagName(\"mod:mac_address\")\n # print(mac_address)",
"def test_dynamic_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\n \"existing_key\": \"existing_val\",\n \"oid\": myOID\n })\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": \"{{ $oid }}\"}]\n })\n block.start()\n\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()",
"def main():\n\n cmd = \"ping\"\n\n if os.environ.get(\"IPV6\", \"0\") == \"1\":\n cmd = \"ping6\"\n\n output = subprocess.check_output([cmd, os.environ[\"ADDRESS\"], \"-A\", \"-c\", os.environ.get(\"COUNT\", \"1\")])\n match = re.search(r\"(\\d+) packets transmitted, (\\d+) received\", output)\n\n if match:\n if match.group(1) != match.group(2):\n if match.group(2) != \"0\":\n alert(\"ping_packetloss\", {\n \"sent\": match.group(1),\n \"received\": match.group(2)\n })\n\n reading(\"loss\", (int(match.group(1)) - int(match.group(2))) * 100.0 / int(match.group(1)))\n else:\n reading(\"loss\", 100)\n alert(\"ping_failed\", {\n \"reason\": \"no_packet_received\"\n })\n else:\n reading(\"loss\", 0)\n\n rtt = re.search(r\"rtt min/avg/max/mdev = ([0-9.]+)/([0-9.]+)/([0-9.]+)/([0-9.]+) ms\", output)\n if rtt:\n reading(\"rtt\", float(rtt.group(2)))\n else:\n alert(\"ping_failed\", {\n \"reason\": \"command_failed\",\n \"output\": output\n })",
"def main():\n\tif len(sys.argv) > 1:\n\t\tfilename = sys.argv[1]\n\t\twith open(filename, 'r') as myfile:\n\t\t\tdata = myfile.read().replace('\\n', '').replace('\\t','')\n\telse:\n\t\tdata = sys.stdin.read().replace('\\n', '').replace('\\t','')\n\n\totp = \"\"\n\tif len(sys.argv) == 3:\n\t\totp = sys.argv[2]\n\t\tprint(\"otp : \" + otp)\n\t\tprint\n\n\n\tPATH = \"report/\"\n\tidtemplate = \"$UNIQUE_IDENTIFIER_\"\n\tsock = create_socket()\n\n\tconn= connecthttps(\"https://kryptus.dyndns.biz\", 49193)\n\texpectedResults = \"\"\n\tresults = \"\"\n\ttestcase = ElementTree.fromstring(data)\n\tidStore = {}\n\tfor i in range(0,len(testcase), 2):\n\t\t# Get XML request and expected response.\n\t\tereq = testcase[i]\n\t\teres = testcase[i+1]\n\t\texpectedResults += parse_xml_to_pretty_string(ereq)\n\t\texpectedResults += parse_xml_to_pretty_string(eres)\n\t\texpectedResults += \"\\n\\\\newpage\\n\"\n\t\t# Append expected req and resp to string for report\n\n\t\t#parse req for ID\n\t\tprint('\\033[92m'+parse_xml_to_pretty_string(ereq)+'\\033[0m')\n\t\tparse_xml_unique_id(ereq, idStore, idtemplate)\n\t\tparse_xml_timestamp(ereq)\n\t\tif len(otp) > 0:\n\t\t\tparse_xml_otp(ereq, otp)\n\t\tresults += parse_xml_to_pretty_string(ereq)\n\t\tprint(idStore)\n\n\t\t#Parse xml to TTLV and send to HSM\n\t\tsend = parse_xml_to_pretty_string(ereq)\n\t\treceived = send_receive_https(conn, send)\n\t\t\n\t\t#Parse response to store IDs and append to report\n\t\tresponse = ElementTree.fromstring(received)\n\t\tprint('\\033[94m'+parse_xml_to_pretty_string(response)+'\\033[0m')\n\t\tparse_xml_unique_id(response, idStore, idtemplate)\n\t\tparse_xml_unique_id(eres, idStore, idtemplate)\n\t\tresults += parse_xml_to_pretty_string(response)\n\t\tresults += \"\\n\\\\newpage\\n\"\n\t\t\n\t\tprint(idStore)\n\t\tdisconnect(sock)\n\t\twriteToFile(expectedResults.replace(\"$\", \"\"), PATH, \"expected.tex\")\n\t\twriteToFile(results, PATH, \"results.tex\")\n\texit()",
"def test_ap_hs20_eap_ttls_eap_mschapv2(dev, apdev):\n eap_test(dev[0], apdev[0], \"21[3:26][6:7][99:99]\", \"TTLS\", \"user\")",
"def unitTest(self):\n\n if 'y' in self.snmp_obj.dbg:\n print(self.snmp_obj.mib_dict)\n for k in self.snmp_obj.mib_dict:\n print(k, \":\", self.snmp_obj.mib_dict[k])\n\n print(\"Testing get mib oid\")\n\n for i in self.mibs:\n oid = self.snmp_obj.get_mib_oid(i)\n print('mib: %s - oid=%s' % (i, oid))\n\n return True",
"def test_hard_coded_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\"existing_key\": \"existing_val\"})\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}]\n })\n block.start()\n\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()",
"def CASE3( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=3,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=1,\n ipv6=1,\n countFlowsGroups=False,\n linkFailure=False,\n description=\"Ping between all ipv4 and ipv6 hosts in the topology\" )",
"def doTest(self, module, payloads):\n for payload in payloads:\n # Perform test & write report\n str = \"TEST #%s - %s\" % (self.testnum, payload[0])\n print str[:62].ljust(65,'.'),\n #test_dt_start = datetime.datetime.now()\n test_dt_start = time.strftime('%Y-%m-%d %H:%M:%S')\n pattern = \"\"\n\n if payload[1] == \"socket\":\n cmd = self.commandParser('socket', payload[4])\n (test_port, test_proto) = (payload[2], payload[3].lower())\n test_payload = cmd\n if payload[3].lower() == 'tcp':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((self._target,payload[2]))\n s.send(cmd)\n pattern = payload[5]\n s.close()\n elif payload[1] == \"command\":\n cmd = self.commandParser('command', payload[2])\n (test_port, test_proto) = (None, None)\n test_payload = ' '.join(cmd)\n if self._debug==1:\n print \"\\n\\n***Debug: sending command: %s\" % ' '.join(cmd)\n subprocess.call(cmd)\n else:\n subprocess.call(cmd, stdout=subprocess.PIPE)\n pattern = payload[3]\n elif payload[1] == \"scapy\":\n cmd = self.commandParser('scapy', payload[2])\n if self._debug == 1:\n print \"\\n\\n***Debug: sending scapy payload: %s\" % cmd\n cmd = cmd.replace('verbose=0', 'verbose=1')\n (test_port, test_proto) = (None, None)\n test_payload = cmd\n eval(cmd)\n pattern = payload[3]\n elif payload[1] == \"pcap\":\n pcap = os.path.join(self.config.get('PATHS', 'pcapdir'), payload[2])\n (test_port, test_proto) = (None, None)\n test_payload = pcap\n if self._debug == 1:\n # verbose mode\n print \"Pcap Replay file\"\n cmd = [self.config.get('ENV','sudo'), self.config.get('ENV','tcpreplay'), '-i', self.config.get('CLIENT','iface'), pcap]\n else:\n # quiet mode\n cmd = [self.config.get('ENV','sudo'), self.config.get('ENV','tcpreplay'), '-q', '-i', self.config.get('CLIENT','iface'), pcap]\n if self._debug==1:\n subprocess.call(cmd)\n else:\n subprocess.call(cmd, stdout=subprocess.PIPE)\n pattern = payload[3]\n\n test_dt_end = time.strftime('%Y-%m-%d %H:%M:%S')\n\n # Sleep before getting alerts\n time.sleep(int(self.config.get('TIMING', 'sleepbeforegetalerts')))\n\n # Get new alerts and calculate new offset\n self.getAlertsFile()\n res = self.getAlertsFromOffset(self.config.get('PATHS', 'tempfile'), self.offset)\n\n # Sig matching\n if pattern != \"\":\n if re.search(pattern, res):\n test_flag = 2\n else:\n if res == '':\n test_flag = 0\n else:\n test_flag = 1\n test_sig_match = pattern\n else:\n test_sig_match = None\n test_flag = None\n\n test_alert = res\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n database.DB(self._cnf).addTestResult((module, payload[1], test_dt_start,\n test_dt_end, payload[0], test_port, test_proto, test_payload,\n test_sig_match, res, test_flag))\n\n print \"[ done ]\"\n \n # Sleep before next test\n time.sleep(int(self.config.get('TIMING', 'sleepbeforenexttest')))\n self.testnum += 1",
"def test_bug_2308(self):\n transformer = SnmpTransformer()\n transformer.setup(\"test\",{\n \"mib_dir\" : \"/dev/null\"\n })\n transformer.registered_mibs += (transformer.parse_file({\n \"EVENT test_event .1.3.6.1.4.1.2021.13.990.0.17 \\\"test category\\\" severity\" : 0,\n \"FORMAT $*\" : 1\n }))\n str = 'HOST:testhost.localdomain;IP:UDP: [127.0.5.1]:50935;VARS:.1.3.6.1.2.1.1.3.0 = 2:22:16:27.46 ; .1.3.6.1.6.3.1.1.4.1.0 = .1.3.6.1.4.1.2021.13.990.0.17 ; .1.3.6.1.2.1.1.6.0 = \"Argument 1 = test\" ; .1.3.6.1.6.3.18.1.3.0 = 127.0.0.1 ; .1.3.6.1.6.3.18.1.4.0 = \"public\" ; .1.3.6.1.6.3.1.1.4.3.0 = .1.3.6.1.4.1.2021.13.990'\n event = transformer.transform(str)\n assert event[\"trap_oid\"] == \".1.3.6.1.4.1.2021.13.990.0.17\"\n assert event[\"host_address\"] == \"127.0.5.1\"\n assert event[\"host_name\"] == \"testhost.localdomain\"\n assert event[\"message\"] == \"\\\"Argument 1 = test\\\"\"",
"def test_snmpcustom_get_sensordef(self):\n test_sensordef = {\n \"kind\": self.test_snmpload.get_kind(),\n \"name\": \"SNMP Load\",\n \"description\": \"Monitors a numerical value returned by a specific OID using SNMP\",\n \"help\": \"Monitors a numerical value returned by a specific OID using SNMP\",\n \"tag\": \"mpsnmploadsensor\",\n \"groups\": [\n {\n \"name\": \"OID values\",\n \"caption\": \"OID values\",\n \"fields\": [\n {\n \"type\": \"radio\",\n \"name\": \"snmp_version\",\n \"caption\": \"SNMP Version\",\n \"required\": \"1\",\n \"help\": \"Choose your SNMP Version\",\n \"options\": {\n \"1\": \"V1\",\n \"2\": \"V2c\",\n \"3\": \"V3\"\n },\n \"default\": 2\n },\n {\n \"type\": \"edit\",\n \"name\": \"community\",\n \"caption\": \"Community String\",\n \"required\": \"1\",\n \"help\": \"Please enter the community string.\"\n },\n {\n \"type\": \"integer\",\n \"name\": \"port\",\n \"caption\": \"Port\",\n \"required\": \"1\",\n \"default\": 161,\n \"help\": \"Provide the SNMP port\"\n }\n ]\n }\n ]\n }\n assert_equal(self.test_snmpload.get_sensordef(), test_sensordef)",
"def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)"
] | [
"0.63348025",
"0.619772",
"0.6136724",
"0.5980005",
"0.5947892",
"0.58158255",
"0.57572633",
"0.5711977",
"0.5508595",
"0.53740776",
"0.53207225",
"0.5219235",
"0.5182949",
"0.5170424",
"0.5160724",
"0.5152555",
"0.5085134",
"0.50800693",
"0.5075875",
"0.5074044",
"0.50578886",
"0.5055878",
"0.50294787",
"0.50016963",
"0.4999262",
"0.4991768",
"0.49741083",
"0.49731335",
"0.49466187",
"0.49430087"
] | 0.64426136 | 0 |
Connect to the API and test connection | def connect_api():
print("INFO: Checking API connection and credentials...")
conf = ConfigParser()
conf.read(os.path.join(os.path.abspath(
os.path.dirname(__file__)), '.', 'api.conf'))
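# Build the Cyberwatch API client from the URL and keys stored in api.conf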
client = CBWApi(conf.get('cyberwatch', 'url'), conf.get(
'cyberwatch', 'api_key'), conf.get('cyberwatch', 'secret_key'))
client.ping()
return client | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect():\n return connection.Connection(username=api_user,\n api_key=api_key,\n region=api_region)",
"def test_api_connection(server, username, password, api_key):\n global api_url\n api_url = \"http://\" + server + \"/i-doit/src/jsonrpc.php\"\n\n global headers\n headers = {}\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"X-RPC-Auth-Username\"] = username\n headers[\"X-RPC-Auth-Password\"] = password\n\n global apikey\n apikey = api_key\n\n print(blue + \"\\n>>> \" + reset + \"Checking API connection...\")\n\n login_body = json.loads(\"{\\\"version\\\": \\\"2.0\\\",\\\"method\\\": \\\"idoit.login\\\",\\\"params\\\": {\\\"apikey\\\": \\\"\" +\n apikey + \"\\\",\\\"language\\\": \\\"en\\\"},\\\"id\\\": 1}\")\n\n try:\n s = requests.Session()\n login_request = s.post(api_url, json=login_body, headers=headers)\n login = login_request.json()\n if \"error\" in login:\n print(red + \"\\n>>> \" + reset +\n \"Unable to connect to the API. Please verify the connection information.\")\n return False\n else:\n print(green + \"\\n>>> \" + reset + \"Successfully connected.\")\n return True\n except requests.exceptions.RequestException:\n print(red + \"\\n>>> \" + reset +\n \"Unable to connect to the API. Please verify the connection information.\")\n return False",
"def test_connection(self):\n req = requests.get(\"http://{}:{}\".format(self.config.options.get(\"Server\", \"ListenAddress\"),\n self.config.options.get(\"Server\", \"Port\")))\n\n self.assertEqual(req.status_code, 200)",
"def connect(api, username, password):\n\treturn api.login(username, password)",
"def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)",
"def connect(self):\n\n if self.auth == \"webform\":\n self.conn = ZabbixAPI(self.server)\n elif self.auth == \"http\":\n self.conn = ZabbixAPI(self.server, use_authenticate=False)\n self.conn.session.auth = (self.username, self.password)\n\n else:\n raise SystemExit('api auth method not implemented: %s' % self.conn.auth)\n\n if self.nocheckcertificate:\n self.conn.session.verify = False\n\n try:\n self.conn.login(self.username, self.password)\n except ZabbixAPIException as e:\n raise SystemExit('Cannot login to Zabbix server: %s' % e)\n\n self.logger.info(\"Connected to Zabbix API Version %s\" % self.conn.api_version())",
"async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True",
"def connect(self):\n config.base_url = self.get_config('host')\n config.client_connection_attempts = 1\n config.assume_untrusted = False\n if self.get_config('insecure'):\n config.assume_untrusted = True\n\n config.credentials = utils.PseudoNamespace({\n 'default': {\n 'username': self.get_config('username'),\n 'password': self.get_config('password'),\n }\n })\n\n _, remainder = self.parser.parse_known_args()\n if remainder and remainder[0] == 'config':\n # the config command is special; it doesn't require\n # API connectivity\n return\n # ...otherwise, set up a awxkit connection because we're\n # likely about to do some requests to /api/v2/\n self.root = api.Api()\n try:\n self.fetch_version_root()\n except RequestException:\n # If we can't reach the API root (this usually means that the\n # hostname is wrong, or the credentials are wrong)\n if self.help:\n # ...but the user specified -h...\n known, unknown = self.parser.parse_known_args(self.argv)\n if len(unknown) == 1 and os.path.basename(unknown[0]) == 'awx':\n return\n raise",
"def setUp(self):\n self.api = api.InvenTreeAPI(\n SERVER,\n username=USERNAME, password=PASSWORD,\n timeout=30,\n )",
"def test_ApiConnectionWillAuthenticate_ValidCredentials_Successfully(self):\n connection = ApiConnection(self.userId, self.testToken)\n self.assertTrue(connection.connected())",
"def _try_connect(self):\n tuya = TuyaApi()\n try:\n tuya.init(\n self._username,\n self._password,\n self._country_code,\n self._platform,\n self._region,\n )\n except (TuyaAPIRateLimitException, TuyaNetException, TuyaServerException):\n return RESULT_CONN_ERROR\n except TuyaAPIException:\n return RESULT_AUTH_FAILED\n\n return RESULT_SUCCESS",
"def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)",
"def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)",
"def test_connection(self):\n self._bind_to_service()",
"def setUp(self):\n self.server_address = \"http://localhost:3030/$/\"\n self.request_address = \"http://localhost:3030/ds\"\n self.api = \"http://localhost:4032/\"\n self.version = \"0.2\"",
"def connect(self) -> None:\n self.driver.get(self.base_url)",
"def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")",
"def setUp(self):\n self.api = \"http://localhost:4031/\"\n self.version = \"0.2\"\n self.app = init_api()",
"def setUp(self):\n\t\tself.conn = Client([\"127.0.0.1:11211\"], debug = 1)",
"def test_ApiWillAuthenticate_ValidCredentials_Successfully(self):\n api = Api(self.userId, self.testToken)\n self.assertTrue(api.connected())",
"def test_server_connection():\n response = client.get(\"/\")\n assert response.ok\n assert response.json() == {\"ID\": \"8dbaaa72-ff7a-4f95-887c-e3109e577edd\"}",
"def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def connect(self):\n self.class_logger.info(\"Performing connection to TRex server via HLT API\")\n self.check_res(self.hltapi.connect(device=self.host, port_list=self.ports, reset=True, break_locks=True))",
"def setUp(self):\n self.c = Client(host=\"localhost\")",
"def setUp(self):\n self.hello_url = \"http://localhost:7000\"\n self.store_url = self.hello_url + \"/store\"\n self.session = requests.session()",
"def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())",
"def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')",
"def connect(self):\n self.conn.connect()",
"def setUp(self):\n self.client = APIClient()"
] | [
"0.75394267",
"0.71746594",
"0.71069753",
"0.70297796",
"0.69757605",
"0.6968587",
"0.6940774",
"0.6929308",
"0.6752766",
"0.67461467",
"0.66849214",
"0.6681502",
"0.6624572",
"0.66045177",
"0.6603074",
"0.6563905",
"0.6545671",
"0.6543918",
"0.650036",
"0.64967084",
"0.6495503",
"0.6495277",
"0.647101",
"0.6424504",
"0.64098144",
"0.6366996",
"0.6362531",
"0.6344491",
"0.6322445",
"0.63157904"
] | 0.7394636 | 1 |
Setup variables for SMTP | def setup_smtp():
print("INFO: Setting up SMTP variables...")
conf = ConfigParser()
conf.read(os.path.join(os.path.abspath(
os.path.dirname(__file__)), '.', 'smtp.conf'))
smtp = {
"server": conf.get('smtp', 'smtp_server'),
"login": conf.get('smtp', 'smtp_login'),
"password": conf.get('smtp', 'smtp_password'),
"port": conf.get('smtp', 'smtp_port'),
"sender": conf.get('smtp', 'smtp_sender'),
}
return smtp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, smtp_server, smtp_user, smtp_password,\n smtp_port=25, is_with_tls=False):\n self.smtp_server = smtp_server\n self.smtp_port = smtp_port\n self.smtp_user = smtp_user\n self.smtp_password = smtp_password\n self.is_with_tls = is_with_tls",
"def __init__(self, message, subject, recipients=[], sender=sender,\n encoding=\"utf-8\", charset=\"iso-8859-1\",\n smtpserver='localhost', smtpuser = None,\n smtppass = None):\n\n self.smtpserver = smtpserver\n self.smtpuser = smtpuser\n self.smtppass = smtppass\n self.sender = sender\n self.subject = subject\n self.message = message\n self.encoding = encoding\n self.charset = charset\n self.recipients = recipients\n self.session = None\n ## header encoding\n self.header_charset = \"iso-8859-1\"",
"def set_email_para(self,\n email_dict):\n\n self.__email_flag__ = 1\n\n # email\n self.__email_host__ = email_dict[\"email_host\"]\n self.__email_receiver_list__ = email_dict[\"email_recv_list\"]\n self.__email_sender__ = email_dict[\"email_sender_mailbox\"]\n self.__email_user__ = email_dict[\"email_username\"]\n self.__email_passwd__ = email_dict[\"email_password\"]\n\n print(\"NotifyManager email host=%s\"\n % self.__email_host__)\n print(\"NotifyManager email sender mailbox=%s\"\n % self.__email_sender__)\n print(\"NotifyManager email receiver mailbox=%s\"\n % self.__email_receiver_list__)\n\n return",
"def __init__(self, host, user, password, port=25):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n\n self.smtp = smtplib.SMTP()",
"def __init__(self, user, password, _recipients, templatedir='templates'):\n\n self.user = user\n self.password = password\n self.recipient = _recipients if type (_recipients) is list else [_recipients]\n self.server = 'smtp.gmail.com'\n self.port = 587\n\n if os.path.isdir(templatedir):\n self.templatedir = templatedir\n else:\n self.templatedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), templatedir)\n\n self.env = Environment(loader=FileSystemLoader(self.templatedir))",
"def __init__(self, mailhost, username, password, fromaddr, toaddrs, subject):\r\n logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject)\r\n self.username = username\r\n self.password = password",
"def __init__(self, config):\n # This generates the From address by stripping the part until the first\n # period from the mail server address and won't work always.\n self.fromaddr = config[\"mail\"][\"user\"] + \"@\" + \\\n config[\"mail\"][\"mailserver\"].partition(\".\")[2]\n\n # starts a client session with the SMTP server\n self.s = smtplib.SMTP(config[\"mail\"][\"mailserver\"])\n context = ssl.create_default_context()\n self.s.starttls(context=context)\n self.s.login(config[\"mail\"][\"user\"], config[\"mail\"][\"passphrase\"])",
"def __init__(self, auth=False):\n self.smtp = smtplib.SMTP(host=EMAIL_HOST, port=EMIAL_HOST_PORT)\n self.smtp.ehlo()\n if auth:\n self.smtp.login(EMAIL, EMAIL_PASSWORD)",
"def __init__(self, smtp_server='localhost', smtp_port=None,\n smtp_ssl=False, smtp_user=None, smtp_password=None):\n self._text_body = None\n self._html_body = None\n self._subject = \"\"\n self._reply_to = None\n\n self._smtp_server = smtp_server\n self._smtp_port = smtp_port\n self._smtp_ssl = smtp_ssl\n self._smtp_user = smtp_user\n self._smtp_password = smtp_password\n\n self._re_email = re.compile(\"^([\\\\w \\\\._]+\\\\<[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\>|[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)$\")\n self.clear_recipients()\n self.clear_attachments()",
"def _readsmtpserver(self):\n # FIXME too much duplicated code in these _readXYZ() methods\n try: \n self.smtpserver = self.conf.get(\"report.email.smtp_server\")\n except:\n # we use print so this messages goes to the stdout\n msg = \"configuration variable 'smtp_server' is not defined. Plugin Email cannot be created\"\n self.log.error(msg)\n raise PluginConfigurationFailure(msg)",
"def __init__(self, to, subject, settings):\r\n self.to = to\r\n self.subject = subject\r\n self.settings = settings\r\n\r\n self.from_addr = settings.get('email.from', None)\r\n\r\n # need ot setup/override in the extending classes\r\n self.message_file = None",
"def get_base_params():\n return {'from': settings.MAILGUN_FROM_EMAIL}",
"def _login(self):\n self._smtp = smtplib.SMTP(host=self._config.host,\n port=self._config.port)\n # send 'hello' to SMTP server\n self._smtp.ehlo()\n # start TLS encryption\n self._smtp.starttls()\n self._smtp.login(self._config.sender_email, self._config.password)\n self._connected = True",
"def build_mail_settings():\n mail_settings = MailSettings()\n mail_settings.bcc_settings = BCCSettings(True, Email(\"[email protected]\"))\n mail_settings.bypass_list_management = BypassListManagement(True)\n mail_settings.footer_settings = FooterSettings(True, \"Footer Text\",\n (\"<html><body>Footer \"\n \"Text</body></html>\"))\n mail_settings.sandbox_mode = SandBoxMode(True)\n mail_settings.spam_check = SpamCheck(True, 1,\n \"https://spamcatcher.sendgrid.com\")\n return mail_settings",
"def _setup_smtp_server(self):\n\n # Init; Attempt to use external first\n target = 'external'\n\n # ============================================================\n # Attempt (1): External mail server\n # ============================================================\n\n if target == 'external':\n # Assume it's a machine external to company network.\n # We will use an external email account that requires a login.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as external machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = EXTERNAL_HOST\n self.port = EXTERNAL_PORT\n self.sender_address = EXTERNAL_USER_NAME\n self.sender_pwd = EXTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(host=self.host, port=self.port)\n sess.starttls()\n sess.login(self.sender_address, self.sender_pwd)\n return sess\n except:\n target = 'internal'\n\n # ============================================================\n # Attempt (2): Company internal mail server\n # ============================================================\n\n if target == 'internal':\n # Assume machine is internal to company network.\n # Current user should already be authenticated.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as internal Cooper machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = INTERNAL_HOST\n self.port = INTERNAL_PORT\n self.sender_address = INTERNAL_USER_NAME\n self.sender_pwd = INTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(self.host)\n return sess\n except:\n msg = f'COMMUNICATOR WARNING: Could not establish SMTP connection. Check configuration.'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n\n msg = f'Could not establish SMTP connection'\n raise ConnectionError(msg)",
"def initEmailCreds(sender):\n print (\"::initializing email creds...\")\n pw = getpass.getpass()\n entry = {'tardigrade': { 'username':sender, 'password':pw } }\n filepath = confighome+\"config\"\n appendJson(filepath,entry)",
"def test_smtp(self):\n self._endpointServerTest(\"smtp\", protocols.SMTPFactory)",
"def __init__(self):\r\n self.window = 'dag_emailWindow'\r\n self.title = 'dagRenderMail'\r\n self.size= (195, 290);\r\n \r\n #Sets some defaults\r\n self.subject='Render Complete on '+str(dag_compName());\r\n self.login= '[email protected]'\r\n self.password='Password'\r\n self.to='[email protected]'\r\n self.time='10'\r\n self.smtp='smtp.gmail.com:587'\r\n self.render = ''\r\n \r\n #Default message body\r\n self.body='Your render on '+str(dag_compName())+' is now complete.' + \"this message is automatically generated by dagMail. \\n dagmail script by Dhruv Govil www.dgovil.com \\n\\n\\n\"\r\n \r\n \r\n #default name for settings file. Can be anything. \r\n self.config='dagmail.settings'\r\n \r\n #Default MEL scripts. Don't change.\r\n self.preScr = 'python \"import dagMail\";python \"dagMail.dagMail.preScript()\"'\r\n self.postScr = 'python \"import dagMail\";python \"dagMail.dagMail.postScript()\"'",
"def __init__(self):\n self.__ALLOWED_EXTENSIONS__ = {\"txt\", \"doc\", \"docx\", \"xls\", \"xlsx\", \"pdf\", \"png\", \"jpg\", \"jpeg\", \"gif\", \"zip\"}\n self.__APP_PATH__ = path.dirname(path.realpath(__file__))\n self.__APP_DIR__ = self.__APP_PATH__.split(\"/\")[-1]\n self.__SPECIAL_FILES__ = [\"request.saved\", \"request.submitted\", \"request.processed\", \"request.returned\", \"request.voided\", \"submission.json\"]\n self.__TEST_EMAILS__ = [[\"Damian Jimenez\", \"[email protected]\"]]\n self.__PROD_EMAILS__ = [[\"CSE Webmaster\", \"[email protected]\"], [\"Chengkai Li\", \"[email protected]\"]]\n self.mailer = Mailer()",
"def setup(self):\n self.builder = HTMLEmailBuilder(self.file_path)\n\n header_content = input('Please type header content:\\n')\n content = input('Please type the content what you want to show in this page.\\n')\n foot = input('Please type the content of foot.\\n')\n\n self.builder.config_header(header_content)\n self.builder.config_body(content)\n self.builder.config_foot(foot)\n self.builder.config_html()\n print(self.builder.htmlemail.__str__())",
"def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()",
"def test_defaults(self):\n msg_helper = MessageHelper()\n self.assertEqual(msg_helper.transport_name, 'sphex')\n self.assertEqual(msg_helper.transport_type, 'sms')\n self.assertEqual(msg_helper.mobile_addr, '+41791234567')\n self.assertEqual(msg_helper.transport_addr, '9292')",
"def test_sendmail(self):\n assert self.rc_conf.has_key('sendmail_enable')\n assert self.rc_conf['sendmail_enable'] == '\"NONE\"'",
"def initialize_server() -> None:\r\n\r\n global webserver\r\n webserver = smtplib.SMTP(SMTP_NAME, SMTP_PORT)\r\n webserver.ehlo()\r\n webserver.starttls()",
"def config_email_host(email_config: dict) -> dict:\n print(\"Email host not configured.\\nPlease enter host: \")\n email_config[\"host\"] = sys.stdin.readline().strip()\n print(\"Enter Port: \")\n email_config[\"port\"] = int(sys.stdin.readline().strip())\n\n if email_config[\"host\"] != \"localhost\":\n print(\"Enter User Name: \")\n email_config[\"username\"] = sys.stdin.readline().strip()\n\n print(\"Enter Password: \")\n crypt = Crypt()\n email_config[\"password\"] = crypt.encrypt(getpass.getpass())\n\n print(\"Does the Email service use SSL? (y/n): \")\n email_config[\"ssl\"] = sys.stdin.readline().strip().lower() in [\n \"true\",\n \"y\",\n \"yes\",\n ]\n\n return email_config",
"def setup(self):\n global log_th, conf_th, header_th, command_w_th\n self.conf_th_ic = conf_th\n self.header_th_ic = header_th\n self.command_w_th_inc = command_w_th\n self.hostname = conf_th.get_item(q_key='general').get('hostname')\n self.std_recv_size = int(conf_th.get_item(q_key='general').get('std_recv_size'))\n self.data_recv_size = int(conf_th.get_item(q_key='general').get('data_recv_size'))\n self.mail_save_enable = int(conf_th.get_item(q_key='general').get('mail_save_enable'))\n self.mail_save_path = conf_th.get_item(q_key='general').get('mail_save_path')\n self.no_answer = int(conf_th.get_item(q_key='general').get('no_answer'))\n self.sleep_between = int(conf_th.get_item(q_key='general').get('sleep_between'))\n self.message_id = library.q_id_generate(size=16)\n self.client_ip = tuple(self.client_address).__getitem__(0)\n self.client_port = int(tuple(self.client_address).__getitem__(1))\n # Running\n self.header_th_ic.write_header(ip=self.client_ip, qid=self.message_id)\n message = '220 ' + self.hostname\n self.func_sender(message)\n log_th.log_info('{} connected to {} thread'.format(self.client_ip, threading.current_thread().name))",
"def __init__(self, sendgrid_email_env_name: str, sendgrid_api_key_env_name: str):\n try:\n self.sendgrid_email = os.environ[sendgrid_email_env_name]\n self.sendgrid_api_key = os.environ[sendgrid_api_key_env_name]\n except KeyError:\n self.sendgrid_email = None\n self.sendgrid_api_key = None\n self.logger.error(\"Failed to initialize email service\")\n return\n self.logger.info(\"Email service initialized\")",
"def setUp(self):\n self.valid_data={'subject': 'Test subject',\n 'sender_name': 'Steve Tester',\n 'sender_email': '[email protected]',\n 'message': 'This is my test message',\n 'cc_myself': 'True'}",
"def __init__(self, mailbox, api_key, base_url='https://mailosaur.com/api', smtp_host='mailosaur.io'):\n self.mailbox = mailbox\n self.api_key = api_key\n self.base_url = base_url\n self.smtp_host = smtp_host",
"def __init__(self, fromaddr, toaddrs, subject='', body='',\n smtphost='localhost'):\n\n if isinstance(toaddrs, StringType):\n self.toaddrs = [x.strip() for x in toaddrs.split(',') if x]\n else:\n self.toaddrs = toaddrs\n \n self.fromaddr = fromaddr\n self.subject = subject\n self.body = body\n self.smtphost = smtphost"
] | [
"0.65546256",
"0.6469776",
"0.6447924",
"0.64182866",
"0.63860655",
"0.6361841",
"0.62926346",
"0.6200357",
"0.6009188",
"0.5971613",
"0.59700364",
"0.5938338",
"0.58888084",
"0.5881734",
"0.5868435",
"0.5861525",
"0.5846991",
"0.58270794",
"0.57619554",
"0.57489693",
"0.57213825",
"0.56952894",
"0.5693097",
"0.56923246",
"0.5688955",
"0.56801254",
"0.5639705",
"0.56385493",
"0.5587787",
"0.55544376"
] | 0.7708062 | 0 |
Replace server list in file with recent one | def replace_file(servers):
print("INFO: Replacing server list in file with recent one...")
if os.path.exists(os.path.dirname(__file__) + '/communication_failure_list.txt'):
try:
os.remove(os.path.dirname(__file__) +
'/communication_failure_list.txt')
except OSError as error:
print("Error: %s - %s." % (error.filename, error.strerror))
find_communication_failure_servers(servers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_server_list(filename):\n if not os.path.isfile(filename):\n return #ignore this error for now\n fo=open(filename,\"r\")\n rd=fo.read()\n fo.close()\n __load_server_list(rd)",
"def update_servers(self, output_file, current_time):\n\n # Check the servers length list.\n if len(self.server_statistics) >= 1000 or \\\n _get_ttime(current_time, spd_factor) == \"24:00:00\":\n\n # Open a context manager for the file.\n with open(output_file, 'a') as the_file:\n\n # Initialize a Writer object.\n writer = csv.writer(the_file, delimiter=\",\")\n\n # Iterate through the deque and write out.\n for server in self.server_statistics:\n writer.writerow(server)\n\n # Clear the queue of Server objects.\n self.server_statistics.clear()",
"def find_recovered_servers(client):\n print(\"INFO: Determining recovered servers by comparing current servers with list in file...\")\n current_servers_list = []\n for server in client.servers():\n if server.status == \"server_update_comm_fail\":\n current_servers_list.append({\"id\": server.id})\n\n with open(os.path.dirname(__file__) + '/communication_failure_list.txt') as file:\n server_list = [json.loads(line) for line in file]\n\n diff = [i for i in current_servers_list +\n server_list if i not in current_servers_list or i not in server_list]\n\n return diff",
"def update_tlds(filename, url):\n try:\n old_tlds = []\n new_tlds = [tld.strip('\\n') for tld in urllib2.urlopen(url)\n if '#' not in tld]\n if os.path.isfile(filename):\n old_tlds = [line.strip('\\n') for line in open(filename)\n if '#' not in line]\n\n if old_tlds == new_tlds:\n return []\n else:\n with open(filename, 'w') as f:\n for tld in new_tlds:\n f.write(tld + '\\n')\n return sorted(set(new_tlds) - set(old_tlds))\n except Exception as e:\n return e",
"def update_scores_list(list_file, score_file):\n fnames = []\n head, tail = os.path.split(score_file)\n if os.path.exists(list_file):\n with open(list_file, \"r\") as f:\n fnames = json.loads(f.read())\n if tail not in fnames:\n fnames.append(tail)\n fnames.sort()\n fnames.reverse()\n else:\n fnames.append(tail)\n\n with open(list_file, \"w\") as f:\n print \"writing %s...\" % list_file\n f.write(json.dumps(fnames))",
"def modifyIserver(mswName):\n\n serverpath = \"/usr/local/nextone/bin/server.cfg\"\n try:\n bkupFile = '/tmp/server.cfg.%s.bkup' %mswName\n \n # Copy the server.cfg file from MSW to the local host\n if (os.path.isfile(bkupFile) == False):\n os.system(\"scp -q root@\" + mswName + \":\" + serverpath + \" \" + bkupFile)\n \n os.system('scp -q root@%s:%s /tmp/server.cfg' %(mswName,serverpath))\n\n fin=file('/tmp/server.cfg','r')\n inpList = fin.readlines()\n fin.close()\n\n position = -1\n pstr = '\\tpolicy enumdomain \"e164.com\"\\n '\n\n # Insert the enum domain configuration \n if (inpList.__contains__(pstr) == False):\n # Find the index of maxhunt\n for i in inpList:\n if i.__contains__('maxhunts'):\n position = inpList.index(i)\n break\n\n if position != -1:\n inpList.insert(position,pstr)\n fout=file('/tmp/server.cfg','w')\n fout.writelines(inpList)\n fout.close()\n else:\n log.info('maxhunts entry not present in server.cfg file')\n else:\n log.info('File server.cfg already contains enum '+ \\\n 'policy information')\n\n # Copying the server.cfg file to MSW\n os.system(\"scp -q /tmp/server.cfg root@\" + mswName + \":\" + serverpath )\n\n except Exception, e:\n msg = \"file error: %s\" % str(e)\n #32363 Modified to resolve string formatting error\n log.error('File server.cfg does not exist %s' %str(msg))",
"def reset_servers(self):\n\n servers = []\n for _, g in self.groups.items():\n g.get_servers(servers)\n\n for s in servers:\n self.servers[s.vid] = s",
"def _update_domains_on_server_update(self, server):\n ns_rec_content = self._sanitize_content(\"NS\", server['name'])\n\n LOG.debug(\"Content field of existing NS records will be updated\"\n \" to the following upon server update: %s\" % ns_rec_content)\n try:\n\n # Execute the manually prepared query\n # A TX is required for, at the least, SQLite.\n #\n self.session.begin()\n\n # first determine the old name of the server\n # before making the updates. Since the value\n # is coming from an NS record, the server name\n # will not have a trailing period (.)\n old_ns_rec = self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .first()\n if old_ns_rec is not None:\n old_server_name = old_ns_rec.content\n\n LOG.debug(\"old server name read from a backend NS record:\"\n \" %s\" % old_server_name)\n LOG.debug(\"new server name: %s\" % server['name'])\n\n # Then update all NS records that need updating\n # Only the name of a server has changed when we are here\n self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .update({\"content\": ns_rec_content})\n\n # Then update all SOA records as necessary\n # Do the SOA last, ensuring we don't trigger a NOTIFY\n # before the NS records are in place.\n #\n # Update the content field of every SOA record that has the\n # old server name as part of its 'content' field to reflect\n # the new server name.\n # Need to strip the trailing period from the server['name']\n # before using it to replace the old_server_name in the SOA\n # record since the SOA record already has a trailing period\n # and we want to keep it\n self.session.execute(models.Record.__table__\n .update()\n .where(and_(models.Record.__table__.c.type == \"SOA\",\n models.Record.__table__.c.content.like\n (\"%s%%\" % old_server_name)))\n .values(content=func.replace(\n models.Record.__table__.c.content,\n old_server_name,\n server['name'].rstrip('.'))\n )\n )\n\n except Exception:\n with excutils.save_and_reraise_exception():\n self.session.rollback()\n # now commit\n else:\n self.session.commit()",
"def parse_update(self, file):\n\n self.new_hashes = []\n self.old_hashes = []\n parsed = self.parse_header(file.readline())\n if parsed:\n (type, version) = parsed\n self.log.debug(\"Received list type: %s, version: %s\" % (type, version))\n pattern = re.compile(HASH_REGEX)\n for line in file:\n m = pattern.search(line)\n if m:\n if m.group(1) == \"+\":\n self.new_hashes.append(m.group(2))\n elif m.group(1) == \"-\":\n self.old_hashes.append(m.group(2))\n\n self._version = int(version)\n else:\n raise SafeBrowsingUpdateError(\"Received bad/empty list, no changes made\")",
"def save_server_list_json(server_list):\n\n with open(output_file,\"w+\") as f:\n json.dump(server_list, f)\n\n return server_list",
"def half_sync(self,delay):\n self.count = 1\n while not self.shutdown and self.loggedin.autosync:\n time.sleep(delay)\n self.count += 1\n self.filelist = self.loggedin.list()\n print \"Pinged server for changes\"\n self.synced = []\n if self.filelist:\n for f in self.filelist:\n path = self.loggedin.sanitize_path(f['path'])\n path = os.path.join(self.onedirrectory, path)\n if not os.path.exists(path):\n os.makedirs(path)\n if f['name'] and not self.loggedin.exists(f):\n exists, data = self.loggedin.getfile(f)\n if exists:\n with open(self.loggedin.make_path(f), 'a') as new_file:\n new_file.write(data)\n new_file.close()\n elif f['name'] and str(self.loggedin.hash_file(f)) != str(f['hash']):\n self.loggedin.sendfile(f['name'], f['path'])\n if self.loggedin.make_path(f) not in self.synced:\n self.synced.append(self.loggedin.make_path(f))\n os_walk = os.walk(self.loggedin.onedirrectory)\n for directory in os_walk:\n for f in directory[2]:\n if f.startswith('.'):\n continue\n path = os.path.join(directory[0], f)\n if path not in self.synced:\n try:\n os.remove(path)\n except OSError, e:\n print (\"Error: %s - %s.\" % (e.filename,e.strerror))",
"def getServersAddrs(i_ServerList):\n serverAddrList =[]\n\n with open(PLATFORMS_TO_SERVER_FILE, \"r\") as txtFile:\n data = txtFile.readlines()\n table = []\n filteredTable = []\n for line in data:\n if line.startswith(\"#\"):\n continue\n eachLine = line.split(\";\")\n table.append(eachLine)\n filteredTable.append([])\n for element in range(0, len(table)):\n filteredTable.append(table[element][0])\n\n with open(SERVERS_IP_PATH) as serversFile:\n serversFileLines = serversFile.readlines()\n for line in serversFileLines:\n if line[-1:] == '\\n':\n line = line[:-1]\n serverDetails = line.split(\",\")\n if (i_ServerList != True):\n if(serverDetails[0] in i_ServerList and serverDetails[0] in filteredTable):\n serverAddrList.append(serverDetails[1])\n else:\n if(serverDetails[0] in filteredTable):\n serverAddrList.append(serverDetails[1])\n \n return serverAddrList",
"def process_serverlist(self, serverlist):\n\t\t# Note that events may be late.\n\t\t# However, mustn't work on widgets that are being\n\t\t# garbage collected.\n\t\tif not self.lobby_visible:\n\t\t\treturn\n\n\t\tnum_servers = 0\n\t\tfor key, val in serverlist.iteritems():\n\t\t\t# Either update an existing list item.\n\t\t\tif len(self.li_servers.items) > 0 and num_servers < len(self.li_servers.items):\n\t\t\t\tself.li_servers.items[num_servers].set_server(val)\n\t\t\t# Or create a new one.\n\t\t\telse:\n\t\t\t\tself.li_servers.items.append(LobbyListItem(val))\n\t\t\tnum_servers += 1",
"def update_server(finished, server):\n name = server['name']\n while not finished.wait(2):\n new_s = fetch_server(VAULTZ[name], server)\n if 'cluster_id' in new_s:\n my_cluster = [x['name']\n for _name, x\n in iteritems(SERVERZ)\n if x.get('cluster_id', None) == new_s['cluster_id']]\n new_s['cluster_members'] = my_cluster\n\n SERVERZ[name] = new_s",
"def populate_server(self, target_server):\n for uid in self.servers_online:\n server = self.all_servers[uid]\n if server == target_server:\n pass\n else:\n if len(server.jobs)>1:\n shifting_task = server.jobs.pop(-1)\n self.servers_jobs_list[server.server_id].remove(shifting_task)\n server.waiting_time-=shifting_task.task_time\n self.schedule_task(shifting_task)",
"def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return",
"def replace2(oldlst,newlst):\n with open(oldlst, 'r') as f4:\n with open(newlst,'w') as f5:\n for line in f4:\n f5.write(line)",
"def export_server_list_json(server_list, filename):\n\n with open(filename,\"w+\") as f:\n json.dump(server_list, f)\n\n return server_list",
"def loadServerList(inputFile='servers.csv', httpsonly=True):\n servers = []\n with open('servers.csv', 'r') as csvfile:\n serverreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in serverreader:\n name, url = row\n if url:\n host = urlparse(url)\n if not httpsonly or host.scheme == 'https':\n servers += [host.netloc]\n return servers",
"def modifyResolve(msw):\n hostip = socket.gethostbyname('mygen')\n name = 'nameserver ' + hostip+'\\n'\n name1 = 'search e164.com'+'\\n'\n newFileContents = [name,name1]\n\n try:\n # Back up the original file on local host\n if (os.path.isfile('/etc/resolv.conf.bkup') == False):\n os.system('sudo cp /etc/resolv.conf /etc/resolv.conf.bkup')\n rconfile = open('/etc/resolv.conf',\"w\")\n rconfile.writelines(newFileContents)\n rconfile.close()\n\n\n ##31291 Taking the bkup of resolv.conf file from MSW to the \n ##/tmp directory of msw \n resultString = msw.filter('ls -lrt /tmp/resolv.conf.enum.bkup') \n if (resultString.find('No such file or directory') !=-1):\n msw.assertCommand('cp /etc/resolv.conf /tmp/resolv.conf.enum.bkup')\n\n # Copy the new File to the MSW\n os.system(\"scp -q /etc/resolv.conf root@mymsw:/etc/resolv.conf\")\n\n except Exception, e:\n msg = \"file error: %s\" % str(e)\n #32363 Modified to resolve string formatting error\n log.error('File resolv.conf does not exist %s' %str(msg))",
"def updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))",
"def loadServerList(inputFile='servers.csv', httpsonly=True):\n servers = []\n with open('servers.csv', 'rb') as csvfile:\n serverreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in serverreader:\n name, url = row\n if url:\n host = urlparse(url)\n if not httpsonly or host.scheme == 'https':\n servers += [host.netloc]\n return servers",
"def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()",
"def _update_domains_on_server_delete(self, server):\n\n # find a replacement server\n replacement_server_name = None\n servers = self.central_service.find_servers(self.admin_context)\n\n for replacement in servers:\n if replacement['id'] != server['id']:\n replacement_server_name = replacement['name']\n break\n\n LOG.debug(\"This existing server name will be used to update existing\"\n \" SOA records upon server delete: %s \"\n % replacement_server_name)\n\n # NOTE: because replacement_server_name came from central storage\n # it has the trailing period\n\n # Execute the manually prepared query\n # A TX is required for, at the least, SQLite.\n try:\n self.session.begin()\n # first delete affected NS records\n self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .delete()\n\n # then update all SOA records as necessary\n # Do the SOA last, ensuring we don't trigger a\n # NOTIFY before the NS records are in place.\n #\n # Update the content field of every SOA record that\n # has the deleted server name as part of its\n # 'content' field to reflect the name of another\n # server that exists\n # both server['name'] and replacement_server_name\n # have trailing period so we are fine just doing the\n # substitution without striping trailing period\n self.session.execute(models.Record.__table__\n .update()\n .where(and_(models.Record.__table__.c.type == \"SOA\",\n models.Record.__table__.c.content.like\n (\"%s%%\" % server['name'])))\n .values(content=func.replace(\n models.Record.__table__.c.content,\n server['name'],\n replacement_server_name)))\n\n except Exception:\n with excutils.save_and_reraise_exception():\n self.session.rollback()\n else:\n self.session.commit()",
"def overwrite(fstack: List[Tuple[str,int]]) -> ():\n filename, line_num = fstack.pop()\n tmp = str() # store our new file in memory\n with open(filename, 'r') as input:\n for i,line in enumerate(input):\n if i + 1 == line_num:\n line = line.replace(\"pub \",\"\",1)\n _, line_num = fstack.pop() if fstack else ('',0)\n tmp += line\n with open(filename, 'w') as newfile:\n newfile.write(tmp)",
"def _update_hosts_file(self, resolution):\n self._execute_command('echo {0} >> /etc/hosts'.format(resolution),\n sudo=True)",
"def clear_recent_urls():\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:\n recent_dict = json.load(recent_urls_json)\n for key in recent_dict.keys():\n recent_dict[key] = []\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json', 'w') as fp:\n json.dump(recent_dict, fp, sort_keys=True, indent=4)",
"def update_dev_list(self, file_object, timestamp):\n dev_list = self.dev_list\n my_date = datetime.utcfromtimestamp(timestamp)\n\n for line in file_object:\n dev_record = line.split()\n dev_name = dev_record[3]\n dev_ip = dev_record[2]\n dev_rs = dev_list.find({'hid': self.home_id, 'dev_name': dev_name})\n\n if dev_rs.count() == 0 or dev_ip != dev_rs.sort('last_updated', DESCENDING)[0]['ip']:\n # If it's a new device or its ip has changed, then add a record.\n new_dev = {'hid': self.home_id, 'dev_name': dev_name,\n 'ip': dev_ip, 'last_updated': my_date}\n dev_list.insert(new_dev)\n # else do nothing.\n\n # Update the timestamp of the most recently processed device list file\n self.update_dev_list_latest_updated(my_date)\n return True",
"def remove_old_hosts_file(old_file_path, backup):\n\n # Create if already removed, so remove won't raise an error.\n open(old_file_path, \"a\").close()\n\n if backup:\n backup_file_path = old_file_path + \"-{}\".format(\n time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n )\n\n # Make a backup copy, marking the date in which the list was updated\n shutil.copy(old_file_path, backup_file_path)\n\n os.remove(old_file_path)\n\n # Create new empty hosts file\n open(old_file_path, \"a\").close()",
"def load_ports_from_file(self, filename):\n lines = open(filename, \"r\").readlines()\n for line in lines:\n name, port = line.split(\":\")\n self.servers[name] = port.rstrip()"
] | [
"0.6282345",
"0.6164431",
"0.5646351",
"0.5629765",
"0.55825335",
"0.55205137",
"0.54738104",
"0.54054666",
"0.53958476",
"0.53949213",
"0.53838426",
"0.5362909",
"0.53529775",
"0.5349839",
"0.53414625",
"0.53314036",
"0.5330049",
"0.53072166",
"0.5290604",
"0.52862704",
"0.5271379",
"0.5267621",
"0.52669334",
"0.52615064",
"0.52251947",
"0.5178649",
"0.5165171",
"0.5139934",
"0.5136492",
"0.51346946"
] | 0.7886564 | 0 |
Find servers with status "Communication failure" and save them to a file | def find_communication_failure_servers(servers):
    print('INFO: Finding servers with "Communication failure" status and saving result in file')
    with open(os.path.dirname(__file__) + '/communication_failure_list.txt', 'w+') as file:
        for server in servers:
            if server.status == "server_update_comm_fail":
                json.dump({"id": server.id}, file)
                file.write(os.linesep) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_recovered_servers(client):\n print(\"INFO: Determining recovered servers by comparing current servers with list in file...\")\n current_servers_list = []\n for server in client.servers():\n if server.status == \"server_update_comm_fail\":\n current_servers_list.append({\"id\": server.id})\n\n with open(os.path.dirname(__file__) + '/communication_failure_list.txt') as file:\n server_list = [json.loads(line) for line in file]\n\n diff = [i for i in current_servers_list +\n server_list if i not in current_servers_list or i not in server_list]\n\n return diff",
"def replace_file(servers):\n print(\"INFO: Replacing server list in file with recent one...\")\n if os.path.exists(os.path.dirname(__file__) + '/communication_failure_list.txt'):\n try:\n os.remove(os.path.dirname(__file__) +\n '/communication_failure_list.txt')\n except OSError as error:\n print(\"Error: %s - %s.\" % (error.filename, error.strerror))\n find_communication_failure_servers(servers)",
"def writeOutServers(self):\n # we'll be writing out to the server tracker file, overwriting\n # anything that may exist in it\n global server_tracker_file\n \n with open(server_tracker_file, 'wb') as outfile:\n # let's leverage the printStatus method we have\n outfile.write(self.printStatus())\n \n return",
"def test(self):\n\n resultdict = {}\n for server in self.server_list:\n resultdict.update(**{server: self.fetch(server)})\n\n ips = sorted(resultdict.values())\n ips_set = set(ips)\n print(\"\\nNumber of servers: {}\".format(len(self.server_list)))\n\n for ip, occurence in zip(ips_set, map(lambda x: ips.count(x), ips_set)):\n print(\n \"{0} = {1} ocurrenc{2}\".format(\n ip if len(ip) > 0 else \"broken server\",\n occurence,\n \"e\" if occurence == 1 else \"ies\",\n )\n )\n print(\"\\n\")\n if any([i == \"\" for i in resultdict.values()]):\n print(\"\\n________list of failed servers_______\")\n for _url, _ip in resultdict.items():\n if _ip == \"\":\n print(_url)\n print(\"\\n\")\n return resultdict",
"def _get_error_info(self, result, log):\n _ = '/opt/l2deploy/logs/OverallStatusReport'\n f = self._remote_cmd(\"grep '{}' {}\".format(_, log))\n f = f.get('output').split('[')[-1][:-1]\n\n for n in [result] if self.nodes == 1 else result['nodes']:\n if 'failed' == n.get('status').lower():\n # 10th line in the detail report contains the required info\n c = \"grep -A 10 {} {}\".format(n.get('server'), f)\n c += \" | grep OS_Install_Status_Detail\"\n e = self._remote_cmd(c).get('output').split(':', 1)[1]\n LOG.info(\"{} failed due to {}\".format(n['server'], e))",
"def get_status_servers(p_id_guilda):\r\n json_file = select_data.get_guild_servers(p_id_guilda)\r\n css_mensagem = '```css\\n'\r\n mensagem = ''\r\n\r\n for x in json_file:\r\n\r\n rcon_host = x['ip_server']\r\n rcon_port = int(x['rcon_port'])\r\n rcon_pwd = x['rcon_password']\r\n\r\n mensagem = mensagem + '\\n### Id Server: ' + str(x['id_server_sk']) + ' - ' + x['name_guild'] + '\\n### Mapa: ' + x['map_name'] + '\\n### Modo: ' + x['mode_server'] + '\\n### Patreon: ' + x['map_patreon'] \r\n\r\n try:\r\n rcon.RCONClient(rcon_host, rcon_port, rcon_pwd)\r\n mensagem = mensagem + '\\n### Status: Online' + '\\n### IP Server: ' + x['ip_server'] + '\\n -----------------------------------------------------------------------------------' + '\\n'\r\n \r\n except:\r\n mensagem = mensagem + '\\n### Status: ::Offline' + '\\n### IP Server: ' + x['ip_server'] + '\\n -----------------------------------------------------------------------------------' + '\\n' \r\n pass\r\n \r\n \r\n css_mensagem = css_mensagem + mensagem + '\\n ```'\r\n return css_mensagem",
"def build_server_list(client, diff):\n print(\"INFO: Fetching each server not in 'Communication failure' anymore...\")\n servers = []\n for server in diff:\n servers.append(client.server(str(server.id)))\n return servers",
"def checkServerResponse(self, ret):\n\n logMsg = ''\n if ret == 10:\n # overlaod\n logMsg = 'Error The server %s refused to accept the task %s because it is overloaded\\n'%(self.serverName, self.crab_task_name)\n logMsg += '\\t For Further infos please contact the server Admin: %s'%self.server_admin\n elif ret == 14:\n # Draining\n logMsg = 'Error The server %s refused to accept the task %s because it is Draining out\\n'%(self.serverName, self.crab_task_name)\n logMsg += '\\t remaining jobs due to scheduled maintainence\\n'\n logMsg += '\\t For Further infos please contact the server Admin: %s'%self.server_admin\n elif ret == 101:\n # overlaod\n logMsg = 'Error The server %s refused the submission %s because you asked a too large task. Please submit by range'%(self.serverName, self.crab_task_name)\n elif ret == 11:\n # failed to push message in DB\n logMsg = 'Server unable to release messages into DB. Task %s won\\'t be submitted.'%self.crab_task_name\n elif ret == 12:\n # failed SOAP communication\n logMsg = 'Error The server %s refused to accept the task %s. It could be under maintainance. \\n'%(self.serverName, self.crab_task_name)\n logMsg += '\\t For Further infos please contact the server Admin: %s'%self.server_admin\n elif ret == 20:\n # failed to push message in PA\n logMsg = 'Server unable to release messages to other components. Task %s won\\'t be submitted.'%self.crab_task_name\n elif ret == 22:\n # failed SOAP communication\n logMsg = 'Error during SOAP communication with server %s'%self.serverName\n elif ret == 33:\n # uncompatible client version\n logMsg = 'Error You are using a wrong client version for server: %s\\n'%self.serverName\n logMsg += '\\t For further informations about \"Servers available for users\" please check here:\\n \\t%s '%self.ServerTwiki\n else:\n logMsg = 'Unexpected return code from server %s: %d'%(self.serverName, ret)\n\n # print loggings\n if logMsg != '':\n common.logger.info(logMsg)\n return ret",
"def check(self):\n curtime = time.time()\n failed_watchdogs = []\n for watchdog, filename, st_info in self._list_gen(self.watchdog_path):\n if curtime < st_info.st_mtime:\n # If the watchdog is set in the future, then service is still\n # alive\n pass\n\n else:\n # Otherwise, this is a watchdog failure\n _LOGGER.warning('Watchdog failed: %r.', watchdog)\n failed_watchdogs.append((filename, watchdog, st_info.st_mtime))\n\n # Retreive the payload of failed watchdogs\n if failed_watchdogs:\n failures = []\n for filename, name, failed_at in failed_watchdogs:\n try:\n with open(filename, 'r') as f:\n data = f.read()\n except OSError:\n _LOGGER.exception('Reading watchdog data')\n data = ''\n failures.append((name, failed_at, data))\n\n return failures\n\n else:\n return []",
"def remote_status():",
"def server_failure(self, resp):\n return resp[0] in FAILURE_CODES",
"def catch_log(driver, nombre):\n msn = \"\"\n msn1 = \"\"\n msn2 = \"\"\n saveFile = \"../Data/logs/\" + nombre + '.txt'\n if not os.path.exists('../Data/logs/'):\n os.makedirs('../Data/logs/')\n for entry in driver.get_log('browser'):\n if \"error\" in entry['message']:\n msn1 += entry['message'] + '\\n'\n elif entry['level'] == 'SEVERE':\n msn2 += entry['message'] + '\\n'\n else:\n pass\n if (msn == \"\") and (msn1 == \"\") and (msn2 == \"\"):\n return ''\n else:\n arch = open(saveFile,'w')\n arch.write(msn)\n arch.write(msn1)\n arch.write(msn2)\n arch.close()\n return saveFile",
"def check_app(servers):\n tmp_str = ''\n\n for host, services in servers.items():\n counter = 0\n for service, port in services.items():\n result = tcp_test.check_port(host, port)\n\n if result:\n status = online_str\n else:\n status = offline_str\n\n if counter == 0:\n tmp_str += '<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td>'.format(host, service, port, status)\n else:\n tmp_str += '<tr><td></td><td>{}</td><td>{}</td><td>{}</td>'.format(service, port, status)\n counter += 1\n\n return tmp_str",
"def check_servers (self):\n results = []\n\n for server_name in self.servers_sumtimes.keys():\n server = self._get_server(server_name)\n\n time = float(self.servers_sumtimes[server_name]) / self.servers_counts[server_name]\n server_time, created = models.HBase_ServerTime.objects.get_or_create(server=server)\n server_avg_state = server_time.averager_state\n avg_time = server_avg_state.value()\n\n if avg_time is not None and self.is_anomaly(avg_time, time):\n msg=\"\"\"\nAverage server's probe time exceeded average + threshold (%.2f%%). Values:\n- server: %s\n- history response time %.2f ms\n- probe time %.2f ms\n\"\"\" % (anomaly_threshold * 100.0, server_name, avg_time, time)\n result = ProcessAnomaly(is_region=False, object_name=server_name,\n text=\"Request time %.2f ms (avg is %.2f)\" % (time, avg_time),\n description=msg)\n results.append(result)\n else:\n # Normal value, update state\n self.averager.update(time, server_avg_state)\n server_time.averager_state = server_avg_state\n server_time.save()\n return results",
"def recover_failed_downloads():\n failed_files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='failed'\")\n\n for file in failed_files:\n attempts = jobtracker.query(\"SELECT * FROM download_attempts \" \\\n \"WHERE file_id=%d\" % file['id'])\n if len(attempts) < config.download.numretries:\n # download can be retried\n jobtracker.query(\"UPDATE files \" \\\n \"SET status='retrying', \" \\\n \"updated_at='%s', \" \\\n \"details='Download will be attempted again' \" \\\n \"WHERE id=%s\" % \\\n (jobtracker.nowstr(), file['id']))\n else:\n # Abandon this file\n if os.path.exists(file['filename']):\n os.remove(file['filename'])\n jobtracker.query(\"UPDATE files \" \\\n \"SET status='terminal_failure', \" \\\n \"updated_at='%s', \" \\\n \"details='This file has been abandoned' \" \\\n \"WHERE id=%s\" % \\\n (jobtracker.nowstr(), file['id']))",
"def get_server_logs(self):\n self.response.content\n binary_body = re.split('--==.*==', self.response.content)[2].split('\\r\\n')[5]\n\n f = StringIO.StringIO()\n f.write(bytearray(binary_body))\n\n memory_zip = ZipFile(f)\n zip_content = {name: memory_zip.read(name) for name in memory_zip.namelist()}\n oracc_log = zip_content['oracc.log']\n request_log = zip_content['request.log']\n\n # Check if server returns a lemmatised file\n autolem = None \n for key, value in zip_content.iteritems():\n if key.endswith(\"autolem.atf\"):\n autolem = value\n\n print zip_content.keys()\n print \"@\"*30\n print oracc_log\n print \"@\"*30\n print request_log\n print \"@\"*30\n if autolem:\n print autolem\n print \"@\"*30\n\n return oracc_log, request_log, autolem",
"def log(failure):\n return self._env.logger.warning('[ping] {}'.format(failure.getErrorMessage()))",
"def getIssuesEmail(self):\r\n base_url = \"http://beta.boost.org/development/tests/\"\r\n base_url += self.branch\r\n base_url += \"/developer/\";\r\n got_issues = False\r\n\r\n # Ping the server by looking for an HTML file\r\n print \"Pinging the server to initiate extraction...\"\r\n ping_url = base_url + \"issues.html\"\r\n os.system('curl -O ' + ping_url)\r\n os.system('rm -f issues.html')\r\n \r\n for x in range(30):\r\n # Update issues-email.txt\r\n url = base_url + \"issues-email.txt\"\r\n print 'Retrieving issues email from ' + url\r\n os.system('rm -f issues-email.txt')\r\n os.system('curl -O ' + url)\r\n\r\n if self.parseIssuesEmail():\r\n return True\r\n\r\n print 'Failed to fetch issues email. '\r\n time.sleep (30)\r\n\r\n return False",
"def replication_check():\n\n try:\n entries = [os.path.join(current_app.config['REPLICATION_PACKETS_DIR'], e)\n for e in os.listdir(current_app.config['REPLICATION_PACKETS_DIR'])]\n except OSError as e:\n logging.warning(e)\n return Response(\"UNKNOWN \" + str(e), mimetype='text/plain')\n\n pattern = re.compile(\"replication-[0-9]+.tar.bz2$\")\n entries = filter(lambda x: pattern.search(x), entries)\n entries = filter(os.path.isfile, entries)\n entries = _sort_natural(entries)\n\n if len(entries) == 0:\n return Response(\"UNKNOWN no replication packets available\", mimetype='text/plain')\n\n resp = \"OK\"\n last = -1\n pattern = re.compile(\"replication-([0-9]+).tar.bz2$\")\n for entry in entries:\n m = pattern.search(entry)\n if not m:\n resp = \"UNKNOWN Unkown files in the replication directory\"\n break\n num = int(m.groups()[0])\n if last < 0:\n last = num - 1\n if last != num - 1:\n resp = \"CRITICAL Replication packet %d is missing\" % (num - 1)\n last = num\n\n if resp != \"OK\":\n return Response(resp, mimetype='text/plain')\n \n last_packet_age = time.time() - os.path.getmtime(entries[-1]) \n if last_packet_age > MAX_PACKET_AGE_CRITICAL:\n resp = \"CRITICAL Latest replication packet is %.1f hours old\" % (last_packet_age / 3600)\n elif last_packet_age > MAX_PACKET_AGE_WARNING:\n resp = \"WARNING Latest replication packet is %.1f hours old\" % (last_packet_age / 3600)\n\n return Response(resp, mimetype='text/plain')",
"def report_failure(self):\n if self.email:\n if self.wiki.config.admin_mail and self.wiki.config.admin_mail.lower() != 'nomail':\n subject = \"Dump failure for \" + self.wiki.db_name\n message = self.wiki.config.read_template(\"errormail.txt\") % {\n \"db\": self.wiki.db_name,\n \"date\": self.wiki.date,\n \"time\": TimeUtils.pretty_time(),\n \"url\": \"/\".join((self.wiki.config.web_root, self.wiki.db_name,\n self.wiki.date, ''))}\n self.mail(subject, message)",
"def test_unexpectedFilename(self):\n self.write(\"service1.abcd\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.pump()\n self.assertNodesEqual(knownNodes(self.disco, \"service1\", \"staging\"), [])",
"def connectionLost(self, hostname, projects, ip, port):\n for project in [i.id for i in projects if i != None]:\n sc = self.getScanner(hostname, project)\n try:\n sc.logConnection(time.time(), ip, port, 'lost')\n except ValueError:\n pass",
"def test_retrieve_files_error_message(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmpp/remote_pacha')\n sys.stdout = MockSys()\n sys.exit = MockSys()\n run.retrieve_files()\n actual = sys.stdout.captured()\n expected = \"\"\"\nPacha was not able to retrieve the files from the SSH server provided.\nCheck your configuration file settings and try again.\n\"\"\"\n self.assertEqual(actual, expected)",
"def save_failed_tests_info():\n global g_failed_tests_info_dict\n if len(g_failed_testnames) > 0: # found failed test\n if os.path.isfile(g_failed_tests_dict) and os.path.getsize(g_failed_tests_dict) > 10:\n try:\n g_failed_tests_info_dict=json.load(open(g_failed_tests_dict, 'r'))\n except:\n init_failed_tests_dict()\n\n # with open(g_failed_tests_dict, 'rb') as dict_file:\n # g_failed_tests_info_dict = pickle.load(dict_file)\n else: # file not found, create new dict\n init_failed_tests_dict()\n\n with open(g_summary_text_filename, 'a') as failed_file:\n with open(g_daily_failure_csv, 'w') as daily_failure:\n for index in range(len(g_failed_testnames)):\n\n testInfo = ','.join([g_timestring, g_job_name, str(g_build_id), g_git_hash, g_node_name,\n g_unit_test_type, g_failed_testnames[index]])\n failed_file.write(testInfo+'\\n')\n daily_failure.write(testInfo+'\\n')\n # update failed tests dictionary\n update_failed_test_info_dict(g_failed_testnames[index], g_failed_test_paths[index])\n json.dump(g_failed_tests_info_dict, open(g_failed_tests_dict, 'w'))",
"def server_failure(self, resp, ignore_codes=[]):\n return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes)",
"def updateServersStatus(servers):\n session = Queries.createSession()\n try:\n for server in servers:\n ip = server[0]\n port = server[1]\n status = server[2]\n dict_status = {\"status\": unicode(status)}\n if status == 'ONLINE':\n dict_status[\"last_online\"] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n session.query(FileServer).filter_by(ip=ip, port=port).update(dict_status)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()",
"def errors(conf, daemon):\n # persisted dict interface for long term memory\n errors = Shove('file://{0}'.format(conf.app.errors), protocol=2, flag='r')\n if any(errors):\n print(\"errors found\")\n for path, error in six.iteritems(errors):\n pp(error)\n errors.close()\n exit(1)\n # ⏏ exit the program with an error\n else:\n print(\"no errors found - OK\")\n print()\n errors.close()",
"def renew_status():\n status = []\n for host, host_pattern in CONFIG['hosts']:\n status.append({\n 'host': host,\n 'status': check_host([host, host_pattern])[1],\n })\n\n return status",
"def server_chaos(self):\n for uid, server in self.all_servers_info.all_servers.items():\n if self.maybe():\n print(f\"Chaos Monkey attacked server '{server.server_name}'\")\n after_attack_status = self.maybe()\n server.status = after_attack_status\n if not after_attack_status:\n print(f\"Server '{server.server_name}' went offline by the attack\")",
"def check_download_attempts():\n attempts = jobtracker.query(\"SELECT * FROM download_attempts \" \\\n \"WHERE status='downloading'\")\n\n active_ids = [int(t.getName()) for t in threading.enumerate() \\\n if isinstance(t, DownloadThread)]\n\n for attempt in attempts:\n if attempt['id'] not in active_ids:\n dlm_cout.outs(\"Download attempt (ID: %d) is no longer running.\" % \\\n attempt['id'])\n queries = []\n queries.append(\"UPDATE files \" \\\n \"SET status='unverified', \" \\\n \"updated_at='%s', \" \\\n \"details='Download thread is no longer running' \"\n \"WHERE id=%d\" % (jobtracker.nowstr(), attempt['file_id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='unknown', \" \\\n \"updated_at='%s', \" \\\n \"details='Download thread is no longer running' \"\n \"WHERE id=%d\" % (jobtracker.nowstr(), attempt['id']))\n jobtracker.query(queries)"
] | [
"0.64982885",
"0.6221347",
"0.61516804",
"0.5741765",
"0.57228017",
"0.56389916",
"0.56194955",
"0.5510259",
"0.5408489",
"0.5404776",
"0.53996354",
"0.5329651",
"0.5317809",
"0.5298589",
"0.5267512",
"0.52575266",
"0.5253678",
"0.5225752",
"0.5221685",
"0.51375675",
"0.51358706",
"0.5080256",
"0.5064049",
"0.505813",
"0.50471383",
"0.50234485",
"0.5003132",
"0.49958053",
"0.49945655",
"0.49881446"
] | 0.85111237 | 0 |
Compare list of servers in file with current ones to find recovered servers | def find_recovered_servers(client):
print("INFO: Determining recovered servers by comparing current servers with list in file...")
current_servers_list = []
for server in client.servers():
if server.status == "server_update_comm_fail":
current_servers_list.append({"id": server.id})
with open(os.path.dirname(__file__) + '/communication_failure_list.txt') as file:
server_list = [json.loads(line) for line in file]
diff = [i for i in current_servers_list +
server_list if i not in current_servers_list or i not in server_list]
return diff | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_file(servers):\n print(\"INFO: Replacing server list in file with recent one...\")\n if os.path.exists(os.path.dirname(__file__) + '/communication_failure_list.txt'):\n try:\n os.remove(os.path.dirname(__file__) +\n '/communication_failure_list.txt')\n except OSError as error:\n print(\"Error: %s - %s.\" % (error.filename, error.strerror))\n find_communication_failure_servers(servers)",
"def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return",
"def getServersAddrs(i_ServerList):\n serverAddrList =[]\n\n with open(PLATFORMS_TO_SERVER_FILE, \"r\") as txtFile:\n data = txtFile.readlines()\n table = []\n filteredTable = []\n for line in data:\n if line.startswith(\"#\"):\n continue\n eachLine = line.split(\";\")\n table.append(eachLine)\n filteredTable.append([])\n for element in range(0, len(table)):\n filteredTable.append(table[element][0])\n\n with open(SERVERS_IP_PATH) as serversFile:\n serversFileLines = serversFile.readlines()\n for line in serversFileLines:\n if line[-1:] == '\\n':\n line = line[:-1]\n serverDetails = line.split(\",\")\n if (i_ServerList != True):\n if(serverDetails[0] in i_ServerList and serverDetails[0] in filteredTable):\n serverAddrList.append(serverDetails[1])\n else:\n if(serverDetails[0] in filteredTable):\n serverAddrList.append(serverDetails[1])\n \n return serverAddrList",
"def checkIfServerExist(i_servers):\n inputServerNames = i_servers.split(\",\")\n allServersExist = inputServerNames\n serversNotExist = []\n serversNameList = []\n with open(SERVERS_IP_PATH) as serversFile:\n serversFileLines = serversFile.readlines()\n if serversFileLines[-1:] == '\\n':\n serversFileLines = serversFileLines[:-1]\n for server in serversFileLines:\n serverNames = server.split(\",\")\n serversNameList.append(serverNames[0])\n for server in inputServerNames:\n if server not in serversNameList:\n serversNotExist.append(server)\n if (len(serversNotExist) > 0):\n print str(serversNotExist) + \" do(es) not exist.\"\n allServersExist = False\n return allServersExist",
"def find_communication_failure_servers(servers):\n print('INFO: Finding servers with \"Communication failure\" status and saving result in file')\n with open(os.path.dirname(__file__) + '/communication_failure_list.txt', 'w+') as file:\n for server in servers:\n if server.status == \"server_update_comm_fail\":\n json.dump({\"id\": server.id}, file)\n file.write(os.linesep)",
"def load_server_list(filename):\n if not os.path.isfile(filename):\n return #ignore this error for now\n fo=open(filename,\"r\")\n rd=fo.read()\n fo.close()\n __load_server_list(rd)",
"def compare_actual_adapters(adapter_ip,config_file):\n adapter_modified = {} #dict to store changes\n match = False\n fp = open(config_file,\"r+\")\n stored_adapters = fp.read()\n #modified or new adapters\n for key,value in adapter_ip.items(): #for every current adapter ipv6 addresses\n for line in stored_adapters.split('\\n')[:-1]: #for every adapter from config file\n if (key+\": \"+value) == line: #create same format string from current adapter settings as it is in config file and compare them\n match = True\n break\n else:\n match = False \n if not match: #current adapter or its settings is not in conf file\n try:\n old_ip = re.search(str(key)+\"\\: (.*)\",stored_adapters).group(1) #stored old ipv6 address\n adapter_modified[key] = [old_ip,value] #ipv6 address modified from one to another\n except AttributeError:\n adapter_modified[key] = [\"\",value] #adapter is new, no old ipv6 address\n match = False\n match = False\n #deleted adapters, adapter from config file not in current scan\n for line in stored_adapters.split('\\n')[:-1]: #for every adapter from config file\n for key,value in adapter_ip.items(): #for every current adapter ipv6 addresses\n adapter = re.search(\"(.*)\\: .*\",line).group(1)\n if adapter == key: #adapter from config file is in current scan\n match = True\n break\n else: #adapter from config file is not in current scan, it was deleted or disabled\n match = False\n if not match:\n ip = re.search(\"(.*)\\: (.*)\",line).group(2)\n adapter_modified[adapter] = [ip,\"\"] #old ipv6 from conf file, no new ipv6], adapter deleted\n return adapter_modified",
"def check_if_new_operators_in_live_analysis_file(listNewOp):\n if os.path.isfile(liveAnalysisFile):\n resultList = []\n onlineOperatorList = []\n with open(liveAnalysisFile,'r') as csvFile:\n reader = csv.DictReader(csvFile)\n onlineOperatorList = list(reader)\n isInFile = False\n for element in listNewOp:\n for operatorActive in onlineOperatorList :\n if element['HOST'] == operatorActive['HOST'] and element['PORT'] == operatorActive['PORT']:\n isInFile = True\n break\n if not isInFile:\n resultList.append(element)\n isInFile = False\n return resultList",
"def read_server_file():\n try:\n with open(\"servers.json\", \"r\") as server_file:\n try:\n data = json.load(server_file)\n except:\n data = []\n return data\n except IOError:\n return []",
"def compare_node14(self, index=0):\n data_orig = self.read_full_file('node14p1.dat')\n\n # append new sets of data\n data_out = self.read_full_file('node14p1_' + str(index) + '.status_1327701.dat')\n data_out += self.read_full_file('node14p1_' + str(index) + '.ctdmo_1327700.dat')\n data_out += self.read_full_file('node14p1_' + str(index) + '.wa_wfp_1327721.dat')\n data_out += self.read_full_file('node14p1_' + str(index) + '.wc_wfp_1327721.dat')\n data_out += self.read_full_file('node14p1_' + str(index) + '.we_wfp_1327721.dat')\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")",
"def test_changedFile(self):\n self.write(\"service1.json\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.pump()\n self.write(\"service1.json\", [{\"host\": \"host3\", \"port\": 125},\n {\"host\": \"host4\", \"port\": 126}])\n self.pump()\n self.assertNodesEqual(\n knownNodes(self.disco, \"service1\", \"staging\"),\n [self.node(\"service1\", \"host3\", 125),\n self.node(\"service1\", \"host4\", 126)])",
"def compare_node16(self, index=0):\n data_orig = self.read_full_file('node16p1.dat')\n\n # append new sets of data\n data_out = self.read_full_file('node16p1_' + str(index) + '.status_1328001.dat')\n data_out += self.read_full_file('node16p1_' + str(index) + '.status_1328601.dat')\n data_out += self.read_full_file('node16p1_' + str(index) + '.adcps_1328603.dat')\n data_out += self.read_full_file('node16p1_' + str(index) + '.ctdmo_1328600.dat')\n data_out += self.read_full_file('node16p1_' + str(index) + '.dosta_1328001.dat')\n data_out += self.read_full_file('node16p1_' + str(index) + '.flort_1328001.dat')\n data_out += self.read_full_file('node16p1_' + str(index) + '.phsen_1328001.dat')\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")\n\n return data_out",
"def compare_node17(self, index=0):\n data_orig = self.read_full_file('node17p1.dat')\n\n # append new sets of data\n data_out = self.read_full_file('node17p1_' + str(index) + '.status_1328501.dat')\n data_out += self.read_full_file('node17p1_' + str(index) + '.status_1236901.dat')\n data_out += self.read_full_file('node17p1_' + str(index) + '.adcps_1328503.dat')\n data_out += self.read_full_file('node17p1_' + str(index) + '.ctdmo_1328500.dat')\n data_out += self.read_full_file('node17p1_' + str(index) + '.dosta_1236901.dat')\n data_out += self.read_full_file('node17p1_' + str(index) + '.flort_1236901.dat')\n data_out += self.read_full_file('node17p1_' + str(index) + '.phsen_1236901.dat')\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")\n\n return data_out",
"def diff_files(self):\n pdup = []\n # Print out files that are only found in the DB\n if self.comparison_info['dbonly']:\n print(\"Files only found in the database --------- \")\n for fname in sorted(self.comparison_info['dbonly']):\n fdb = self.files_from_db[fname]\n print(f\"\\t{fdb['path']}/{fname}\")\n\n # print out files that are only found on disk\n if self.comparison_info['diskonly']:\n print(\"\\nFiles only found on disk --------- \")\n for fname in sorted(self.comparison_info['diskonly']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fdisk['relpath']}/{fname}{addon}\")\n if self.comparison_info['pathdup']:\n print(\"\\n The following files had multiple paths on disk (path filesize):\")\n listing = {}\n for fname in self.comparison_info['pathdup']:\n pdup.append(fname)\n listing[self.comparison_info['pathdup']['relpath']] = self.comparison_info['pathdup']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different paths on disk and in the DB\n if self.comparison_info['path']:\n print(\"\\nPath mismatch (file name, db path, disk path) --------- \")\n for fname in sorted(self.comparison_info['path']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname}\\t{fdb['path']}\\t{fdisk['relpath']}{addon}\")\n if self.comparison_info['duplicates']:\n print(\" The following files have multiple disk paths on disk (path filesize):\")\n for fname in self.comparison_info['duplicates']:\n pdup.append(fname)\n listing[self.comparison_info['duplicates']['relpath']] = self.comparison_info['duplicates']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different file sizes on disk and in the DB\n if self.comparison_info['filesize']:\n print(\"\\nFilesize mismatch (File name, size in DB, size on disk) --------- \")\n for fname in sorted(self.comparison_info['filesize']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['filesize']} {fdisk['filesize']}\")\n\n # Print files that have different md5sum on disk and in DB\n if self.md5sum and 'md5sum' in self.comparison_info and self.comparison_info['md5sum']:\n print(\"\\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- \")\n for fname in sorted(self.comparison_info['md5sum']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['md5sum']} {fdisk['md5sum']}\")\n\n # Print out files that have multiple paths on disk\n if len(self.duplicates) > len(pdup):\n print(\"\\nThe following files have multiple disk paths on disk (path filesize):\")\n for dup in sorted(self.duplicates):\n if dup not in pdup:\n listing = {}\n for fls in self.duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_db 
and self.files_from_db[dup]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")\n\n # Print out files that have multiple endtries in the DB\n if self.db_duplicates:\n print(\"\\nThe following files have multiple entries in the database (path filesize):\")\n for dup in sorted(self.db_duplicates):\n listing = {}\n for fls in self.db_duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_disk and self.files_from_disk[dup]['path'] == pth:\n addon = \" (Disk Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")",
"def find_outdated_servers(self, zone, local_serial):\n outdated = {}\n for server in self.servers:\n remote_serial = query_serial(zone, server)\n if not remote_serial or remote_serial < local_serial:\n outdated[self.servers[server]] = remote_serial\n\n return outdated",
"def compare_db(compressed=True):\r\n #wipe demand file\r\n with open(\"{}DEMAND.txt\".format(db_folder_path), \"w\", encoding=\"utf8\") as demand_file:\r\n demand_file.write(str([]))\r\n #взять все файлы, которые есть в папке с дб,\r\n #и всем сделать compare_data, по их порядку создания\r\n if compressed:\r\n _db_files = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json.gz'.format(db_folder_path)), key=os.path.getctime)\r\n else:\r\n _db_files = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json'.format(db_folder_path)), key=os.path.getctime)\r\n #create temporary lists, that will hold db data\r\n _older_vend_data = []\r\n _newer_vend_data = []\r\n #\r\n _older_buy_data = []\r\n _newer_buy_data = []\r\n #iterate through all dbs\r\n for _n in range(len(_db_files)-1):\r\n print('comparing {} and {} db out of {}'.format(_n+1, _n+2, len(_db_files)))\r\n\r\n #get data from dbs\r\n if _older_vend_data == [] and _older_buy_data == []:\r\n #take older file\r\n get_vend_data(_db_files[_n], _older_vend_data, _older_buy_data)\r\n #compare it with newer file (+1)\r\n get_vend_data(_db_files[_n+1], _newer_vend_data, _newer_buy_data)\r\n #compare 2 datas and fill demand file\r\n compare_data(_older_vend_data, _newer_vend_data)\r\n compare_data(_older_buy_data, _newer_buy_data)\r\n #saving data that is already compared, and making it count as old one\r\n _older_vend_data = _newer_vend_data\r\n _older_buy_data = _newer_buy_data\r\n #wipe new data for next cycle\r\n _newer_vend_data = []\r\n _newer_buy_data = []",
"def check_master(source,master,out):\n\n # check if elements in source are in master\n print('Starting cross-checking between source %s and master %s files...'%(str(source),str(master))) \n with open(master) as f_master, open(source) as f_source:\n source = set(f_source.read().splitlines())\n master_lines = set(f_master.read().splitlines())\n\n with open(out,'w') as infile:\n for line in source: \n within_file=line not in master_lines\n if within_file==True: infile.write(str(line)+'\\n')\n print('1. Cross-checking complete!')",
"def compare_heads(list1, list2, diff_list):\n for line in list1:\n if line not in list2:\n diff_list.append(FILE1_SIGN + line)\n for line in list2:\n if line not in list1:\n diff_list.append(FILE2_SIGN + line)",
"def checkdifferences(oldfile, changelist, num):\n if num == 1: #combining the unique values of a list & file into 1 list\n newcontent = changelist\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n combined = combinelists(currentcontent, newcontent)\n return combined\n if num == 2: # combine the unique values of 2 files into 1 list\n currentcontent = csv_read('{}.csv'.format(changelist)) #clientlist\n combined = []\n for each in currentcontent:\n # for elk in each:\n combined + each\n newlst = combinelists(currentcontent, combined)\n return newlst\n if num == 3: # removing the doubles from each list\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n changecontent = changelist\n newlist = dividelists(currentcontent, changecontent)\n return newlist",
"def match_hosts(needle):\n\n matched_hosts = []\n with open(known_hosts_path, \"r\") as known_hosts_file:\n for line in known_hosts_file:\n host, _, _ = line.split(\" \")\n\n if needle in host:\n matched_hosts.append(host)\n\n return matched_hosts",
"def check_shelve_file(serverName: int):\n with shelve.open(shelve_file) as db:\n if str(serverName) in db:\n data = db[str(serverName)]\n return data",
"def GetPeersAndPiecesForFile(self,file):\n if (not self.fileDatabase.has_key(file)):\n raise LocalFileNonExistent, file\n fileRecord = self.fileDatabase[file]\n result = []\n append = result.append\n for pieceName in fileRecord.pieces.keys():\n pieceRecord = fileRecord.pieces[pieceName]\n if (pieceRecord.StoredP()):\n append((pieceName,pieceRecord.GetPeer(),pieceRecord.size))\n # Note: we set append=result.append before the for loop.\n else:\n dibs_logger.Logger.PrintAndLog('WARNING: ignoring ' +\n 'piece \"' + pieceName\n + '\" because it was marked '\n + 'as lost.\\n',\n dibs_logger.LOG_WARNING)\n return result",
"def build_server_list(client, diff):\n print(\"INFO: Fetching each server not in 'Communication failure' anymore...\")\n servers = []\n for server in diff:\n servers.append(client.server(str(server.id)))\n return servers",
"def recip_compare(s_file, c_file):\r\n recip_list = []\r\n cross_match = 0\r\n for row in s_file.itertuples():\r\n Q_gene = row.Query_gene\r\n adj_gene_interm = row.adjacent_genes.split(\";\")\r\n # list comp to strip metadata and keep gene name\r\n adj_genes = [i.split(\",\")[0] for i in adj_gene_interm]\r\n counter = 0\r\n for adj_gene in adj_genes:\r\n Q_search = c_file[c_file.Query_gene.isin([adj_gene])].index.tolist()\r\n # check to to see that the is one to one match first\r\n if Q_search and c_file.at[Q_search[0], \"gene_type\"] ==\"one_to_one_mapping\":\r\n # Q_search will always results in a list size of 1\r\n S_search = c_file.at[Q_search[0], \"Sytentic_genes\"].split(\",\")[0]\r\n if S_search == Q_gene:\r\n counter += 1\r\n if counter == len(adj_genes):\r\n cross_match += 1\r\n recip_list.append(row)\r\n recip_df = pd.DataFrame(recip_list)\r\n return(recip_df)",
"def update_servers(self, output_file, current_time):\n\n # Check the servers length list.\n if len(self.server_statistics) >= 1000 or \\\n _get_ttime(current_time, spd_factor) == \"24:00:00\":\n\n # Open a context manager for the file.\n with open(output_file, 'a') as the_file:\n\n # Initialize a Writer object.\n writer = csv.writer(the_file, delimiter=\",\")\n\n # Iterate through the deque and write out.\n for server in self.server_statistics:\n writer.writerow(server)\n\n # Clear the queue of Server objects.\n self.server_statistics.clear()",
"def compare_old_and_new_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"statusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,\"status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"statusfiles\",recipedir,\"status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n myidx=0\n while myidx < len(scratchstatusfile.data):\n oldline = controlstatusfile.data[myidx]\n newline = scratchstatusfile.data[myidx]\n if \"#\" in oldline:\n pass\n else:\n ingred = oldline.split(\":\")[0].strip()\n oldstatus = oldline.split(\":\")[1].strip()\n newstatus = newline.split(\":\")[1].strip()\n if (oldstatus == \"P\") and (newstatus == \"P\"):\n rdict[recipedir][ingred]=\"AVOID\"\n elif (oldstatus == \"C\") and (newstatus == \"C\"):\n rdict[recipedir][ingred]=\"AVOID\"\n else:\n rdict[recipedir][ingred]=\"send\"\n myidx = myidx + 1\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict",
"def test(self):\n\n resultdict = {}\n for server in self.server_list:\n resultdict.update(**{server: self.fetch(server)})\n\n ips = sorted(resultdict.values())\n ips_set = set(ips)\n print(\"\\nNumber of servers: {}\".format(len(self.server_list)))\n\n for ip, occurence in zip(ips_set, map(lambda x: ips.count(x), ips_set)):\n print(\n \"{0} = {1} ocurrenc{2}\".format(\n ip if len(ip) > 0 else \"broken server\",\n occurence,\n \"e\" if occurence == 1 else \"ies\",\n )\n )\n print(\"\\n\")\n if any([i == \"\" for i in resultdict.values()]):\n print(\"\\n________list of failed servers_______\")\n for _url, _ip in resultdict.items():\n if _ip == \"\":\n print(_url)\n print(\"\\n\")\n return resultdict",
"def compare_old_and_new_change_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n ingreddirs = dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\",recipedir))\n for ingreddir in ingreddirs:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,ingreddir,\"change_status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"changestatusfiles\",recipedir,ingreddir,\"change_status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n rdict[recipedir][ingreddir]=\"send\"\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict",
"def check_scanned(fname):\n try:\n for line in open(fname):\n if \"(1 host up)\" in line:\n return True\n except IOError: return False\n return False",
"def list_vservers():\n\n result = {}\n\n for file in os.listdir(cfg.ETC_VSERVERS):\n\n cfgdir = os.path.join(cfg.ETC_VSERVERS, file)\n\n if not os.path.isdir(cfgdir) or file.startswith('.'):\n # not a config \n continue\n\n result[file] = get_vserver_config(file)\n\n return result"
] | [
"0.67536014",
"0.6489799",
"0.6314629",
"0.5987139",
"0.5953733",
"0.58189934",
"0.5703676",
"0.55950475",
"0.55163497",
"0.5493828",
"0.54845756",
"0.54702073",
"0.54463625",
"0.5411403",
"0.53845555",
"0.5359683",
"0.5357033",
"0.53492767",
"0.52340114",
"0.52160895",
"0.5194266",
"0.5185589",
"0.5181505",
"0.51743233",
"0.51419187",
"0.5133338",
"0.51208097",
"0.50587326",
"0.5056355",
"0.5048878"
] | 0.7713606 | 0 |
Make an HTML list from server list for email | def create_body_html(client, server_list):
servers_html = ""
for server in server_list:
link = '<a href="{}/servers/{}">{}</a>'.format(
client.api_url, server.id, server.hostname)
html = """{}<br />""".format(link)
servers_html += html
return servers_html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_list_html(self, items):\n html = \"\"\"\n <html>\n\t\t\t<head>\n\t\t\t\t<title>OpenFDA Cool App</title>\n\t\t\t</head>\n\t\t\t<body>\n <ol>\n \"\"\"\n\n for item in items:\n html += \"<li>\" + item + \"</li>\\n\"\n\n html += \"\"\"\n </ol>\n\t\t\t</body>\n </html>\n \"\"\"\n\n return html",
"def unordered_list_html(list_items: List[str]) -> str:\n return \"<ul>{}</ul>\".format(\"\".join(list_items))",
"def email_list(to_list, template_path, from_address, context_dict):\n from django.core.mail import send_mail\n from django.template import loader, Context\n\n nodes = dict(\n (n.name, n)\n for n in loader.get_template(template_path).template\n if n.__class__.__name__ == \"BlockNode\"\n )\n\n context = Context(context_dict)\n\n def render_node(node, con):\n return nodes[node].render(con)\n\n for address in to_list:\n send_mail(\n render_node(\"subject\", context),\n render_node(\"plain\", context),\n from_address,\n [address],\n )",
"def sendListing(self, names, paths, title=''):\n\t\tlines = []\n\t\t\n\t\tlines.append(\"<html>\")\n\t\tlines.append(\" <head>\")\n\t\tlines.append(\" <title>%s</title>\" % title)\n\t\tlines.append(\" </head>\")\n\t\tlines.append(\" <body>\")\n\t\tif title:\n\t\t\tlines.append(\" <h1>%s</h1>\" % title)\n\t\tlines.append(\" <hr/>\")\n\t\tlines.append(\" <ul>\")\n\t\tfor name, path in zip(names, paths):\n\t\t\tlines.append(\" <li><a href=\\\"%s\\\">%s</a></li>\" % (path, name))\n\t\tlines.append(\" </ul>\")\n\t\tlines.append(\" <hr/>\")\n\t\tlines.append(\" </body>\")\n\t\tlines.append(\"</html>\")\n\t\t\n\t\tlisting = '\\n'.join(lines)\n\t\t\n\t\tself.send_response(200)\n\t\tself.send_header(\"Content-type\", \"text/html\")\n\t\tself.send_header(\"Content-Length\", len(listing))\n\t\tself.end_headers()\n\t\tself.wfile.write(listing)",
"def htmlList(contents, attr='', listType='ul'):\n return '<%s%s>\\n%s</%s>\\n' % (listType,sep(attr),contents,listType)",
"def format_list(self, at_char, user, list_name):\r\n return u'<a href=\"http://%s/%s/%s\" data-list=\"\">%s%s/%s</a>' \\\r\n % (self.domain, user, list_name, at_char, user, list_name)",
"def getlist(self):\n self.__domainlist.sort()\n\n outstr = \"{ \"\n for index, domain in enumerate(self.__domainlist):\n outstr += domain + \" \"\n if (index % 50 == 0) and index > 0:\n outstr += \"}\\n{ \"\n\n outstr += \"}\"\n\n return outstr",
"def html_unordered_list(items):\n if not items:\n return \"\"\n\n inner = \"\".join(map(html_list_item, items))\n if inner == \"\":\n return \"\"\n\n return \"<ul>\\n\" + inner + \"</ul>\\n\"",
"def _make_song_list_html(song_list):\n return '<p class=\"song_name\">' + '<br>'.join([f'{song[\"title\"]} <span class=\"artist_album\">{song[\"artist\"]} - {song[\"album\"]}</span>' for song in song_list]) + '</p>'",
"def generate_email_body(urls):\n\n return \"New URLs:\\n\" + \"\\n\".join(f\" • {url['url']}\" for url in urls)",
"def create_content_list(contents: List[Text]) -> Text:\n # print(contents)\n return '\\n'.join(\n [template.LIST_TEMPLATE.format(\n level='',\n content=item\n ) for item in contents if item.strip()])",
"def slist(body):\n return SList(body.split(\"\\n\"))",
"def list_item_html(text: str) -> str:\n return \"<li>{}</li>\".format(text)",
"def email_list(request):\n if not request.user.is_superuser:\n raise PermissionDenied\n emails = set()\n form = EmailSelectForm()\n subject = None\n message = None\n errors = []\n success = None\n if request.method == \"POST\":\n form = EmailSelectForm(request.POST)\n if form.is_valid():\n if \"send_email\" in request.POST:\n send = True\n else:\n send = False\n form, subject, message, success, errors = _send_emails(request, form, emails, send)\n return render(\n request,\n \"rr/email.html\",\n {\n \"object_list\": sorted(emails),\n \"form\": form,\n \"subject\": subject,\n \"message\": message,\n \"errors\": errors,\n \"success\": success,\n },\n )",
"def create_list_html(array):\n if not array:\n return ''\n msg = ''\n for item in array:\n msg += '<li>' + item + '</li>'\n return '<ul>' + msg + '</ul>'",
"def get_emails(print_list, email_dict):\n\n email_list = []\n again = True\n contact_table = PrettyTable()\n contact_table.field_names = [\"Command\", \"Advisor Name\", \"Email\"]\n\n for row in print_list:\n contact_table.add_row(row)\n\n while again:\n print(contact_table)\n pretty_print(email_list, \":\")\n pretty_print(\"To Add Receiving Emails Enter the corresponding command number\", \"-\")\n pretty_print(\"To Send Mail press any number key:\", \"-\")\n choice = get_int_input()\n if choice in email_dict.keys():\n email_list.append(email_dict[choice])\n\n else:\n if len(email_list) != 0:\n again = False\n\n else:\n again = True\n pretty_print(\"No Email Added\", \"-\")\n\n clear()\n\n return email_list",
"def epbunchlist2html(epbunchlist):\n def epbunch2html(epbunch):\n lines = epbunch.obj[:2]\n return '->'.join(lines)\n lines = [epbunch2html(epbunch) for epbunch in epbunchlist]\n return \", \".join(lines)",
"def email_list(self) -> Sequence[str]:\n return pulumi.get(self, \"email_list\")",
"def send_email(client, smtp, server_list):\n content = create_body_html(client, server_list)\n\n # Email Configuration\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = '[Cyberwatch] Servers recovered from \"Communication failure\" report - ' + \\\n date.today().strftime(\"%m/%d/%y\")\n message[\"From\"] = smtp[\"sender\"]\n message[\"To\"] = \", \".join(EMAIL_RECEIVERS)\n\n # Get Period start date with \"Last Modified\" time of file\n start_date = datetime.fromtimestamp(os.path.getmtime(os.path.dirname(\n __file__) + '/communication_failure_list.txt')).strftime(\"%d/%m/%Y, %H:%M\")\n\n email_body = f\"\"\"\\\n <p>Greetings,</p>\n\n <p>Please find in the following section, a list of servers that recovered from the status\n \"Communication failure\".</p>\n\n <span style=\"color:#4bb9f1;font-size:18px;align:center\"><strong>Servers recovered from \"Communication Failure\"\n between {start_date} and {datetime.now().strftime(\"%d/%m/%Y, %H:%M\")}</strong></span>\n <br />\n\n <br />{content}<br />\n\n <p>The Cyberwatch Team - [email protected]</p>\n \"\"\"\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(MIMEText(email_body, \"plain\"))\n message.attach(MIMEText(email_body, \"html\"))\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(smtp[\"server\"], smtp[\"port\"], context=context) as server:\n server.login(smtp[\"login\"], smtp[\"password\"])\n server.sendmail(\n smtp[\"sender\"], EMAIL_RECEIVERS, message.as_string()\n )\n\n print(\"Successfully sent email to {}\".format(message[\"To\"]))",
"def __convertToHtmlListElement(self, listOfStringValues: List[str]) -> str:\n return ''.join(f\"<li>{element}</li>\" for element in listOfStringValues)",
"def writeHTMLbody(appurl, numberOfCompletedFiles, stoppedState, componentList, dayStart, dayEnd):\n html = u'''\\\n<html>\n <head></head>\n <body>\n <p>Kære Operatør</p>\n <p>\n Her en rapport over hvordan det er gået med opsamling af YouSee\n TV i det seneste døgn. Informationerne i denne mail er alle trukket fra\n <a href=\"%(url)s\">Ingest Monitor websiden</a> som du også selv kan klikke rundt på.\n </p><p>\n Døgnet startede i går klokken %(start)s og varede indtil i dag klokken %(end)s.\n </p>\n <p>\n''' % {'url': appurl, 'start': dayStart, 'end': dayEnd}\n\n html += '<hr>'\n html += u'<p>I det seneste døgn blev der med succes blevet behandlet ' + str(numberOfCompletedFiles) + ' filer.</p>'\n\n if len(componentList) > 0:\n # add a list of files still in progress BUT previously were in a FAILED state\n # grouped by the component\n html += u'<h3>Filer som tidligere fejlede men som stadig er under behandling eller er blevet genstartet.</h3>'\n html += u'<p>'\n for component in componentList:\n html += u'<h4>Følgende filer fejlede i ' + component[0] + ' komponenten:</h4>'\n\t newList = set([e['entity']['name'] for e in component[1]])\n for e in newList:\n html += u'<a href=\"'\\\n + getDetailUrl(appurl, e)\\\n + '\">'\\\n + e\\\n + '</a><br>\\n'\n html += u'</p>'\n else:\n html += u'<p>Ingen filer under behandling har en fejlstatus.</p>'\n\n html += '<hr>'\n if len(stoppedState) > 0:\n # add a list of failed files to the report.\n html += u'<h3>Filer der er markeret som værende stoppet og som kun bliver genstartet ved manuel indgriben:</h3>'\n html += u'<p>'\n for e in stoppedState:\n html += u'<a href=\"' + getDetailUrl(appurl, e['entity']['name']) + '\">'\\\n + e['entity']['name']\\\n + u'</a><br>\\n'\n html += u'</p>'\n else:\n html += u'<p>Ingen filer er markeret som stoppet.</p>'\n\n # end the html part of the report\n html += u'''\\\n </ul>\n </p>\n </body>\n</html>\n'''\n return html",
"def list_str_breaks(lis):\r\n as_str = \"\"\r\n for item in lis:\r\n as_str += str(item) + \"\\n\"\r\n return as_str[:-1]",
"def html_list_item(string):\n if string == \"\":\n return \"\"\n\n return \"<li>\" + string + \"\\n\"",
"def get_all_message(): \n return \"<br>\".join(messages)",
"def get_list_servers(p_id_guilda):\r\n server_list = select_data.get_guild_servers(p_id_guilda)\r\n #css_mensagem = '```css\\n####### SERVERS ################'\r\n list_server = []\r\n for server in server_list:\r\n if server['description'] != None:\r\n description_server = server['description']\r\n else:\r\n description_server = ''\r\n return_data = '\\n### Id Server: ' + str(server['id_server_sk']) + ' - ' + server['name_guild'] + '\\n### Map: ' + server['map_name'] + '\\n### Modo: ' + server['mode_server'] + '\\n### Patreon: ' + server['map_patreon'] + '\\n### Description: ' + description_server + '\\n -----------------------------------------------------------------------------------'\r\n list_server.append(return_data)\r\n #css_mensagem = css_mensagem + return_data\r\n #css_mensagem = css_mensagem + '\\n##############################```'\r\n return list_server #css_mensagem\r",
"def file_server_list_table_format(result):\n table = []\n for item in result:\n table.append(file_server_show_table_format(item))\n return table",
"def htmlify_list(l):\n r = \"<ol>\"\n for i in l:\n r += \"<li>{}</li>\".format(str(i))\n r += \"</ol>\"\n return r",
"def listToStringFormat(self, list) ->str:\n string = ''\n for element in list:\n string = string + str(element) + \"\\n\"\n return string",
"def showlist(liste):\n lines = []\n for configuration in liste:\n for line in gentikz(configuration.copy()):\n lines.append(line)\n lines.append(r\"\\newpage\")\n return lines",
"def escape_list(l):\n return [_escape_harlowe_html(item) if isinstance(item, text_type) else str(item) for item in l]"
] | [
"0.6742271",
"0.61657894",
"0.61091125",
"0.60555714",
"0.60321444",
"0.5965353",
"0.59480834",
"0.5918078",
"0.5883233",
"0.5821413",
"0.5801513",
"0.5782404",
"0.57817954",
"0.57756126",
"0.57365865",
"0.5734727",
"0.57307297",
"0.56886506",
"0.5656043",
"0.5625895",
"0.5625385",
"0.56106025",
"0.5592617",
"0.5567409",
"0.5564028",
"0.5549722",
"0.55468994",
"0.5529826",
"0.5495466",
"0.5487944"
] | 0.7079043 | 0 |
Remove a random item from the set, and return it | def pop_random(self):
rand_index = randint(0, len(self._list) - 1)
item = self._list[rand_index]
self.remove(item)
return item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def popitem(self):\n all_items = self.items()\n removed_item = random.choice(all_items)\n self[removed_item[0]] = None\n return removed_item",
"def getRandom(self):\n n = len(self.keys)\n while n > 0:\n index = random.randint(0, n - 1)\n my_key = self.keys[index]\n if my_key in self.ds:\n return my_key\n else:\n self.keys[index] = self.keys[n - 1]\n self.keys.pop()\n n = n - 1\n\n\n\n\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()",
"def random_pop (self, checkfn=None):\n if len(self) == 0:\n return None\n\n index = self.random_pick(checkfn=checkfn)[0]\n\n if index == None:\n return None\n\n return self.pop(index)",
"def remove(self, val: int) -> bool:\n temp = self.randomSet.pop(val, False)\n return True if temp != False else temp",
"def randomchooseanddelete():\n\t\tvar = random.choice(unassigned)\n\t\tunassigned.remove(var)\n\t\treturn var",
"def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")",
"def getRandom(self):\n return self.nums[random.randint(0, len(self.nums) - 1)]\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()",
"def getRandom(self) -> int:\n index = random.randint(0, len(self.lst) - 1)\n # self.lst[index], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[index]\n # val = self.lst.pop()\n # self.dic.pop(val)\n return self.lst[index]",
"def getRandom(self) -> int:\n some_item = self.container.pop()\n self.container.add(some_item)\n return some_item",
"def random_pop (self, checkfn=None):\n if len(self) == 0:\n return None\n item = random.randint(0, len(self)-1)\n if checkfn is not None:\n tries = len(self) * 5\n while not checkfn(self[item]):\n item = random.randint(0, len(self)-1)\n tries = tries - 1\n if tries <= 0:\n return None\n return self.pop(item)",
"def remove_randomico(lista, qtd_remocao):\n for i in range(qtd_remocao):\n lista.pop(random.randrange(len(lista))) \n return lista",
"def deal_one(self):\r\n rand_card = random.choice(self.cards_list)\r\n self.cards_list.remove(rand_card)\r\n return rand_card",
"def hit():\r\n new_card = deck[random.randint(1, len(deck))]\r\n deck.remove(new_card)\r\n return new_card",
"def remove(self) -> object:\n return self._contains.pop()",
"def remove_rear(self):\n\n if self.items:\n return self.items.pop()\n return None",
"def _find_element_not_in_set(self, already_used: set) -> int:\n new_element = random.randint(a=self.min_value, b=self.max_value)\n while new_element in already_used:\n new_element = random.randint(a=self.min_value, b=self.max_value)\n return new_element",
"def rpop(self, name):\n for r in random.sample(self.redis_list, len(self.redis_list)):\n data = r.rpop(name)\n if data:\n return data",
"def get(self):\n if len(self._words) < 1:\n raise ValueError(\"You have exhausted the list of words!\")\n index = randint(0, len(self._words) -1)\n return self._words.pop(index)",
"def get(self):\n if len(self._words) < 1:\n raise ValueError(\"You have exhausted the list of words!\")\n index = randint(0, len(self._words) - 1)\n return self._words.pop(index)",
"def getRandom(self) -> int:\n return random.choice(list(self.set))",
"def pop_random(random, values):\n\n # We pick the element at a random index. Rather than removing that element\n # from the list (which would be an O(n) operation), we swap it to the end\n # and return the last element of the list. This changes the order of\n # the elements, but as long as these elements are only accessed through\n # random sampling that doesn't matter.\n i = random.randrange(0, len(values))\n values[i], values[-1] = values[-1], values[i]\n return values.pop()",
"def pop(self):\n\t\treturn self.items.pop()",
"def popitem(self):\n return self.pop(0)",
"def rand_pop(l: list):\n i = randrange(len(l)) \n l[i], l[-1] = l[-1], l[i] \n return l.pop()",
"def del_value(self):\n return self.list.pop()",
"def remove(self, item):\n try:\n self._data.remove(item)\n except ValueError as exc:\n raise KeyError from exc\n else:\n self.__log__.append(SetRemove(value=item))",
"def pop_(self):\n\n return self.items.pop()",
"def pop(self, last=True):\r\n if not self:\r\n raise KeyError('set is empty')\r\n key = reversed(self).next() if last else iter(self).next()\r\n self.discard(key)\r\n return key",
"def pop(self):\r\n it = iter(self)\r\n try:\r\n value = next(it)\r\n except StopIteration:\r\n raise KeyError\r\n self.discard(value)\r\n return value",
"def pop(self):\n return self.items.pop()"
] | [
"0.8282262",
"0.7164812",
"0.7115214",
"0.69544333",
"0.68911844",
"0.68830067",
"0.6759804",
"0.67500526",
"0.67464954",
"0.67222595",
"0.6633977",
"0.6544951",
"0.6462838",
"0.6461487",
"0.6363013",
"0.6315286",
"0.6308643",
"0.62988245",
"0.6254927",
"0.6241444",
"0.62125313",
"0.619546",
"0.613709",
"0.6122986",
"0.6109161",
"0.6094347",
"0.6085302",
"0.608237",
"0.6050902",
"0.6049299"
] | 0.79628265 | 1 |
Generate a compressed name from keys wrt config. | def get_compressed_name_from_keys(config: Dict[str, Any],
keys: Tuple[Tuple[str]],
allow_missing: bool = True):
assert not isinstance(config, list), config
name = ''
for pre_keys in keys:
v = config
pre_keys_str = ''
missing = False
for k in pre_keys:
if allow_missing and k not in v:
missing = True
break
v = v[k]
k_str = ''.join([s[0] for s in k.split('_')]) # learning_rate -> lr
pre_keys_str = f'{pre_keys_str}.{k_str}' if pre_keys_str else k_str
if missing:
continue
if isinstance(v, bool):
v = str(v)[0] # True/False -> 'T', 'F'
elif v is None:
v = str(v)[0] # None -> 'N'
else:
v = str(v)
pre_keys_str += '_' + v
name = f'{name}__{pre_keys_str}' if name else pre_keys_str
return name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_pack_name(names):\n assert names\n tokens_in_names = [name.split('/') for name in names]\n common_prefix_tokens = []\n\n # Find the longest common prefix of tokens.\n while True:\n first_token_in_names = set()\n for tokens in tokens_in_names:\n if not tokens:\n break\n first_token_in_names.add(tokens[0])\n if len(first_token_in_names) != 1:\n break\n common_prefix_tokens.append(next(iter(first_token_in_names)))\n for tokens in tokens_in_names:\n tokens.pop(0)\n\n common_prefix_tokens.append('Packed')\n common_prefix = '/'.join(common_prefix_tokens)\n suffixes = ['_'.join(tokens) for tokens in tokens_in_names]\n return '%s[%s]' % (common_prefix, ', '.join(suffixes))",
"def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))",
"def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name",
"def _build_key(self, key: str) -> str:\n return \"-\".join((self._name, key))",
"def build_flattened_key(prefix, key):\n return key if not prefix else prefix + \".\" + key",
"def __to_key(name: str) -> str:\n return name.replace(\" \", \"-\")",
"def create_ricgraph_key(name: str, value: str) -> str:\n return value.lower() + '|' + name.lower()",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))",
"def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )",
"def _make_keys(key_def):\r\n keys = []\r\n labels = []\r\n\r\n # no keys? return an empty string\r\n if len(key_def) == 0:\r\n return \"\"\r\n\r\n for i in range(1, 12 + 1):\r\n try:\r\n key, label = i, key_def[i]\r\n if label.startswith('-'):\r\n key = '0'\r\n label = label[1:]\r\n else:\r\n key = '1'\r\n except KeyError:\r\n key = '0'\r\n label = HIEW_EMPTY_KEY_LABEL\r\n\r\n t = len(label)\r\n if t > HIEW_KEY_LABEL_LEN:\r\n label = label[0:HIEW_KEY_LABEL_LEN]\r\n else:\r\n label += ' ' * (HIEW_KEY_LABEL_LEN - t)\r\n\r\n keys.append(key)\r\n labels.append(label)\r\n\r\n return ''.join(keys) + HEM_FNKEY_DELIMITER + ''.join(labels)",
"def build_key(key):\n return os.path.join(PREFIX, key)",
"def _create_key(chip):\n try:\n suffix = chip['version'][0]\n except IndexError:\n suffix = ''\n\n if chip['classification'] == 'secret':\n classification = 'z'\n else:\n classification = chip['classification'][0]\n\n return '%s-%s%s%s' % (chip['game'], classification, chip['indice'], suffix)",
"def generate_key(value):\n return '{}:{}'.format(String.__name__.lower(), value)",
"def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"",
"def _make_display_name(cls, key: str) -> str:\n return f\"{cls._temp_prefix}-{key}-{uuid.uuid4()}\"",
"def construct_name(p, prefix):\n name = prefix\n for key in p.keys():\n if (type(p[key]) != tuple) and (type(p[key]) != list):\n name = name + '_' + str(key) + '-' + str(p[key])\n else:\n name = name + '_' + str(key) + '-' + str(p[key][0])\n return name",
"def generate_discovery_cache_key(name, ext):\n\n return 'wopi_' + name + '_' + ext",
"def _create_key(_type, name):\n return \"{}{}{}\".format(_type, DiagnosticManager._type_separator, name)",
"def _generate_processed_key_name(process_to, upload_name):\n timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')\n name, extension = os.path.splitext(upload_name)\n digest = md5(''.join([timestamp, upload_name])).hexdigest()\n return os.path.join(process_to, '{0}.{1}'.format(digest, extension))",
"def make_key(self, project, name, period, filters=None):\n\n parts = [project, name, period]\n\n if isinstance(filters, dict):\n filters_part = u\"/\".join(\n [u\"{0}|{1}\".format(f, to_unicode(self.clean_filter_value(filters[f])))\n for f in sorted(filters.keys(), key=lambda x: x) if f])\n\n if filters_part:\n parts.append(filters_part)\n\n return u';'.join(parts)",
"def makekey(function, *args, **kwargs) -> str:\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(\n str.maketrans('', '', string.punctuation+string.whitespace)\n )\n key = codecs.encode(pickle.dumps(arguments, protocol=0), \"base64\").decode().strip()\n return key",
"def get_key(self, state: Dict) -> str:\n\n return \"_\".join(sorted(state))",
"def util_generate_key(conf_file=None):\n keyname = DebRepo(**config(conf_file=conf_file)).generate_key()\n print(keyname)",
"def gen_keys():",
"def generate_name_key(str):\n return str.decode('utf-8', errors='replace').lower().replace(' ', '_')",
"def _shorten_key(telstate, key):\n for prefix in telstate.prefixes:\n if key.startswith(prefix):\n return key[len(prefix):]\n return ''",
"def make_key(k, with_locale=True):\r\n key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))\r\n if with_locale:\r\n key += encoding.smart_str(translation.get_language())\r\n # memcached keys must be < 250 bytes and w/o whitespace, but it's nice\r\n # to see the keys when using locmem.\r\n return hashlib.md5(key).hexdigest()",
"def aus_label_key(config_atom: str) -> str:\n return sre_capability_label_key(\"aus\", config_atom)",
"def create_key_name(callback, topic):\n\t\treturn utils.get_hash_key_name(u'%s\\n%s' % (callback, topic))"
] | [
"0.64022315",
"0.63604325",
"0.6284187",
"0.61198986",
"0.592737",
"0.5915452",
"0.59024495",
"0.5840366",
"0.5840366",
"0.5803846",
"0.57972753",
"0.5789273",
"0.5788882",
"0.57822824",
"0.5780839",
"0.5775445",
"0.57516694",
"0.57482225",
"0.5740038",
"0.5716991",
"0.5704012",
"0.56503177",
"0.56479746",
"0.56223893",
"0.5613646",
"0.559775",
"0.55485046",
"0.5537362",
"0.5528732",
"0.5517632"
] | 0.7821243 | 0 |
Get a sample of config. | def get_configuration_sample(config, root=True):
if isinstance(config, dict):
return {
k: get_configuration_sample(v, root=False)
for k, v in sorted(config.items())
}
elif isinstance(config, list):
if root:
return get_configuration_sample(
config[np.random.randint(len(config))], root=False)
else:
return config[np.random.randint(len(config))]
elif callable(config):
return config()
else:
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sample(config, n_sample=1):\n if config['distribution'] == 'binary':\n data = np.random.choice([0, 1], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'discrete':\n data = np.random.choice(config['category'], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'uniform':\n assert float(config['min']) < float(config['max'])\n data=np.random.uniform(low=float(config['min']),high=float(config['max']),size=n_sample)\n\n elif config['distribution'] == 'gaussian':\n data=np.random.normal(loc=float(config['mean']),scale=float(config['std']),size=n_sample)\n data = np.maximum(data, float(config['min']))\n data = np.minimum(data, float(config['max']))\n\n elif config['distribution'] == 'uniform_int':\n if int(config['min'])==int(config['max']):\n data=int(config['min'])*np.ones((n_sample,),dtype='int32')\n else:\n data=np.random.randint(int(config['min']),high=int(config['max']),size=n_sample)\n\n else:\n log.warning('Warning: unknown distribution type: %s' % config['distribution'])\n data = []\n\n return data",
"def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config",
"def sample_configuration_dist(config, root=True, num_samples_per_dist=1):\n if isinstance(config, dict):\n return {\n k: sample_configuration_dist(\n v, root=False, num_samples_per_dist=num_samples_per_dist)\n for k, v in sorted(config.items())\n }\n elif isinstance(config, list) and root:\n return [\n sample_configuration_dist(\n c, root=False, num_samples_per_dist=num_samples_per_dist)\n for c in config\n ]\n elif callable(config):\n return [config() for _ in range(num_samples_per_dist)]\n else:\n return config",
"def get_config():\n return CONFIG",
"def random_configuration(self):\n raise NotImplementedError",
"def get_config():\n return _config",
"def sample_from_curriculum(self, curriculum):\n sample_config = np.random.choice(curriculum)\n self.set_config(sample_config)\n return sample_config",
"def get_config_sample_speed():\n # try changing learning rate\n config = get_default_config()\n\n config['train_batch_size'] = 16384\n config['_policies'] = [None, \"from_scratch_sb\", \"pretrained\"]\n config['lr'] = 3e-4\n config['sgd_minibatch_size'] = 4096\n config['num_sgd_iter'] = 4\n config['rollout_fragment_length'] = 100\n config['num_workers'] = tune.grid_search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n config['num_envs_per_worker'] = tune.grid_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n # ['humanoid_blocker', 'humanoid'],\n config['_train_policies'] = ['player_1']\n config['num_gpus'] = 0\n config['_train_steps'] = 20\n config[\"batch_mode\"] = \"complete_episodes\"\n\n config['_trainer'] = \"PPO\"\n config['_policy'] = \"PPO\"\n config['_call']['num_samples'] = 1\n config['_call']['resources_per_trial'] = {\n \"custom_resources\": {\"tune_cpu\": tune.sample_from(lambda spec: spec.config.num_workers + 10)}} # upper bound\n\n # config['_run_inline'] = True\n\n return config",
"def config():\n return _config",
"def config_fixture():\n CONFIG_FILE = \"secrets/config.json\"\n with open(CONFIG_FILE, \"r\") as f:\n return json.loads(f.read())",
"def get_sample(self, ctx, params):\n # ctx is the context object\n # return variables are: sample\n #BEGIN get_sample\n id_, ver = _get_sample_address_from_object(params)\n admin = _check_admin(self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_sample', ctx.log_info, skip_check=not params.get('as_admin'))\n s = self._samples.get_sample(id_, _UserID(ctx[_CTX_USER]), ver, as_admin=admin)\n sample = _sample_to_dict(s)\n #END get_sample\n\n # At some point might do deeper type checking...\n if not isinstance(sample, dict):\n raise ValueError('Method get_sample return value ' +\n 'sample is not type dict as required.')\n # return the results\n return [sample]",
"def getConfig(self, config):\n\n with open('./config.json', 'r') as json_file:\n try:\n data = json_file.read()\n return json.loads(data)[config]\n except Exception as e:\n print(e)",
"def random_config_from_list(config_list_file):\n lines = tuple(open(config_list_file, 'r'))\n config_file = random.choice(lines).strip()\n print(\"[hypergan] config file chosen from list \", config_list_file, ' file:', config_file)\n return hg.configuration.Configuration.load(config_file+\".json\")",
"def config():",
"def config():",
"def samples():\n f = open(config['samples'], \"r\")\n samp=[]\n for line in f:\n samp.append(line.strip().split()[0])\n return samp",
"def config(self, feature):\n return self._config.get(feature, base_config.BaseConfig())",
"def get_config():\n with open(CONFIG_PATH) as config_file:\n data = json.load(config_file)\n return data",
"def sample_from_concept(self):\n return random.choice(self.active_concept.extension)",
"def get_config(self):\n return self.config",
"def get_config(self, name):\n return self.configs[name][0]",
"def get(self) -> dict:\n return Config.get()",
"def get_config():\n handle = open(\"config.json\", \"r\")\n raw_json = handle.read()\n handle.close()\n return json.loads(raw_json)",
"def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())",
"def get_config(self,config):\n return self.parser.get(\"main\", config)",
"def _get_sample(self):\n prev = self.prev_img\n curr = self.curr_img\n prevbb = self._last_bbox\n prev_sample, opts_prev = crop_sample({'image': prev, 'bb': prevbb})\n curr_sample, opts_curr = crop_sample({'image': curr, 'bb': prevbb})\n prev_img = bgr2rgb(self.scale(prev_sample, opts_prev)['image'])\n curr_img = bgr2rgb(self.scale(curr_sample, opts_curr)['image'])\n sample = {'previmg': prev_img, 'currimg': curr_img}\n self.curr_img = curr\n self.opts = opts_curr\n return sample",
"def get_config_template(self) -> cconfig.Config:",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config"
] | [
"0.66725135",
"0.65855396",
"0.6482443",
"0.64281243",
"0.62920535",
"0.62203",
"0.62141085",
"0.6211423",
"0.61809164",
"0.61482966",
"0.6130489",
"0.60582256",
"0.60312045",
"0.6018113",
"0.6018113",
"0.59389967",
"0.5933204",
"0.5931536",
"0.5909174",
"0.590119",
"0.5878221",
"0.5876027",
"0.58731794",
"0.5870356",
"0.58599055",
"0.5851013",
"0.58449495",
"0.58363473",
"0.58363473",
"0.58363473"
] | 0.7430664 | 0 |
Test aperture_photometry when error has units (see 176). | def test_aperture_photometry_with_error_units():
data1 = np.ones((40, 40), dtype=float)
data2 = u.Quantity(data1, unit=u.adu)
error = u.Quantity(data1, unit=u.adu)
radius = 3
true_flux = np.pi * radius * radius
unit = u.adu
position = (20, 20)
table1 = aperture_photometry(data2, CircularAperture(position, radius),
error=error)
assert_allclose(table1['aperture_sum'].value, true_flux)
assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux))
assert table1['aperture_sum'].unit == unit
assert table1['aperture_sum_err'].unit == unit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_aperture_photometry_inputs_with_mask():\n\n data = np.ones((5, 5))\n aperture = CircularAperture((2, 2), 2.0)\n mask = np.zeros_like(data, dtype=bool)\n data[2, 2] = 100.0 # bad pixel\n mask[2, 2] = True\n error = np.sqrt(data)\n data_in = data.copy()\n error_in = error.copy()\n t1 = aperture_photometry(data, aperture, error=error, mask=mask)\n assert_array_equal(data, data_in)\n assert_array_equal(error, error_in)\n assert_allclose(t1['aperture_sum'][0], 11.5663706144)\n t2 = aperture_photometry(data, aperture)\n assert_allclose(t2['aperture_sum'][0], 111.566370614)",
"def test_scalar_aperture():\n\n data = np.ones((20, 20), dtype=float)\n\n ap = CircularAperture((10, 10), r=3.0)\n colnames1 = aperture_photometry(data, ap, error=data).colnames\n assert (colnames1 == ['id', 'xcenter', 'ycenter', 'aperture_sum',\n 'aperture_sum_err'])\n\n colnames2 = aperture_photometry(data, [ap], error=data).colnames\n assert (colnames2 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',\n 'aperture_sum_err_0'])\n\n colnames3 = aperture_photometry(data, [ap, ap], error=data).colnames\n assert (colnames3 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',\n 'aperture_sum_err_0', 'aperture_sum_1',\n 'aperture_sum_err_1'])",
"def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]",
"def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) & np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor",
"def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], 
phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)",
"def test_source_photometry(sersic_2d_image, segm_and_cat):\n cat, segm, segm_deblend = segm_and_cat\n\n source = cat[0]\n\n max_pix = 120\n\n r_list = pf.make_radius_list(\n max_pix=max_pix, # Max pixel to go up to\n n=max_pix # the number of radii to produce\n )\n\n flux_arr, area_arr, error_arr = pf.source_photometry(\n # Inputs\n source, # Source (`photutils.segmentation.catalog.SourceCatalog`)\n sersic_2d_image, # Image as 2D array\n segm_deblend, # Deblended segmentation map of image\n r_list, # list of aperture radii\n\n # Options\n cutout_size=max(r_list)*2, # Cutout out size, set to double the max radius\n bg_sub=True, # Subtract background\n sigma=1, sigma_type='clip', # Fit a 2D plane to pixels within 1 sigma of the mean\n plot=False, vmax=0, vmin=1, # Show plot with max and min defined above\n )\n plt.show()",
"def test_nan_in_bbox():\n\n data1 = np.ones((101, 101))\n data2 = data1.copy()\n data1[33, 33] = np.nan\n data1[67, 67] = np.inf\n data1[33, 67] = -np.inf\n data1[22, 22] = np.nan\n data1[22, 23] = np.inf\n error = data1.copy()\n\n aper1 = CircularAperture((50, 50), r=20.0)\n aper2 = CircularAperture((5, 5), r=20.0)\n\n tbl1 = aperture_photometry(data1, aper1, error=error)\n tbl2 = aperture_photometry(data2, aper1, error=error)\n assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum'])\n assert_allclose(tbl1['aperture_sum_err'], tbl2['aperture_sum_err'])\n\n tbl3 = aperture_photometry(data1, aper2, error=error)\n tbl4 = aperture_photometry(data2, aper2, error=error)\n assert_allclose(tbl3['aperture_sum'], tbl4['aperture_sum'])\n assert_allclose(tbl3['aperture_sum_err'], tbl4['aperture_sum_err'])",
"def test_fitting_accuracy(self):\r\n # Instantiate spectrum object, calibrate peak shape and fit all peaks\r\n spec = emg.spectrum(df=self.data,show_plot=False)\r\n spec.detect_peaks(thres=0.0053, plot_smoothed_spec=False,\r\n plot_2nd_deriv=False, plot_detection_result=False)\r\n msg0 = \"Incorrect number of peaks detected.\"\r\n assert len(spec.peaks) == len(self.true_mus), msg0\r\n spec.assign_species([\"Ni58:-1e\",\"Co58:-1e\",\"Mn58?:-1e\",\"Sn116:-2e\"])\r\n spec.assign_species(\"Mn58m?:-1e\", peak_index=2, Ex=71.77, Ex_error=0.05)\r\n spec.determine_peak_shape(species_shape_calib=\"Mn58m?:-1e\",\r\n show_plots=False)\r\n spec.fit_peaks(species_mass_calib=\"Ni58:-1e\",show_plots=False)\r\n\r\n # Perform accuracy checks\r\n for p in spec.peaks:\r\n if p.species == \"Ni58:-1e\":\r\n continue # skip calibrant\r\n msg1 = \"ME deviates from literature by more than 1 sigma.\"\r\n assert p.m_dev_keV <= p.mass_error_keV, msg1\r\n\r\n # Check calculation of (atomic) ME for doubly charged species\r\n if p.species == \"Sn116:-2e\":\r\n ME_dev_keV = p.atomic_ME_keV - self.ME_Sn116_keV\r\n msg2 = str(\"Respective deviation of ionic mass and atomic mass \"\r\n \"excess from literature differ by > 1 sigma for \"\r\n \"Sn116:-2e.\")\r\n assert abs(ME_dev_keV - p.m_dev_keV) < p.mass_error_keV, msg2",
"def measure_error_test(self):\n error_dict = self.results._determine_measured_error(\n score_threshold=0.5, plot=False\n )\n assert error_dict[\"mz_error\"] == [0, 0, 0]\n assert error_dict[\"intensity_error\"] == [0, 0, 0]",
"def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)",
"def test_complex_mean_exceptions(self):\n with pytest.raises(ValueError):\n ConceptEmbedding.mean_by_angle([])",
"def test_exif_broken(self):\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"broken_exif.jpg\")\n self._upload_photo(user, file_path)",
"def test_compute_pixel_ray_direction_invalid_focal_lengths() -> None:\n u = 12\n v = 2\n fx = 10\n fy = 11\n\n img_w = 20\n img_h = 10\n with pytest.raises(ValueError):\n _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)",
"def ampphaerror(orig, reco):\r\n\r\n amp_orig = np.abs(orig)\r\n amp_reco = np.abs(reco)\r\n pha_orig = np.angle(orig)\r\n pha_reco = np.angle(reco)\r\n\r\n # print(np.abs(amp_orig - amp_reco))\r\n # print(np.abs(pha_orig - pha_reco))\r\n # print(np.mean(np.abs(amp_orig - amp_reco)))\r\n # print(np.mean(np.abs(pha_orig - pha_reco)))\r\n\r\n amperror = np.mean(np.abs(amp_orig - amp_reco))\r\n phaerror = np.mean(np.abs(pha_orig - pha_reco))\r\n\r\n return amperror, phaerror",
"def test_exposure(self):\n lcname = os.path.join(self.datadir,\n 'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)\n ufname = os.path.join(self.datadir, 'monol_testA_uf.evt')\n command = \"{0} {1}\".format(lcname, ufname)\n\n hen.exposure.main(command.split())\n fname = os.path.join(self.datadir,\n 'monol_testA_E3-50_lccorr' + HEN_FILE_EXTENSION)\n assert os.path.exists(fname)\n ftype, contents = hen.io.get_file_type(fname)\n\n assert isinstance(contents, Lightcurve)\n assert hasattr(contents, 'expo')",
"def test_absolute_volume(self):\n\n assert self.test_shape.volume() == pytest.approx(50 * 60 * math.pi * 2 * 1000)",
"def testPluginUnexpectedError(self):\n self.config.plugins[self.algName].flux0 = 0.0 # this causes a divide by zero\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=1)\n task.log.setLevel(task.log.FATAL)\n task.run(cat, exposure)\n source = cat[0]\n self.assertTrue(source.get(self.algName + \"_flag\"))\n self.assertFalse(source.get(self.algName + \"_flag_containsNan\"))\n self.assertFalse(source.get(self.algName + \"_flag_edge\"))",
"def photometry(userinputs, image, catalog, outputname, apertures, annulus='', dannulus='', recenter=False):\n logging.info('Running photometry function on {}'.format(image))\n logging.info('Using {}px apertures'.format(apertures))\n\n #set directory\n target_dir = userinputs['OUTDIR']\n\n #Update passed names to be full paths if they are not\n\n if len(image.split('/'))==1:\n logging.info('Looking for {} in {}.'.format(image,userinputs['DATA']))\n image = glob.glob(userinputs['DATA'] + '/' + image)\n if len(image)==0:\n logging.critical('No {} image found'.format(image))\n filemanagement.shutdown('Selected image does not exist',userinputs)\n else:\n image = image[0]\n logging.debug('Using image: {}'.format(image))\n\n if len(catalog.split('/'))==1:\n catalog = target_dir + '/init/' + catalog\n logging.debug('Input catalog: {}'.format(catalog))\n\n if len(outputname.split('/'))==1:\n output = target_dir + '/photometry/' + outputname\n logging.debug('Output name: {}'.format(output))\n else:\n output = outputname\n outputname = outputname.split('/')[-1]\n logging.debug('Output name: {}'.format(output))\n\n\n #Load zeropoints\n inst_zp, filter_zp, zp_zp = np.loadtxt(target_dir + '/init/Hi-PEEC_zeropoints.tab', unpack=True, dtype='str')\n # print inst_zp, filter_zp, zp_zp\n # Get filter from header\n filter = get_filter(image)\n\n\n # Set the necessary variables for photometry on the reference image\n exptime = fits.getheader(image)['EXPTIME']\n logging.debug('Exposure time from header: {}'.format(exptime))\n inst = fits.getheader(image)['INSTRUME']\n logging.debug('Intrument from header: {}'.format(inst))\n inst = inst.lower()\n\n\n match = (inst_zp == inst) & (filter_zp == filter.lower())\n zp = zp_zp[match]\n\n # zp is a string within an array, so need to turn into a float\n try:\n zp = float(zp[0])\n #If that cannot be done there was no match.\n except IndexError:\n if inst == 'acs':\n logging.debug('Zeropoint not found in file, passing to ACS calculation')\n zp = ACS_zeropoint(image)\n elif inst == 'wfc3':\n logging.debug('Zeropoint not found in file, passing to WFC3 calculation')\n zp = WFC3_zeropoint(image)\n else:\n logging.critical('No matching zeropoint found. 
Quitting.')\n logging.debug('No zeropoint match found for filter {} with instrument {}'\\\n .format(filter,inst))\n logging.debug('Available filters in zeropoint file : {} for instrument {}'\\\n .format(filter_zp, inst_zp))\n filemanagement.shutdown('No zeropoint was found for filter: {}'.format(filter),userinputs)\n\n logging.debug('Zeropoint from file: {}'.format(zp))\n # Remove output file if it already exists\n filemanagement.remove_if_exists(output)\n\n\n # Run photometry\n #--------------------------------------------------------------------------\n # Set up IRAF params:\n iraf.datapars.epadu = exptime\n\n # !!!!!!!!!!!!!!!!!\n # Only center on reference frame\n if recenter:\n iraf.centerpars.calgorithm = 'centroid'\n else:\n iraf.centerpars.calgorithm = 'none'\n # !!!!!!!!!!!!!!!\n # CHANGE BACKGROUND ESTIMATE IN ANNULUS TO MODE\n\n # Select the annulus depending on whether it is overwritten in the function call or not\n if annulus == '':\n iraf.fitskypars.annulus = userinputs['ANNULUS']\n logging.debug('Using annulus from inputfile ({}px)'.format(userinputs['ANNULUS']))\n else:\n iraf.fitskypars.annulus = annulus\n logging.debug('Using user specified annulus ({}px)'.format(annulus))\n if dannulus == '':\n iraf.fitskypars.dannulus = userinputs['D_ANNULUS']\n logging.debug('Using annulus width from inputfile ({}px)'.format(userinputs['D_ANNULUS']))\n else:\n iraf.fitskypars.dannulus = dannulus\n logging.debug('Using user specified annulus width ({}px)'.format(dannulus))\n\n iraf.photpars.apertures = apertures\n logging.debug('Using aperture(s) of {}px'.format(apertures))\n iraf.photpars.zmag = zp\n logging.debug('Setting zeropoint to {}'.format(zp))\n\n # Do phot\n iraf.phot(image+'[SCI]', catalog, output)\n #--------------------------------------------------------------------------\n\n\n #Depending on the number of apertures used, different methods of saving the\n # results are required\n #--------------------------------------------------------------------------\n\n naper = len(apertures.split(','))\n logging.debug('Number of apertures used {}'.format(naper))\n\n #final output filename\n fullcat_mag_short = target_dir + '/photometry/short_' + outputname\n\n if naper > 1:\n # Removes all outputlines that do not contain the character '*'\n # ensures only phot results are kept\n cmd = 'grep \"*\" ' + output + ' > ' + fullcat_mag_short\n os.system(cmd)\n\n # Replace INDEFS:\n cmd = 'sed -i.bak \"s/INDEF/99.999/g\" ' + fullcat_mag_short\n os.system(cmd)\n\n # Remove .bak files to prevent confusion\n bak_fullcat = fullcat_mag_short + '.bak'\n os.remove(bak_fullcat)\n\n\n else:\n #Dump results into a temp file\n temp = target_dir + '/photometry/phot_dump.mag'\n filemanagement.remove_if_exists(temp)\n iraf.txdump(output, 'XCENTER,YCENTER,FLUX,MAG,MERR,MSKY,ID', 'yes', Stdout = temp)\n\n # Set placeholders for sources outside of FOV and undetected sources\n # For outside of FOV, use 66.666 instead of INDEF\n # For undetected sources, use 99.999 instead of INDEF\n\n # Sources outside of FOV have exactly zero flux\n x, y, flux, mag, merr, msky, id = np.loadtxt(temp, unpack = True,\n dtype = str)\n\n flux = flux.astype(float)\n\n out_fov = (flux == 0.)\n logging.debug('Number of sources outside FOV: {}'.format(len(out_fov)))\n\n mag[out_fov] = 66.666\n merr[out_fov] = 66.666\n msky[out_fov] = 66.666\n\n # Undetected sources, those with negative flux or fluxes so small that mag err\n # is INDEF\n neg_flux = (flux < 0.)\n tiny_flux = (flux > 0.) 
& (merr == 'INDEF')\n\n mag[neg_flux] = 99.999\n merr[neg_flux] = 99.999\n msky[neg_flux] = 99.999\n\n merr[tiny_flux] = 99.999\n msky[tiny_flux] = 99.999\n\n logging.debug('Nr of undetected sources: {}'.format(len(tiny_flux)+len(neg_flux)))\n # Save results to new file\n x = x.astype(float)\n y = y.astype(float)\n mag = mag.astype(float)\n merr = merr.astype(float)\n msky = msky.astype(float)\n id = id.astype(int)\n\n zip_phot = zip(x, y, mag, merr, msky, id)\n\n np.savetxt(fullcat_mag_short, zip_phot,\n fmt = '%.3f %.3f %.3f %.3f %.9f %i')\n\n #--------------------------------------------------------------------------\n\n return fullcat_mag_short",
"def testTicket1123(self):\n\n ctrl = afwMath.StatisticsControl()\n ctrl.setAndMask(~0x0)\n \n mimg = afwImage.MaskedImageF(afwGeom.Extent2I(10, 10))\n mimg.set([self.val, 0x1, self.val])\n\n # test the case with no valid pixels ... both mean and stdev should be nan\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertNotEqual(mean, mean) # NaN does not equal itself\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself\n \n # test the case with one valid pixel ... mean is ok, but stdev should still be nan\n mimg.getMask().set(1, 1, 0x0)\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertEqual(mean, self.val)\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself\n\n # test the case with two valid pixels ... both mean and stdev are ok\n mimg.getMask().set(1, 2, 0x0)\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertEqual(mean, self.val)\n self.assertEqual(stdev, 0.0)",
"def test_photo_init_error_no_camera(full_photo_params):\n del full_photo_params['camera']\n with pytest.raises(KeyError):\n Photo(**full_photo_params)",
"def test_single_caps(self):\n result_h = mmc.find_molar_mass('H')\n actual_h = 1.007940\n self.assertTrue(abs(percent_error(result_h, actual_h)) < self.ERR_THRESHOLD)",
"def run_ap_phot(data, fwhm, position=None):\n if type(position) == type(None):\n position = np.array(data.shape) // 2\n\n aperture = CircularAperture(position, r=fwhm)\n\n sky_annulus_aperture = CircularAnnulus(\n position, r_in=fwhm * 3, r_out=fwhm * 3 + 15\n )\n sky_annulus_mask = sky_annulus_aperture.to_mask(method=\"center\")\n sky_annulus_data = sky_annulus_mask.multiply(data)\n sky_annulus_data_1d = sky_annulus_data[sky_annulus_mask.data > 0]\n _, median_sigclip, _ = sigma_clipped_stats(sky_annulus_data_1d)\n\n aperture_bg = median_sigclip * aperture.area\n phot = aperture_photometry(data, aperture)\n\n apmag = (phot[\"aperture_sum\"] - aperture_bg)[0]\n\n skyvar = np.square(np.std(sky_annulus_data))\n phpadu = 1\n\n sigsq = skyvar / sky_annulus_aperture.area\n\n error1 = aperture.area * skyvar # Scatter in sky values\n error2 = (apmag > 0) / phpadu # Random photon noise\n error3 = sigsq * aperture.area**2 # Uncertainty in mean sky brightness\n magerr = np.sqrt(error1 + error2 + error3)\n\n return apmag, magerr",
"def testGetVegaMag(self):\n std = MKIDStd.MKIDStd()\n vegaFlux = std.load(\"vega\")\n bd17Flux = std.load(\"bd17\")\n for filter in ['U','B','V','R','I']:\n aFilter = std.filters[filter] \n mag = std.getVegaMag(vegaFlux, aFilter)\n self.assertAlmostEqual(0.03, mag, msg=\"filter=%s mag=%f\"%(filter,mag))",
"def test_plate_size_error():\n \n test_object = fa.read_in_envision(data_csv=plate_1, platemap_csv=plate_map_file, data_type='plate', size=100)",
"def test_is_unital_depolarizing_choi_true():\n np.testing.assert_equal(is_unital(depolarizing(4)), True)",
"def test_isentropic_pressure_data_bounds_error():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 350.] * units.kelvin\n with pytest.raises(ValueError):\n isentropic_interpolation(isentlev, lev, tmpk)",
"def testImageDiffLengthEnforced(self) -> None:\n with self.assertRaises(AssertionError):\n _ = data_types.Result('test', ('win', 'x86'), (1, 2, 3),\n 'build_id')",
"def test_error_at_995tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.995))",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def testPhotoResolutionResponse(self):\n message = (mavutil.mavlink.GOPRO_COMMAND_PHOTO_RESOLUTION, mavutil.mavlink.GOPRO_REQUEST_SUCCESS)\n self.mgr.set_response_callback('vehicle','name', message)\n self.mgr.processMsgQueue.assert_called_with()"
] | [
"0.69079345",
"0.6512504",
"0.62657374",
"0.6050535",
"0.5927566",
"0.57785326",
"0.5778429",
"0.5767613",
"0.566218",
"0.5603356",
"0.5592774",
"0.55879194",
"0.5550571",
"0.55201626",
"0.5513671",
"0.54968745",
"0.5489606",
"0.5488023",
"0.54835194",
"0.544789",
"0.54413337",
"0.54169685",
"0.5412281",
"0.5407146",
"0.5396795",
"0.53730875",
"0.53667855",
"0.5366473",
"0.534014",
"0.5303811"
] | 0.82502395 | 0 |
Test that aperture_photometry does not modify the input data or error array when a mask is input. | def test_aperture_photometry_inputs_with_mask():
data = np.ones((5, 5))
aperture = CircularAperture((2, 2), 2.0)
mask = np.zeros_like(data, dtype=bool)
data[2, 2] = 100.0 # bad pixel
mask[2, 2] = True
error = np.sqrt(data)
data_in = data.copy()
error_in = error.copy()
t1 = aperture_photometry(data, aperture, error=error, mask=mask)
assert_array_equal(data, data_in)
assert_array_equal(error, error_in)
assert_allclose(t1['aperture_sum'][0], 11.5663706144)
t2 = aperture_photometry(data, aperture)
assert_allclose(t2['aperture_sum'][0], 111.566370614) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_aperture_photometry_with_error_units():\n\n data1 = np.ones((40, 40), dtype=float)\n data2 = u.Quantity(data1, unit=u.adu)\n error = u.Quantity(data1, unit=u.adu)\n radius = 3\n true_flux = np.pi * radius * radius\n unit = u.adu\n position = (20, 20)\n table1 = aperture_photometry(data2, CircularAperture(position, radius),\n error=error)\n assert_allclose(table1['aperture_sum'].value, true_flux)\n assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux))\n assert table1['aperture_sum'].unit == unit\n assert table1['aperture_sum_err'].unit == unit",
"def test_nan_inf_mask(value):\n\n data = np.ones((9, 9))\n mask = np.zeros_like(data, dtype=bool)\n data[4, 4] = value\n mask[4, 4] = True\n radius = 2.0\n aper = CircularAperture((4, 4), radius)\n tbl = aperture_photometry(data, aper, mask=mask)\n desired = (np.pi * radius**2) - 1\n assert_allclose(tbl['aperture_sum'], desired)",
"def test_check_data_masked_input_data(self):\n cube = self.cube.copy()\n cube.data[:, 0, 0] = np.nan\n cube.data = np.ma.masked_invalid(cube.data)\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n expected_data[:, 0, 0] = np.nan\n expected_data = np.ma.masked_invalid(expected_data)\n result = Plugin().process(cube)\n self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)\n self.assertArrayEqual(result.data.mask, expected_data.mask)",
"def test_scalar_aperture():\n\n data = np.ones((20, 20), dtype=float)\n\n ap = CircularAperture((10, 10), r=3.0)\n colnames1 = aperture_photometry(data, ap, error=data).colnames\n assert (colnames1 == ['id', 'xcenter', 'ycenter', 'aperture_sum',\n 'aperture_sum_err'])\n\n colnames2 = aperture_photometry(data, [ap], error=data).colnames\n assert (colnames2 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',\n 'aperture_sum_err_0'])\n\n colnames3 = aperture_photometry(data, [ap, ap], error=data).colnames\n assert (colnames3 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',\n 'aperture_sum_err_0', 'aperture_sum_1',\n 'aperture_sum_err_1'])",
"def check_mask(f):\n def wrapper(*args, **kwargs):\n data = args[0]\n try:\n mask = data.mask\n except AttributeError:\n data = np.ma.array(data, mask=np.zeros(data.shape, dtype=np.bool))\n mask = data.mask\n args = list(args)\n args[0] = data\n args = tuple(args)\n return f(*args, **kwargs)\n return wrapper",
"def test_annular_fpm():\n\n # test some semi-random cases - is the array size as expected? \n assert masks.annular_fpm(3, 2, np.inf).shape == (3*2*2, 3*2*2)\n assert masks.annular_fpm(3, 5, np.inf).shape == (3*5*2, 3*5*2)\n assert masks.annular_fpm(3, 5, 10).shape == (3*10*2, 3*10*2)\n assert masks.annular_fpm(3, 5, 11).shape == (3*11*2, 3*11*2)\n\n # test some pixel values are as expected. \n mask = masks.annular_fpm(3, 2, 10)\n assert mask[0,0]==0 # corner is black\n assert mask[5*10, 5*10]==1 # in between is white\n assert mask[3*10, 3*10]==0 # center is black",
"def testNonVarying(self):\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration)\n self._testPhotoCalibCenter(photoCalib, 0)\n\n self.assertEqual(1, photoCalib.instFluxToMaggies(self.instFlux, self.pointXShift))\n self.assertEqual(0, photoCalib.instFluxToMagnitude(self.instFlux, self.pointXShift))\n result = photoCalib.instFluxToMaggies(self.instFlux, self.instFluxErr)\n self.assertEqual(1, result.value)\n\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration, self.calibrationErr)\n self._testPhotoCalibCenter(photoCalib, self.calibrationErr)\n\n # constant, with a bbox\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration, bbox=self.bbox)\n self._testPhotoCalibCenter(photoCalib, 0)",
"def applymask(self,mask):\n self.spec[mask==0]=np.nan",
"def test_nan_in_bbox():\n\n data1 = np.ones((101, 101))\n data2 = data1.copy()\n data1[33, 33] = np.nan\n data1[67, 67] = np.inf\n data1[33, 67] = -np.inf\n data1[22, 22] = np.nan\n data1[22, 23] = np.inf\n error = data1.copy()\n\n aper1 = CircularAperture((50, 50), r=20.0)\n aper2 = CircularAperture((5, 5), r=20.0)\n\n tbl1 = aperture_photometry(data1, aper1, error=error)\n tbl2 = aperture_photometry(data2, aper1, error=error)\n assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum'])\n assert_allclose(tbl1['aperture_sum_err'], tbl2['aperture_sum_err'])\n\n tbl3 = aperture_photometry(data1, aper2, error=error)\n tbl4 = aperture_photometry(data2, aper2, error=error)\n assert_allclose(tbl3['aperture_sum'], tbl4['aperture_sum'])\n assert_allclose(tbl3['aperture_sum_err'], tbl4['aperture_sum_err'])",
"def test_make_mask(self):\n output_mask = footprint_mask(os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n\n assert np.array_equal(output_mask, truth_mask)",
"def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)",
"def parse_aperture_mask(\n tpf,\n sap_mask=\"pipeline\",\n aper_radius=None,\n percentile=None,\n verbose=False,\n threshold_sigma=None,\n):\n if verbose:\n if sap_mask == \"round\":\n print(\n \"aperture photometry mask: {} (r={} pix)\\n\".format(\n sap_mask, aper_radius\n )\n )\n elif sap_mask == \"square\":\n print(\n \"aperture photometry mask: {0} ({1}x{1} pix)\\n\".format(\n sap_mask, aper_radius\n )\n )\n elif sap_mask == \"percentile\":\n print(\n \"aperture photometry mask: {} ({}%)\\n\".format(\n sap_mask, percentile\n )\n )\n else:\n print(\"aperture photometry mask: {}\\n\".format(sap_mask))\n\n # stacked_img = np.median(tpf.flux,axis=0)\n if (sap_mask == \"pipeline\") or (sap_mask is None):\n errmsg = \"tpf does not have pipeline mask\"\n assert tpf.pipeline_mask is not None, errmsg\n mask = tpf.pipeline_mask # default\n elif sap_mask == \"all\":\n mask = np.ones((tpf.shape[1], tpf.shape[2]), dtype=bool)\n elif sap_mask == \"round\":\n assert aper_radius is not None, \"supply aper_radius\"\n mask = make_round_mask(tpf.flux[0], radius=aper_radius)\n elif sap_mask == \"square\":\n assert aper_radius is not None, \"supply aper_radius/size\"\n mask = make_square_mask(tpf.flux[0], size=aper_radius, angle=None)\n elif sap_mask == \"threshold\":\n assert threshold_sigma is not None, \"supply threshold_sigma\"\n # FIXME: make sure aperture is contiguous\n mask = tpf.create_threshold_mask(threshold_sigma)\n elif sap_mask == \"percentile\":\n assert percentile is not None, \"supply percentile\"\n median_img = np.nanmedian(tpf.flux, axis=0)\n mask = median_img > np.nanpercentile(median_img, percentile)\n else:\n raise ValueError(\"Unknown aperture mask\")\n return mask",
"def test_check_data_masked_input_data_non_nans(self):\n cube = self.cube.copy()\n cube.data[:, 0, 0] = 1000\n cube.data = np.ma.masked_equal(cube.data, 1000)\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n expected_data[:, 0, 0] = np.nan\n expected_data = np.ma.masked_invalid(expected_data)\n result = Plugin().process(cube)\n self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)\n self.assertArrayEqual(result.data.mask, expected_data.mask)",
"def set_measurement_mask(self, program_name, mask_name, begins, lengths) -> Tuple[numpy.ndarray, numpy.ndarray]:",
"def test_fix_data(self):\n cube = self.fix.fix_data(self.cube)\n np.testing.assert_allclose(cube.data[0], 1.0)\n np.testing.assert_allclose(cube.data[2], 2.0)\n assert not np.ma.is_masked(cube.data[0])\n assert np.ma.is_masked(cube.data[1])\n assert not np.ma.is_masked(cube.data[2])",
"def test_quality_mask():\n quality = np.array([0, 0, 1])\n assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=0))\n assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=None))\n assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask='none'))\n assert (KeplerQualityFlags.create_quality_mask(quality, bitmask=1)).sum() == 2\n assert (KeplerQualityFlags.create_quality_mask(quality, bitmask='hardest')).sum() == 2\n # Do we see a ValueError if an invalid bitmask is passed?\n with pytest.raises(ValueError) as err:\n KeplerQualityFlags.create_quality_mask(quality, bitmask='invalidoption')\n assert \"not supported\" in err.value.args[0]",
"def ice_unmasked(res='4x5', debug=False):\n # Create a np.ma mask\n m = np.logical_not((land_unmasked(res)*ocean_unmasked(res)))\n if debug:\n print((mask, mask.shape))\n return m",
"def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n tel_azimuth = {}\n tel_altitude = {}\n\n #source = EventSource(filename, max_events=1)\n #subarray = source.subarray\n calib = CameraCalibrator(subarray)\n fit = HillasReconstructor(subarray)\n\n #for event in source:\n\n calib(event)\n\n hillas_dict = {}\n for tel_id, dl1 in event.dl1.tel.items():\n\n geom = subarray.tel[tel_id].camera.geometry\n tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth\n tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude\n\n mask = tailcuts_clean(\n geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0\n )\n\n dl1.parameters = ImageParametersContainer()\n\n try:\n moments = hillas_parameters(geom[mask], dl1.image[mask])\n hillas_dict[tel_id] = moments\n dl1.parameters.hillas = moments\n except HillasParameterizationError:\n dl1.parameters.hillas = HillasParametersContainer()\n continue\n\n # copy event container to modify it\n event_copy = deepcopy(event)\n # overwrite all image parameters but the last one with dummy ones\n for tel_id in list(event_copy.dl1.tel.keys())[:-1]:\n event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()\n fit(event_copy)\n assert event_copy.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to 0\n event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to NaN\n event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False",
"def mask_image(image):\n pass",
"def clean_mask(image, sig=5, iters=3):\n\n mean, median, stddev = sigma_clipped_stats(image[image>0], sigma=sig, iters=iters)\n\n mask_bad = (np.abs(image - median) > sig * stddev) | (image == 0)\n image_ret = np.copy(image)\n image_ret[mask_bad] = 0\n \n return image_ret, mask_bad",
"def testTicket1123(self):\n\n ctrl = afwMath.StatisticsControl()\n ctrl.setAndMask(~0x0)\n \n mimg = afwImage.MaskedImageF(afwGeom.Extent2I(10, 10))\n mimg.set([self.val, 0x1, self.val])\n\n # test the case with no valid pixels ... both mean and stdev should be nan\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertNotEqual(mean, mean) # NaN does not equal itself\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself\n \n # test the case with one valid pixel ... mean is ok, but stdev should still be nan\n mimg.getMask().set(1, 1, 0x0)\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertEqual(mean, self.val)\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself\n\n # test the case with two valid pixels ... both mean and stdev are ok\n mimg.getMask().set(1, 2, 0x0)\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertEqual(mean, self.val)\n self.assertEqual(stdev, 0.0)",
"def mkapmask(slit, wave, apertures, background=None):\n if not hasattr(slit, '__len__'):\n slit = [slit]\n slit = np.array(slit).astype(float)\n\n # mask starts as zero -- unused pixels will remain zero\n mask = np.zeros((slit.size, wave.size))\n\n # set background to nan\n if background is not None:\n regions = np.round(tabinv(slit, background)).astype(int)\n for region in regions:\n if len(region) == 2:\n mask[region[0]: region[1] + 1, :] = np.nan\n\n for api, aperture in enumerate(apertures):\n pos = aperture['trace']\n\n # define PSF aperture\n rad = aperture['psf_radius']\n ap = np.array([pos - rad, pos + rad])\n\n # this gets the effective index for aperture position\n # in the slit array, clipping to 0 at the lower edge,\n # and len(slit)-1 at the upper\n apidxs = tabinv(slit, ap)\n\n # define aperture radius, if available\n if 'aperture_radius' in aperture:\n aprad = aperture['aperture_radius']\n apradpos = np.array([pos - aprad, pos + aprad])\n aprad_idxs = tabinv(slit, apradpos)\n else:\n aprad_idxs = None\n\n for wavei, apidx in enumerate(apidxs.T):\n # this takes the floor of the identified indices,\n # so may include a fractional pixel on the lower edge,\n # and will miss any fractional pixels on the upper edge\n apint = apidx.astype(int)\n\n # check for overlap with previous aperture:\n # identified pixels must be either nan or 0\n maxap = apint[1] + 2 if apint[1] < len(slit) - 2 else len(slit)\n test = mask[apint[0]:maxap, wavei]\n if not np.all(np.isnan(test) | (test == 0)):\n msg = \"The extraction apertures overlap. \" \\\n \"Please lower the aperture radii.\"\n log.error(msg)\n raise ValueError(msg)\n\n # set values to aperture number\n mask[apint[0]:apint[1] + 1, wavei] = api + 1\n\n # fix endpoints to reflect fractional pixels\n # Note that this assumes aperture widths are greater than\n # pixel widths.\n dap = apidx - apint\n if dap[0] > 0:\n # correct the first point to a fractional weight\n # (weight for extraction is mask - api, so full\n # pixels have weight 1)\n mask[apint[0], wavei] = api + dap[0]\n\n if dap[1] > 0:\n # add the next point up the slit to the aperture,\n # with a fractional weight\n mask[apint[1] + 1, wavei] = api + dap[1]\n\n # define aperture radius, if available\n if aprad_idxs is not None:\n # for the central aperture region, use only whole pixels\n apidx = aprad_idxs.T[wavei]\n apint[0] = int(np.ceil(apidx[0]))\n apint[1] = int(np.floor(apidx[1]))\n\n # set value to -1 * aperture number\n mask[apint[0]:apint[1] + 1, wavei] *= -1\n\n return mask",
"def test_odd(self):\n actual = cm.ring_mask((5, 5), 1, 2)\n expected = np.array([[False, False, True, False, False],\n [False, True, False, True, False],\n [True, False, False, False, True],\n [False, True, False, True, False],\n [False, False, True, False, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))",
"def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)",
"def test_unfixable_mask(self):\n unfixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_unfixable_mask.map'))\n self.assertFalse(unfixable_mask.is_mask)\n with self.assertRaises(ValueError):\n unfixable_mask.fix_mask()\n self.assertFalse(unfixable_mask.is_mask)",
"def test_centroid_com_mask_shape():\n with pytest.raises(ValueError):\n mask = np.zeros((2, 2), dtype=bool)\n centroid_com(np.zeros((4, 4)), mask=mask)",
"def get_noise_mask(frame, noise_reference_margin):\r\n center = int((frame.shape[0] - 1) / 2)\r\n radius = center - noise_reference_margin\r\n tmp = Aperture(center, center, radius, data=frame, crop=False)\r\n annulus_mask = np.logical_not(tmp.data.mask)\r\n return annulus_mask",
"def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) & np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor",
"def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], 
phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)",
"def test_get_mean_mask():\n arr = np.array([-5, 4, 0, 3, -2, 7, 10, -10, 5, 6])\n m_arr = np.ma.masked_where(arr < 0, arr)\n assert_allclose(iqcalc.get_mean(m_arr), 5.0)"
] | [
"0.6387848",
"0.62934524",
"0.6213696",
"0.60203755",
"0.60186976",
"0.6006882",
"0.59526944",
"0.58965045",
"0.5882132",
"0.5878645",
"0.58625317",
"0.5860351",
"0.5847825",
"0.58367264",
"0.58344615",
"0.58279014",
"0.5787623",
"0.57459146",
"0.569849",
"0.56977993",
"0.56797236",
"0.5670405",
"0.56536764",
"0.5647506",
"0.56336915",
"0.56322527",
"0.5630631",
"0.5608634",
"0.56045055",
"0.5591484"
] | 0.87002504 | 0 |
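The rows in this dump pair a natural-language query with a positive code snippet, a list of distractor snippets, and their similarity scores. As a minimal sketch only, the record layout suggested by the metadata/objective blocks above could be consumed as follows; the JSON Lines export format and the file name are assumptions on my part, not something stated in the dump itself.

# Minimal sketch: reading one exported record (assumed JSON Lines layout;
# field names taken from the metadata/objective blocks in this dump).
import json

def load_triplets(path="triplets.jsonl"):  # hypothetical file name
    with open(path) as fh:
        for line in fh:
            row = json.loads(line)
            query = row["query"]            # natural-language description (docstring)
            positive = row["document"]      # code snippet paired with the query
            negatives = row["negatives"]    # distractor snippets
            scores = [float(s) for s in row["negative_scores"]]
            # pair each distractor with its retrieval score
            yield query, positive, list(zip(negatives, scores))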
Test elliptical exact aperture photometry on a grid of pixel positions. | def test_ellipse_exact_grid(x, y, r):
data = np.ones((10, 10))
aperture = EllipticalAperture((x, y), r, r, 0.0)
t = aperture_photometry(data, aperture, method='exact')
actual = t['aperture_sum'][0] / (np.pi * r**2)
assert_allclose(actual, 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom",
"def testEllipticalGaussian(self):\n\n width, height = 200, 200\n xcen, ycen = 0.5*width, 0.5*height\n #\n # Make the object\n #\n gal = afwImage.ImageF(afwGeom.ExtentI(width, height))\n a, b, theta = float(10), float(5), 20\n flux = 1e4\n I0 = flux/(2*math.pi*a*b)\n\n c, s = math.cos(math.radians(theta)), math.sin(math.radians(theta))\n for y in range(height):\n for x in range(width):\n dx, dy = x - xcen, y - ycen\n u = c*dx + s*dy\n v = -s*dx + c*dy\n val = I0*math.exp(-0.5*((u/a)**2 + (v/b)**2))\n if val < 0:\n val = 0\n gal.set(x, y, val)\n\n objImg = afwImage.makeExposure(afwImage.makeMaskedImage(gal))\n objImg.getMaskedImage().getVariance().set(1.0)\n del gal\n objImg.setXY0(afwGeom.Point2I(1234, 5678))\n #\n # We need a PSF to be able to centroid well. Cf. #2540\n #\n FWHM = 5\n ksize = 25 # size of desired kernel\n objImg.setPsf(measAlg.DoubleGaussianPsf(ksize, ksize,\n FWHM/(2*math.sqrt(2*math.log(2))), 1, 0.1))\n \n\n if display:\n frame = 0\n ds9.mtv(objImg, frame=frame, title=\"Elliptical\")\n\n self.assertAlmostEqual(1.0, afwMath.makeStatistics(objImg.getMaskedImage().getImage(),\n afwMath.SUM).getValue()/flux)\n #\n # Test elliptical apertures\n #\n #\n msConfig = measAlg.SourceMeasurementConfig()\n msConfig.algorithms.names.add(\"flux.aperture.elliptical\")\n radii = math.sqrt(a*b)*numpy.array([0.45, 1.0, 2.0, 3.0, 10.0,])\n\n msConfig.algorithms[\"flux.aperture.elliptical\"].radii = radii\n schema = afwTable.SourceTable.makeMinimalSchema()\n ms = msConfig.makeMeasureSources(schema)\n \n table = afwTable.SourceTable.make(schema)\n msConfig.slots.setupTable(table)\n source = table.makeRecord()\n\n ss = afwDetection.FootprintSet(objImg.getMaskedImage(), afwDetection.Threshold(0.1))\n fp = ss.getFootprints()[0]\n source.setFootprint(fp)\n\n center = fp.getPeaks()[0].getF()\n ms.apply(source, objImg, center)\n\n self.assertEqual(source.get(\"flux.aperture.elliptical.nProfile\"), len(radii))\n\n r0 = 0.0\n if display:\n shape = source.getShape().clone()\n xy = afwGeom.ExtentD(source.getCentroid()) - afwGeom.ExtentD(objImg.getXY0())\n ds9.dot(\"x\", xcen, ycen, ctype=ds9.RED)\n ds9.dot(\"+\", *xy, frame=frame)\n with ds9.Buffering():\n for r, apFlux in zip(radii, source.get(\"flux.aperture.elliptical\")):\n if display: # draw the inner and outer boundaries of the aperture\n shape.scale(r/shape.getDeterminantRadius())\n ds9.dot(shape, *xy, frame=frame)\n\n trueFlux = flux*(math.exp(-r0**2/(2*a*b)) - math.exp(-r**2/(2*a*b)))\n if verbose:\n print \"%5.2f %6.3f%%\" % (r, 100*((trueFlux - apFlux)/flux))\n self.assertAlmostEqual(trueFlux/flux, apFlux/flux, 5)\n r0 = r\n #\n # Now measure some annuli \"by hand\" (we'll repeat this will EllipticalAperture algorithm soon)\n #\n\n for r1, r2 in [(0.0, 0.45*a),\n (0.45*a, 1.0*a),\n ( 1.0*a, 2.0*a),\n ( 2.0*a, 3.0*a),\n ( 3.0*a, 5.0*a),\n ( 3.0*a, 10.0*a),\n ]:\n control = measAlg.SincFluxControl()\n control.radius1 = r1\n control.radius2 = r2\n control.angle = math.radians(theta)\n control.ellipticity = 1 - b/a\n\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(control).build(schema)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n\n if display: # draw the inner and outer boundaries of the aperture\n Mxx = 1\n Myy = (b/a)**2\n\n mxx, mxy, myy = c**2*Mxx + s**2*Myy, c*s*(Mxx - Myy), s**2*Mxx + c**2*Myy\n for r in (r1, r2):\n ds9.dot(\"@:%g,%g,%g\" % (r**2*mxx, r**2*mxy, r**2*myy), xcen, ycen, frame=frame)\n\n mp.apply(source, objImg, center)\n\n 
self.assertAlmostEqual(math.exp(-0.5*(r1/a)**2) - math.exp(-0.5*(r2/a)**2),\n source.get(control.name)/flux, 5)\n\n control = measAlg.GaussianFluxControl()\n\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(control).build(schema)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n\n objImg.setPsf(None) # no Psf\n mp.apply(source, objImg, center)\n # we haven't provided a PSF, so the built-in aperture correction won't work...but we'll get\n # a result anyway\n # Note that flags.psffactor==True sets flags=True IFF we attempt aperture corrections\n self.assertEqual(source.get(control.name + \".flags\"), False)\n self.assertEqual(source.get(control.name + \".flags.psffactor\"), True)\n gflux = source.get(control.name)\n err = gflux/flux - 1\n if abs(err) > 1.5e-5:\n self.assertEqual(gflux, flux, (\"%g, %g: error is %g\" % (gflux, flux, err)))",
"def Ez_area(position, angle, detect):\n# a = range(round(-2*Ez_height),round(2*Ez_height))\n# b = range(round(-2*Ez_height),round(2*Ez_height))\n# a_valid = []\n# b_valid= []\n \n # These are the grid points in a coordinate system based on the Ez's angle\n if detect:\n a_valid = [-11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6]\n b_valid = [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3]\n else:\n a_valid = [-9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]#[-19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, 
-5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9]\n b_valid = [-4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, -2, -1, 0, 1, 2]#[-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\n positions = []\n# for i in a:\n# for j in b:\n# if (i > ((Ez_width/2)-Ez_height-detect_length) and abs(j) < (Ez_width/2+detect_length) and i < 0) or (i > 0 and np.sqrt(i**2 + j**2) < Ez_width/2+detect_length):\n# a_valid.append(i)\n# b_valid.append(j)\n# print('AAAA', a_valid)\n# print(' ')\n# print('BBBB', b_valid)\n# print(' ')\n \n # This is a coordinate transfromation to x,y\n for i in range(len(a_valid)):\n positions.append((int(round(a_valid[i]*np.cos(angle) + b_valid[i]*np.sin(angle) + position[0])), int(round(a_valid[i]*np.sin(angle) - b_valid[i]*np.cos(angle) + position[1]))))\n return positions",
"def test_epix100a():\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-ds1-2014-05-15/'\n fname_geometry = basedir + 'calib/CsPad::CalibV1/CxiDs1.0:Cspad.0/geometry/2-end.data'\n fname_data = basedir + 'cspad-arr-cxid2714-r0023-lysozyme-rings.txt'\n\n #basedir = '/reg/neh/home1/dubrovin/LCLS/GeometryCalib/calib-xpp-Epix100a-2014-11-05/'\n #fname_geometry = basedir + 'calib/Epix100a::CalibV1/NoDetector.0:Epix100a.0/geometry/0-end.data'\n #fname_data = basedir + 'epix100a-ndarr-ave-clb-xppi0614-r0073.dat'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n amp_range = (-4,10)\n\n rows, cols = geometry.get_pixel_coord_indexes()\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()",
"def eoa(self, *args):\n\n\t\t#Assume coordinate is in center of pixel.\n\t\t#Information on pixel standard is in this article.\n\t\t#http://www.aanda.org/component/article?access=bibcode&bibcode=&bibcode=2002A%2526A...395.1061GFUL\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tlonUL, latUL = self.heliographic(args[0], -.5, -.5)\n\t\t\tlonLL, latLL = self.heliographic(args[0], .5, -.5)\n\t\t\tlonLR, latLR = self.heliographic(args[0], .5, .5)\n\t\t\tlonUR, latUR = self.heliographic(args[0], -.5, .5)\n\t\telse:\n\t\t\tx = args[0]\n\t\t\ty = args[1]\n\t\t\tlonUL, latUL = self.heliographic(x - .5, y - .5)\n\t\t\tlonLL, latLL = self.heliographic(x + .5, y - .5)\n\t\t\tlonLR, latLR = self.heliographic(x + .5, y + .5)\n\t\t\tlonUR, latUR = self.heliographic(x - .5, y + .5)\n\n\t\t# Calculating unit vectors of pixel corners for solid angle.\n\t\tr1 = np.array([np.cos(np.deg2rad(latUL))*np.cos(np.deg2rad(lonUL)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latUL))*np.sin(np.deg2rad(lonUL)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latUL))])\n\n\t\tr2 = np.array([np.cos(np.deg2rad(latLL))*np.cos(np.deg2rad(lonLL)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latLL))*np.sin(np.deg2rad(lonLL)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latLL))])\n\n\t\tr3 = np.array([np.cos(np.deg2rad(latLR))*np.cos(np.deg2rad(lonLR)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latLR))*np.sin(np.deg2rad(lonLR)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latLR))])\n\n\t\tr4 = np.array([np.cos(np.deg2rad(latUR))*np.cos(np.deg2rad(lonUR)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latUR))*np.sin(np.deg2rad(lonUR)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latUR))])\n\n\t\t# Calculate solid angle of pixel based on a pyrimid shaped polygon.\n\t\t# See \n\t\tcross1 = np.cross(r1, r2, axis=0)\n\t\tcross2 = np.cross(r3, r4, axis=0)\n\t\tnumerator1 = dot(cross1, r3)\n\t\tnumerator2 = dot(cross2, r1)\n\t\tsolid_angle1 = 2*np.arctan2(numerator1,\n\t\t\t\t\t\t(dot(r1, r2) + dot(r2, r3) + dot(r3, r1) + 1))\n\t\tsolid_angle2 = 2*np.arctan2(numerator2, \n\t\t\t\t\t\t(dot(r3, r4) + dot(r4, r1) + dot(r3, r1) + 1))\n\t\tsolid_angle = solid_angle1 + solid_angle2\n\t\tr = 6.957e10 * u.cm\n\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tself.area = np.abs((r**2)*solid_angle)\n\t\t\tind = np.where(self.rg > self.rsun)\n\t\t\tself.area[ind] = np.nan\n\t\t\treturn self.area\n\t\telse:\n\t\t\treturn np.abs((r**2)*solid_angle)",
"def radon_ellipses(N,theta_vec, E, tvec_set=None, circle=False):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*N/2\n E[:,1] = E[:,1]*N/2\n E[:,2] = E[:,2]*N/2\n E[:,3] = E[:,3]*N/2\n E[:,4] = E[:,4]*math.pi/180\n \n [t_vec, grid_t, grid_theta] = build_t_theta_pixel(N, theta_vec, tvec_set=tvec_set, circle =circle);\n\n (nrowE,ncolE) = E.shape;\n tmp = np.zeros((nrowE,len(grid_theta)))\n for i in range(nrowE):\n grid_theta_new = grid_theta - E[i,4]\n x_new = (E[i,2]*np.cos(grid_theta)+E[i,3]*np.sin(grid_theta))\n y_new = (-E[i,2]*np.sin(grid_theta)+E[i,3]*np.cos(grid_theta))\n grid_t_new = (grid_t -x_new)/E[i,1]\n\n v1 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2 - grid_t_new**2\n cond = v1;\n v2 = np.zeros((v1.shape[0],1))\n for j in range (len(grid_theta)):\n if cond[j] > 0:\n v2[j]=1\n else:\n v2[j]=0\n #endif\n #endfor\n v3 = np.sqrt(v1*v2);\n v4 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2\n tmp[i,:] = np.transpose( 2*E[i,0]*E[i,5]*(v3/v4) )\n #endfor\n radvec = np.sum(tmp,axis = 0);\n analytical_sinogram = np.transpose(np.reshape(radvec,(len(theta_vec),len(t_vec))))\n return analytical_sinogram",
"def testComputeImage(self):\n for fiberId in self.detMap.fiberId:\n for fraction in (0.1, 0.5, 0.9):\n yy = self.synthConfig.height*fraction\n if yy == int(yy):\n # Ensure we have a non-integer pixel position,\n # so computeImage and computeKernelImage differ\n yy += 0.5\n wavelength = self.detMap.findWavelength(fiberId, yy)\n image = self.psf.computeImage(fiberId, wavelength)\n kernel = self.psf.computeKernelImage(fiberId, wavelength)\n\n # Image should have xy0 set somewhere in the middle of the larger image\n self.assertNotEqual(image.getX0(), 0)\n self.assertNotEqual(image.getY0(), 0)\n\n # Kernel should have xy0 set to the half-size\n halfSize = (self.size - 1)//2\n self.assertEqual(kernel.getX0(), -halfSize)\n self.assertEqual(kernel.getY0(), -halfSize)\n\n # Centroid on image should be at the point of interest\n xx, yy = self.detMap.findPoint(fiberId, wavelength)\n centroid = calculateCentroid(image)\n self.assertFloatsAlmostEqual(xx, centroid.x, atol=2.0e-2)\n self.assertFloatsAlmostEqual(yy, centroid.y, atol=2.0e-2)\n\n # Centroid on kernel should be zero\n centroid = calculateCentroid(kernel)\n self.assertFloatsAlmostEqual(centroid.x, 0.0, atol=1.0e-7)\n self.assertFloatsAlmostEqual(centroid.y, 0.0, atol=1.0e-7)",
"def testKnown(self):\n numAmps = (2, 2)\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(4, 4))\n # make a 4x4 image with 4 identical 2x2 subregions that flatten to -1, 0, 1, 2\n im = afwImage.ImageF(bbox)\n imArr = im.getArray()\n imArr[:, :] = np.array(((-1, 0, -1, 0),\n (1, 2, 1, 2),\n (-1, 0, -1, 0),\n (1, 2, 1, 2)), dtype=imArr.dtype)\n\n sqCoeffs = np.array(((0, 0.11), (-0.15, -12)))\n detector = self.makeDetector(bbox=bbox, numAmps=numAmps, sqCoeffs=sqCoeffs)\n ampInfoCat = detector.getAmpInfoCatalog()\n\n linSq = LinearizeSquared()\n linSq(im, detector=detector)\n\n # amp 0 has 0 squared coefficient and so makes no correction\n imArr0 = im.Factory(im, ampInfoCat[0].getBBox()).getArray()\n linCoeff0 = ampInfoCat[0].getLinearityCoeffs()[0]\n self.assertEqual(0, linCoeff0)\n self.assertFloatsAlmostEqual(imArr0.flatten(), (-1, 0, 1, 2))\n\n # test all amps\n for ampInfo in ampInfoCat:\n imArr = im.Factory(im, ampInfo.getBBox()).getArray()\n linCoeff = ampInfo.getLinearityCoeffs()[0]\n expect = np.array((-1 + linCoeff, 0, 1 + linCoeff, 2 + 4*linCoeff), dtype=imArr.dtype)\n self.assertFloatsAlmostEqual(imArr.flatten(), expect)",
"def trajectory_inside_ellipsoid(env, p_0, p_all, q_all, k_fb, k_ff):\n n, _ = np.shape(k_ff)\n n_u = env.n_u\n n_s = env.n_s\n # init system to p_0\n\n x_all = simulate_trajectory(env, p_0, k_fb, k_ff, p_all)[1:, :]\n\n inside_ellipsoid = np.zeros((n,), dtype=np.bool)\n for i in range(n):\n inside_ellipsoid[i] = sample_inside_ellipsoid(x_all[None, i, :],\n p_all[i, :, None],\n q_all[i, :].reshape((n_s, n_s)))\n\n return inside_ellipsoid",
"def test_aperture_photometry_inputs_with_mask():\n\n data = np.ones((5, 5))\n aperture = CircularAperture((2, 2), 2.0)\n mask = np.zeros_like(data, dtype=bool)\n data[2, 2] = 100.0 # bad pixel\n mask[2, 2] = True\n error = np.sqrt(data)\n data_in = data.copy()\n error_in = error.copy()\n t1 = aperture_photometry(data, aperture, error=error, mask=mask)\n assert_array_equal(data, data_in)\n assert_array_equal(error, error_in)\n assert_allclose(t1['aperture_sum'][0], 11.5663706144)\n t2 = aperture_photometry(data, aperture)\n assert_allclose(t2['aperture_sum'][0], 111.566370614)",
"def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)",
"def clashTest(self, px, py, pz, rad):\r\n radSq = rad**2\r\n # adjust for map not set at origin\r\n px -= self.unif[0]\r\n py -= self.unif[1]\r\n pz -= self.unif[2]\r\n ht = self.height/255\r\n halfw = self.width/2.0\r\n halfd = self.depth/2.0\r\n dx = self.width/self.ix\r\n dz = self.depth/self.iy\r\n\r\n # work out x and z ranges to check, x0 etc correspond with vertex indices in grid\r\n x0 = int(math.floor((halfw + px - rad)/dx + 0.5)) - 1\r\n if x0 < 0: x0 = 0\r\n x1 = int(math.floor((halfw + px + rad)/dx + 0.5)) + 1\r\n if x1 > self.ix-1: x1 = self.ix-1\r\n z0 = int(math.floor((halfd + pz - rad)/dz + 0.5)) - 1\r\n if z0 < 0: z0 = 0\r\n z1 = int(math.floor((halfd + pz + rad)/dz + 0.5)) + 1\r\n if z1 > self.iy-1: z1 = self.iy-1\r\n\r\n # go through grid around px, pz\r\n minDist, minLoc = 1000000, (0, 0)\r\n for i in xrange(x0+1, x1):\r\n for j in xrange(z0+1, z1):\r\n # use the locations stored in the one dimensional vertices matrix\r\n #generated in __init__. 3 values for each location\r\n p = j*self.ix + i # pointer to the start of xyz for i,j in the vertices array\r\n p1 = j*self.ix + i - 1 # pointer to the start of xyz for i-1,j\r\n p2 = (j-1)*self.ix + i # pointer to the start of xyz for i, j-1\r\n vertp = self.buf[0].vertices[p]\r\n normp = self.buf[0].normals[p]\r\n # work out distance squared from this vertex to the point\r\n distSq = (px - vertp[0])**2 + (py - vertp[1])**2 + (pz - vertp[2])**2\r\n if distSq < minDist: # this vertex is nearest so keep a record\r\n minDist = distSq\r\n minLoc = (i, j)\r\n #now find the distance between the point and the plane perpendicular\r\n #to the normal at this vertex\r\n pDist = dot([px - vertp[0], py - vertp[1], pz - vertp[2]],\r\n [-normp[0], -normp[1], -normp[2]])\r\n #and the position where the normal from point crosses the plane\r\n xIsect = px - normp[0]*pDist\r\n zIsect = pz - normp[2]*pDist\r\n\r\n #if the intersection point is in this rectangle then the x,z values\r\n #will lie between edges\r\n if xIsect > self.buf[0].vertices[p1][0] and \\\r\n xIsect < self.buf[0].vertices[p][0] and \\\r\n zIsect > self.buf[0].vertices[p2][2] and \\\r\n zIsect < self.buf[0].vertices[p][2]:\r\n pDistSq = pDist**2\r\n # finally if the perpendicular distance is less than the nearest so far\r\n #keep a record\r\n if pDistSq < minDist:\r\n minDist = pDistSq\r\n minLoc = (i,j)\r\n\r\n gLevel = self.calcHeight(px, pz) #check it hasn't tunnelled through by going fast\r\n if gLevel > (py-rad):\r\n minDist = py - gLevel\r\n minLoc = (int((x0+x1)/2), int((z0+z1)/2))\r\n\r\n if minDist <= radSq: #i.e. near enough to clash so return normal\r\n p = minLoc[1]*self.ix + minLoc[0]\r\n normp = self.buf[0].normals[p]\r\n if minDist < 0:\r\n jump = rad - minDist\r\n else:\r\n jump = 0\r\n return(True, normp[0], normp[1], normp[2], jump)\r\n else:\r\n return (False, 0, 0, 0, 0)",
"def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])",
"def test_compute_pixel_ray_directions_vectorized_invalid_focal_lengths() -> None:\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n fx = 10\n fy = 11\n\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n with pytest.raises(ValueError):\n pinhole_camera.compute_pixel_ray_directions(uv)",
"def clashTest(self, px, py, pz, rad):\n radSq = rad**2\n # adjust for map not set at origin\n px -= self.unif[0]\n py -= self.unif[1]\n pz -= self.unif[2]\n ht = self.height/255\n halfw = self.width/2.0\n halfd = self.depth/2.0\n dx = self.width/self.ix\n dz = self.depth/self.iy\n\n # work out x and z ranges to check, x0 etc correspond with vertex indices in grid\n x0 = int(math.floor((halfw + px - rad)/dx + 0.5)) - 1\n if x0 < 0: x0 = 0\n x1 = int(math.floor((halfw + px + rad)/dx + 0.5)) + 1\n if x1 > self.ix-1: x1 = self.ix-1\n z0 = int(math.floor((halfd + pz - rad)/dz + 0.5)) - 1\n if z0 < 0: z0 = 0\n z1 = int(math.floor((halfd + pz + rad)/dz + 0.5)) + 1\n if z1 > self.iy-1: z1 = self.iy-1\n\n # go through grid around px, pz\n minDist, minLoc = 1000000, (0, 0)\n for i in xrange(x0+1, x1):\n for j in xrange(z0+1, z1):\n # use the locations stored in the one dimensional vertices matrix\n #generated in __init__. 3 values for each location\n p = j*self.ix + i # pointer to the start of xyz for i,j in the vertices array\n p1 = j*self.ix + i - 1 # pointer to the start of xyz for i-1,j\n p2 = (j-1)*self.ix + i # pointer to the start of xyz for i, j-1\n vertp = self.buf[0].vertices[p]\n normp = self.buf[0].normals[p]\n # work out distance squared from this vertex to the point\n distSq = (px - vertp[0])**2 + (py - vertp[1])**2 + (pz - vertp[2])**2\n if distSq < minDist: # this vertex is nearest so keep a record\n minDist = distSq\n minLoc = (i, j)\n #now find the distance between the point and the plane perpendicular\n #to the normal at this vertex\n pDist = dot([px - vertp[0], py - vertp[1], pz - vertp[2]],\n [-normp[0], -normp[1], -normp[2]])\n #and the position where the normal from point crosses the plane\n xIsect = px - normp[0]*pDist\n zIsect = pz - normp[2]*pDist\n\n #if the intersection point is in this rectangle then the x,z values\n #will lie between edges\n if xIsect > self.buf[0].vertices[p1][0] and \\\n xIsect < self.buf[0].vertices[p][0] and \\\n zIsect > self.buf[0].vertices[p2][2] and \\\n zIsect < self.buf[0].vertices[p][2]:\n pDistSq = pDist**2\n # finally if the perpendicular distance is less than the nearest so far\n #keep a record\n if pDistSq < minDist:\n minDist = pDistSq\n minLoc = (i,j)\n\n gLevel = self.calcHeight(px, pz) #check it hasn't tunnelled through by going fast\n if gLevel > (py-rad):\n minDist = py - gLevel\n minLoc = (int((x0+x1)/2), int((z0+z1)/2))\n\n if minDist <= radSq: #i.e. near enough to clash so return normal\n p = minLoc[1]*self.ix + minLoc[0]\n normp = self.buf[0].normals[p]\n if minDist < 0:\n jump = rad - minDist\n else:\n jump = 0\n return(True, normp[0], normp[1], normp[2], jump)\n else:\n return (False, 0, 0, 0, 0)",
"def ellipse_radii_test(radii, eccentricity = 0, perimeter = 2*np.pi*1):\n a,b = radii\n return (np.sqrt(np.absolute(1 - (b**2)/(a**2))) - eccentricity,\n # perimeter approximation from https://www.mathsisfun.com/geometry/ellipse-perimeter.html\n np.pi * (3 * (a + b) - np.sqrt(np.absolute((3 * a + b) * (a + 3 * b)))) - perimeter)",
"def test_annular_fpm():\n\n # test some semi-random cases - is the array size as expected? \n assert masks.annular_fpm(3, 2, np.inf).shape == (3*2*2, 3*2*2)\n assert masks.annular_fpm(3, 5, np.inf).shape == (3*5*2, 3*5*2)\n assert masks.annular_fpm(3, 5, 10).shape == (3*10*2, 3*10*2)\n assert masks.annular_fpm(3, 5, 11).shape == (3*11*2, 3*11*2)\n\n # test some pixel values are as expected. \n mask = masks.annular_fpm(3, 2, 10)\n assert mask[0,0]==0 # corner is black\n assert mask[5*10, 5*10]==1 # in between is white\n assert mask[3*10, 3*10]==0 # center is black",
"def test_compute_pixel_ray_directions_vectorized_entireimage() -> None:\n fx = 10\n fy = 10\n\n img_w = 100\n img_h = 50\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n uv_list = []\n for u in range(img_w):\n for v in range(img_h):\n uv_list += [(u, v)]\n\n uv: NDArrayInt = np.array(uv_list)\n assert uv.shape == (img_w * img_h, 2)\n\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n # compare w/ vectorized, should be identical\n for i, ray_dir_vec in enumerate(ray_dirs):\n u, v = uv[i]\n ray_dir_nonvec = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n assert np.allclose(ray_dir_vec, ray_dir_nonvec)",
"def addEllipseGrid(self, gridX=50, gridY=50, angle=None, semiX=5.0, semiY=2.0, value=1.0):\n self.fimage = None\n gridx = numpy.arange(0, self.nx+gridX, gridX)\n gridy = numpy.arange(0, self.ny+gridY, gridY)\n if angle != None: \n angles = numpy.zeros(len(gridx)*len(gridy), 'float') + angle * numpy.pi / 180.0\n if angle == None:\n angles = numpy.random.uniform(0, 2*numpy.pi, size=len(gridx)*len(gridy))\n count = 0\n for j in gridy:\n for i in gridx:\n angle = angles[count]\n count += 1\n xx = (self.xx - i) * numpy.cos(angle) + (self.yy - j) * numpy.sin(angle) \n yy = -1*(self.xx - i) * numpy.sin(angle) + (self.yy - j) * numpy.cos(angle)\n tmp = ((xx)**2/float(semiX)**2 + (yy)**2/float(semiY)**2)\n ellipses = numpy.where(tmp<=1.0, value, 0)\n # Add to image.\n self.image += ellipses\n return",
"def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom",
"def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff",
"def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )",
"def test_elliptic(self):\n fun = get_problem('elliptic', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 5129555.351959938, delta=2e6)",
"def test_scalar_aperture():\n\n data = np.ones((20, 20), dtype=float)\n\n ap = CircularAperture((10, 10), r=3.0)\n colnames1 = aperture_photometry(data, ap, error=data).colnames\n assert (colnames1 == ['id', 'xcenter', 'ycenter', 'aperture_sum',\n 'aperture_sum_err'])\n\n colnames2 = aperture_photometry(data, [ap], error=data).colnames\n assert (colnames2 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',\n 'aperture_sum_err_0'])\n\n colnames3 = aperture_photometry(data, [ap, ap], error=data).colnames\n assert (colnames3 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0',\n 'aperture_sum_err_0', 'aperture_sum_1',\n 'aperture_sum_err_1'])",
"def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) & np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor",
"def ellipse(self):\n f = self.img\n x = self.x\n y = self.y\n x2 = self.x2\n y2 = self.y2\n xy = self.xy\n self.a2 = (x2+y2) + sqrt(((x2-y2)/2.)**2 + xy**2)\n self.b2 = (x2+y2) - sqrt(((x2-y2)/2.)**2 + xy**2)\n self.a = sqrt(self.a2)\n self.b = sqrt(self.b2)\n tan2theta = 2* (xy/(x2-y2))\n self.theta = arctan(tan2theta)/2.\n denominator = sqrt(((x2-y2)/2)**2+xy**2)\n self.cxx = y2/denominator\n self.cyy = x2/denominator\n self.cxy = -2*xy/denominator",
"def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()",
"def test_cyclohexane(self):\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]]\n ]),\n drawer=lambda image: image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n )",
"def phantom_squares(n_points,S):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*n_points/2\n S[:,1] = S[:,1]*n_points/2\n S[:,2] = S[:,2]*n_points/2\n S[:,3] = S[:,3]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 ) \n nrow,ncol = S.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow)) \n\n for k in range(nrow): #itero sui quadrati\n x_new = x - S[k,0]\n y_new = y - S[k,1]\n\n u = abs(x_new*math.cos(S[k,3])+y_new*math.sin(S[k,3]))\n v = abs(-x_new*math.sin(S[k,3])+y_new*math.cos(S[k,3]))\n\n cond = np.maximum(u,v)\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] < S[k,2]/2):\n phantom1[i,j,k] = S[k,4]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom",
"def test_density(self, radius, density):\n earth = PREM()\n assert earth.density(radius) == pytest.approx(density, rel=1e-5)"
] | [
"0.66793174",
"0.6369313",
"0.635344",
"0.5881113",
"0.57845265",
"0.5761885",
"0.57279414",
"0.561903",
"0.55827296",
"0.55767804",
"0.55325985",
"0.55246985",
"0.55024266",
"0.5493475",
"0.5487103",
"0.5459197",
"0.54441357",
"0.5431681",
"0.5411385",
"0.5358013",
"0.53543353",
"0.53538257",
"0.5345915",
"0.53333265",
"0.53196096",
"0.52974135",
"0.52972907",
"0.5297273",
"0.5290834",
"0.5290511"
] | 0.7887095 | 0 |
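The record above pairs the exact-method elliptical aperture test with its distractor snippets. As an illustrative sketch (not a dataset row), the same check can be reproduced with photutils and numpy, assuming both are installed: with semi-axes a = b = r the ellipse is a circle of area pi*r**2, so the aperture sum over a uniform unit image, normalized by that area, should be very close to 1 whenever the aperture lies fully inside the image.

# Illustrative sketch: exact elliptical aperture photometry on a uniform image,
# mirroring the test shown in the record above.
import numpy as np
from photutils.aperture import EllipticalAperture, aperture_photometry

data = np.ones((10, 10))
for x, y, r in [(3.2, 4.1, 2.0), (5.0, 5.0, 1.5)]:  # arbitrary in-bounds positions
    aperture = EllipticalAperture((x, y), a=r, b=r, theta=0.0)
    tbl = aperture_photometry(data, aperture, method='exact')
    # normalized sum should be ~1 because the aperture is fully inside the image
    assert np.isclose(tbl['aperture_sum'][0] / (np.pi * r**2), 1.0)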
Regression test that nonfinite data values outside of the aperture mask but within the bounding box do not affect the photometry. | def test_nan_in_bbox():
data1 = np.ones((101, 101))
data2 = data1.copy()
data1[33, 33] = np.nan
data1[67, 67] = np.inf
data1[33, 67] = -np.inf
data1[22, 22] = np.nan
data1[22, 23] = np.inf
error = data1.copy()
aper1 = CircularAperture((50, 50), r=20.0)
aper2 = CircularAperture((5, 5), r=20.0)
tbl1 = aperture_photometry(data1, aper1, error=error)
tbl2 = aperture_photometry(data2, aper1, error=error)
assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum'])
assert_allclose(tbl1['aperture_sum_err'], tbl2['aperture_sum_err'])
tbl3 = aperture_photometry(data1, aper2, error=error)
tbl4 = aperture_photometry(data2, aper2, error=error)
assert_allclose(tbl3['aperture_sum'], tbl4['aperture_sum'])
assert_allclose(tbl3['aperture_sum_err'], tbl4['aperture_sum_err']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_aperture_photometry_inputs_with_mask():\n\n data = np.ones((5, 5))\n aperture = CircularAperture((2, 2), 2.0)\n mask = np.zeros_like(data, dtype=bool)\n data[2, 2] = 100.0 # bad pixel\n mask[2, 2] = True\n error = np.sqrt(data)\n data_in = data.copy()\n error_in = error.copy()\n t1 = aperture_photometry(data, aperture, error=error, mask=mask)\n assert_array_equal(data, data_in)\n assert_array_equal(error, error_in)\n assert_allclose(t1['aperture_sum'][0], 11.5663706144)\n t2 = aperture_photometry(data, aperture)\n assert_allclose(t2['aperture_sum'][0], 111.566370614)",
"def test_nan_inf_mask(value):\n\n data = np.ones((9, 9))\n mask = np.zeros_like(data, dtype=bool)\n data[4, 4] = value\n mask[4, 4] = True\n radius = 2.0\n aper = CircularAperture((4, 4), radius)\n tbl = aperture_photometry(data, aper, mask=mask)\n desired = (np.pi * radius**2) - 1\n assert_allclose(tbl['aperture_sum'], desired)",
"def testNonVarying(self):\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration)\n self._testPhotoCalibCenter(photoCalib, 0)\n\n self.assertEqual(1, photoCalib.instFluxToMaggies(self.instFlux, self.pointXShift))\n self.assertEqual(0, photoCalib.instFluxToMagnitude(self.instFlux, self.pointXShift))\n result = photoCalib.instFluxToMaggies(self.instFlux, self.instFluxErr)\n self.assertEqual(1, result.value)\n\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration, self.calibrationErr)\n self._testPhotoCalibCenter(photoCalib, self.calibrationErr)\n\n # constant, with a bbox\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration, bbox=self.bbox)\n self._testPhotoCalibCenter(photoCalib, 0)",
"def test_annular_fpm():\n\n # test some semi-random cases - is the array size as expected? \n assert masks.annular_fpm(3, 2, np.inf).shape == (3*2*2, 3*2*2)\n assert masks.annular_fpm(3, 5, np.inf).shape == (3*5*2, 3*5*2)\n assert masks.annular_fpm(3, 5, 10).shape == (3*10*2, 3*10*2)\n assert masks.annular_fpm(3, 5, 11).shape == (3*11*2, 3*11*2)\n\n # test some pixel values are as expected. \n mask = masks.annular_fpm(3, 2, 10)\n assert mask[0,0]==0 # corner is black\n assert mask[5*10, 5*10]==1 # in between is white\n assert mask[3*10, 3*10]==0 # center is black",
"def test_outside_plus_inside(self):\n for region, bounds in load_region_bounds_dict().items():\n lon_bounds, lat_bounds = bounds\n for key in ['data01', 'ds_shift_lon', 'ds_rev_both', 'ds_irr_both']:\n outside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='outside')['PRECL']\n inside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='inside')['PRECL']\n outside_plus_inside = (np.nan_to_num(outside_data.values) +\n np.nan_to_num(inside_data.values))\n diff_from_input = outside_plus_inside - data_dict[key]['PRECL'].values\n assert np.abs(diff_from_input).max() == 0",
"def testTicket1123(self):\n\n ctrl = afwMath.StatisticsControl()\n ctrl.setAndMask(~0x0)\n \n mimg = afwImage.MaskedImageF(afwGeom.Extent2I(10, 10))\n mimg.set([self.val, 0x1, self.val])\n\n # test the case with no valid pixels ... both mean and stdev should be nan\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertNotEqual(mean, mean) # NaN does not equal itself\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself\n \n # test the case with one valid pixel ... mean is ok, but stdev should still be nan\n mimg.getMask().set(1, 1, 0x0)\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertEqual(mean, self.val)\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself\n\n # test the case with two valid pixels ... both mean and stdev are ok\n mimg.getMask().set(1, 2, 0x0)\n stat = afwMath.makeStatistics(mimg, afwMath.MEAN | afwMath.STDEV, ctrl)\n mean = stat.getValue(afwMath.MEAN)\n stdev = stat.getValue(afwMath.STDEV)\n self.assertEqual(mean, self.val)\n self.assertEqual(stdev, 0.0)",
"def testTicket1125(self):\n mimg = afwImage.MaskedImageF(afwGeom.Extent2I(10, 10))\n mimg.set([self.val, 0x1, self.val])\n\n ctrl = afwMath.StatisticsControl()\n ctrl.setAndMask(~0x0)\n \n # test the case with no valid pixels ... try MEANCLIP and STDEVCLIP\n stat = afwMath.makeStatistics(mimg, afwMath.MEANCLIP | afwMath.STDEVCLIP, ctrl)\n mean = stat.getValue(afwMath.MEANCLIP)\n stdev = stat.getValue(afwMath.STDEVCLIP)\n self.assertNotEqual(mean, mean) # NaN does not equal itself\n self.assertNotEqual(stdev, stdev) # NaN does not equal itself",
"def flag_absolute(data):\n data.mask = np.logical_or(data.mask, data > params.thr_max)\n data.mask = np.logical_or(data.mask, data < params.thr_min)\n return data.mask",
"def test_check_data_masked_input_data(self):\n cube = self.cube.copy()\n cube.data[:, 0, 0] = np.nan\n cube.data = np.ma.masked_invalid(cube.data)\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n expected_data[:, 0, 0] = np.nan\n expected_data = np.ma.masked_invalid(expected_data)\n result = Plugin().process(cube)\n self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)\n self.assertArrayEqual(result.data.mask, expected_data.mask)",
"def test_check_data_masked_input_data_non_nans(self):\n cube = self.cube.copy()\n cube.data[:, 0, 0] = 1000\n cube.data = np.ma.masked_equal(cube.data, 1000)\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n expected_data[:, 0, 0] = np.nan\n expected_data = np.ma.masked_invalid(expected_data)\n result = Plugin().process(cube)\n self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)\n self.assertArrayEqual(result.data.mask, expected_data.mask)",
"def flag_fraction(data):\n occ_f = np.sum(data.mask, axis=0) / float(data.shape[0])\n occ_t = np.sum(data.mask, axis=1) / float(data.shape[1])\n \n bad_f = occ_f > params.max_frac_f\n bad_t = occ_t > params.max_frac_t\n \n data.mask[bad_t, :] = True\n data.mask[:, bad_f] = True\n \n return data.mask",
"def test_extreme_values(self):\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0",
"def testKnown(self):\n numAmps = (2, 2)\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(4, 4))\n # make a 4x4 image with 4 identical 2x2 subregions that flatten to -1, 0, 1, 2\n im = afwImage.ImageF(bbox)\n imArr = im.getArray()\n imArr[:, :] = np.array(((-1, 0, -1, 0),\n (1, 2, 1, 2),\n (-1, 0, -1, 0),\n (1, 2, 1, 2)), dtype=imArr.dtype)\n\n sqCoeffs = np.array(((0, 0.11), (-0.15, -12)))\n detector = self.makeDetector(bbox=bbox, numAmps=numAmps, sqCoeffs=sqCoeffs)\n ampInfoCat = detector.getAmpInfoCatalog()\n\n linSq = LinearizeSquared()\n linSq(im, detector=detector)\n\n # amp 0 has 0 squared coefficient and so makes no correction\n imArr0 = im.Factory(im, ampInfoCat[0].getBBox()).getArray()\n linCoeff0 = ampInfoCat[0].getLinearityCoeffs()[0]\n self.assertEqual(0, linCoeff0)\n self.assertFloatsAlmostEqual(imArr0.flatten(), (-1, 0, 1, 2))\n\n # test all amps\n for ampInfo in ampInfoCat:\n imArr = im.Factory(im, ampInfo.getBBox()).getArray()\n linCoeff = ampInfo.getLinearityCoeffs()[0]\n expect = np.array((-1 + linCoeff, 0, 1 + linCoeff, 2 + 4*linCoeff), dtype=imArr.dtype)\n self.assertFloatsAlmostEqual(imArr.flatten(), expect)",
"def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) & np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor",
"def test_bad_bounds(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"norm\", 0, 2, low=+2, high=-2, shape=(4, 4))\n with pytest.raises(ValueError):\n Real(\"yolo\", \"norm\", 0, 2, low=+2, high=+2, shape=(4, 4))",
"def outlierdetection(data,method):\n import numpy as np\n ##########\n # 0. Input\n data = np.array(data)\n methodname = method['name']\n rule = method['rule']\n try:\n mask = rule['initmask'].copy()\n if not mask:\n mask = np.full_like(data,True,dtype=bool)\n rule['initmask'] = mask.copy()\n except:\n mask = np.full_like(data,True,dtype=bool)\n rule['initmask'] = mask.copy()\n ##########\n # 1. Compute\n if methodname in {'median','sigma'}:\n minp,maxp = rule['minp'],rule['maxp']\n niter = rule['niter']\n for i in range(niter):\n gooddata = data[mask] # good data\n ### median or sigma\n if methodname=='median':\n median = np.median(gooddata)\n minbound = minp*median\n maxbound = maxp*median\n elif methodname=='sigma':\n std = np.std(gooddata)\n median = np.median(gooddata)\n minbound = median - minp*std\n maxbound = median + maxp*std\n ### update mask\n m = np.argwhere((data >= minbound) & (data <= maxbound)).flatten() # good data\n mask = np.full_like(data,False,dtype=bool)\n mask[m] = True\n print('{0} iter {1}'.format(methodname,i))\n elif methodname == 'sn':\n minp = rule['minp']\n noise = rule['noise']\n keepneg = rule['keepneg']\n sn = data / noise\n if keepneg:\n sn = np.abs(sn)\n m = np.argwhere(sn >= minp).flatten()\n mask = np.full_like(data,False,dtype=bool)\n mask[m] = True\n print('{0} complete'.format(methodname))\n elif methodname == 'sigmalocal':\n sigma = rule['sigma']\n noise = rule['noise']\n keepneg = rule['keepneg']\n niter = rule['niter']\n params = rule['params']\n for i in range(niter):\n tmpdata = data[mask]\n tmpmedian = savgol_filter(tmpdata,**params)\n tmpnoise = noise[mask]\n ratio = (tmpdata - tmpmedian)/tmpnoise\n if keepneg:\n ratio = np.abs(ratio)\n m = np.argwhere(ratio > sigma).flatten()\n mask[m] = False\n print('{0} iter {1}'.format(methodname,i))\n else:\n raise ValueError('method {0} does not support'.format(method))\n ##########\n # 2. Update with the initial mask and return\n return mask & rule['initmask']",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_masked_data_below(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n 1 - self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n self.assertArrayEqual(result.data.mask, expected_mask)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_isentropic_pressure_data_bounds_error():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 350.] * units.kelvin\n with pytest.raises(ValueError):\n isentropic_interpolation(isentlev, lev, tmpk)",
"def test_masked_data_above(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"above\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n\n self.assertArrayEqual(result.data.mask, expected_mask)",
"def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n tel_azimuth = {}\n tel_altitude = {}\n\n #source = EventSource(filename, max_events=1)\n #subarray = source.subarray\n calib = CameraCalibrator(subarray)\n fit = HillasReconstructor(subarray)\n\n #for event in source:\n\n calib(event)\n\n hillas_dict = {}\n for tel_id, dl1 in event.dl1.tel.items():\n\n geom = subarray.tel[tel_id].camera.geometry\n tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth\n tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude\n\n mask = tailcuts_clean(\n geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0\n )\n\n dl1.parameters = ImageParametersContainer()\n\n try:\n moments = hillas_parameters(geom[mask], dl1.image[mask])\n hillas_dict[tel_id] = moments\n dl1.parameters.hillas = moments\n except HillasParameterizationError:\n dl1.parameters.hillas = HillasParametersContainer()\n continue\n\n # copy event container to modify it\n event_copy = deepcopy(event)\n # overwrite all image parameters but the last one with dummy ones\n for tel_id in list(event_copy.dl1.tel.keys())[:-1]:\n event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()\n fit(event_copy)\n assert event_copy.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to 0\n event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to NaN\n event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False",
"def test_bounds_respected_func_not_called(\n self, check_bounds_respected):\n self.controller.problem.value_ranges = {'test': (0, 1)}\n self.controller.minimizer = \"deriv_free_algorithm\"\n self.controller.flag_expected = [3]\n\n _ = loop_over_hessians(self.controller,\n options=self.options,\n grabbed_output=self.grabbed_output,\n checkpointer=self.cp)\n check_bounds_respected.assert_not_called()",
"def test_offcenter(self):\n actual = cm.ring_mask((5, 5), 1, 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, False, True],\n [False, True, False, False, False],\n [False, False, True, False, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))",
"def test_intra_power_law_fit_no_model(self):\n\n\t\tdetails= self.watcher.analyze(model=self.model, layers=self.fc_layers, intra=True, randomize=False, vectors=False)\n\t\tactual_alpha = details.alpha[0]\n\n\t\texpected_alpha = 2.654 # not very accurate because of the sparisify transform\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=1)",
"def in_image(self, *args, **kwargs):\n kwargs['with_bounding_box'] = True\n kwargs['fill_value'] = np.nan\n\n coords = self.invert(*args, **kwargs)\n\n result = np.isfinite(coords)\n if self.input_frame.naxes > 1:\n result = np.all(result, axis=0)\n\n if self.bounding_box is None or not np.any(result):\n return result\n\n if self.input_frame.naxes == 1:\n x1, x2 = self.bounding_box\n\n if len(np.shape(args[0])) > 0:\n result[result] = (coords[result] >= x1) & (coords[result] <= x2)\n elif result:\n result = (coords >= x1) and (coords <= x2)\n\n else:\n if len(np.shape(args[0])) > 0:\n for c, (x1, x2) in zip(coords, self.bounding_box):\n result[result] = (c[result] >= x1) & (c[result] <= x2)\n\n elif result:\n result = all([(c >= x1) and (c <= x2) for c, (x1, x2) in zip(coords, self.bounding_box)])\n\n return result"
] | [
"0.6720967",
"0.6610739",
"0.6296814",
"0.61645174",
"0.6088839",
"0.60787433",
"0.5982765",
"0.59556407",
"0.5947607",
"0.58739126",
"0.58470315",
"0.58464575",
"0.5712174",
"0.57064",
"0.57048935",
"0.5660747",
"0.5626591",
"0.5626591",
"0.5626591",
"0.5622805",
"0.5616519",
"0.5616519",
"0.5616519",
"0.55962515",
"0.55724216",
"0.5571008",
"0.55697805",
"0.5522383",
"0.5518102",
"0.550879"
] | 0.7124479 | 0 |
Regression test to check that scalar SkyCoords are added to the table as a length-1 SkyCoord array. | def test_scalar_skycoord():
data = make_4gaussians_image()
wcs = make_wcs(data.shape)
skycoord = wcs.pixel_to_world(90, 60)
aper = SkyCircularAperture(skycoord, r=0.1 * u.arcsec)
tbl = aperture_photometry(data, aper, wcs=wcs)
assert isinstance(tbl['sky_center'], SkyCoord) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_xy(self):\n x = np.array([[1,3], [2,8], [1,3]])\n y = np.array([1,1,-1])\n lro = LogisticRegressionOptimiser(x,y)\n expected = np.array([[1,3], [2,8], [-1,-3]])\n for i in 0,1,2:\n for j in 0,1:\n self.assertEqual(lro.xy[i][j], expected[i][j])",
"def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())",
"def test_add():\n data = io.create_sample_Dataset()\n tmp = data + data\n assert tmp[\"u\"][0, 0, 0] == 2.0",
"def test_scalar_index(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == ()",
"def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z",
"def test_single_linear_regression_data_passing_correctly(\n single_linear_regression_model, single_linear_regression_data\n):\n assert (\n single_linear_regression_model.predictor_vars_train.all()\n == single_linear_regression_data[\"predictor_vars\"].all()\n )\n assert (\n single_linear_regression_model.response_var_train.all()\n == single_linear_regression_data[\"response_var\"].all()\n )\n assert type(single_linear_regression_model.predictor_vars_train) == np.ndarray\n assert type(single_linear_regression_model.response_var_train) == np.ndarray",
"def test_scalar_coord(self):\n self.cube.add_aux_coord(AuxCoord(1, long_name=\"scalar_coord\", units=\"no_unit\"))\n coord = self.cube.coord(\"scalar_coord\")\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, coord)\n self.assertArrayAlmostEqual(result.data, np.array([1.0]))",
"def test_rasters_and_arrays(self):\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A1 = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)\n lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == 105.25\n assert A1[0, 0] == linear_function(lon[0], lat[4])\n\n # Lower left corner\n assert A1[4, 0] == 103.25\n assert A1[4, 0] == linear_function(lon[0], lat[0])\n\n # Upper right corner\n assert A1[0, 7] == 112.25\n assert A1[0, 7] == linear_function(lon[7], lat[4])\n\n # Lower right corner\n assert A1[4, 7] == 110.25\n assert A1[4, 7] == linear_function(lon[7], lat[0])\n\n # Generate raster object and write\n projection = ('GEOGCS[\"WGS 84\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS 84\",6378137,298.2572235630016,'\n 'AUTHORITY[\"EPSG\",\"7030\"]],'\n 'AUTHORITY[\"EPSG\",\"6326\"]],'\n 'PRIMEM[\"Greenwich\",0],'\n 'UNIT[\"degree\",0.0174532925199433],'\n 'AUTHORITY[\"EPSG\",\"4326\"]]')\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n msg = 'Longitudes not as expected: %s' % str(longitudes)\n assert numpy.allclose(longitudes, [100.5, 101.5, 102.5, 103.5, 104.5,\n 105.5, 106.5, 107.5]), msg\n\n msg = 'Latitudes not as expected: %s' % str(latitudes)\n assert numpy.allclose(latitudes, [5.5, 6.5, 7.5, 8.5, 9.5]), msg\n\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, geotransform,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster object')\n assert numlat == R1.rows, msg\n assert numlon == R1.columns, msg\n\n # Write back to new (tif) file\n out_filename = unique_filename(suffix='.tif')\n R1.write_to_file(out_filename)\n assert R1.filename == out_filename\n\n # Check nodata in original layer\n assert numpy.isnan(R1.get_nodata_value())\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n # Check nodata in read layer\n assert numpy.isnan(R2.get_nodata_value())\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, numlat, numlon))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert numlat == R2.rows, msg\n assert numlon == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert 
numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = 'Array values of written raster array were not as expected'\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n # Exercise projection __eq__ method\n assert R1.projection == R2.projection\n\n # Check that equality raises exception when type is wrong\n try:\n R1.projection == 234\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n # Check keywords\n assert R1.keywords == R2.keywords\n\n # Check override of ==\n assert R1 == R2",
"def test_linear_regression_single_column():\n model = cuLinearRegression()\n with pytest.warns(UserWarning):\n model.fit(cp.random.rand(46341), cp.random.rand(46341))",
"def poly_regression_second(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0",
"def test_single(self):\n df = self.df.head(1).copy()\n out = weights_from_array(df.values)\n self.assertTrue(out.size == 1)",
"def test_roundtrip(self):\n dt = np.dtype('(3,)f8')\n dset = self.f.create_dataset('x', (10,), dtype=dt)\n\n out = dset[...]\n dset[...] = out\n\n self.assertTrue(np.all(dset[...] == out))",
"def test_feature_values(iris, name, x_feature, y_feature, x_vals, y_vals):\n iris.x_feature = x_feature\n iris.y_feature = y_feature\n assert iris.title == \"{} x {}\".format(x_feature, y_feature)\n data = iris.sources[name].data\n np.testing.assert_array_almost_equal(data[\"x\"][:2], x_vals)\n np.testing.assert_array_almost_equal(data[\"y\"][:2], y_vals)",
"def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_sea_level_nd_array():\n h = np.array([0.0, 0.0, 0.0])\n expected_h = np.array([0.0, 0.0, 0.0])\n expected_T = np.array([288.15] * 3)\n expected_p = np.array([101325.0] * 3)\n expected_rho = np.array([1.2250] * 3)\n\n h, T, p, rho = coesa.table(h)\n\n assert_array_equal(h, expected_h)\n assert_array_almost_equal(T, expected_T)\n assert_array_almost_equal(p, expected_p)\n assert_array_almost_equal(rho, expected_rho)",
"def test_single_null(self):\n dset = self.f.create_dataset('x', (1,), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,))",
"def test_single_null(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', (1,), dtype='i1')\n out = dset[()]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)",
"def test_equal4():\n x = np.array([[1, 2, 3]])\n y = np.array([[[[1, 2, 3], [1, 2, 3], [1, 2, 3]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_scalar_null(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', shape=(), dtype='i1')\n out = dset[()]\n\n assert out.dtype == \"int8\"",
"def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_sky_coord_classic(self):\n sc_cen = SkyCoord(SCS_CENTER)\n tab = conesearch.conesearch(\n sc_cen, SCS_RADIUS, catalog_db=self.url, verbose=self.verbose)\n assert len(tab) > 0",
"def test_equal13():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_input_transposed_vector(multiple_linear_regression_data):\n X, y = multiple_linear_regression_data\n x = X.copy().T\n y = pd.DataFrame(y)\n\n # There is a difference with a transposed array\n with pytest.raises(\n AssertionError, match=r\"N >= K: You need at least as many rows .*\"\n ):\n _ = multiple_linear_regression(x, y)",
"def test_import_wine():\n X, y = wine_data()\n\n assert(X.shape[1] == 12)\n assert(len(y.unique().tolist()) == 3)\n assert(X.shape[0] == y.shape[0])",
"def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)",
"def test_equal11():\n x = np.array([[True, False, True]])\n y = np.array([[[[[True, False, True], [True, False, True], [True, False, True]]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_scalar_null(self):\n dset = self.f.create_dataset('x', shape=(), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.int8)",
"def test_coord_preceding_fs(self):",
"def test_test_arraypointertype(self):\n input = \"\"\"\n void main () {\n float arr[3];\n arr[2]=1.5;\n foo(arr);\n arr[2] = foo(arr)[2] + 1.1;\n putFloatLn(arr[2]);\n }\n float[] foo(float x[]){\n x[2] = 5.1;\n return x;\n }\n \"\"\"\n expect = \"6.2\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,571))"
] | [
"0.5772283",
"0.5675139",
"0.5644284",
"0.5608696",
"0.5585975",
"0.5573823",
"0.54413795",
"0.5437408",
"0.5400953",
"0.5393124",
"0.5352665",
"0.53412086",
"0.5340118",
"0.53243935",
"0.5320892",
"0.531939",
"0.5305857",
"0.5282068",
"0.52805465",
"0.5274832",
"0.5256645",
"0.52470016",
"0.52449197",
"0.5218932",
"0.5218771",
"0.5215347",
"0.521176",
"0.5207969",
"0.5205124",
"0.5204602"
] | 0.5873713 | 0 |
Getter of the user id. | def get_user_id(self):
return self.id_user | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_id(self):\n return self.user_id",
"def get_id(self) -> int:\n return self.user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self) -> str:\n return self._user_id",
"def user_id(self) -> str:\n return self._user_id",
"def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self):\n # type: () -> string_types\n return self._user_id",
"def get_user_id(self):\n raise NotImplementedError",
"def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self):\n return self.status.user[\"id\"]",
"def get_id(self): \n\t\treturn (self.user_id)",
"def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")",
"def id(self) -> int:\n return self.user.id",
"def user_id(self):\n return json_loads(self.user_json).get('id')",
"def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_id\")",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def user_id(self):\n return lamin_user_settings().id",
"def getUserID(self):\n\t\treturn self.UserID"
] | [
"0.8740212",
"0.8674682",
"0.8466421",
"0.8466421",
"0.8466421",
"0.8466421",
"0.8466421",
"0.84438294",
"0.84438294",
"0.8365442",
"0.8362073",
"0.83430153",
"0.8284895",
"0.8284895",
"0.8263458",
"0.8238554",
"0.82361627",
"0.82293457",
"0.8197898",
"0.8093313",
"0.80851257",
"0.80851257",
"0.80851257",
"0.80421394",
"0.80421394",
"0.80191195",
"0.80191195",
"0.80191195",
"0.7939631",
"0.7900737"
] | 0.8879019 | 0 |
Getter of ratings list for this user. | def get_ratings(self):
return self.ratings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_user_ratings(self, user_id):\n return self.ratings[self.ratings['user_id'] == user_id]",
"def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)",
"def user_ratings(user_id):\n return _fetch_records(f\"SELECT item_id, rating_type FROM ratings WHERE user_id = {user_id}\")",
"def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()",
"def get_user_ratings(self, user_id):\r\n return self.df_app_data.loc[(self.df_app_data[\"user_id\"] == int(user_id))]",
"def item_ratings(self):\n return self.get_ratings().sum(axis=0)",
"def all_ratings(self):\n\n for u, u_ratings in iteritems(self.ur):\n for i, r in u_ratings:\n yield u, i, r",
"def all_ratings(self):\n return {\n 'average': self.average_rating(),\n 'total': self.proto.aggregateRating.ratingsCount,\n 'oneStar': self.proto.aggregateRating.oneStarRatings,\n 'twoStar': self.proto.aggregateRating.twoStarRatings,\n 'threeStar': self.proto.aggregateRating.threeStarRatings,\n 'fourStar': self.proto.aggregateRating.fourStarRatings,\n 'fiveStar': self.proto.aggregateRating.fiveStarRatings,\n }",
"def all(self):\n ratings = []\n for i in range (1, self.pages()+1):\n ratings.extend(self.page(i))\n \n self._set_attrs_to_values({'ratings': ratings})\n return ratings",
"def satisfaction_ratings(self):\r\n return resources.SatisfactionRatings(self)",
"def user_rating(self) -> int:\n return self._user_rating",
"def average_ratings(self):\n return get_average_rate(\n model=Rating,\n article=self.pk\n )",
"def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df",
"def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']",
"def ratings_usuarios(username, ratings):\n return list(filter(lambda x: x.username == username, ratings))",
"def get_queryset(self):\n return ArticleRating.objects.filter(article=self.get_object())",
"def get_mean_user_rating(self, user_id):\n return self.mean_user_rating[self.mean_user_rating['user_id'] == user_id]['rating'].item()",
"def ratings(self, ratings):\n\n self._ratings = ratings",
"def ratings(self, ratings):\n\n self._ratings = ratings",
"def get_rating(self):\n self.total = sum(int(review['stars']) for review in self.reviews.values())\n if self.total > 0:\n return round(self.total / self.reviews.count(), 1)\n else:\n return self.total",
"def dist_by_rating(self):\n return ratings_distribution",
"def getRawRatings(self):\n\n try:\n judgeNotesLogger.info(\"getRawRatings: Retrieving Raw Ratings from '%s'\", self.notesFile)\n for rating in self.ratingsToSongs.keys():\n numOfSongsWithRating = len(self.ratingsToSongs[rating])\n self.ratingsRaw[rating] = numOfSongsWithRating\n\n except:\n judgeNotesLogger.warning(\"getRawRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def get_ratings_for_item_ids(self, DataType, UserId, GameId):\n request = self.spark_session.createDataFrame([(UserId, GameId)], [\"UserId\", \"GameId\"])\n ratings = self.model[DataType-1].transform(request).collect()\n return ratings",
"def rating(self):\n average = self.review.all().aggregate(Avg('rating'))['rating__avg']\n if not average:\n return 0\n return average",
"def get_queryset(self, request):\n try:\n profile = Profile.objects.get(user=request.user)\n except Profile.DoesNotExist:\n if request.user.is_superuser:\n return Rating.objects.all()\n\n if profile.access == 'teacher':\n return Rating.objects.filter(subject__teacher__profile_id=profile.id)\n if profile.access == 'student':\n return Rating.objects.filter(student__profile=profile)",
"def load_ratings(self):\n logging.debug(\"Loading ratings data...\")\n\n # loading ratings\n data=requests.get(self.__URL_RATINGS)\n self.__dataframe_ratings=pd.DataFrame(data.json())\n # calculate implicit and explicit ratings\n # XXX use a function to calculate implicit rating considering the video lead time\n self.__dataframe_ratings['rating_implicit'] = (self.__dataframe_ratings['video_watch_time']/100) * 0.3\n self.__dataframe_ratings['rating_explicit'] = (self.__dataframe_ratings['rating_value']) * 0.7\n\n # create a new column to put implicit or explicit rating value\n self.__dataframe_ratings['overall_rating_value'] = self.__dataframe_ratings['rating_implicit'] + self.__dataframe_ratings['rating_explicit']\n\n logging.debug(\"Ratings data loaded! n=%s\" % self.__dataframe_ratings.shape[0])\n\n return self.__dataframe_ratings",
"def aggregate_rating(self) -> object:\n return self._aggregate_rating",
"def rates(self):\n return self._rates",
"def with_rating(self):\n return self.annotate(\n rating=F('overall_posts_rating') * 10 + F('overall_comments_rating')\n )",
"def average_rating(self):\n return self.proto.aggregateRating.starRating"
] | [
"0.7477732",
"0.7453791",
"0.73936325",
"0.72100174",
"0.7001172",
"0.69021744",
"0.68245584",
"0.67234373",
"0.6693334",
"0.66076666",
"0.65877724",
"0.6586307",
"0.6462181",
"0.6363747",
"0.63283545",
"0.61764896",
"0.6175898",
"0.61114925",
"0.61114925",
"0.6096961",
"0.608822",
"0.60670716",
"0.6029499",
"0.60290885",
"0.59932804",
"0.596957",
"0.5930235",
"0.5905685",
"0.589432",
"0.588822"
] | 0.83241516 | 1 |
Method to check whether a movie has already been seen by the given user. | def check_movie_seen(self, id_movie):
if id_movie in self.seen:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_watched_movie(username: str, movie_id: int) -> bool:\n with connection:\n all_movies = connection.execute(MOVIES_IDS, (movie_id,)).fetchone()\n all_usernames = connection.execute(USERS_IDS, (username,)).fetchone()\n if all_usernames is not None and all_movies is not None:\n connection.execute(ADD_WATCHED_MOVIE, (all_usernames[0], movie_id))\n return True\n return False",
"def user_appears(self, user):\n pass",
"def is_examiner(self, user_obj):\n return self.examiners.filter(pk=user_obj.pk).count() > 0",
"def seen(user, item):\n print(\"---\", item.seen_by(user))\n return item.seen_by(user)",
"def isPlayed(self, item):\n userState = self.userState(item)\n return bool(userState.viewCount > 0) if userState.viewCount else False",
"def is_user_playing(self, user):\n return user in self.active_games",
"def has_history(self, user):\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()\n return len(data) >= 1",
"def userObjExists(self, user : bbUser.bbUser) -> bool:\n return self.userIDExists(user.id)",
"def has_account(user_name):\n session = Session()\n num_meds = session.query(Med).filter_by(account_id=user_name).count()\n session.close()\n return num_meds > 0",
"def __contains__(self, userid):\r\n userid = int(userid)\r\n return bool(userid in self.players)",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def is_liked_by(self, user):\n return user.liked_articles.filter(pk=self.pk).exists()",
"def favorited(self: Article, request: Request):\n if not request.user:\n return False\n\n if self in request.user.favorites:\n return True\n\n return False",
"def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()",
"def is_screen_name_already_assigned(screen_name: str) -> bool:\n return _do_users_matching_filter_exist(DbUser.screen_name, screen_name)",
"def user_has_access(self, user):\n if self.visibility == self.PUBLIC:\n return True\n elif self.visibility == self.PRIVATE and self.created_by == user:\n return True\n elif self.visibility in (self.ORG_ONLY, self.ORG_ONLY_NO_EXTERNAL):\n if user.external and self.visibility == self.ORG_ONLY_NO_EXTERNAL:\n return False\n elif self.organization.memberships.filter(user=user).count() >= 1:\n return True\n return False",
"def is_user_player(self, user):\n return self.user == user",
"def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False",
"def shared_by(self, user):\n return Shares.objects.filter(recipe=self, chef=user).exists()",
"def can_be_viewed_by(self,user):\n return True",
"def check_inflight_already_running(self, user: Identifier) -> bool:\n with self._lock:\n for flow in self.in_flight:\n if flow.requestor == user:\n return True\n return False",
"def is_registered(user_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return str(user_id) in inventories",
"def UserRecords(self, username):\n return not self.com.CheckUid(username) is None",
"def has_user(self, username):\n\t\treturn username in self.users",
"def is_examiner(self, user_obj):\n warnings.warn(\"deprecated\", DeprecationWarning)\n return self.examiners.filter(user__id=user_obj.pk).count() > 0",
"async def check(self, ctx, user: discord.Member=None):\n user = user or ctx.author\n\n for activity in user.activities:\n if isinstance(activity, discord.Spotify):\n # Using the Spotify API from Discord's provided track id\n # This is broken\n sp_track = sp.track(activity.track_id)\n sp_artist = sp.artist(sp_track[\"artists\"][0][\"id\"])\n genres = sp_artist['genres']\n\n await ctx.send(f\"{user} is listening to **{activity.title}** by {activity.artist}.\\nGenres: {', '.join(genres)}\")\n return\n await ctx.send(f'{user} is not listening to a song.')",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def already_booked(slots, attendees, user_name):\n already_joined = False\n for i in attendees:\n if i[\"email\"] == user_name+'@student.wethinkcode.co.za':\n already_joined = True\n\n if already_joined == True:\n return False\n else:\n return True",
"def registered(cls, username):\n if any(usr['username'] == username for usr in cls.getall().values()):\n return True\n return False",
"def user_is_examiner(userobj):\n from .assignment_group import AssignmentGroup\n return AssignmentGroup.published_where_is_examiner(userobj).exists()"
] | [
"0.62960756",
"0.6231976",
"0.609444",
"0.6070275",
"0.60486317",
"0.6042864",
"0.6039808",
"0.6021467",
"0.5981415",
"0.59635645",
"0.5831276",
"0.58286434",
"0.5827806",
"0.5819191",
"0.579271",
"0.5785383",
"0.57851636",
"0.5760467",
"0.57585835",
"0.57534826",
"0.5748277",
"0.57302576",
"0.5717424",
"0.5667295",
"0.5652855",
"0.56494755",
"0.5636571",
"0.5633658",
"0.5629489",
"0.561781"
] | 0.7359592 | 0 |
Getter of movie id | def get_movie_id(self):
return self.id_movie | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_movie_id(self) -> str:\n return self.movie.id",
"def _get_ID(self):\n raw_data = imdb.search_for_title(self.title)\n if len(raw_data) > 1:\n raw_data = raw_data[0] # Pulls the first value of the title (the closest match)\n # if there is more than one\n self.ID = raw_data['imdb_id']",
"def get_video_id(self, obj):\n return obj.id",
"def get_video_id(self, obj):\n return obj.video.id",
"def imdb_id(title):\n pass",
"def video_id(self) -> str:\r\n return self._video_id",
"def video_id(self):\n # type: () -> string_types\n return self._video_id",
"def movie_identifier(self):\n return 'bluray_id'",
"def _get_id(self):\n return self.id",
"def getID():",
"def get_movie_by_id(movie_id):\n\n return Movie.query.get(movie_id)",
"def id(self):\n return self.getattr('id')",
"def getID(self) -> int:\n ...",
"def get_id(self) -> Optional[str]:\n return self.id_",
"def id(self):\n return self[\"id\"]",
"def id_getter(self):\n return self._id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id"
] | [
"0.8985618",
"0.72579557",
"0.7143325",
"0.6941592",
"0.6938852",
"0.68658173",
"0.67975396",
"0.6750645",
"0.67016894",
"0.66771835",
"0.6601591",
"0.6580242",
"0.6561278",
"0.65610945",
"0.6541132",
"0.6535963",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594",
"0.65356594"
] | 0.8726083 | 1 |
Getter of ratings list for this movie. Pair (user, rating) | def get_ratings(self):
return self.ratings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_ratings(user_id):\n return _fetch_records(f\"SELECT item_id, rating_type FROM ratings WHERE user_id = {user_id}\")",
"def _get_user_ratings(self, user_id):\n return self.ratings[self.ratings['user_id'] == user_id]",
"def ratings_usuarios(username, ratings):\n return list(filter(lambda x: x.username == username, ratings))",
"def get_user_ratings(self, user_id):\r\n return self.df_app_data.loc[(self.df_app_data[\"user_id\"] == int(user_id))]",
"def all_ratings(self):\n\n for u, u_ratings in iteritems(self.ur):\n for i, r in u_ratings:\n yield u, i, r",
"def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)",
"def get_users_movies(myRatings):\n #return [x[1] for x in myRatings]\n return list(myRatings.map(lambda x: x[1]).collect())",
"def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()",
"def ratings(book_list, user, rating):\n num = len(book_list)\n records = {'user_id': [user] * num,\n 'rating': [rating] * num,\n 'item_id': book_list,\n }\n return gl.SFrame(records)",
"def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']",
"def _get_user_movie_rating(self, user_id, movie_id):\n rating = self.ratings[(self.ratings['user_id'] == user_id) &\n (self.ratings['movie_id'] == movie_id)]['rating'].iloc[0]\n user_mean = self.get_mean_user_rating(user_id)\n return rating - user_mean",
"def item_ratings(self):\n return self.get_ratings().sum(axis=0)",
"def recommend_one_user(self, user_id: int) -> List[Item]:\n # Retrieve all the movies for this author\n already_seen_movies = self.database.interactions.get_user_interactions(user_id)\n return self.recommend_from_list(already_seen_movies)",
"def _get_user_movies(self, user_id, list=False):\n user_movies = self.ratings[self.ratings['user_id'] == user_id]['movie_id']\n return user_movies.tolist() if list else user_movies.as_matrix()",
"def get_mean_user_rating(self, user_id):\n return self.mean_user_rating[self.mean_user_rating['user_id'] == user_id]['rating'].item()",
"def get_shared_ratings(self, user1_id, user2_id):\n return pd.merge(self._get_user_ratings(user1_id),\n self._get_user_ratings(user2_id),\n on=['movie_id'])",
"def user_rating(self) -> int:\n return self._user_rating",
"def all_ratings(self):\n return {\n 'average': self.average_rating(),\n 'total': self.proto.aggregateRating.ratingsCount,\n 'oneStar': self.proto.aggregateRating.oneStarRatings,\n 'twoStar': self.proto.aggregateRating.twoStarRatings,\n 'threeStar': self.proto.aggregateRating.threeStarRatings,\n 'fourStar': self.proto.aggregateRating.fourStarRatings,\n 'fiveStar': self.proto.aggregateRating.fiveStarRatings,\n }",
"def get_user_preferences(self, user_id):\n # User training ratings\n user_ratings = self.ratings[(self.ratings['user_id'] == user_id)]\n\n # Get rating-movie information\n movies_user = pd.merge(user_ratings, self.movies, on='movie_id')\n\n # Get count of genres\n genres_sum = movies_user[self.genres].sum()\n genres_sum_mat = genres_sum.as_matrix()\n\n # Weight by average of genre within user\n mean_ratings = np.zeros(len(self.genres))\n for i, g in enumerate(genres_sum.index):\n mean_ratings[i] = movies_user[movies_user[g] == True]['rating'].mean()\n\n # Multiply and replace nans to 0\n cleared = np.nan_to_num(genres_sum_mat * mean_ratings)\n return cleared / np.sum(cleared)",
"def _get_user_movie_rating_raw(self, user_id, movie_id):\n return self.ratings[(self.ratings['user_id'] == user_id) &\n (self.ratings['movie_id'] == movie_id)]['rating'].iloc[0]",
"def get_user_ratings_by_movie(self, movie_id):\r\n return self.df_ratmat[self.df_ratmat.iloc[:, movie_id - 1] > 0]",
"def recommend(self, u):\n # Implement a recommendation function that takes a user vector u\n # and outputs a list of movies recommended by the chatbot\n rec_list = []\n for i, movie in enumerate(self.ratings):\n rxi = 0.0\n for tup in u:\n j = tup[0]\n rxj = tup[1]\n if i == j: # Skip movies in user_vec\n continue\n sij = self.distance(self.ratings[i], self.ratings[j])\n rxi += (rxj * sij)\n movie_rank = [i, rxi] # Store movie index and rating\n rec_list.append(movie_rank)\n rec_list = sorted(rec_list, key=lambda x:x[1], reverse = True) \n return rec_list",
"def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df",
"def get_ratings_for_movie_ids(self, user_id, movie_ids):\n requested_movies_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n ratings = self.__predict_ratings(requested_movies_RDD).collect()\n return ratings",
"def get_ratings_from_uid(dataset, user_id):\n user_ratings = dataset.filter(lambda x: x[0] == user_id) \\\n .repartition(numPartitions) \\\n .cache()\n\n return user_ratings",
"def get_ratings_for_movie_ids(self, user_id, movie_ids):\n requested_movies_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n ratings = self.__predict_ratings(requested_movies_RDD).collect()\n\n return ratings",
"def map_user_to_ratings(reviews):\n\tuser_ratings = dict()\n\tfor review in reviews:\n\t\tuser = review[0]\n\t\tif user not in user_ratings:\n\t\t\tuser_ratings[user] = 0\n\t\telse:\n\t\t\tuser_ratings[user] = user_ratings[user] + 1\n\n\treturn user_ratings",
"def get_other_ratings( df, restaurant_id, user_id ):\n\t\t\n\t\tchoice = df[ ( df['business_id'] == restaurant_id ) & ( df['user_id'] == user_id ) ]\n\t\tusers_score = choice['stars'].values[0]\n\t\taverage_score = choice['business_avg'].values[0]\n\t\treturn users_score, average_score",
"def satisfaction_ratings(self):\r\n return resources.SatisfactionRatings(self)",
"def ratings(self, ratings):\n\n self._ratings = ratings"
] | [
"0.7654455",
"0.75835025",
"0.7034352",
"0.7018935",
"0.68056756",
"0.67744166",
"0.6699223",
"0.66923803",
"0.66044664",
"0.6584102",
"0.64854413",
"0.63792723",
"0.6326823",
"0.63158023",
"0.63092196",
"0.62783813",
"0.62779266",
"0.6260052",
"0.62443334",
"0.62352544",
"0.6234368",
"0.6199178",
"0.6189309",
"0.6153794",
"0.61365896",
"0.6120878",
"0.6114309",
"0.607457",
"0.6037006",
"0.60339487"
] | 0.77323925 | 1 |
Write similarities data to file | def write_similarities(self, data):
# If file is yet created, return data and do not create it again
if os.path.isfile(cfg.similarities):
return None
with open(cfg.similarities, 'wb') as similarities:
print("Storing data as serialized object...")
pickle.dump(data, similarities) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_similarities(similarities):\n writer = open(\"similarities.txt\", \"w\")\n for userA in similarities.keys():\n for userB in similarities[userA].keys():\n writer.write(userA + \"\\t\" + userB + \"\\t\" +\n str(similarities[userA][userB]) + \"\\n\")\n writer.close()",
"def save_dists(self, name):\n with open(name, \"w\", encoding=\"utf-8\") as f:\n for word in self._distWords:\n f.write(\"{} | {}\\n\".format(word, \",\".join(map(str_tuple, self._distWords[word].items()))))",
"def sort_similarities():\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n sims_ranked = list()\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n sims_ranked.append(float(sim))\n\n sims_ranked = sorted(sims_ranked, reverse=True)\n writer = open(\"sorted_similarities.txt\", \"w\")\n for sim in sims_ranked:\n writer.write(str(sim) + \"\\n\")",
"def save_course_content_similarities(self):\n connection = self.connection()\n\n sql_drop = 'DROP TABLE IF EXISTS `courses_similarities`'\n connection.execute(sql_drop)\n\n sql_create = \"\"\"CREATE TABLE `courses_similarities` (\n `a_course_id` VARCHAR(9) NOT NULL,\n `another_course_id` VARCHAR(9) NOT NULL,\n `similarity` DOUBLE NOT NULL,\n PRIMARY KEY (`a_course_id`, `another_course_id`)\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8\n \"\"\"\n connection.execute(sql_create)\n\n # Save course similarities to database\n self.courses_content_sims_df.to_sql('courses_similarities', con=connection,\n if_exists='append', index=False)",
"def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")",
"def saveMatches(matches,personDict,fileOutName1,fileOutName2):\n sorted_x = sorted(matches.items(), key=operator.itemgetter(1),reverse=True)\n sorted_matches = []\n for i in sorted_x:\n sorted_matches.append(i[0])\n with open(fileOutName1, 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',')\n spamwriter.writerow(['EnterpriseID1','EnterpriseID2','MATCH_SCORE'])\n for p in sorted_matches:\n spamwriter.writerow([p[0],p[1],str(matches[p])])\n \n with open(fileOutName2, 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',')\n spamwriter.writerow(['EnterpriseID','LAST','FIRST','MIDDLE','SUFFIX','DOB','GENDER','SSN','ADDRESS1','ADDRESS2','ZIP','MOTHERS_MAIDEN_NAME','MRN','CITY','STATE','PHONE','PHONE2','EMAIL','ALIAS'])\n for p in sorted_matches:\n spamwriter.writerow(list(personDict['EnterpriseID'][p[0]]))\n spamwriter.writerow(list(personDict['EnterpriseID'][p[1]]))\n spamwriter.writerow([])",
"def save_results(PATH, data, filename):\n with open(PATH + '/' + filename + \".txt\",\"w\") as file:\n file.write(\"Results of heuristic models with mean and standard deviation.\\n\")\n for result in data:\n write_result(file, result)\n file.close()\n print('results saved in:'+ PATH + '/' + filename + \".txt\")",
"def writeRatingsToSongs(self):\n judgeNotesLogger.info(\"writeRatingsToSongs: Writing file containing songs for each rating\")\n try:\n os.chdir(self.fileDir)\n sortedRatings = sorted(self.ratingsToSongs.keys(), key=float)\n fileName = \"ratingsToSongs_\" + self.judgeName + \".txt\"\n with open(fileName, 'w') as outFile:\n\n # Write out the normal ratings first.\n for rating in sortedRatings:\n songsInRating = self.ratingsToSongs[rating]\n outFile.write(\"[\"+str(rating)+\"/10]\")\n for song in songsInRating:\n if song[2] != \"\":\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"} (\"+str(song[2]) + \")\")\n else:\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"}\")\n outFile.write(\"\\n\\n\")\n\n # Write out the special ratings after.\n sortedRatings = sorted(self.specialRatingsToSongs.keys(), key=str.lower)\n for rating in sortedRatings:\n songsInRating = self.specialRatingsToSongs[rating]\n outFile.write(\"[\"+str(rating)+\"]\")\n for song in songsInRating:\n if song[2] != \"\":\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"} (\"+str(song[2]) + \")\")\n else:\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"}\")\n outFile.write(\"\\n\\n\")\n \n outFile.close()\n judgeNotesLogger.info(\"writeRatingsToSongs: Successfully wrote file '%s'\", fileName)\n except:\n judgeNotesLogger.warning(\"writeRatingsToSongs: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def writeNormScore(self,fin,fout):\n\n for line in fin:\n [sv, en, score] = re.split(r'\\t|,',line)\n self.count[sv][en] += float(score)\n self.en_sum[en] += float(score)\n self.sv_sum[sv] += float(score)\n\n for sv, ens in self.count.iteritems():\n for en in ens.keys():\n fout.write(sv + \",\" + en + \"\\t\" + str(self.count[sv][en] / self.sv_sum[sv] * self.en_sum[en]) + \"\\n\")",
"def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()",
"def writeStatsToFile( gfname, sfname, tgraph ):\n ParProbG = graphWithCutoff(gfname, 0.0)\n with open(sfname,'wb') as ofile:\n for u,v in itertools.combinations( tgraph.nodes(), 2 ):\n ofile.write(\"{0} {1}\\n\".format( ParProbG[u][v]['weight'] if ParProbG.has_edge(u,v) else 0.0, 1 if tgraph.has_edge(u,v) else 0) )",
"def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])",
"def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy, zz, ww in zip(self.x, self.y, self.field, self.weight):\n f.write(\"%s %s %s %s\\n\" % (xx, yy, zz, ww))\n logger.info(\"Written data into file {0}\".format(filename))",
"def __write_result(filename: str,\n frase,\n resources_path: str,\n outputh_path: str,\n predictions,\n vocab = None,\n enable_coarse_grained: int = 1,\n vocab_for_coarse = None) -> int:\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path+\"/mapping/lemma2wn.txt\")\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n # MFS\n the_actual_meaning = MFS(parola,\n bn2wn,\n vocab2=vocab_for_coarse,\n pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(list_of_possible_senses_first_step, bn2wn)\n\n candidates,list_of_possible_senses_bn_version = create_custom_label(list_of_possible_senses_bn_version,\n parola.text,\n vocab,\n predictions[index],\n enable_coarse_grained=enable_coarse_grained)\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n #MFS\n mfs_counter += 1\n the_actual_meaning = MFS(parola,\n bn2wn,\n vocab2=vocab_for_coarse,\n pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + \"/\"+filename, \"a\") as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + \" \" + tupla[1]+\"\\n\")\n del to_write\n del lemma2wn\n del bn2wn\n return 1",
"def write_to_file(self, filepath, mode = \"a\"): \n if \"r\" in mode: \n print(\"Only accepts write and append modes\")\n return \n with open(filepath, mode) as f: \n f.write(\"{}\\n\".format(self.title))\n verified, seen, ratio = self.get_verified_ratio()\n f.write(\"Verified Names: {}\\n\".format(str(verified)))\n f.write(\"Names: {}\\n\".format(str(seen)))\n f.write(\"Ratio: {}\\n\".format(str(ratio)))",
"def write_matches(matches: List[Result],out: str):\n data = pd.DataFrame(matches)\n data.to_csv(out,sep=\"\\t\",index=False)",
"def write_distances(filename):\n print \"Computing distances and writing to \" + filename + \"...\"\n distances = {}\n n = 0\n\n for a1 in attrs:\n distances[a1] = {}\n n += 1\n print \"On point \" + str(n) + \" out of \" + str(len(attrs))\n for a2 in attrs:\n distances[a1][a2] = KL_divergence(attr_value_counts[a1],\n attr_value_counts[a2])\n\n with open(filename, 'w') as f:\n f.write(json.dumps(distances))",
"def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))",
"def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))",
"def write_analysis_details(self, csvfile):\n #filepath, total words, line count, most common word\n f = open(csvfile, 'w')\n most_common = self.most_common()\n f.write('filepath,total words,line count,most common word\\n')\n f.write(f'{self.filepath},{self.word_count()},{self.sentence_count()},{self.most_common()[0]}')\n f.close()",
"def write(self, filename):\n f = open(filename, 'w')\n f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed",
"def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()",
"def write_submission(ratings, file_name):\n # Build output string to write into the file\n output = \"Id,Prediction\\n\"\n for (row, col, rat) in ratings:\n # every line is of the format 'rX_cY,R' where X and Y correspond to row(user) and column(movie) indices and R is the rating\n # we have do increase row and col by one because numpy arrays use 0-base indexing while movie/user indices start at 1\n output += \"r%d_c%d,%f\\n\" % (row + 1, col + 1, rat)\n \n # Write file \n with open(os.path.join('../predictions_csv', file_name), 'w') as output_file:\n output_file.write(output)\n \n return output",
"def save_predictions(self,file_path):\n # compute average of predictions\n num_examples = len(self.labels)\n\n if num_examples == 0:\n raise Exception (\"nothing to save\")\n\n def string_to_average(string):\n return np.average(np.array(string.split(\",\"),dtype=float))\n prediction_averages = np.around(map(string_to_average,self.predictions),decimals=3)\n\n # sort by prediction averages\n order = np.flipud(prediction_averages.argsort())\n prediction_averages = prediction_averages[order]\n self.pl_pairs = self.pl_pairs[order]\n self.predictions = self.predictions[order]\n self.labels = self.labels[order]\n # write all of the predictions to the file\n f = open(file_path + \"_predictions.txt\", 'w')\n\n for i in range(num_examples):\n f.write((str(prediction_averages[i]) + \" \"*10)[:10]\n + (str(self.labels[i]) + \" \"*50)[:10]\n + str(self.pl_pairs[i] + \" \"*50)[:50]\n + str(self.predictions[i] + \" \"*50)[:50]\n + \"\\n\")\n\n f.close()\n # write and save some metadata\n\n f = open(file_path + \"_scores.txt\", 'w')\n f.write(\"top 100 score: \")\n f.write(str(self.top_100_score(self.predictions,self.labels)))\n f.write(\"\\nAUC: \")\n f.write(str(self.auc(prediction_averages,self.labels)))\n f.write(\"\\nconfusion matrix: \")\n f.write(str(self.confusion_matrix(prediction_averages,self.labels)))\n f.close()\n\n # write a file in Kaggle MAP{K} submision format\n # the form is:\n # Protein1, Ligand3 Ligand4 Ligand2\n # Protein2, Ligand5 Ligand9 Ligand7\n\n raw_database_array = np.genfromtxt(FLAGS.test_set_file_path, delimiter=',', dtype=str)\n receptor_set = raw_database_array[:,2]\n receptor_set = list(set(map(lambda x:x.split('.')[0].split('/')[-1],receptor_set)))\n submission = {}\n for i in range(num_examples):\n # get the name of the ligand and protein\n ligand,receptor = self.pl_pairs[i].split(',')\n ligand = ligand.split('/')[-1].split('.')[0]\n receptor = receptor.split('/')[-1].split('.')[0]\n # add all protein-ligand pairs to submission\n if not receptor in submission.keys():\n submission[receptor] = {}\n submission[receptor]['ligands'] = [ligand]\n submission[receptor]['score'] = [prediction_averages[i]]\n else:\n submission[receptor]['ligands'].append(ligand)\n submission[receptor]['score'].append(prediction_averages[i])\n \n # write and save submisison to file\n # if failed to predict any liagnd for a receptor\n # use placeholder 'L' as predict result\n # e.g. P1234,L\n with open(file_path+'_submission.csv','w') as f:\n f.write('Id,Expected\\n')\n for key in receptor_set:\n if key in submission.keys():\n ligands = np.array(submission[key]['ligands'])\n scores = np.array(submission[key]['score'])\n ligands = ligands[np.flipud(scores.argsort())]\n f.write(key+','+' '.join(ligands)+'\\n')\n else:\n f.write(key+','+'L'+'\\n')",
"def writeOut(self):\r\n with open(self.fname, 'w') as f:\r\n for i in range(10):\r\n score = self.getNextHighest()\r\n if score is not None:\r\n f.write('%s %s\\n' % (score.name,\r\n score.score))\r\n pass",
"def write_scores(path, lines):\n\n headers = [\"File\"] + [\"%s%s\" % (t,v) for t in TYPES for v in VALUES]\n write_csv(path, lines, headers)",
"def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')",
"def _save_chromosome_at_index(self, index, file_name):\n how_to_open = 'w' if index == 0 else 'a'\n with open(file_name, how_to_open) as out_file:\n for category in self.population[index].get_genes():\n out_file.write(''.join(category) + '\\t')\n out_file.write(\n '\\n{}\\n'.format(self.population[index].get_fitness())\n )",
"def write_match(iout, match):\n \n np.savetxt('match_{0:d}.list'.format(iout), match, fmt='%d %d %.4f')",
"def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())"
] | [
"0.68487245",
"0.6322778",
"0.62223566",
"0.608114",
"0.6032401",
"0.5957932",
"0.5846",
"0.581992",
"0.58038425",
"0.5798109",
"0.5784611",
"0.57362115",
"0.56961566",
"0.5652088",
"0.56154805",
"0.56049746",
"0.55978143",
"0.55934703",
"0.55828327",
"0.55662423",
"0.55455005",
"0.554164",
"0.55409575",
"0.5540765",
"0.5531817",
"0.5522533",
"0.5514133",
"0.549249",
"0.5483496",
"0.54799193"
] | 0.77305156 | 0 |
Load similarities pickle data | def load_similarities(self):
if not os.path.isfile(cfg.similarities):
return None
else:
print("Serialized object exists. Reading from disk...")
with open(cfg.similarities, 'rb') as file:
data = pickle.load(file)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_data():\r\n f = gzip.open('mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f,encoding='bytes')\r\n f.close()\r\n return (training_data, validation_data, test_data)",
"def write_similarities(self, data):\n # If file is yet created, return data and do not create it again\n if os.path.isfile(cfg.similarities):\n return None\n\n with open(cfg.similarities, 'wb') as similarities:\n print(\"Storing data as serialized object...\")\n pickle.dump(data, similarities)",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)",
"def load_and_pickle_mnist():\n\n if os.path.exists(pickle_file):\n print(\"Pickle file found! Unpickling...\")\n with open(pickle_file, \"rb\") as pf:\n mnist = pickle.load(pf)\n else:\n mnist = read_data_sets(data_dir, one_hot=True)\n\n with open(pickle_file, \"wb\") as pf:\n pickle.dump(mnist, pf, pickle.HIGHEST_PROTOCOL)\n\n # Remove .gz files from the mnist download.\n for ptr in glob.glob(os.path.join(data_dir, \"*.gz\")):\n os.remove(ptr)\n\n return mnist",
"def loadData():\n\tprint \"Loading POS vectorized reviews\"\n\twith open(DATA_PATH, \"rb\") as data_file:\n\t\tdata = cPickle.load(data_file)\n\treturn data",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', mode='rb')\n\n # NOTE: I get errors when I don't use encoding='latin1' because of Python 2 vs Python 3 compatibility issues\n # training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n training_data, validation_data, test_data = pickle.load(f)\n\n f.close()\n\n return training_data, validation_data, test_data",
"def load_similarity(self, old_similarity_file):\n\t\twith open(old_similarity_file) as old_sim_f:\n\t\t\tfor line in old_sim_f:\n\t\t\t\tfields = line.strip('\\n').strip('\\t').split('\\t')\n\t\t\t\tif len(fields) == 2:\n\t\t\t\t\ts_vid = fields[0]\n\t\t\t\t\tscore_str = fields[1]\n\t\t\t\t\tscore_list = score_str.strip(' ').strip(',').split(',')\n\t\t\t\t\tif len(score_list) > 0:\n\t\t\t\t\t\tscore_pair_list = []\n\t\t\t\t\t\tfor item in score_list:\n\t\t\t\t\t\t\td_vid, score = item.strip(' ').split(':')\n\t\t\t\t\t\t\tscore = float(score)\n\t\t\t\t\t\t\tsp = ScorePair(d_vid, score)\n\t\t\t\t\t\t\tscore_pair_list.append(sp)\n\t\t\t\t\t\theapq.heapify(score_pair_list)\n\t\t\t\t\t\tself.s_matrix[s_vid] = score_pair_list",
"def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)",
"def loader(self):\n\n with open(self.source, 'rb') as labels_file:\n self.distance_map = pd.read_pickle(labels_file)\n\n return self.distance_map",
"def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()",
"def data_example(data_path='PoS_data.pickle',\n words_path='all_words.pickle',\n pos_path='all_PoS.pickle'):\n\n with open('PoS_data.pickle', 'rb') as f:\n data = pickle.load(f)\n with open('all_words.pickle', 'rb') as f:\n words = pickle.load(f)\n with open('all_PoS.pickle', 'rb') as f:\n pos = pickle.load(f)\n\n print(\"The number of sentences in the data set is: \" + str(len(data)))\n print(\"\\nThe tenth sentence in the data set, along with its PoS is:\")\n print(data[10][1])\n print(data[10][0])\n\n print(\"\\nThe number of words in the data set is: \" + str(len(words)))\n print(\"The number of parts of speech in the data set is: \" + str(len(pos)))\n\n print(\"one of the words is: \" + words[34467])\n print(\"one of the parts of speech is: \" + pos[17])\n\n print(pos)",
"def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)",
"def load_data():\r\n\r\n mnist_file = gzip.open('../data/mnist.pkl.gz', 'rb')\r\n ## opening the gz archive file by using gzip's open function\r\n\r\n training_data, validation_data, test_data = cPickle.load(mnist_file, encoding='latin1')\r\n ## loading the training, validation and test data by using cPickle's load function\r\n ## passing encoding parameter as ``latin1``\r\n\r\n mnist_file.close()\r\n ## closing the mnist_file\r\n\r\n return (training_data, validation_data, test_data)",
"def load_data():\n dictionary = corpora.Dictionary.load(app.config['DICTIONARY'])\n matrix = similarities.MatrixSimilarity.load(app.config['MATRIX'])\n model = models.LsiModel.load(app.config['MODEL'])\n df = pd.read_pickle(app.config['DATA_FRAME'])\n return Data(matrix=matrix, model=model, dictionary=dictionary, data_frame=df)",
"def load_data(file_name=None):\n pos_review = pickle.load(open(file_name + 'pos_review.pkl', 'r'))\n neg_review = pickle.load(open(file_name + 'neg_review.pkl', 'r'))\n\n return pos_review, neg_review",
"def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')",
"def test_load_pickle(self, tmp_path):\n\n pickle_path = tmp_path / 'pickle.pgz'\n\n original_corpus = Corpus(\n common.TEST_CORPUS_PATH,\n csv_path=common.SMALL_TEST_CORPUS_CSV,\n name='test_corpus',\n pickle_on_load=pickle_path,\n ignore_warnings=True\n )\n\n # first make sure the small corpus is correct\n assert len(original_corpus) == 10\n assert isinstance(original_corpus.documents, list)\n assert original_corpus.name == 'test_corpus'\n\n # next load the pickle file to make sure data was copied correctly\n pickle_corpus = Corpus(pickle_path, name='test_corpus')\n assert len(pickle_corpus) == 10\n assert isinstance(original_corpus.documents, list)\n assert pickle_corpus.name == 'test_corpus'\n\n # Make sure the corpora are equal\n assert original_corpus == pickle_corpus",
"def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))",
"def load_synthetic_data():\n\n pickle_object = FM().data_file \n\n with pickle_object.open('rb') as data_file: \n return pickle.load(data_file)",
"def loadData():\n project_dir = \"/home/c/chandanchowdhury/Documents/CIS-833/CSSearch/indexer/\"\n\n index_file = \"index_file.pkl\"\n link_file = \"link_file.pkl\"\n\n index_data = loadPickle(project_dir+index_file)\n link_data = loadPickle(project_dir+link_file)\n\n return index_data, link_data",
"def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data",
"def loadFeatures(self, filename):\n f = open(filename, 'rb')\n loadhash = pickle.load(f)\n b = self.spikes.view(np.uint8)\n hashkey = hashlib.sha1(b).hexdigest()\n\n if loadhash == hashkey:\n print(\"Spikeset hashes match, loading features info.\")\n self.calculateFeatures(pickle.load(f))\n else:\n print(\"Hashes don't match, features are from a different dataset. Be careful.\")\n self.calculateFeatures(pickle.load(f))",
"def load_predicted_results(self):\n print(\"\\n\\nLoad prediction answers : \")\n with open(\"predicted_results\", \"rb\") as predicted_results:\n self.predicted_results = pickle.load(predicted_results)",
"def get_sim(file, graph1, graph2, pickle_name = \"\"):\r\n def build_sim(file, graph1, graph2):\r\n similarity = np.zeros((len(graph1), len(graph2)))\r\n if file.endswith(\"xz\"):\r\n with lzma.open(file, mode = 'rt') as f:\r\n for line in f:\r\n node_y, node_h, sim_val = line.strip().split(\" \") # 8 seconds\r\n try:\r\n node_y, node_h, sim_val = graph1.indexes[node_y], graph2.indexes[node_h], float(sim_val) #17 seconds\r\n similarity[node_y][node_h] = sim_val\r\n except:\r\n pass\r\n else:\r\n with open(file, mode = 'rt') as f:\r\n for line in f:\r\n node_y, node_h, sim_val = line.strip().split(\" \") # 8 seconds\r\n try:\r\n node_y, node_h, sim_val = graph1.indexes[node_y], graph2.indexes[node_h], float(sim_val) #17 seconds\r\n similarity[node_y][node_h] = sim_val\r\n except:\r\n pass\r\n return similarity\r\n \r\n #if \".sim.pickle\" == pickle_name:\r\n # pickle_name = graph1.name + graph2.name + pickle_name\r\n if pickle_name == \"\":\r\n pickle_name = graph1.name + graph2.name + \".sim.pickle\"\r\n try:\r\n with open(pickle_name,'rb') as f:\r\n return pickle.load(f)\r\n except FileNotFoundError as e:\r\n sims = build_sim(file, graph1, graph2)\r\n with open(pickle_name,'wb') as f:\r\n pickle.dump(sims,f)\r\n return sims",
"def load_movielens1m(path):\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n download_dataset(\n 'http://files.grouplens.org/datasets/movielens/ml-1m.zip', path)\n\n zp = zipfile.ZipFile(path, 'r')\n content = zp.read('ml-1m/ratings.dat')\n data_list = content.split('\\n')\n\n output1 = open('train', 'w')\n output2 = open('test', 'w')\n num_users = 0\n num_movies = 0\n corpus = []\n for item in data_list:\n term = item.split('::')\n if len(term) < 3:\n continue\n user_id = int(term[0]) - 1\n movie_id = int(term[1]) - 1\n rating = int(term[2])\n corpus.append((user_id, movie_id, rating))\n num_users = max(num_users, user_id + 1)\n num_movies = max(num_movies, movie_id + 1)\n\n corpus_data = np.array(corpus)\n np.random.shuffle(corpus_data)\n np.random.shuffle(corpus_data)\n N = np.shape(corpus_data)[0]\n Ndv = N // 20 * 17\n Ndv2 = N // 10 * 9\n train = corpus_data[:Ndv, :]\n valid = corpus_data[Ndv:Ndv2, :]\n test = corpus_data[Ndv2:, :]\n\n for i in range(np.shape(train)[0]):\n output1.write('%d\\t%d\\t%d\\n' % (train[i, 0], train[i, 1], train[i, 2]))\n output1.close()\n for i in range(np.shape(test)[0]):\n output2.write('%d\\t%d\\t%d\\n' % (test[i, 0], test[i, 1], test[i, 2]))\n output2.close() \n\n return num_movies, num_users, train, valid, test",
"def load_pickles(classes_mapping_path, classes_count_path, entities_path):\n classes_count = cPickle.load(open(classes_count_path, 'rb'))\n hierarchy_mapping = cPickle.load(open(classes_mapping_path, 'rb'))\n entities = cPickle.load(open(entities_path, 'rb'))\n return classes_count, hierarchy_mapping, entities",
"def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n 
y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats",
"def load_openml_data():\n datasets = dict()\n files = os.listdir(_DATA_DIRECTORY.value)\n for file_name in files:\n with open(_DATA_DIRECTORY.value + file_name, \"r\") as ff:\n task = np.loadtxt(ff, delimiter=\",\", skiprows=1)\n np.random.shuffle(task)\n datasets[file_name] = [task]\n return datasets, files",
"def load_pickle(path):\n with open(path, 'rb') as f:\n data = cPickle.load(f)\n images = np.asarray([i/np.float32(255) for i in data['data']])\n labels = np.asarray(data['labels'], dtype='int32')\n X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)\n return X_train, y_train, X_test, y_test",
"def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)"
] | [
"0.63505393",
"0.63323516",
"0.6323051",
"0.6226563",
"0.6211917",
"0.6082885",
"0.6041092",
"0.6009795",
"0.5966512",
"0.58577603",
"0.58408076",
"0.5830951",
"0.5827306",
"0.57913244",
"0.5778014",
"0.5771801",
"0.57601976",
"0.57570964",
"0.5746246",
"0.57078785",
"0.5694484",
"0.567397",
"0.5647946",
"0.56379944",
"0.56187093",
"0.5600653",
"0.5600347",
"0.55835336",
"0.5577804",
"0.55512846"
] | 0.7810446 | 0 |
Initializes our connectors by giving them a handler function. | def initConnectors(self):
def handlerFunc(message, responseFunc):
for h in self._handlers:
h.handleMessage(message, responseFunc)
getLogger(__name__).debug('Initializing %d connectors...' % len(self._connectors))
for c in self._connectors:
c.initialize(handlerFunc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init(self, connection_strings):\n # Init connections\n self._connections = []\n for s in connection_strings:\n d = Dict() # don't do Dict(foo=x) bc PyScript only supports that for dict\n self._connections.append(d)\n d.fullname = s\n d.type = s.split('.')[-1]\n d.objects = []\n \n # Pending events for this handler\n self._scheduled_update = False\n self._pending = [] # pending events\n \n # Connect\n for index in range(len(self._connections)):\n self._connect_to_event(index)",
"def _handler_init(self):\r\n\t\tself._handlers[\"player-join\"] = FunctionDelegate()\r\n\t\tself._handlers[\"player-quit\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-start\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-stop\"] = FunctionDelegate()",
"def connect(self, info_handler=None):\n pass",
"def handle_connect(self):\n pass",
"def test_init_adds_handler(self):\n pass",
"def init_connection(self, connection):",
"def initialize(self, context: InitCommandContext) -> None:\n super().initialize(context)\n self.handler.initialize(context, self.logger)",
"def initialize(self) -> None:\n conn = self.optionally_wrap_socket(self.client.connection)\n conn.setblocking(False)\n self.client = TcpClientConnection(conn=conn, addr=self.addr)\n if b'ProtocolHandlerPlugin' in self.config.plugins:\n for klass in self.config.plugins[b'ProtocolHandlerPlugin']:\n instance = klass(self.config, self.client, self.request)\n self.plugins[instance.name()] = instance",
"def __init__(self):\n self.try_to_connect()",
"def __init__(self, handler_factory):\n self.handler_factory = handler_factory",
"def __init__(self, handler):\n self.__handler = handler",
"def __init__(self, handler):\n\n self.event_handler = handler",
"def connect(self, **kwargs):\n pass",
"def __init__(self, db_handle):\n self.cur = db_handle.database().get_cursor()\n self.question_handler = db_handle.question()\n self.answer_handler = db_handle.answer()\n self.user_handler = db_handle.user()",
"def __init__(self, link_uri):\n\n self._cf = Crazyflie()\n\n self._cf.connected.add_callback(self._connected)\n self._cf.disconnected.add_callback(self._disconnected)\n self._cf.connection_failed.add_callback(self._connection_failed)\n self._cf.connection_lost.add_callback(self._connection_lost)\n\n self._cf.open_link(link_uri)\n\n print(\"Connecting to %s\" % link_uri)",
"def __setup_conn__(self, **kwargs):\n self.ext_conn = setup_conn(**kwargs)",
"def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)",
"def connect(self, *args, **kwargs):",
"def callback_connect(self):\n pass",
"def callback_connect(self):\n pass",
"def callback_connect(self):\n pass",
"def initialize(self):\n self._validate_client_objects()\n for execution_type in self.clients:\n # check for valid connection is done in _validate_client_objects()\n _ = self.clients[execution_type].connection # Unused",
"def connect(self):\n\t\tpass",
"def __init__(self, host, port, **kwargs):\n\n SocketHandler.__init__(self, host, port)\n BaseHandler.__init__(self, **kwargs)",
"def __init__(self, cmd_handler: Callable[[IRCClient], CommandHandler], *args, **kwargs):\n IRCClient.__init__(self, *args, **kwargs)\n self.command_handler: CommandHandler = cmd_handler(self)",
"def initialize( self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)",
"def init_stream_handler(\n self, \n logger, \n loop, \n netconf_ip, \n netconf_port,\n statistics,\n xml_to_json_translator):\n self._logger = logger\n self._asyncio_loop = loop\n self._encoding = \"xml\"\n self._netconf_ip = netconf_ip\n self._netconf_port = netconf_port\n self._stat = statistics\n self._xml_to_json_translator = xml_to_json_translator",
"def initialize(self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)",
"def connect(self):\n pass",
"def connect(self):\n pass"
] | [
"0.6854127",
"0.6759487",
"0.6748176",
"0.6468161",
"0.6428388",
"0.63626826",
"0.6295971",
"0.62636906",
"0.6258875",
"0.62326473",
"0.616134",
"0.61134547",
"0.60776216",
"0.6075216",
"0.60286105",
"0.6026851",
"0.6011213",
"0.60032165",
"0.5989295",
"0.5989295",
"0.5989295",
"0.59421796",
"0.5908346",
"0.59061635",
"0.5896743",
"0.5891601",
"0.58840185",
"0.5877598",
"0.5868536",
"0.5868536"
] | 0.8177971 | 0 |
Read a pulse of SPI data on a pin that corresponds to DYMO scale output protocol (12 bytes of data at about 14KHz), timeout is in seconds | def get_scale_data(pin, timeout=1.0):
timestamp = time.monotonic()
with pulseio.PulseIn(pin, maxlen=96, idle_state=True) as pulses:
pulses.pause()
pulses.clear()
pulses.resume()
while len(pulses) < 35:
if (time.monotonic() - timestamp) > timeout:
raise RuntimeError("Timed out waiting for data")
pulses.pause()
bits = [0] * 96 # there are 12 bytes = 96 bits of data
bit_idx = 0 # we will count a bit at a time
bit_val = False # first pulses will be LOW
print(pulses[1])
for i in range(len(pulses)):
if pulses[i] == 65535: # This is the pulse between transmits
break
num_bits = int(pulses[i] / 75 + 0.5) # ~14KHz == ~7.5us per clock
#print("%d (%d)," % (pulses[i], num_bits), end='')
for bit in range(num_bits):
#print("bit #", bit_idx)
bits[bit_idx] = bit_val
bit_idx += 1
if bit_idx == 96: # we have read all the data we wanted
#print("DONE")
break
bit_val = not bit_val
#print(bits)
data_bytes = [0] * 12
for byte_n in range(12):
thebyte = 0
for bit_n in range(8):
thebyte <<= 1
thebyte |= bits[byte_n*8 + bit_n]
data_bytes[byte_n] = thebyte
print([hex(i) for i in data_bytes])
# do some very basic data checking
if data_bytes[0] != 3 or data_bytes[1] != 3 or data_bytes[7] != 4 \
or data_bytes[8] != 0x1C or data_bytes[9] != 0 or data_bytes[10] \
or data_bytes[11] != 0:
raise RuntimeError("Bad data capture")
reading = ScaleReading()
reading.stable = data_bytes[2] & 0x4
reading.units = data_bytes[3]
reading.weight = data_bytes[5] + (data_bytes[6] << 8)
if data_bytes[2] & 0x1:
reading.weight *= -1
if reading.units == ScaleReading.OUNCES:
# oi no easy way to cast to int8_t
if data_bytes[4] & 0x80:
data_bytes[4] -= 0x100
reading.weight *= 10 ** data_bytes[4]
return reading | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def s_read(self, timeout = 1):\n if self.s.is_open:\n data = [] \n b = bytearray()\n try:\n self.s.timeout = 3\n data = self.s.read(1)\n \n if not len(data):\n return b\n\n self.s.timeout = .04\n data += self.s.read(500)\n except Exception as e:\n print(\"Could not read from port\" + str(e))\n\n start = data.find(b'\\x7e')\n end = data.find(b'\\x7f')\n\n txt_start = b''\n txt_end = b''\n if start < 0:\n txt_start = data\n elif end < 0:\n txt_start = data\n else:\n txt_start = data[0:start]\n txt_end = data[end+1:]\n\n txt = txt_start + txt_end\n if len(txt):\n if self.log_ascii:\n self.logfile.write(txt)\n\n # End logging \n if Connection.START_UP_STRING in data:\n raise Reset_Exception('ChipSHOUTER unit has reset - wait 5 seconds then reinitialize.') \n\n if start < 0 or end < 0 or end < start:\n b = bytearray()\n return b\n if self.log_input:\n self.logfile.write('\\nIN :' + str(len(data)) + '[' + hexlify(data) + ']' + '\\n')\n b.extend(data[start:end])\n return b\n else:\n raise IOError('Comport is not open, use ctl_connect()')",
"def time_pulse_us(pin:Pin, pulse_level:int, timeout_us:int=1000000, /) -> int:",
"def read(SCK=15, DAT=13):\n\n data = Pin(DAT, Pin.IN)\n sck = Pin(SCK, Pin.OUT, value=0)\n \n initialFreq = freq() # so we can slow it down afterwards\n freq(160000000) # hx711 needs a high clock frequency :o\n \n value = 0\n \n if data.value() == 0:\n\n for i in range(24):\n sck.value(1)\n sck.value(0)\n value = value << 1 | data.value()\n\n for j in range(1):\n sck.value(1)\n sck.value(0)\n\n # convert 2's complement to integer\n if value & (1 << (24 - 1)):\n value -= 1 << 24\n\n freq(initialFreq) # back to initialFreq\n \n return value",
"def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)",
"def recv(self):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('list(spi.recv(16, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)",
"def read_bytes(self, ctrl_pin):\n try:\n '''\n ctrl_pin1.value(0)\n time.sleep_ms(2)\n ctrl_pin1.value(1)\n time.sleep_ms(220)\n ctrl_pin1.value(0)\n temp = hspi.read(2)\n ctrl_pin1.value(1)\n '''\n pin_ = self.ctrl_pins[ctrl_pin]\n pin_.value(0)\n time.sleep_ms(2)\n pin_.value(1)\n time.sleep_ms(220)\n pin_.value(0)\n temp = self.hspi.read(2)\n pin_.value(1)\n except KeyError:\n print('requested pin not defined')\n temp = None\n return temp",
"def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)",
"def read(self):\n try:\n cmd = 'SAMP:COUN 1' \n self.handle.write(cmd) #one sample per trigger\n self.handle.write('TRIG:SOUR BUS') #triggered by command\n self.handle.write('TRIG:COUN 1') #one trigger to return to wait for trg\n self.handle.write('INIT:IMM') #DVM to \"wait for trigger\" \n self.handle.write('*TRG')\n startTime = time.time()\n while True: #wait until measuring flag goes to 0\n try:\n measured = self.handle.ask(\"DATA:POIN?\")\n measured = measured.strip() #remove CR \n measured = int(measured) #convert to number\n if measured == 1: #final number of samples achieved\n break;\n except Exception:\n print('Dvm34411:read() polling failed !')\n raise\n \n if time.time() - startTime > self.timeout:\n print('Dvm34411:read() timeout !')\n return False\n \n time.sleep(1) \n reading = self.handle.ask('R? 1;') #definite-Length block format\n except Exception:\n print('Dvm34411.read() failed !')\n raise\n if reading[0] != '#':\n print('Dvm34411.read() DLB format error - # expected !')\n return False\n digits = int(reading[1])\n reading = reading[2 + digits:]\n rdg = float(reading)\n return rdg",
"def get_data(N,port_name,port_speed):\r\n t = np.zeros(N) # array for timestamps \r\n percent = np.zeros(N) # array for percentage values\r\n \r\n # get data from serial port\r\n with serial.Serial(port=port_name,baudrate=port_speed,timeout=2) as myport:\r\n \r\n sleep(2) # allow arduino to reset itself\r\n \r\n while (get_valid_line(myport) != 'START'):\r\n pass # wait until start tag is received\r\n \r\n # retrieve data\r\n for i in range(N):\r\n \r\n line = get_valid_line(myport) # check for valid line\r\n \r\n if line == 'START': # ignore any subsequent tags\r\n line = myport.readline()\r\n \r\n stamp, val = [int(a) for a in line.split()] # seperate data\r\n t[i] = stamp/1e3 # convert to seconds\r\n percent[i] = (val/1023)*100 # convert to percentage of max \r\n \r\n return t,percent",
"def _read(self, timeout=None):\n\n # Developer notes:\n #\n # Packet data read from Serial is in this format:\n # [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]\n #\n # [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte\n # values cannot occur within it. When [Escaped data] has been\n # unescaped, the last 2 bytes are a 16-bit CRC of the earlier\n # part of the packet (excluding the initial HDLC_FLAG_BYTE\n # byte)\n #\n # It's also possible that the serial device was half-way\n # through transmitting a packet when this function was called\n # (app was just started). So we also neeed to handle this case:\n #\n # [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]\n #\n # In this case we skip over the first (incomplete) packet.\n #\n\n if self._s.timeout != timeout and timeout != None:\n if self._debug:\n print \"Set the timeout to %s, previous one was %s\" % (timeout, self._s.timeout)\n self._s.timeout = timeout\n\n try:\n # Read bytes until we get to a HDLC_FLAG_BYTE value\n # (either the end of a packet, or the start of a new one)\n d = self._get_byte(timeout)\n ts = time.time()\n if self._debug and d != self.HDLC_FLAG_BYTE:\n print \"Skipping incomplete packet\"\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte(timeout)\n ts = time.time()\n\n # Store HDLC_FLAG_BYTE at the start of the retrieved packet\n # data:\n packet = [d]\n\n # Is the next byte also HDLC_FLAG_BYTE?\n d = self._get_byte(timeout)\n if d == self.HDLC_FLAG_BYTE:\n # Yes. This means that the previous byte was for\n # the end of the previous packet, and this byte is for\n # the start of the next packet.\n\n # Get the 2nd byte of the new packet:\n d = self._get_byte(timeout)\n ts = time.time()\n\n # We are now on the 2nd byte of the packet. Add it to\n # our retrieved packet data:\n packet.append(d)\n\n # Read bytes from serial until we read another\n # HDLC_FLAG_BYTE value (end of the current packet):\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte(timeout)\n packet.append(d)\n\n # Done reading a whole packet from serial\n if self._debug:\n print \"SimpleSerial:_read: unescaped\", packet\n\n # Decode the packet, and check CRC:\n packet = self._unescape(packet)\n\n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n\n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n raise ReadCRCError\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n\n # Packet was successfully retrieved, so return it in a\n # RawPacket wrapper object (but leave out the\n # HDLC_FLAG_BYTE and CRC bytes)\n return RawPacket(ts, packet[1:-3])\n except socket.timeout:\n raise ReadTimeoutError",
"def read_mcp3008(pi, adc, channel):\n count, data = pi.spi_xfer(adc, [1, (8 + channel) << 4, 0])\n value = ((data[1] << 8) | data[2]) & 0x3FF\n return value",
"def send_recv(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('list(spi.send_recv(data, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)",
"def read_from_serial(self):\n output = b''\n time.sleep(self._sleep_time)\n while self._ser.inWaiting() > 0:\n output = output + self._ser.read(1)\n #A default ten powercycle delay means that some measurements may still be processing\n #by the time the read function is called. This slows down the read but ensures that\n #it will finish (per my testing). There is probably a better way to do this. TODO\n time.sleep(0.06)\n return output.decode('utf-8').strip()",
"def read(self, nbytes):\n utils.print_for_unimplemented_functions(SPI.read.__name__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)",
"def _read(self, pin):\n # Immediately return conversion register result if in CONTINUOUS mode\n # and pin has not changed\n if self.mode == Mode.CONTINUOUS and self._last_pin_read == pin:\n raw_adc = self._read_register(_ADS1X15_POINTER_CONVERSION, True)\n raw_adc = raw_adc.to_bytes(2, \"big\")\n return struct.unpack(\">h\", raw_adc)[0] >> self._shift_fact\n\n # Assign last pin read if in SINGLE mode or first sample in CONTINUOUS\n # mode on this pin\n self._last_pin_read = pin\n\n # Configure ADC every time before a conversion in SINGLE mode\n # or changing channels in CONTINUOUS mode\n config = _ADS1X15_CONFIG_OS_SINGLE if self.mode == Mode.SINGLE else 0\n config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET\n config |= _ADS1X15_CONFIG_GAIN[self.gain]\n config |= self.mode\n config |= self.rate_config[self.data_rate]\n config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE\n self._write_register(_ADS1X15_POINTER_CONFIG, config)\n\n # Wait for conversion to complete\n # ADS1x1x devices settle within a single conversion cycle\n if self.mode == Mode.SINGLE:\n # Continuously poll conversion complete status bit\n #while not self._conversion_complete():\n while not self._read_register(_ADS1X15_POINTER_CONFIG) & 0x8000:\n pass\n else:\n # Can't poll registers in CONTINUOUS mode\n # Wait expected time for two conversions to complete\n time.sleep(2 /self.data_rate)\n\n raw_adc = self._read_register(_ADS1X15_POINTER_CONVERSION, False)\n raw_adc = raw_adc.to_bytes(2, \"big\")\n return struct.unpack(\">h\", raw_adc)[0] >> self._shift_fact",
"def read(self, delay):\n \n buf = []\n seeking_sync = True;\n seeking_end = True;\n \n time.sleep(delay)\n # Read serial into buffer and then pop out to s for return\n while self.ser.inWaiting() > 0:\n ch = self.ser.read(1) #Read 1 BYTE\n\n if seeking_sync:\n if ch == self.stx: # <STX>\n seeking_sync = False\n elif seeking_end:\n if ch == self.ack: # <ACK>\n buf.append(self.ack)\n seeking_end = False\n else:\n buf.append(ch)\n \n \n if not buf: # No reply received\n return False\n elif buf[-1] != self.ack: # Check for ACK character\n return False \n else:\n return ''.join(buf[:-1])",
"def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)",
"def readout():\r\n\r\n #Getting the time of measurement.\r\n thetime = datetime.datetime.now().strftime(\"%H:%M:%S,%f\")[:-5]\r\n\r\n #Additional parameters to read out can be added here.\r\n T_BL100 = bl100.readParameter (142)\r\n MF_BL100 = bl100.readParameter(205)\r\n Rho_BL100 = bl100.readParameter (270)\r\n T_Cori = coriflow.readParameter(142)\r\n MF_Cori = coriflow.readParameter(205)\r\n Rho_Cori = coriflow.readParameter (270) \r\n DP = diffp.readParameter (205)\r\n\r\n #This function has multiple outputs: the sensors' measurements and the time of measurement.\r\n readoutdata = (thetime,T_BL100, MF_BL100, Rho_BL100, T_Cori, MF_Cori, Rho_Cori, DP)\r\n return readoutdata",
"def serial_read(user_gpio):\n bytes = _u2i(_pigpio_command(_control, _PI_CMD_SLR, user_gpio, 10000))\n if bytes > 0:\n buf = \"\"\n while len(buf) < bytes: buf += _control.recv(bytes-len(buf))\n return bytes, buf\n return bytes, \"\"",
"def getData(dig, pipe, event, pulses):\n logging.info(\"Started getData\")\n start_time = time.time()\n for pulse in range(pulses):\n samples = dig.get_data_raw()\n# logging.info(\"GetData retrieved: %d\", len(samples))\n pipe.put(samples)\n end_time = time.time()\n elapsed = end_time - start_time\n samplesProcessed = (pulses * len(samples[0]) * len(samples))\n logging.info(\"getData processed %d Msamples in %.3f s\",\n samplesProcessed / 1e6,\n elapsed)\n logging.info(\"getData rate: %.3f Msa/s in lumps of %d samples\",\n samplesProcessed / elapsed / 1e6,\n dig.pointsPerCycle)",
"def single_pulse_SCPI(pulsewidth, updown, high_voltage, low_voltage, channel = '1', *args, **kwargs):\n\tif pulsewidth[-2:] not in set({'ns', 'us', 'ms',}):\n\t\tif pulsewidth[-1] != 's':\n\t\t\traise ValueError('pulsewidth ' + str(pulsewidth) + ' not supported')\n\tif updown not in set({'up', 'down'}):\n\t\traise ValueError('updown ' + str(updown) + ' not supported')\n\tif high_voltage[-2:].lower() not in set({'mv'}):\n\t\tif high_voltage[-1].lower() != 'v':\n\t\t\traise ValueError('high_voltage ' + str(high_voltage) + ' not supported')\n\tif low_voltage[-2:].lower() not in set({'mv'}):\n\t\tif low_voltage[-1].lower() != 'v':\n\t\t\traise ValueError('low_voltage ' + str(low_voltage) + ' not supported')\n\tif channel not in set({'1', '2'}):\n\t\traise ValueError('channel ' + str(channel) + ' not supported')\n\t\n\tif updown == 'up':\n\t\tout = 'outp'+channel+':puls:mode sin;'\n\t\tout += ':sour'+channel+':inv off;'\n\t\tout += ':sour'+channel+':volt:lev:imm:high '+high_voltage + ';'\n\t\tout += ':sour'+channel+':volt:lev:imm:low '+low_voltage + ';'\n\t\t#puls1 means the first pulse because we are in single mode\n\t\tout += ':sour'+channel+':puls1:wid '+pulsewidth + ';'\n\t\treturn out\n\telse:\n\t\tout = 'outp'+channel+':puls:mode sin;'\n\t\tout += ':sour'+channel+':inv on;'\n\t\tout += ':sour'+channel+':volt:lev:imm:low '+low_voltage + ';'\n\t\tout += ':sour'+channel+':volt:lev:imm:high '+high_voltage + ';'\n\t\t#puls1 means the first pulse because we are in single mode\n\t\tout += ':sour'+channel+':puls1:wid '+pulsewidth + ';'\n\t\treturn out",
"def read(self, timeout_ms=0):\n if not self.is_valid():\n return None\n buf = None\n if isinstance(self.__usb_dev, MpUsbApi):\n rx = bytearray(65)\n num_read = self.__usb_dev.MPUSBRead(self.__handle_read, rx, timeout_ms)\n if num_read:\n buf = bytearray(num_read)\n buf[:] = rx[:num_read]\n else:\n buf = self.__usb_dev.read(0x81, 64, 0)\n return buf\n #end read()",
"def read_spi_data_channel(channel):\n\n adc = spi.xfer2([1, (8+channel) << 4, 0])\n return ((adc[1] & 3) << 8) + adc[2]",
"def get_hsdpa_ack_meas(self, numSubframes=2000):\r\r\n\r\r\n # perform HSDPA ACK measurements\r\r\n\r\r\n loggerCmw = logging.getLogger('get_hsdpa_ack_meas')\r\r\n\r\r\n self.set_num_scheduled_subframes(numSubframes)\r\r\n\r\r\n # determine the timeout per measurement\r\r\n # work out approximate time for numSubframes\r\r\n # (numSubframes * 0.002 = 10)\r\r\n meas_timeout_sec = int ( (self.get_num_scheduled_subframes() * 0.002) + 10 )\r\r\n\r\r\n meas_sample_time_sec = 2\r\r\n\r\r\n self.conf_hsdpa_ack_meas(timeout_sec=meas_timeout_sec)\r\r\n\r\r\n self.write('INIT:WCDMA:SIGN:HACK')\r\r\n\r\r\n self.waitForCompletion()\r\r\n\r\r\n num_iter = 0\r\r\n\r\r\n NUM_ITER_MAX = int(math.ceil(meas_timeout_sec/meas_sample_time_sec))\r\r\n\r\r\n loggerCmw.info(\"Obtaining HSDPA measurements for instrument. Please be patient ...\")\r\r\n\r\r\n while ( num_iter < NUM_ITER_MAX ):\r\r\n\r\r\n num_iter += 1\r\r\n\r\r\n loggerCmw.debug(\"FETCHING HSDPA ACK MEAS: iteration %d of %d\" % (num_iter, NUM_ITER_MAX))\r\r\n\r\r\n state=self.read('FETCh:WCDMa:SIGN:HACK:STATe?')\r\r\n\r\r\n loggerCmw.debug(\"FETCH STATE : %s\" % state)\r\r\n\r\r\n if (state == 'RDY') :\r\r\n\r\r\n break\r\r\n\r\r\n loggerCmw.debug(\"Waiting for %02f [sec]... \" % meas_sample_time_sec)\r\r\n\r\r\n time.sleep(meas_sample_time_sec)\r\r\n\r\r\n if state == 'RDY':\r\r\n\r\r\n avgCqi_1_str=self.read('FETCh:WCDMa:SIGN:HACK:MCQI:CARRier1?')\r\r\n\r\r\n avgTput=self.read('FETCh:WCDMa:SIGN:HACK:TRACe:THRoughput:TOTal:AVERage?')\r\r\n\r\r\n avgCqi_1_list = avgCqi_1_str.split(',')\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n avgCqi_2_str=self.read('FETCh:WCDMa:SIGN:HACK:MCQI:CARRier2?')\r\r\n\r\r\n avgCqi_2_list = avgCqi_2_str.split(',')\r\r\n\r\r\n if avgCqi_1_list[0] == \"0\":\r\r\n\r\r\n # valid CQI measurement\r\r\n loggerCmw.debug('Median CQI, carrier 1 : %s' %avgCqi_1_list[1])\r\r\n\r\r\n self.set_medianCqi(carrier=1, val=avgCqi_1_list[1])\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n if avgCqi_2_list[0] == \"0\":\r\r\n\r\r\n # valid CQI measurement\r\r\n loggerCmw.debug('Median CQI, carrier 2 : %s' %avgCqi_2_list[1])\r\r\n\r\r\n self.set_medianCqi(carrier=2, val=avgCqi_2_list[1])\r\r\n\r\r\n hspda_stats_str_1 = self.read('FETCh:WCDMa:SIGN:HACK:THRoughput:CARRier1:ABSolute?')\r\r\n hack_meas_list_1 = hspda_stats_str_1.split(',')\r\r\n loggerCmw.debug('HSDPA ACK stats %s' %hack_meas_list_1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n hspda_stats_str_2 = self.read('FETCh:WCDMa:SIGN:HACK:THRoughput:CARRier2:ABSolute?')\r\r\n hack_meas_list_2 = hspda_stats_str_2.split(',')\r\r\n loggerCmw.debug('HSDPA ACK stats %s' %hack_meas_list_2)\r\r\n\r\r\n self.hsdpa_meas[0].set_results_list(hack_meas_list_1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_meas[1].set_results_list(hack_meas_list_2)\r\r\n\r\r\n\r\r\n self.get_ack_trans_meas(carrier=1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.get_ack_trans_meas(carrier=2)\r\r\n\r\r\n numMeasuredFrames = self.get_measured_subframes()\r\r\n\r\r\n if numMeasuredFrames == self.NO_MEASURED_FRAMES_STR:\r\r\n\r\r\n return 0\r\r\n\r\r\n self.set_hsdpa_measured_subframes(numMeasuredFrames)\r\r\n\r\r\n blerVal = self.get_instr_hsdpa_bler(carrier=1)\r\r\n\r\r\n self.set_hsdpa_bler(blerVal, carrier=1)\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n blerVal = self.get_instr_hsdpa_bler(carrier=2)\r\r\n\r\r\n self.set_hsdpa_bler(blerVal, carrier=2)\r\r\n\r\r\n\r\r\n return 1",
"def readData(self, duration_s, toV=True, maxAI=1024, maxV=5.0,\n updateFunc=None, nPntsUpdate=1, trigger=[]):\n isDone = False\n nLines = 0\n empty = numpy.array([])\n \n if self.rate_ms >= 0:\n nPnts = round(duration_s *1000.0 /self.rate_ms)\n print(\"{0:.3f} s duration = {1} samples\".format(duration_s, nPnts))\n else: \n nPnts = 100\n print(\"Sample rate invalid, {0} samples will be recorded\".format(nPnts))\n\n if not(self.isOpen): \n print(\"ERROR: Link not open\")\n return (-1, empty, empty, empty)\n \n # Create empty arrays for the data\n #\n np_data_0 = numpy.zeros([nPnts], dtype=float)\n np_data_1 = numpy.zeros([nPnts], dtype=float)\n np_data_t = numpy.zeros([nPnts], dtype=float)\n np_dt_ms = numpy.zeros([nPnts], dtype=float)\n \n # Attempt to read data\n #\n while not(isDone):\n # Read a line\n #\n (errC, parts) = self.__readLine()\n if not(errC == 0):\n return (errC, empty, empty, empty)\n \n else: \n np_data_0[nLines] = float(parts[2])\n np_data_1[nLines] = float(parts[3])\n np_data_t[nLines] = float(parts[0])/1000.0\n np_dt_ms[nLines] = float(parts[1]) /1000.0\n \"\"\"\n print(\"{0:.3f} {1} {2}\".format(int(parts[0])/1000.0, \n int(parts[2]), int(parts[3])))\n \"\"\"\n # Update plot of data, if requested\n #\n if updateFunc and ((nLines % nPntsUpdate) == 0):\n updateFunc(nLines, np_data_t[nLines], np_data_0[nLines], \n np_data_1[nLines])\n \n if nLines < (nPnts-1):\n sys.stdout.write(\"\\r{0:.0f}% {1:.3f} s: {2:.3f} {3:.3f} ...\"\n .format(nLines/float(nPnts) *100, \n np_data_t[nLines]/1000.0,\n np_data_0[nLines], np_data_1[nLines]))\n else: \n sys.stdout.write(\"\\r100% done\" +\" \"*40 +\"\\n\")\n \n nLines += 1\n isDone = (nLines == nPnts)\n \n print(\"SUCCESS\") \n print(\"{0} data points recorded\".format(nLines))\n print(\"Rate = {0:.3f} +/- {1:.3f} ms\".format(numpy.mean(np_dt_ms), \n numpy.std(np_dt_ms)))\n\n if toV: \n np_data_0 = np_data_0 /float(maxAI -1) *maxV\n np_data_1 = np_data_1 /float(maxAI -1) *maxV\n \n return (0, np_data_t, np_data_0, np_data_1)",
"def read_timed(self, buf: AnyWritableBuf, timer: Timer | int, /) -> None:",
"def MCP3008Driver(\n ch1, ch2, ch3, ch4, ch5, ch6, ch7, ch8,\n spi_clk, spi_ss_n, spi_miso, spi_mosi,\n clk25, rst_n):\n\n # Internal SPI signals\n i_clk = Signal(HIGH) # cpol=1\n i_ss_n = Signal(HIGH)\n i_rx = Signal(LOW)\n i_tx = Signal(LOW)\n\n # Use 17-bit words: we send the start bit (a 1), then the single-ended bit\n # (another 1), then 3 bits for the channel number, then 12 zeros. In the\n # response, only the 10 lower bits matter.\n word_size = 17\n\n # The 17-bit data to send:\n # - start bit (a 1)\n # - single-ended bit (a 1)\n # - 3 bits for the channel (most significant bit first)\n # - 12 padding bits (we send zeros)\n txdata = Signal(intbv(0)[word_size:])\n # and signal toggling when new txdata can be accepted\n txrdy = Signal(LOW)\n\n # The 17-bit data we get back\n # - 6 padding bits\n # - a null byte\n # - the 10-bit value\n rxdata = Signal(intbv(0)[word_size:])\n # and signal toggling when new rxdata is available\n rxrdy = Signal(LOW)\n\n # Controller driver\n SPISlave_inst = SPISlave(i_tx, i_rx, i_clk, i_ss_n,\n txdata, txrdy, rxdata, rxrdy,\n rst_n, n=word_size, cpol=1, cpha=1)\n\n @always_comb\n def CombinatorialLogic():\n \"\"\" Combinatorial logic \"\"\"\n spi_clk.next = i_clk\n spi_ss_n.next = i_ss_n\n spi_mosi.next = i_tx\n i_rx.next = spi_miso.next\n\n # Counter for 100kHz clock (counter must overflow twice per period)\n SPI_FREQ = 100000\n SPI_CNT_MAX = int(CLK_FREQ/(SPI_FREQ*2) - 1)\n\n # Counter for 30Hz read of each channel (so x8)\n READ_FREQ = 30\n READ_CNT_MAX = int(SPI_FREQ/(READ_FREQ*8) - 1)\n\n @instance\n def ADCProcess():\n state = t_State.IDLE\n channel = intbv(0)[3:]\n i_clk_cnt = intbv(0, min = 0, max = SPI_CNT_MAX + 1)\n read_cnt = intbv(0, min = 0, max = READ_CNT_MAX + 1)\n previous_rxrdy = LOW\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n state = t_State.IDLE\n read_cnt[:] = 0\n channel[:] = 0\n i_clk_cnt[:] = 0\n i_clk.next = HIGH\n i_ss_n.next = HIGH\n else:\n if state == t_State.IDLE:\n i_ss_n.next = HIGH\n i_clk.next = HIGH\n if read_cnt == READ_CNT_MAX:\n # Prepare command to read channel\n state = t_State.READING\n txdata.next = concat(HIGH, HIGH, channel, intbv(0)[12:])\n read_cnt[:] = 0\n else:\n # Wait before next read\n read_cnt += 1\n elif state == t_State.READING:\n # Select slave\n i_ss_n.next = LOW\n\n # Generate SPI clock\n if i_clk_cnt == SPI_CNT_MAX:\n i_clk_cnt[:] = 0\n i_clk.next = not i_clk\n else:\n i_clk_cnt += 1\n\n # When done\n if rxrdy != previous_rxrdy:\n # Set output\n if channel == 0:\n ch1.next = rxdata[10:]\n elif channel == 1:\n ch2.next = rxdata[10:]\n elif channel == 2:\n ch3.next = rxdata[10:]\n elif channel == 3:\n ch4.next = rxdata[10:]\n elif channel == 4:\n ch5.next = rxdata[10:]\n elif channel == 5:\n ch6.next = rxdata[10:]\n elif channel == 6:\n ch7.next = rxdata[10:]\n elif channel == 7:\n ch8.next = rxdata[10:]\n\n # Next time read next channel\n if channel == 7:\n channel[:] = 0\n else:\n channel += 1\n\n # Back to waiting\n state = t_State.IDLE\n previous_rxrdy = rxrdy.val\n\n return instances()",
"def send(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('spi.send(data, timeout=50000)\\r\\n'.encode('utf-8'))\n sleep(1)",
"def serial_read_close(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_SLRC, user_gpio, 0))",
"def read(self):\n\n clock = self.pins[\"clock\"]\n data = self.pins[\"data\"]\n\n # read bitstream\n self._req(True)\n for i in range(52):\n # wait for clock to go low\n while clock.value:\n continue\n\n self.bits[i] = data.value\n\n if i == 0: # deassert req after first bit read, so we only get one response\n self._req(False)\n\n # wait for clock to go up again\n while not clock.value:\n continue\n\n # assemble nibbles\n for n in range(13): # iterate over each nibble\n idx = n * 4\n self.nibbles[n] = (\n (self.bits[idx + 0] << 0)\n + (self.bits[idx + 1] << 1)\n + (self.bits[idx + 2] << 2)\n + (self.bits[idx + 3] << 3)\n )\n\n # parse preamble\n # TODO: check if this contains useful data.\n for n in range(4):\n if self.nibbles[n] != 15:\n return None # invalid data\n\n # sign\n if self.nibbles[4] != 0 and self.nibbles[4] != 8:\n return None # invalid data\n sign_pos = self.nibbles[4] == 0\n\n # convert bcd sequence to integer\n number = 0\n bcd = self.nibbles[5:11]\n for i in range(6):\n number += bcd[i] * (10 ** (5 - i))\n\n # decimal point\n number = number / 10 ** self.nibbles[11]\n\n # unit\n unit = self.UNITS.get(self.nibbles[12])\n\n value = number if sign_pos else -number\n if number == 0:\n value = 0.0 # don't like negative zeros.\n\n return self.Reading(value, unit)"
] | [
"0.6665904",
"0.6170786",
"0.6145323",
"0.61205214",
"0.6039192",
"0.6033334",
"0.59552085",
"0.5954513",
"0.58192575",
"0.5769836",
"0.5758939",
"0.56423473",
"0.5639176",
"0.5637689",
"0.56207716",
"0.56185025",
"0.5606741",
"0.5584944",
"0.55758065",
"0.5558758",
"0.55472636",
"0.55461574",
"0.55307984",
"0.5520326",
"0.54535794",
"0.5438853",
"0.542875",
"0.5400496",
"0.539793",
"0.5387793"
] | 0.75666636 | 0 |
encode convex hulls to network input format | def _encode_convex_hull(record):
max_encode_len = max(seq_len)
max_decode_len = max(seq_len) + 1 + 1
total_len = max_encode_len + max_decode_len
encoder_seq, hull = record
encoder_seq_len = len(encoder_seq)
# add new dimension for the [start] token
encoder_seq = [(0., *e) for e in encoder_seq]
# create decoder sequence
decoder_seq = [encoder_seq[i] for i in hull]
# insert [start] token
decoder_seq = [(1.0, 0., 0.)] + decoder_seq
decoder_seq = decoder_seq + [(0., 0., 0.)] * (max_decode_len - len(decoder_seq))
# pad encoder seq
pad_len = max_encode_len - encoder_seq_len
encoder_seq = [[0., 0., 0.]] * pad_len + encoder_seq
# create seq mask
seq_mask = [False] * pad_len
seq_mask = seq_mask + [True] * (total_len - len(seq_mask))
# input sequence to the network = encoder inputs + [start] + decoder inputs
input_seq = encoder_seq + decoder_seq
# network prediction
output = [pad_len + i for i in hull]
# [end] token is at `max_encode_len` position.
output = output + [max_encode_len]
output_mask = [True] * len(output) + [False] * (max_decode_len - len(output))
output = output + [0] * (max_decode_len - len(output))
return InputRecord(input_seq, seq_mask, output, output_mask) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]",
"def convex_hull(l):\n\tpass",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]",
"def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d",
"def construct_convex_hull(vertices: Sequence[Point]) -> Polyhedron:\n coords = np.zeros((len(vertices),3))\n for i,vertex in enumerate(vertices):\n coords[i,:] = vertex.coordinates\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n return polyhedron",
"def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull",
"def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))",
"def to_convex_hull(pred_segs):\n pred_masks = segs_to_masks(pred_segs)\n n_cl = pred_masks.shape[-1]\n n_rows = pred_masks.shape[0]\n \n masks_convex_hull = np.zeros(pred_masks.shape)\n for row in range(0, n_rows):\n for cl in range(1, n_cl):\n masks_convex_hull[row,...,cl] = morphology.convex_hull_image(pred_masks[row,...,cl])\n \n \n return np.argmax(masks_convex_hull , 3)",
"def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)",
"def encode_nodes(nodes):\n n = []\n for node in nodes:\n n.extend([node[0], dottedQuadToNum(node[1].host), node[1].port])\n return struct.pack(\"!\" + \"20sIH\" * len(nodes), *n)",
"def test_conv_full(self):\n\n points = np.array([[1, 4], [2, 1], [3, 2], [3, 3], [3, 5], [4, 2], [5, 1], [5, 3]]) # example of points \n \n cv_hull = convex_hull.convex_hull(points) # convex hull returned by the function \n\n right_conv_hull = np.array([[2, 1], [5, 1], [5, 3], [3, 5], [1, 4], [2, 1] ]) # right convex hull\n self.assertTrue((right_conv_hull == cv_hull).all())",
"def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)",
"def convex_hull(points):\n points = np.array(points)\n hull = ConvexHull(points)\n return points[hull.vertices, :]",
"def convex_hull(image):\n\n corners = find_corners(image)\n\n\n vertices = [corners[0]]\n\n for i in range(len(corners)):\n vertices.extend(\n _convex_hull_side(\n image, corners[i], corners[(i + 1) % len(corners)]))\n\n return vertices",
"def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)",
"def convex(points):\r\n if isinstance(points, np.ndarray):\r\n points = np.unique(points, axis=0)\r\n else:\r\n pts = []\r\n points = [pts.append(i) for i in points if i not in pts] # Remove duplicates\r\n del pts\r\n if len(points) <= 1:\r\n return points\r\n # Build lower hull\r\n lower = []\r\n for p in points:\r\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\r\n lower.pop()\r\n lower.append(p)\r\n # Build upper hull\r\n upper = []\r\n for p in reversed(points):\r\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\r\n upper.pop()\r\n upper.append(p)\r\n #print(\"lower\\n{}\\nupper\\n{}\".format(lower, upper))\r\n return np.array(lower[:-1] + upper) # upper[:-1]) # for open loop\r",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper",
"def give_convex_hull(rand_points):\n return ConvexHull(rand_points)",
"def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)",
"def convex_hull_model(self, _start='random', _direction='random'):\n steps = [{'Tour': [], 'Tourlength': 0}]\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n\n if nodes:\n # Step 1: Sketch the connections between adjacent boundary\n # points of the convex hull.\n # hull is a list of ids, not nodes,the hull is always generated CW\n hull = tsputil.convex_hull_helper(nodes)\n # Step 2: Select a starting point and a direction (randomly).\n # start is an id not a node\n startinfo = get_direction_and_start(nodes, _start, _direction)\n start = startinfo[0]\n # if direction is ccw ,reverse hull\n if not startinfo[1] == 1:\n hull.reverse()\n\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n\n # Step 3: If the starting point is on the boundary,\n # the starting node is the current node. \"\"\"\n if start in hull:\n # The arc connecting the current node to the adjacent boundary\n # node in the direc- tion of travel is referred to as the\n # current arc.\n cn_index = hull.index(start)\n current_node = hull[cn_index]\n # get adjacent node\n an_index = (cn_index + 1) % (len(hull))\n adjacent_node = hull[an_index]\n # Proceed immediately to Step 4.\"\"\"\n else:\n # If the starting point is not on the boundary, apply the\n # insertion rule to find the closest arc on the boundary. \"\"\"\n closest_arc = find_closest_arc(start, hull, nodes)\n # Connect the starting point to the end node of the closest\n # arc which is in the direction of travel.\n # This node becomes the current node.\"\"\"\n # insert startnode into hull\n hull.insert(hull.index(closest_arc[0]) + 1, start)\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n # update current arc nodes\n current_node = start\n adjacent_node = hull[hull.index(closest_arc[1])]\n # Step 4: Apply the insertion criterion to identify which\n # unconnected interior point is closest to the current arc.\n # repeat step 4 and 5 until all nodes are included in the path\n while len(hull) <= len(nodes):\n while True:\n current_arc = (current_node, adjacent_node)\n # find closest node not in the hull\n interior_node = find_closest_interior_node(current_arc, hull, nodes)\n # Apply the insertion criterion to check whether the\n # closest node is closer to any other arc.\n is_closer = is_closer_to_other_arc(interior_node, current_arc, hull, nodes)\n # If not, proceed to Step 5. If it is, move to the end node of\n # the current arc. This becomes the current node. Repeat\n # Step 4.\n if not is_closer:\n break\n else:\n current_node = current_arc[1]\n an_index = (hull.index(current_node) + 1) % (len(hull))\n adjacent_node = hull[an_index]\n # Step 5: Insert the closest node. The connection between the\n # current node and the newly inserted node becomes the current arc.\n # Retaining the current node, return to Step 4 and repeat Steps 4 and\n # 5 until a complete tour is obtained\"\"\"\n hull.insert(hull.index(current_node) + 1, interior_node)\n adjacent_node = interior_node\n steps.append(construct_step(hull, startinfo[2], startinfo[3], nodes, scale))\n\n self._datacontroller.commit_change('pathsteps', steps)\n self._datacontroller.commit_change('path', steps[-1])",
"def convexify(domain):\n\n if isinstance(domain, isl.BasicSet):\n return domain\n\n dom_bsets = domain.get_basic_sets()\n if len(dom_bsets) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n domain = domain.coalesce()\n\n dom_bsets = domain.get_basic_sets()\n if len(domain.get_basic_sets()) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n dom_bsets = domain.get_basic_sets()\n assert len(dom_bsets) > 1\n\n print(\"PIECES:\")\n for dbs in dom_bsets:\n print(\" %s\" % (isl.Set.from_basic_set(dbs).gist(domain)))\n raise NotImplementedError(\"Could not find convex representation of set\")",
"def encode_points(Plist):\n return '[' + ','.join([encode_point(P) for P in Plist]) + ']'",
"def hull(self):\n capacity = self._getAttribute(Attribute.hullCapacity)\n em = self._getAttribute(Attribute.hullEM)\n explosive = self._getAttribute(Attribute.hullExplosive)\n kinetic = self._getAttribute(Attribute.hullKinetic)\n thermal = self._getAttribute(Attribute.hullThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }",
"def create_hull(vertices):\n dt = np.dtype([('vertex', np.float64, (2,)),\n ('length', np.float64),\n ('is_processed', bool)])\n\n hull = np.empty(len(vertices), dtype=dt)\n for i, v in enumerate(vertices):\n j = 0 if i == len(vertices)-1 else i+1\n hull[i] = (v, dist(v, vertices[j]), False)\n\n return np.rec.array(hull)",
"def to_poly_file(self, filename):\n\n def getinsidepoint(pts):\n direct = (pts[0] + pts[1] + pts[2]) / 3 - pts[0]\n return pts[0] + 0.001 * direct\n\n if self.dim == 2:\n self.leaveonlyphysicalsurfaces()\n if self.dim == 3:\n self.leaveonlyphysicalvolumes()\n\n # write nodes\n nodes = []\n map = {}\n for x in self.d0.values():\n assert isinstance(x, point)\n nodes.append(x.getxyz())\n map[x.getn()] = len(nodes)\n\n\n s = \"# nodes\\n%d %d 0 0\\n\" % (len(nodes), self.dim)\n if self.dim == 2:\n ptstr = \" %d %f %f\\n\"\n ptstr2 = \" %d %f %f %d\\n\"\n else:\n ptstr = \" %d %f %f %f\\n\"\n ptstr2 = \" %d %f %f %f %d\\n\"\n\n for n, x in enumerate(nodes):\n s += ptstr % tuple([n + 1] + list(x[:self.dim]))\n\n # facets\n # first write external polygon, then hole polygons and then point in each\n # hole polygon\n facets = []\n if self.dim == 2:\n\n hole_pts = []\n regions=[]\n for x2 in self.d2.values():\n assert isinstance(x2, surface)\n for x1 in x2.getlines():\n assert isinstance(x1, line)\n p = [map[y.getn()] for y in x1.getpoints()]\n bc = self.getBCnum(x1.getn())\n facets.append((p, bc))\n\n for hole in x2.getholepoints():\n hole_pts.append(hole.getxyz())\n\n # regions\n for x in self.phys2.values():\n assert isinstance(x, physicalsurface)\n for x2 in x.getsurfaces():\n if not x2.is_hole:\n regions.append(x2.getinsidepoint().getxyz() + [x.getn()])\n\n # number of facets, boundary markers=yes\n s += \"# segments\\n%d 1\\n\" % len(facets)\n for ii, (p, bc) in enumerate(facets):\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s %d\\n\" % (ii + 1, ' '.join([str(ii) for ii in p]), bc)\n # holes\n s += \"# holes\\n%d\\n\" % len(hole_pts)\n for ii, x0 in enumerate(hole_pts):\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s\\n\" % (ii + 1, ' '.join([str(ii) for ii in x0]))\n # regions\n s += \"# regions\\n%d\\n\" % len(regions)\n for ii, x0 in enumerate(regions):\n s += \" %d %f %f %d\\n\" % tuple([ii + 1] + x0)\n\n if self.dim == 3:\n\n for x in self.d2.values():\n assert isinstance(x, surface)\n p = [map[y.getn()] for y in x.getpoints()]\n h = []\n pts = []\n for hole in x.getholepoints():\n h.append([map[y.getn()] for y in hole])\n pts.append(getinsidepoint(hole).getxyz())\n bc = self.getBCnum(x.getn())\n facets.append((p, bc, h, pts))\n # number of facets, boundary markers=yes\n s += \"# segments\\n%d 1\\n\" % len(facets)\n for p, bc, h, holes in facets:\n # number of polygons, # of holes, boundary marker\n s += \" %d %d %d\\n\" % (1 + len(h), len(h), bc)\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s\\n\" % (len(p), ' '.join([str(ii) for ii in p]))\n for x in h:\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s\\n\" % (len(x), ' '.join([str(ii) for ii in p]))\n for i, pt in enumerate(holes):\n # hole #, x, y, z\n s += ptstr % tuple([i + 1] + list(pt))\n\n # volume holes\n s += \"# holes\\n0\\n\"\n # regions\n regions=[]\n for x in self.phys3.values():\n assert isinstance(x, physicalvolume)\n for v in x.getvolumes():\n regions.append(v.getinsidepoint().getxyz()+[x.getn()])\n s += \"# regions\\n%d\\n\" % len(regions)\n for i, x in enumerate(regions):\n s += ptstr2 % tuple([i + 1] + list(x))\n\n open(filename, \"w\").write(s)",
"def __CalculateConvexHull(self, contour):\r\n return cv2.convexHull(contour)",
"def convex_pieces(self, config):\n # get volume\n orig_volume = self.mesh_.get_total_volume()\n \n # convert to off\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(self.obj_filename, self.off_filename) \n os.system(meshlabserver_cmd)\n logging.info('MeshlabServer OFF Conversion Command: %s' %(meshlabserver_cmd))\n\n if not os.path.exists(off_filename):\n logging.warning('Meshlab conversion failed for %s' %(off_filename))\n return\n \n # create convex pieces\n cvx_decomp_command = config['hacd_cmd_template'] %(self.off_filename,\n config['min_num_clusters'],\n config['max_concavity'],\n config['invert_input_faces'],\n config['extra_dist_points'],\n config['add_faces_points'],\n config['connected_components_dist'],\n config['target_num_triangles'])\n logging.info('CV Decomp Command: %s' %(cvx_decomp_command))\n os.system(cvx_decomp_command) \n\n # convert each wrl to an obj and an stl\n convex_piece_files = glob.glob('%s_dec_hacd_*.wrl' %(os.path.join(self.file_path_, self.file_root_)))\n convex_piece_meshes = []\n total_volume = 0.0\n\n for convex_piece_file in convex_piece_files:\n file_root, file_ext = os.path.splitext(convex_piece_file)\n obj_filename = file_root + '.obj'\n stl_filename = file_root + '.stl'\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(convex_piece_file, obj_filename) \n os.system(meshlabserver_cmd)\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(convex_piece_file, stl_filename) \n os.system(meshlabserver_cmd)\n\n of = obj_file.ObjFile(obj_filename)\n convex_piece = of.read()\n total_volume += convex_piece.get_total_volume()\n convex_piece_meshes.append(of.read())\n\n root = et.Element('robot', name=\"test\")\n\n # get the masses and moments of inertia\n effective_density = orig_volume / total_volume\n prev_piece_name = None\n for convex_piece, filename in zip(convex_piece_meshes, convex_piece_files):\n convex_piece.set_center_of_mass(np.zeros(3))\n convex_piece.set_density(self.mesh_.density * effective_density)\n \n # write to xml\n piece_name = 'link_%s'%(file_root)\n file_path_wo_ext, file_ext = os.path.splitext(filename)\n file_path, file_root = os.path.split(file_path_wo_ext)\n I = convex_piece.inertia\n link = et.SubElement(root, 'link', name=piece_name)\n\n inertial = et.SubElement(link, 'inertial')\n origin = et.SubElement(inertial, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n mass = et.SubElement(inertial, 'mass', value='%f'%convex_piece.mass)\n inertia = et.SubElement(inertial, 'inertia', ixx='%f'%I[0,0], ixy='%f'%I[0,1], ixz='%f'%I[0,2],\n iyy='%f'%I[1,1], iyz='%f'%I[1,2], izz='%f'%I[2,2])\n \n visual = et.SubElement(link, 'visual')\n origin = et.SubElement(visual, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n geometry = et.SubElement(visual, 'geometry')\n mesh = et.SubElement(geometry, 'mesh', filename=file_path_wo_ext+'.stl')\n material = et.SubElement(visual, 'material', name='')\n color = et.SubElement(material, 'color', rgba=\"0.75 0.75 0.75 1\")\n\n collision = et.SubElement(link, 'collision')\n origin = et.SubElement(collision, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\") \n geometry = et.SubElement(collision, 'geometry')\n mesh = et.SubElement(geometry, 'mesh', filename=file_path_wo_ext+'.stl')\n\n if prev_piece_name is not None:\n joint = et.SubElement(root, 'joint', name='%s_joint'%(piece_name), type='fixed')\n origin = et.SubElement(joint, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n parent = et.SubElement(joint, 'parent', link=prev_piece_name)\n child = et.SubElement(joint, 'child', 
link=piece_name)\n\n prev_piece_name = piece_name\n\n \"\"\"\n txt_filename = file_root + '.txt'\n f = open(txt_filename, 'w')\n f.write('mass: %f\\n' %(convex_piece.mass))\n f.write('inertia: ' + str(convex_piece.inertia) + '\\n')\n f.close()\n \"\"\"\n\n tree = et.ElementTree(root)\n tree.write('test.URDF')\n exit(0)\n\n return convex_piece_meshes",
"def concave_hull(hull:list, points:list, max_iterations:int=None, min_length_fraction:float=0, min_angle:float=90)->list:\n tweet.info(\"Creating concave hull; minimum side length {}% of average, minimum_angle {}\".format(min_length_fraction * 100, min_angle))\n test_points = set(points)\n ignore_points = []\n avg_sqr_distance = 0\n for k in range(0, len(hull)-1):\n avg_sqr_distance += point_sqr_distance(hull[k], hull[k+1])\n test_points.remove(hull[k])\n avg_sqr_distance /= len(hull) - 1\n min_sqr_length = avg_sqr_distance * (min_length_fraction ** 2) # since we get sqr_length, we square the fraction\n min_cosine = math.cos(math.radians(min_angle))\n \n while (max_iterations is None or max_iterations > 0) and test_points:\n selection, edge = select_longest_edge(hull, ignore_points, min_sqr_length)\n tweet.info(\"Considering edge {}; {} points left\".format(edge, len(test_points)))\n if selection is None:\n break\n selected_point = select_candidate_point(edge, test_points, hull, min_cosine)\n if selected_point is None:\n # This edge has no more candidate points, so we ignore it in the next pass\n ignore_points.append(edge[0])\n tweet.debug(\"No candidate point found.\")\n continue\n tweet.debug(\"Found point {}, inserting new edge.\".format(selected_point))\n if not max_iterations is None:\n max_iterations -= 1\n # We add the point into the concave hull\n hull.insert(selection + 1, selected_point)\n test_points.remove(selected_point)\n return hull",
"def test_get_convex_hull(self):\n\n these_vertex_indices = skeleton_lines._get_convex_hull(\n vertex_x_coords=VERTEX_X_COORDS[END_NODE_VERTEX_INDICES],\n vertex_y_coords=VERTEX_Y_COORDS[END_NODE_VERTEX_INDICES])\n\n expected_indices = numpy.linspace(\n 0, len(END_NODE_VERTEX_INDICES) - 1,\n num=len(END_NODE_VERTEX_INDICES), dtype=int)\n self.assertTrue(numpy.array_equal(\n these_vertex_indices, expected_indices))"
] | [
"0.58518094",
"0.5845417",
"0.57677484",
"0.57292026",
"0.5701482",
"0.5699452",
"0.5666843",
"0.5633451",
"0.5616808",
"0.5535729",
"0.5463705",
"0.54190964",
"0.5377508",
"0.5347325",
"0.5336038",
"0.5333922",
"0.5331519",
"0.5311888",
"0.5309981",
"0.53056556",
"0.52919686",
"0.5262881",
"0.52462167",
"0.5234855",
"0.52078533",
"0.51773936",
"0.51597416",
"0.51542705",
"0.51499087",
"0.5147442"
] | 0.6956518 | 0 |
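Note on the record above: the positive document relies on two module-level names it never defines, `seq_len` (the allowed encoder lengths) and `InputRecord` (a container for the four returned fields). The driver below is a minimal sketch only, assuming `_encode_convex_hull` from the record's document is in scope; the `seq_len` value, the `InputRecord` definition, and the sample points/hull are illustrative assumptions, not part of the dataset.

# Sketch only: seq_len, InputRecord and the sample data are assumed here;
# the dataset snippet defines neither.
from collections import namedtuple

seq_len = [5, 10]  # assumed set of possible encoder sequence lengths
InputRecord = namedtuple('InputRecord',
                         ['input_seq', 'seq_mask', 'output', 'output_mask'])

points = [(0.1, 0.2), (0.9, 0.1), (0.5, 0.8), (0.4, 0.4)]  # 2-D input points
hull = [0, 1, 2]  # indices of the hull vertices in traversal order

record = _encode_convex_hull((points, hull))
# total length = max_encode_len + max_decode_len = max(seq_len) + (max(seq_len) + 2)
assert len(record.input_seq) == 2 * max(seq_len) + 2
assert len(record.output) == max(seq_len) + 2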
Stacks are comprised of multiple hosts. Each host may be located in a different cloud account. This method returns a map of the underlying driver implementation and the hosts that are running in that account. host_ids (list); a list of primary keys for the hosts we're interested in. Returns (dict); each key is a provider driver implementation with a QuerySet value for the matching host objects | def get_driver_hosts_map(self, host_ids=None):
host_queryset = self.get_hosts(host_ids)
# Create an account -> hosts map
accounts = {}
for h in host_queryset:
accounts.setdefault(h.get_account(), []).append(h)
# Convert to a driver -> hosts map
result = {}
for account, hosts in accounts.items():
result[account.get_driver()] = host_queryset.filter(id__in=[h.id for h in hosts])
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_host_configs(self, hostids, **kwargs):\n host_configs = {}\n for hostid in hostids:\n host_configs[hostid] = self.get_host_config(hostid, **kwargs)\n return host_configs",
"def query_hosts(self, force=False):\n CACHE_KEY = 'salt-cloud-full-query'\n\n cached_result = cache.get(CACHE_KEY)\n\n if cached_result and not force:\n logger.debug('salt-cloud query result cached')\n result = cached_result\n else:\n logger.debug('salt-cloud query result not cached, retrieving')\n logger.info('get_hosts_info: {0!r}'.format(self))\n\n salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)\n result = salt_cloud.full_query()\n\n # Cache the result for a minute\n cache.set(CACHE_KEY, result, 60)\n\n # yaml_result contains all host information in the stack, but\n # we have to dig a bit to get individual host metadata out\n # of account and provider type dictionaries\n host_result = {}\n for host in self.hosts.all():\n account = host.get_account()\n provider = account.provider\n\n # each host is buried in a cloud provider type dict that's\n # inside a cloud account name dict\n\n # Grab the list of hosts\n host_map = result.get(account.slug, {}).get(provider.name, {})\n\n # Grab the individual host\n host_result[host.hostname] = host_map.get(host.hostname, None)\n\n return host_result",
"def map_int_ext_hosts(self):\n int_hosts = []\n ext_hosts = []\n dp_hosts = {self.dp_name(dp_index): ([], []) for dp_index in range(self.NUM_DPS)}\n for host_id, options in self.host_options.items():\n host = self.host_information[host_id]['host']\n if options.get('loop_protect_external', False):\n ext_hosts.append(host)\n int_or_ext = 1\n else:\n int_hosts.append(host)\n int_or_ext = 0\n for link in self.host_links[host_id]:\n dp_hosts[self.dp_name(link)][int_or_ext].append(host)\n return set(int_hosts), set(ext_hosts), dp_hosts",
"def get_host_providernets(self, context, host_uuid):\n result = {}\n # Retrieve the results as providernet names and then convert to\n # providernet id values.\n data = self._get_host_providernet_names(host_uuid)\n for uuid, body in six.iteritems(data):\n values = []\n for name in body['providernets']:\n providernet = self._get_plugin().get_providernet_by_name(\n context, name.strip())\n if providernet:\n values.append(providernet['id'])\n else:\n LOG.error((\"host {} is referencing \"\n \"non-existent provider network {}\").format(\n host_uuid, name.strip()))\n result[uuid] = {'providernets': values}\n return result",
"def get_host_interfaces(self, context, host_uuid):\n result = {}\n interfaces = self._get_cgtsclient().iinterface.list(host_uuid)\n for interface in interfaces:\n if interface.networktype != \"data\":\n continue\n providernets = interface.providernetworks\n result[interface.uuid] = {'uuid': interface.uuid,\n 'mtu': interface.imtu,\n 'vlans': '',\n 'network_type': interface.networktype,\n 'providernets': providernets}\n return result",
"def get_hosts(self, host_ids=None):\n if not host_ids:\n return self.hosts.all()\n return self.hosts.filter(id__in=host_ids)",
"def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))",
"def host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}",
"def get_host_mapping(cohesity_client):\n hosts = cohesity_client.network.list_hosts() or []\n for host in hosts:\n exported_res_dict[\"Hosts Mapping\"].append(host.ip)\n return hosts",
"def hosts(self) -> dict:\n return self._hosts",
"def get_host_ids(self):\n host_ids = []\n \n for node_id in self.nodes:\n if (isinstance(self.nodes[node_id], HostNode)):\n host_ids.append(node_id)\n \n return host_ids",
"def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret",
"def get_hosts(self):\n\n raise NotImplementedError",
"def select_host_ids():\n return IMPL.select_host_ids()",
"def _get_dump_chains_hosts(self):\n hosts = self.conf.get(\"hosts\")\n dump_chains = {}\n for datacenter, cluster, host, is_spm, is_up in hosts:\n if datacenter not in dump_chains:\n dump_chains[datacenter] = None\n if not is_up:\n continue\n if datacenter not in dump_chains:\n dump_chains[datacenter] = host\n elif is_spm:\n dump_chains[datacenter] = host\n\n for datacenter in dump_chains:\n logging.info(\"Data center %r volume chains to be collected by %r\",\n datacenter, dump_chains[datacenter])\n\n return dump_chains",
"def _get_host_placement_all(self, context):\n raise NotImplementedError('_get_host_placement_all not implemented')",
"def Hosts(self):\n if not self._hosts:\n hs = self._get_objects(vim.HostSystem)\n for h in hs:\n self._hosts[h.name] = h\n return self._hosts",
"def get_hosts_info(self):\n result = []\n index = 0\n while index < self.host_numbers:\n host = self.get_generic_host_entry(index)\n result.append({\n 'ip': host['NewIPAddress'],\n 'name': host['NewHostName'],\n 'mac': host['NewMACAddress'],\n 'status': host['NewActive']})\n index += 1\n return result",
"def get_hostkey_list(self):\n return self.hostkey",
"def hosts(self):\n hosts = set()\n for p, c in self.configs_:\n hosts.update(c.hosts())\n return tuple(hosts)",
"def hosts(self):\n return HostsTable(self.rpc, self.name)",
"def list(cls, context):\n db_hosts = cls.dbapi.host_list(context)\n return Host._from_db_object_list(\n db_hosts, cls, context)",
"def get_chunk_hosts_for_index_servers(self, host):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM index_server WHERE is_host = %s;\", (host,))\n results = cur.fetchall()\n\n temp = []\n for chunk in results:\n temp_dict = {}\n chunk_id = chunk['chunk_id']\n temp_dict['chunk_id'] = chunk_id\n temp_dict['hosts'] = {}\n temp_dict['hosts']['c_host'] = self.get_relation_for_chunk_id('crawler', chunk_id)[0]['c_host']\n temp_dict['hosts']['ib_host'] = self.get_relation_for_chunk_id('index_builder', chunk_id)[0]['ib_host']\n temp.append(temp_dict)\n cur.close()\n return temp\n except Exception as e:\n print(e)",
"def _get_ipv4_addresses(self, host: str) -> Dict[str, List[IPv4Address]]:\n if host == \"self\":\n command = \"show ip address\"\n elif host == \"peer\":\n command = \"failover exec mate show ip address\"\n\n show_ip_address = self.show(command)\n re_ip_addresses = RE_SHOW_IP_ADDRESS.findall(show_ip_address)\n\n results = {\n interface: [IPv4Interface(f\"{address}/{netmask}\")] for interface, address, netmask in re_ip_addresses\n }\n log.debug(\"Host %s: ip interfaces %s\", self.host)\n return results",
"def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass",
"def _parse_hosts(self):\n hosts = dict()\n for address, h_cfg in self.host_configs.items():\n formatted_address = eval(address)\n os_cfg, srv_cfg, proc_cfg = self._construct_host_config(h_cfg)\n value = self._get_host_value(formatted_address, h_cfg)\n hosts[formatted_address] = Host(\n address=formatted_address,\n os=os_cfg,\n services=srv_cfg,\n processes=proc_cfg,\n firewall=h_cfg[u.HOST_FIREWALL],\n value=value\n )\n self.hosts = hosts",
"def get_hosts(self):\n\n return sorted(self.host_data.keys())",
"def hosts():\n devices = app.inventory.keys()\n return jsonify({\"hosts\": sorted(devices)})",
"def get_instance_and_ip_list_for_stack_id(self,heatcln,stack_id):\n #Get the instance list for this stack\n resources = heatcln.resources.list(stack_id)\n instance_list = []\n ip_list = []\n \n for resource in resources:\n res_info = resource._info\n \n #Add those resources that are instances\n if res_info['resource_type'] == 'AWS::EC2::Instance':\n instance_list.append(resource)\n if res_info['resource_type'] == 'AWS::EC2::EIPAssociation':\n ip_list.append(resource)\n return instance_list,ip_list",
"def create_current_host_dict_playbook(self):\n\n host_dict = {\n 'no_access_hosts': self.module.params['no_access_hosts'],\n 'read_only_hosts': self.module.params['read_only_hosts'],\n 'read_only_root_hosts': self.module.params[\n 'read_only_root_hosts'],\n 'read_write_hosts': self.module.params['read_write_hosts'],\n 'read_write_root_hosts': self.module.params[\n 'read_write_root_hosts']\n }\n return host_dict"
] | [
"0.6461176",
"0.62735856",
"0.60457814",
"0.5975142",
"0.5896362",
"0.5873509",
"0.5828188",
"0.5788435",
"0.5750981",
"0.574665",
"0.57066274",
"0.56011957",
"0.55321527",
"0.55266124",
"0.55064696",
"0.54972166",
"0.54785377",
"0.5460819",
"0.54548657",
"0.54415405",
"0.54301745",
"0.54090047",
"0.540013",
"0.5389108",
"0.53817767",
"0.5357274",
"0.5350668",
"0.53470117",
"0.5303293",
"0.5297294"
] | 0.7679804 | 0 |
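Note on the record above: the grouping in `get_driver_hosts_map` is two-step, first bucketing hosts by cloud account and then re-keying the buckets by each account's driver. The framework-free sketch below reproduces that pattern with plain objects; the `Driver`/`Account`/`Host` stand-ins are assumptions for illustration, since the real method operates on Django models and returns QuerySets.

# Illustrative stand-ins; the actual code uses Django models and QuerySets.
from collections import namedtuple

Driver = namedtuple('Driver', ['name'])
Account = namedtuple('Account', ['name', 'driver'])
Host = namedtuple('Host', ['id', 'account'])

ec2 = Account('aws-prod', Driver('ec2'))
gce = Account('gcp-prod', Driver('gce'))
hosts = [Host(1, ec2), Host(2, ec2), Host(3, gce)]

by_account = {}
for h in hosts:
    by_account.setdefault(h.account, []).append(h)   # account -> hosts

driver_hosts = {acct.driver: [h.id for h in hs]      # driver -> host ids
                for acct, hs in by_account.items()}
print(driver_hosts)  # {Driver(name='ec2'): [1, 2], Driver(name='gce'): [3]}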
Quick way of getting all hosts or a subset for this stack. host_ids (list); list of primary keys of hosts in this stack. Returns (QuerySet); | def get_hosts(self, host_ids=None):
if not host_ids:
return self.hosts.all()
return self.hosts.filter(id__in=host_ids) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)",
"def get_hosts(self):\n\n raise NotImplementedError",
"def hosts(self):\n\n return self._get_list_field(\"hosts\", lambda x: HostSettingContext(x))",
"def hosts(self):\n\n return self._get_list_field(\"hosts\", lambda x: HostSettingContext(x))",
"def select_host_ids():\n return IMPL.select_host_ids()",
"def get_hostkey_list(self):\n return self.hostkey",
"def hosts(self):\n return self._hosts",
"def hosts(self):\n return self._hosts",
"def all_hosts(self):\n ...",
"def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]",
"def get_hosts(self):\n\n return sorted(self.host_data.keys())",
"def hosts(self):\n return tuple(self.hosts_)",
"def hosts(self, hosts):\n return self._set_list_field(\"hosts\", hosts)",
"def hosts(self) -> t.List[str]:\n if not self._hosts:\n self._hosts = self._get_db_hosts()\n return self._hosts",
"def getHosts(**options):\n return search.HostSearch.byOptions(**options)",
"def query_hosts(self, force=False):\n CACHE_KEY = 'salt-cloud-full-query'\n\n cached_result = cache.get(CACHE_KEY)\n\n if cached_result and not force:\n logger.debug('salt-cloud query result cached')\n result = cached_result\n else:\n logger.debug('salt-cloud query result not cached, retrieving')\n logger.info('get_hosts_info: {0!r}'.format(self))\n\n salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)\n result = salt_cloud.full_query()\n\n # Cache the result for a minute\n cache.set(CACHE_KEY, result, 60)\n\n # yaml_result contains all host information in the stack, but\n # we have to dig a bit to get individual host metadata out\n # of account and provider type dictionaries\n host_result = {}\n for host in self.hosts.all():\n account = host.get_account()\n provider = account.provider\n\n # each host is buried in a cloud provider type dict that's\n # inside a cloud account name dict\n\n # Grab the list of hosts\n host_map = result.get(account.slug, {}).get(provider.name, {})\n\n # Grab the individual host\n host_result[host.hostname] = host_map.get(host.hostname, None)\n\n return host_result",
"def get_hosts(self):\n\n hosts = self.client.service.getHosts()\n return hosts",
"def getHosts(self):\n raise \"not implemented\"",
"def hosts(self) -> List[str]:\n if self.head_host:\n return [self.head_host]\n else:\n return [replica.host for replica in self.pod_args['pods'][0]]",
"def hosts(self):\n return HostsTable(self.rpc, self.name)",
"def list(cls, context):\n db_hosts = cls.dbapi.host_list(context)\n return Host._from_db_object_list(\n db_hosts, cls, context)",
"def hgetall(self):\n return self._redis_client.hgetall(self.context)",
"def list_hosts():\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n res = hosts.get_all(db)\n res = {'list': res}\n return jsonify(res)",
"def get_host_ids(self):\n host_ids = []\n \n for node_id in self.nodes:\n if (isinstance(self.nodes[node_id], HostNode)):\n host_ids.append(node_id)\n \n return host_ids",
"def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']",
"def get_all_hosts(self, view='summary'):\n return self._get(endpoint='{}/hosts'.format(self.api_version),\n params=dict(view=view)).json()",
"def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret",
"def get_hosts(self):\n if self._scanned:\n return self._scanner.all_hosts()\n else:\n raise ScannerError(\"ERROR: A scan has not yet been conducted!\")",
"def hosts(self) -> dict:\n return self._hosts",
"def hostgroup_list(self):\n return self.ezx.get_hostgroup_list()"
] | [
"0.6386337",
"0.6371365",
"0.63523024",
"0.63523024",
"0.6272247",
"0.6215245",
"0.6201035",
"0.6201035",
"0.616472",
"0.6138865",
"0.6128013",
"0.6115967",
"0.6107518",
"0.60520285",
"0.6051724",
"0.60374975",
"0.6008605",
"0.6000928",
"0.599559",
"0.59938854",
"0.598613",
"0.59850436",
"0.5966846",
"0.5958985",
"0.5871151",
"0.58622104",
"0.5847653",
"0.57929605",
"0.57927436",
"0.5771648"
] | 0.6725173 | 0 |
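Note on the record above: `get_hosts` is a thin convenience wrapper; no `host_ids` means every host, otherwise only the rows whose primary key is in the list (`filter(id__in=...)`). The in-memory stand-in below mirrors that contract with plain lists so it can run outside Django; it is a sketch of the behaviour, not the project's actual model code.

# In-memory sketch of the get_hosts() contract; not the real Django model.
class FakeStack:
    def __init__(self, hosts):
        self._hosts = list(hosts)              # [(primary_key, hostname), ...]

    def get_hosts(self, host_ids=None):
        if not host_ids:                       # None or empty -> everything
            return list(self._hosts)
        wanted = set(host_ids)
        return [h for h in self._hosts if h[0] in wanted]

stack = FakeStack([(1, 'web-1'), (2, 'web-2'), (3, 'db-1')])
print(stack.get_hosts())        # all three hosts
print(stack.get_hosts([2, 3]))  # only primary keys 2 and 3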
Creates host objects on this Stack. If no arguments are given, then all hosts available based on the Stack's blueprint host definitions will be created. If arguments are given, then only `count` hosts for the given `host_definition` will be created. host_definition (BlueprintHostDefinition object); the host definition to use for creating new hosts. If None, all host definitions for the stack's blueprint will be used. count (int); the number of hosts to create. If None, all hosts will be created. backfill (bool); If True, then hosts will be created with hostnames that fill in any gaps if necessary. If False, then hostnames will start at the end of the host list. This is only used when `host_definition` and `count` arguments are provided. | def create_hosts(self, host_definition=None, count=None, backfill=False):
created_hosts = []
if host_definition is None:
host_definitions = self.blueprint.host_definitions.all()
else:
host_definitions = [host_definition]
for hostdef in host_definitions:
hosts = self.hosts.all()
if count is None:
start, end = 0, hostdef.count
indexes = range(start, end)
elif not hosts:
start, end = 0, count
indexes = range(start, end)
else:
if backfill:
hosts = hosts.order_by('index')
# The set of existing host indexes
host_indexes = set([h.index for h in hosts])
# The last index available
last_index = sorted(host_indexes)[-1]
# The set of expected indexes based on the last known
# index
expected_indexes = set(range(last_index + 1))
# Any gaps any the expected indexes?
gaps = expected_indexes - host_indexes
indexes = []
if gaps:
indexes = list(gaps)
count -= len(indexes)
start = sorted(host_indexes)[-1] + 1
end = start + count
indexes += range(start, end)
else:
start = hosts.order_by('-index')[0].index + 1
end = start + count
indexes = xrange(start, end)
# all components defined in the host definition
components = hostdef.formula_components.all()
# iterate over the host definition count and create individual
# host records on the stack
for i in indexes:
hostname = hostdef.hostname_template.format(
namespace=self.namespace,
index=i
)
kwargs = dict(
index=i,
cloud_image=hostdef.cloud_image,
blueprint_host_definition=hostdef,
instance_size=hostdef.size,
hostname=hostname,
sir_price=hostdef.spot_price,
state=Host.PENDING
)
if hostdef.cloud_image.account.vpc_enabled:
kwargs['subnet_id'] = hostdef.subnet_id
else:
kwargs['availability_zone'] = hostdef.zone
host = self.hosts.create(**kwargs)
account = host.cloud_image.account
# Add in the cloud account default security groups as
# defined by an admin.
account_groups = set(list(
account.security_groups.filter(
is_default=True
)
))
host.security_groups.add(*account_groups)
if account.create_security_groups:
# Add in the security group provided by this host definition,
# but only if this functionality is enabled on the account
security_group = SecurityGroup.objects.get(
stack=self,
blueprint_host_definition=hostdef
)
host.security_groups.add(security_group)
# add formula components
host.formula_components.add(*components)
for volumedef in hostdef.volumes.all():
self.volumes.create(
host=host,
snapshot=volumedef.snapshot,
hostname=hostname,
device=volumedef.device,
mount_point=volumedef.mount_point
)
created_hosts.append(host)
return created_hosts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n\n try:\n if \"hosts\" in self.jbody:\n hostnames = self.jbody[\"hosts\"]\n else:\n hostnames = [{\"hostname\": self.jbody[\"hostname\"]}]\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"HOSTS: Create {}\".format(\", \".join(\n [\n host[\"hostname\"] for host in hostnames\n ]\n )))\n\n try:\n hosts = []\n for hostname in hostnames:\n host = Host.create(self.session, hostname[\"hostname\"])\n hosts.append(host.to_dict(self.href_prefix))\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n self.session.commit()\n\n if len(hosts) == 1:\n json = hosts[0]\n self.created(\"/api/v1/hosts/{}\".format(hosts[0][\"hostname\"]), json)\n else:\n self.created(data={\"hosts\": hosts, \"totalHosts\": len(hosts)})\n\n log.info(\"HOST: Created {}\".format(\", \".join(\n [host[\"hostname\"] for host in hostnames]\n )))",
"def create(cls, host, **kwargs):\n\n new = cls.default_create(host)\n for key, value in kwargs.items():\n setattr(new, key, value)\n\n return new",
"def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret",
"def create_host(self, wwpns, hostname):\n\n if not wwpns or len(wwpns) == 0 or not hostname or len(hostname) == 0:\n ex_args = {'wwpns': wwpns,\n 'hostname': hostname}\n raise SVCCreateHostParameterError(**ex_args)\n\n ports = ':'.join(wwpns)\n # get the host shortname.\n hostname_str = hostname.split('.')[0]\n LOG.debug(\"enter: create_host(): wwpns=%(wwpns)s\"\n \" hostname=%(hostname)s\"\n % {'wwpns': ports, 'hostname': hostname_str})\n\n rand_id = str(random.randint(0, 99999999)).zfill(8)\n host_name = '%s-%s' % (self._hostname_prefix(hostname_str), rand_id)\n\n cmd = 'mkhost -name %(host_name)s -hbawwpn %(ports)s -force' % locals()\n\n output, err_output = self._svc_command(cmd)\n\n if err_output:\n # err_output should be a list type\n if isinstance(err_output, types.ListType):\n err_msg = err_output[0]\n else:\n err_msg = err_output\n err_code = err_msg.split()[0]\n\n if err_code and err_code == 'CMMVC6035E':\n # host has been defined on the storage, but we don't see it.\n # return None and ask caller to run cfgdev to relogin to SAN\n # and retry get_host_from_wwpns().\n return None\n\n msg = (_(\"create_host() failure cmd=%(cmd)s, error:%(err_output)s.\"\n \" Make sure host and storage are zoned properly and check\"\n \" SAN fabric connectivity\") % locals())\n\n LOG.exception(msg)\n ex_args = {'host_name': hostname_str,\n 'err_output': err_output}\n raise SVCCreateHostFailed(**ex_args)\n\n return host_name",
"def add_nodes(self, count=1):\n self.log.info('Adding %d nodes' % count)\n new_nodes = []\n Node.flavor = env_vars['client_flavor']\n for i in range(count):\n #check if cluster did not previously exist\n if i == 0 and len(self.all_nodes) == 0:\n # give a floating IPv4 to the first node only\n new_guy = Node(self.cluster_name, '', len(self.all_nodes)+1, create=True, IPv4=True)\n else:\n new_guy = Node(self.cluster_name, node_type=\"\", number=len(self.all_nodes)+1, create=True)\n self.all_nodes.append(new_guy)\n new_nodes.append(new_guy)\n self.save_cluster()\n for n in new_nodes:\n n.wait_ready()\n #inject host files to everybody\n n.inject_hostnames(self.get_hosts(private=True), delete=self.cluster_name)\n n.bootstrap()\n self.log.info(\"Node %s is live \" % new_guy.name)\n #inform all\n self.inject_hosts_files()",
"def set_hosts(self, host_list: t.List[str]) -> None:\n if isinstance(host_list, str):\n host_list = [host_list.strip()]\n if not isinstance(host_list, list):\n raise TypeError(\"host_list argument must be a list of strings\")\n if not all(isinstance(host, str) for host in host_list):\n raise TypeError(\"host_list argument must be list of strings\")\n # TODO check length\n if self.batch:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_hostlist(host_list)\n\n if self.launcher == \"lsf\":\n for db in self.dbnodes:\n db.set_hosts(host_list)\n else:\n for host, db in zip(host_list, self.dbnodes):\n if isinstance(db.run_settings, AprunSettings):\n if not self.batch:\n db.run_settings.set_hostlist([host])\n else:\n db.run_settings.set_hostlist([host])\n\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for i, mpmd_runsettings in enumerate(db.run_settings.mpmd):\n mpmd_runsettings.set_hostlist(host_list[i + 1])",
"def host_create(self,hostname,hostip,groupid,templateid):\n data = json.dumps(\n {\n \"jsonrpc\": \"2.0\",\n \"method\": \"host.create\",\n \"params\": {\n \"host\": hostname,\n \"interfaces\": [\n {\n \"type\": 1,\n \"main\": 1,\n \"useip\": 1,\n \"ip\": hostip,\n \"dns\": \"\",\n \"port\": \"10050\"\n }\n ],\n \"groups\": groupid, \n \"templates\":templateid,\n\n },\n \"auth\": self.authID,\n \"id\": 1 \n })\n res = self.get_data(data)['result']\n if (res !=0) and (len(res) != 0):\n return res\n else:\n return 0",
"def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass",
"def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))",
"def create(self, name, **kwargs):\n values = {'name': name}\n values.update(**kwargs)\n\n return self._create('/os-hosts', values, response_key='host')",
"def list(cls, context):\n db_hosts = cls.dbapi.host_list(context)\n return Host._from_db_object_list(\n db_hosts, cls, context)",
"def test_port_create_on_multiconnected_host(self):\n network, segments, subnets = self._create_test_segments_with_subnets(2)\n\n # This host is bound to multiple hosts\n self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost'),\n (segments[1]['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n self.deserialize(self.fmt, response)\n\n # multi segments supported since Antelope.\n self.assertEqual(webob.exc.HTTPCreated.code, response.status_int)",
"def default_create(cls, host):\n new = cls(host)\n return new",
"def esxi_host_count(self, esxi_host_count):\n\n self._esxi_host_count = esxi_host_count",
"def create_host_list(self):\n # Get first network address and add to list\n net_address = input('What is a network address you want to ping? ')\n self.hosts.append(net_address)\n\n # Find out if user wants to add more network addresses\n while True:\n add_another = input('Add another? (y/n) ')\n print()\n if add_another.lower() == 'n' or add_another.lower() == 'no':\n break\n elif add_another.lower() == 'y' or add_another.lower() == 'yes':\n net_address = input(\"What is a network address you want to ping? \")\n self.hosts.append(net_address)\n else:\n print(\"That is an invalid input.\")\n print()\n os.system('cls')",
"def build(self):\n\n LOG.debug('-' * 80)\n LOG.debug(\"build\")\n LOG.debug('-' * 80)\n for b in self._bridges:\n bridge = b['bridge']\n # TODO(tomohiko) Need to something when not bridge['provided']?\n if bridge['provided']:\n LOG.info('Skipped building bridge=%r', bridge)\n\n for h in self._hosts:\n host = h['host']\n if host.get('tunnel_zone'):\n tz_data = host.get('tunnel_zone')\n tzs = self._api.get_tunnel_zones()\n\n # Ensure that TZ exists\n tz = [t for t in tzs if t.get_name() == tz_data['name']]\n if tz == []:\n if is_vxlan_enabled():\n tz = self._api.add_vxlan_tunnel_zone()\n else:\n tz = self._api.add_gre_tunnel_zone()\n tz.name(tz_data['name'])\n tz.create()\n else:\n tz = tz[0]\n\n # Ensure that the host is in the TZ\n tz_hosts = tz.get_hosts()\n tz_host = filter(\n lambda x: x.get_host_id() == host['mn_host_id'],\n tz_hosts)\n if tz_host == []:\n tz_host = tz.add_tunnel_zone_host()\n tz_host.ip_address(tz_data['ip_addr'])\n tz_host.host_id(host['mn_host_id'])\n tz_host.create()\n\n\n if host['provided'] == True:\n LOG.info('Skipped building host=%r', host)\n else:\n #TODO(tomoe): when we support provisioning Midolman host with\n # this tool.\n pass\n interfaces = host['interfaces']\n\n futures = []\n for i in interfaces:\n iface = Interface(i['interface'], host)\n self._interfaces[(host['id'], i['interface']['id'])] = iface\n f = iface.create()\n futures.append(f)\n\n wait_on_futures(futures)\n\n LOG.debug('-' * 80)\n LOG.debug(\"end build\")\n LOG.debug('-' * 80)",
"def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))",
"def post_hosts(\n self,\n references=None, # type: List[models.ReferenceType]\n host=None, # type: models.HostPost\n authorization=None, # type: str\n x_request_id=None, # type: str\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> models.HostResponse\n kwargs = dict(\n host=host,\n authorization=authorization,\n x_request_id=x_request_id,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._hosts_api.api20_hosts_post_with_http_info\n _process_references(references, ['names'], kwargs)\n return self._call_api(endpoint, kwargs)",
"def create_hosting_device_resources(self, context, complementary_id,\n tenant_id, mgmt_nw_id,\n mgmt_sec_grp_id, max_hosted):\n pass",
"def createHost(self):\n self.createUser()\n self.user.host_for = [self.program.scope.key()]\n self.user.put()",
"def build_lhosts(self , sws , lhost_count):\n host_count = 0\n for sw in sws:\n for i in range(lhost_count):\n host_id = host_count + 1\n host = self.addHost('h%s' % host_id)\n self.addLink(sw, host)\n host_count += 1\n return host_count",
"def test_port_create_on_unconnected_host(self):\n network, segment, _subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,\n res['NeutronError']['type'])\n\n # Ensure that mapping the segment to other hosts doesn't trip it up\n self._setup_host_mappings([(segment['segment']['id'], 'otherhost')])\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,\n res['NeutronError']['type'])",
"def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count",
"def create_host(\n parent_objuuid: str,\n name: str = \"New Host\",\n objuuid: str = None\n ) -> Object:\n logging.info(name)\n\n inventory = Collection(\"inventory\")\n\n host = inventory.get_object(objuuid)\n\n host.object = {\n \"type\" : \"host\",\n \"parent\" : parent_objuuid,\n \"children\" : [],\n \"name\" : name,\n \"host\" : \"\",\n \"icon\" : \"/images/host_icon.png\",\n \"console\" : None,\n \"concurrency\" : 1,\n \"config\" : \"\",\n \"context\" : {\n \"delete\" : {\n \"label\" : \"Delete\",\n \"action\" : {\n \"method\" : \"delete node\",\n \"route\" : \"inventory/delete\",\n \"params\" : {\n \"objuuid\" : host.objuuid\n }\n }\n },\n \"edit\" : {\n \"label\" : \"Edit\",\n \"action\" : {\n \"method\" : \"edit host\",\n \"route\" : \"inventory/get_object\",\n \"params\" : {\n \"objuuid\" : host.objuuid\n }\n }\n },\n \"copy\" : {\n \"label\" : \"Copy\",\n \"action\" : {\n \"method\" : \"copy node\",\n \"route\" : \"inventory/copy_object\",\n \"params\" : {\n \"objuuid\" : host.objuuid\n }\n }\n }\n }\n }\n\n host.set()\n\n parent = inventory.get_object(parent_objuuid)\n parent.object[\"children\"] = inventory.find_objuuids(parent=parent_objuuid)\n parent.set()\n\n return host",
"def add_hosts_all_subnets(client, parsed_args):\n for host in client.hosts():\n if host.category == \"linux\":\n if host.server_id:\n EXISTING.append(host)\n if host.server_id is None:\n create_agentless(host, client, parsed_args)",
"def create(self, context=None):\n values = self.obj_get_changes()\n db_host = self.dbapi.host_create(context, values)\n self._from_db_object(context, self, db_host)",
"def hfp_create(handle, org_dn, name,\r\n blade_bundle_version=\"\",\r\n rack_bundle_version=\"\",\r\n ignore_comp_check=\"yes\",\r\n update_trigger=\"immediate\",\r\n mode=\"staged\",\r\n stage_size=\"0\",\r\n policy_owner=\"local\",\r\n descr=\"testdescr\"):\r\n\r\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import \\\r\n FirmwareComputeHostPack\r\n\r\n org = handle.query_dn(org_dn)\r\n if org is None:\r\n raise ValueError(\"Org '%s' does not exist\" % org_dn)\r\n\r\n mo = FirmwareComputeHostPack(parent_mo_or_dn=\"org-root\",\r\n name=name,\r\n blade_bundle_version=blade_bundle_version,\r\n rack_bundle_version=rack_bundle_version,\r\n ignore_comp_check=ignore_comp_check,\r\n update_trigger=update_trigger,\r\n mode=mode,\r\n stage_size=stage_size,\r\n policy_owner=policy_owner,\r\n descr=descr)\r\n handle.add_mo(mo, modify_present=True)\r\n handle.commit()\r\n\r\n return mo",
"async def launch_bftlist(test_name=\"unknown test\", n=N, f=F, args={}):\n generate_hosts_file(n)\n nodes = get_nodes()\n cmd = \". env/bin/activate && python3.7 main.py\"\n cwd = os.path.abspath(\".\")\n pids = []\n\n for node_id in nodes.keys():\n env = os.environ.copy()\n env[\"ID\"] = str(node_id)\n env[\"API_PORT\"] = str(4000 + node_id)\n env[\"NUMBER_OF_NODES\"] = str(n)\n env[\"NUMBER_OF_BYZANTINE\"] = str(f)\n env[\"NUMBER_OF_CLIENTS\"] = \"1\"\n env[\"HOSTS_PATH\"] = os.path.abspath(RELATIVE_PATH_FIXTURES_HOST)\n env[\"INTEGRATION_TEST\"] = test_name\n env[\"DEBUG\"] = \"1\"\n if \"FORCE_VIEW\" in args:\n env[\"FORCE_VIEW\"] = args[\"FORCE_VIEW\"]\n if \"ALLOW_SERVICE\" in args:\n env[\"ALLOW_SERVICE\"] = args[\"ALLOW_SERVICE\"]\n if \"FORCE_NO_VIEW_CHANGE\" in args:\n env[\"FORCE_NO_VIEW_CHANGE\"] = args[\"FORCE_NO_VIEW_CHANGE\"]\n if \"NON_SELF_STAB\" in args:\n env[\"NON_SELF_STAB\"] = \"1\"\n\n if \"BYZANTINE\" in args:\n if node_id in args[\"BYZANTINE\"][\"NODES\"]:\n env[\"BYZANTINE\"] = \"true\"\n env[\"BYZANTINE_BEHAVIOR\"] = args[\"BYZANTINE\"][\"BEHAVIOR\"]\n\n p = subprocess.Popen(cmd, shell=True, cwd=cwd, env=env)\n pids.append(p.pid)\n\n sec = os.getenv(\"INTEGRATION_TEST_SLEEP\")\n logger.info(\"Test suite sleeping, awaiting node startup\")\n await asyncio.sleep(int(sec) if sec is not None else 2)\n logger.info(\"Sleeping done, now resuming tests\")\n return pids",
"async def establish_hosts(self):\n scheme = self._config['scheme']\n hosts = self._config['hosts']\n port = self._config['port']\n for hostname in hosts:\n url = '{}://{}:{}/gremlin'.format(scheme, hostname, port)\n host = await driver.GremlinServer.open(\n url, self._loop, **dict(self._config))\n self._hosts.append(host)\n self._hostmap[hostname] = host",
"def handle_hosts(\n actapi: act.api.Act, content: Text, hosts: List[Text]\n) -> List[act.api.fact.Fact]:\n\n feeds_facts: List[act.api.fact.Fact] = []\n\n for host in hosts:\n (ip_type, ip) = act.api.helpers.ip_obj(host)\n\n chain = []\n\n chain.append(\n actapi.fact(\"connectsTo\").source(\"content\", content).destination(\"uri\", \"*\")\n )\n chain.append(\n actapi.fact(\"resolvesTo\").source(\"fqdn\", \"*\").destination(ip_type, ip)\n )\n chain.append(\n actapi.fact(\"componentOf\").source(\"fqdn\", \"*\").destination(\"uri\", \"*\")\n )\n\n feeds_facts += act.api.fact.fact_chain(*chain)\n\n return feeds_facts"
] | [
"0.4938579",
"0.48584768",
"0.47865078",
"0.47671604",
"0.47439453",
"0.47246405",
"0.4700411",
"0.46670032",
"0.45571178",
"0.45547783",
"0.44747478",
"0.43636113",
"0.43369415",
"0.42789087",
"0.4247567",
"0.4242754",
"0.4227977",
"0.4216803",
"0.42011896",
"0.42005447",
"0.41820562",
"0.41650286",
"0.41622856",
"0.4161303",
"0.41491115",
"0.4145433",
"0.4142787",
"0.40935582",
"0.40855825",
"0.40838292"
] | 0.8422698 | 0 |
Uses saltcloud to query all the hosts for the given stack id. | def query_hosts(self, force=False):
CACHE_KEY = 'salt-cloud-full-query'
cached_result = cache.get(CACHE_KEY)
if cached_result and not force:
logger.debug('salt-cloud query result cached')
result = cached_result
else:
logger.debug('salt-cloud query result not cached, retrieving')
logger.info('get_hosts_info: {0!r}'.format(self))
salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)
result = salt_cloud.full_query()
# Cache the result for a minute
cache.set(CACHE_KEY, result, 60)
# yaml_result contains all host information in the stack, but
# we have to dig a bit to get individual host metadata out
# of account and provider type dictionaries
host_result = {}
for host in self.hosts.all():
account = host.get_account()
provider = account.provider
# each host is buried in a cloud provider type dict that's
# inside a cloud account name dict
# Grab the list of hosts
host_map = result.get(account.slug, {}).get(provider.name, {})
# Grab the individual host
host_result[host.hostname] = host_map.get(host.hostname, None)
return host_result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_hosts(self):\n ...",
"def get_stacking_stacks_by_stack_id(self, stack_id, *, fields=None, **kwargs):\n function_endpoint = urljoin(self._baseurl, 'stacking/stacks/{stack_id}'.format(stack_id=stack_id))\n return self._call('GET', function_endpoint, **kwargs)",
"def get(self, request, *args, **kwargs):\n provider_metadata = request \\\n .QUERY_PARAMS \\\n .get('provider_metadata') == 'true'\n result = super(StackHostsAPIView, self).get(request, *args, **kwargs)\n\n if not provider_metadata or not result.data['results']:\n return result\n\n stack = self.get_object()\n query_results = stack.query_hosts()\n\n # TODO: query_results are highly dependent on the underlying\n # salt-cloud driver and there's no guarantee that the result\n # format for AWS will be the same for Rackspace. In the future,\n # we should probably pass the results off to the cloud provider\n # implementation to format into a generic result for the user\n for host in result.data['results']:\n hostname = host['hostname']\n host['provider_metadata'] = query_results[hostname]\n\n return result",
"def get_hosts(self, host_ids=None):\n if not host_ids:\n return self.hosts.all()\n return self.hosts.filter(id__in=host_ids)",
"def get_instance_and_ip_list_for_stack_id(self,heatcln,stack_id):\n #Get the instance list for this stack\n resources = heatcln.resources.list(stack_id)\n instance_list = []\n ip_list = []\n \n for resource in resources:\n res_info = resource._info\n \n #Add those resources that are instances\n if res_info['resource_type'] == 'AWS::EC2::Instance':\n instance_list.append(resource)\n if res_info['resource_type'] == 'AWS::EC2::EIPAssociation':\n ip_list.append(resource)\n return instance_list,ip_list",
"def get_stack_list_for_tenant(self,heatcln,tenant_id):\n #Build query to get stack id \n stack_filters = {\n 'stack_name': cfg.CONF.tdaf_rush_prefix+str(tenant_id),\n }\n stack_search = {\n 'filters': stack_filters,\n }\n stack_generator = heatcln.stacks.list(**stack_search)\n stack_list = []\n for stack in stack_generator:\n stack_list.append(stack)\n return stack_list",
"def check_all_hosts (self, repo_version_id, version_name):\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query1 = \"SELECT chm.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}';\".format(self.cluster_name)\n else:\n query1 = \"SELECT h.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}' JOIN hosts h ON chm.host_id = h.host_id;\".format(self.cluster_name)\n\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query2 = \"SELECT hv.host_name, hv.state FROM host_version hv WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n else:\n #query2 = \"SELECT hv.state,h.host_name FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n query2 = \"SELECT hv.state,h.host_name, hs.health_status,hs.agent_version,(h.total_mem/1024/1024) as total_mem_gb,(hs.available_mem/1024/1024) as available_mem_gb FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id JOIN hoststate hs ON h.host_id = hs.host_id WHERE hv.repo_version_id = {0} order by h.host_name;\".format(repo_version_id)\n # All cluster hosts\n host_names = set()\n self.cursor.execute(query1)\n rows = self.cursor.fetchall()\n if self.options.verbose:\n Logger.debug(query1 + \"\\n\")\n if rows and len(rows) > 0:\n host_names = set([row[0] for row in rows if len(row) == 1])\n Logger.debug(\"Hosts: {0}\".format(\", \".join(host_names)))\n\n host_name_to_state = {} # keys should be a subset of host_names\n hosts_with_repo_version_state_not_in_current = set()\n self.cursor.execute(query2 + \"\\n\")\n rows = self.cursor.fetchall()\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST(S) STATE\\t\")\n Logger.info(\"******************************************************************************************************************************************************\\n\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n Logger.info(\"State\\t\\tHostname\\t\\t\\t\\tHealth\\t\\tAgentVersion\\tTotalMemory\\tAvailableMemory\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n\n if rows and len(rows) > 0:\n for row in range(len(rows)):\n data = json.loads(rows[row][2])\n data1 = json.loads(rows[row][3])\n Logger.info(\"{0}\\t\\t{1}\\t\\t{2}\\t\\t{3}\\t\\t{4}\\t\\t{5}\".format(rows[row][0], rows[row][1], data[\"healthStatus\"], data1[\"version\"], rows[row][4], rows[row][5]))\n print (\"\\n\")\n Logger.debug(query2)\n if rows and len(rows) > 0:\n for row in rows:\n if len(row) == 6:\n host_name = row[1]\n state = row[0]\n host_name_to_state[host_name] = state\n if state.upper() != \"CURRENT\":\n hosts_with_repo_version_state_not_in_current.add(host_name)\n host_names_with_version = set(host_name_to_state.keys())\n host_names_without_version = host_names - host_names_with_version\n # Logger.info(\"\\t\\tHost(s) state Summary\")\n if len(host_names) > 0:\n if len(host_names_without_version) > 0:\n Logger.error(\"{0} host(s) do not have a Host Version for Repo Version {1}.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(host_names_without_version), version_name, \", \".join(host_names_without_version)))\n\n if 
len(hosts_with_repo_version_state_not_in_current) > 0:\n Logger.error(\"{0} host(s) have a Host Version for Repo Version {1} but the state is not CURRENT.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(hosts_with_repo_version_state_not_in_current), version_name, \", \".join(hosts_with_repo_version_state_not_in_current)))\n\n if len(host_names_without_version) == 0 and len(hosts_with_repo_version_state_not_in_current) == 0:\n Logger.info(\"Found {0} host(s) in the cluster, and all have a Host Version of CURRENT for \" \\\n \"Repo Version {1}. Things look good.\\n\".format(len(host_names), version_name))\n else:\n Logger.error(\"Make sure that all of these hosts are heartbeating, that they have the packages installed, the\\n\" \\\n \"hdp-select symlinks are correct, and that the services on these hosts have been restarated.\\n\")\n pass",
"def get(self, host_id):\n return self._get('/os-hosts/%s' % host_id, 'host')",
"def get_hmc_hosts(self, hmc_id):\n url = '%s/ibm-hmcs/%s/hosts' % (self.catalog['compute'], hmc_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['hosts']\n else:\n LOG.error('Get HMC hosts failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)",
"def get_hosts(self):\n\n raise NotImplementedError",
"def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass",
"def list(cls, context):\n db_hosts = cls.dbapi.host_list(context)\n return Host._from_db_object_list(\n db_hosts, cls, context)",
"def get_stacking_standalone_hosts(self, *, filter=None, Range=None, fields=None, **kwargs):\n function_endpoint = urljoin(self._baseurl, 'stacking/standalone_hosts')\n return self._call('GET', function_endpoint, **kwargs)",
"def get_hosts(\n hstuuid: str,\n hstuuids: List[str],\n grpuuids: List[str],\n inventory: Collection\n ):\n current = inventory.get_object(hstuuid)\n\n if \"type\" in current.object: # pylint: disable=too-many-nested-blocks\n if current.object[\"type\"] == \"host\":\n if hstuuid not in hstuuids:\n hstuuids.append(hstuuid)\n elif current.object[\"type\"] == \"host group\":\n for uuid in current.object[\"hosts\"]:\n nested = inventory.get_object(uuid)\n if \"type\" in nested.object:\n if nested.object[\"type\"] == \"host group\":\n if uuid not in grpuuids:\n grpuuids.append(uuid)\n get_hosts(uuid, hstuuids, grpuuids, inventory)\n elif nested.object[\"type\"] == \"host\":\n if uuid not in hstuuids:\n hstuuids.append(uuid)\n else:\n current.object[\"hosts\"].remove(uuid)\n current.set()\n nested.destroy()",
"def getHosts(self):\n raise \"not implemented\"",
"def index(self, req):\n LOG.info(\"List all the nova-compute hosts in the system\")\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n services = dbapi.service_get_all_compute_sorted(ctxt)\n # services looks like (Service(object), Decimal('0'))\n # must convert from Decimal('0') to int() because no JSON repr\n hosts = [{'name':srv[0].host,\n 'instanceCount':int(srv[1])}\n for srv in services]\n return {'hosts': hosts}",
"def hosts(self, hosts):\n return self._set_list_field(\"hosts\", hosts)",
"def _get_hosts_in_cluster(self, cluster_ref):\n result = self._session._call_method(\n vim_util, 'get_inner_objects', cluster_ref, 'host', 'HostSystem')\n with vutil.WithRetrieval(self._session.vim, result) as objects:\n return [obj.obj for obj in objects]",
"def _get_hosts_with_container(self, context, cluster):\n pass",
"def getHosts(**options):\n return search.HostSearch.byOptions(**options)",
"def get_all_hosts(self, view='summary'):\n return self._get(endpoint='{}/hosts'.format(self.api_version),\n params=dict(view=view)).json()",
"def get_list(self, ctxt,tenant_id):\n \n try:\n #Check in db if this tenant has an instanced Rush\n rt = db_api.rush_tenant_get_all_by_tenant(ctxt, tenant_id)\n result = {'result': True, 'rushes': []}\n for rtentry in rt:\n rush_entry = db_api.rush_stack_get(ctxt, rtentry.rush_id)\n \n #Update status with heat data (Rushstack DB can be out of sync with HEAT stack status)\n #Can be removed to improve query performance when status is CREATE_COMPLETE\n heatcln = heat.heatclient(cfg.CONF.tdaf_username, cfg.CONF.tdaf_user_password, cfg.CONF.tdaf_tenant_name)\n \n rush_stack_name = cfg.CONF.tdaf_rush_prefix+str(tenant_id)+\"-\"+str(rush_entry.name)\n stack_list = self.get_stack_list_for_tenant(heatcln,tenant_id)\n for stack in stack_list:\n stack_info = stack._info;\n if stack_info['stack_name'] == rush_stack_name:\n break\n \n if stack_info is not None and stack_info['stack_name'] == rush_stack_name:\n #Stack info first\n values = {'status':stack_info['stack_status']}\n db_api.rush_stack_update(ctxt, rtentry.rush_id, values)\n \n self.update_rush_endpointdata(ctxt,heatcln,rush_entry.stack_id,rtentry.rush_id)\n result['rushes'].append({'id': rush_entry.id, 'name': rush_entry.name, 'type': rush_entry.rush_type_id,\n 'endpoint':rush_entry.url, 'status': rush_entry.status})\n return result\n except Exception as e:\n return {'result': False, 'error': str(e)}",
"def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host",
"def get_hosts(enable_details: Optional[bool] = None,\n host_address: Optional[str] = None,\n host_name: Optional[str] = None,\n ids: Optional[Sequence[str]] = None,\n instance_id: Optional[str] = None,\n name_regex: Optional[str] = None,\n os_type: Optional[str] = None,\n output_file: Optional[str] = None,\n source: Optional[str] = None,\n source_instance_id: Optional[str] = None,\n source_instance_state: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHostsResult:\n __args__ = dict()\n __args__['enableDetails'] = enable_details\n __args__['hostAddress'] = host_address\n __args__['hostName'] = host_name\n __args__['ids'] = ids\n __args__['instanceId'] = instance_id\n __args__['nameRegex'] = name_regex\n __args__['osType'] = os_type\n __args__['outputFile'] = output_file\n __args__['source'] = source\n __args__['sourceInstanceId'] = source_instance_id\n __args__['sourceInstanceState'] = source_instance_state\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('alicloud:bastionhost/getHosts:getHosts', __args__, opts=opts, typ=GetHostsResult).value\n\n return AwaitableGetHostsResult(\n enable_details=pulumi.get(__ret__, 'enable_details'),\n host_address=pulumi.get(__ret__, 'host_address'),\n host_name=pulumi.get(__ret__, 'host_name'),\n hosts=pulumi.get(__ret__, 'hosts'),\n id=pulumi.get(__ret__, 'id'),\n ids=pulumi.get(__ret__, 'ids'),\n instance_id=pulumi.get(__ret__, 'instance_id'),\n name_regex=pulumi.get(__ret__, 'name_regex'),\n names=pulumi.get(__ret__, 'names'),\n os_type=pulumi.get(__ret__, 'os_type'),\n output_file=pulumi.get(__ret__, 'output_file'),\n source=pulumi.get(__ret__, 'source'),\n source_instance_id=pulumi.get(__ret__, 'source_instance_id'),\n source_instance_state=pulumi.get(__ret__, 'source_instance_state'))",
"def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']",
"def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]",
"def list(args, config):\n\n api = config['API']\n headers = {}\n if args.stack_name:\n headers = {'stack-name': args.stack_name} # put stack name in headers\n r = requests.get(api['list'], headers=headers) # send the GET request\n print('\\nThe following clusters exist:\\n{}\\n'.format(r.json()))\n return",
"def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret",
"def get_list_hosts(self, path, params):\n eth_src = params.get('eth_src')\n host = self._extract_url_base(path)\n reply = self._faucet_collector.get_list_hosts(host, eth_src)\n self._augment_state_reply(reply, path)\n return reply"
] | [
"0.6205435",
"0.6024829",
"0.60159945",
"0.59807694",
"0.595565",
"0.5935895",
"0.59275913",
"0.58967364",
"0.58842933",
"0.5843488",
"0.5838476",
"0.58124864",
"0.5785423",
"0.5756203",
"0.5662017",
"0.56044453",
"0.5555758",
"0.5528058",
"0.55251485",
"0.55106646",
"0.5470756",
"0.5458264",
"0.54370654",
"0.54262173",
"0.5414475",
"0.5403023",
"0.5389884",
"0.5381225",
"0.53759223",
"0.53651345"
] | 0.6475528 | 0 |
Given an array, it splits the array in two parts at index i | def split_i(array:list, i:int) -> (list, list):
if i==len(array)-1:
return array[i], array[:-1]
else:
pre = array[0:i]
post = array[i+1:]
l = pre + post
x = array[i]
return x, l | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_array(a):\n n = len(a)\n if n == 1:\n return a\n index = n // 2\n b = a[:index]\n c = a[index:]\n return b, c",
"def even_split(a, n):\n n = min(n, len(a)) # if less elements in array than chunks to output, change chunks to array length\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))",
"def split(a):\n return a[:len(a)//2],a[len(a)//2:]",
"def test_integer_split_2D_default(self):\n a = array([arange(10),arange(10)])\n res = array_split(a,3)\n desired = [array([arange(10)]),array([arange(10)]),array([])]\n compare_results(res,desired)",
"def Split(ar, size):\r\n return [ar[i:i + size] for i in range(0, len(ar), size)]",
"def split_array(array, size):\n arrays = []\n while len(array) > size:\n pice = array[:size]\n arrays.append(pice)\n array = array[size:]\n arrays.append(array)\n return arrays",
"def split(array, indexes):\n if isinstance(indexes, int):\n return list(array[0:indexes], array[indexes:])\n\n newarray = []\n offset = 0\n for idx in indexes:\n newarray.append(array[offset:idx])\n offset += idx\n if indexes[-1] < len(array):\n newarray.append(array[indexes[-1]:])\n return newarray",
"def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]",
"def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))",
"def spliter(temp,split1,split2):\n for x in range(len(temp)):\n if x<len(temp)/2:\n split1.append(temp[x])\n else:\n split2.append(temp[x])",
"def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret",
"def split(f):\n n = len(f)\n f0 = [f[2 * i + 0] for i in range(n // 2)]\n f1 = [f[2 * i + 1] for i in range(n // 2)]\n return [f0, f1]",
"def split_array(arr, num_of_splits):\n # TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7\n size = arr.shape[0]\n if size < num_of_splits:\n return [arr[i:i + 1] for i in range(size)]\n slice_len, rest = divmod(size, num_of_splits)\n div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))\n for index in range(num_of_splits)]\n slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]\n return slices",
"def ArraySplit(array_to_split, bucket_size):\n return [array_to_split[i:i+bucket_size]\n for i in xrange(0, len(array_to_split), bucket_size)]",
"def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))",
"def split(self, array):\n q, r = divmod(self.num_envs, self.num_workers)\n return [\n array[i * q + min(i, r) : (i + 1) * q + min(i + 1, r)]\n for i in range(self.num_workers)\n ]",
"def split(a):\r\n if len(a) % 2 != 0 or len(a[0]) % 2 != 0:\r\n raise Exception('Odd matrices are not supported!')\r\n \r\n length = len(a)\r\n mid = length // 2\r\n tLeft = [[a[i][j] for j in range(mid)] for i in range(mid)]\r\n bottom_left = [[a[i][j] for j in range(mid)] for i in range(mid, length)]\r\n\r\n tRight = [[a[i][j] for j in range(mid, length)] for i in range(mid)]\r\n bottom_right = [[a[i][j] for j in range(mid, length)] for i in range(mid, length)]\r\n\r\n return tLeft, tRight, bottom_left, bottom_right",
"def split(data):\n return data[:len(data) // 2], data[len(data) // 2:]",
"def test_integer_split_2D_default(self):\n a = np.array([np.arange(10), np.arange(10)])\n res = array_split(a, 3)\n tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),\n np.zeros((0, 10))]\n compare_results(res, tgt)\n assert_(a.dtype.type is res[-1].dtype.type)\n # perhaps should check higher dimensions",
"def split(data):\n data = sorted(data, key=lambda x: x[0])\n half = len(data)//2\n return (data[half][0]+data[half + 1][0])/2\n print(data)",
"def split(container, count):\n return [container[_i::count] for _i in range(count)]",
"def split(a, N):\n\n integ = int(len(a) / N)\n remain = int(len(a) % N)\n\n splitted = [a[i * integ + min(i, remain):(i + 1) * integ +\n min(i + 1, remain)] for i in range(N)]\n\n return splitted",
"def split(list):\n mid = len(list)//2\n left = list[:mid]\n right= list[mid:]\n\n return left,right",
"def partition(array, first, last):\n # partition up until final value\n pivot = array[last]\n i = first - 1\n\n for count in range(first, last):\n # split array\n if array[count] < pivot:\n i += 1\n # assign array positions\n array[i],array[count] = array[count],array[i]\n # reassign\n array[i+1],array[last] = array[last],array[i+1]\n return (i+1)",
"def split(list):\r\n \r\n mid = len(list)//2\r\n left = list[:mid]\r\n right = list[mid:]\r\n \r\n return left, right",
"def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]",
"def split(self, X):",
"def split_list(a_list):\n half = len(a_list)/2\n return a_list[:half], a_list[half:]",
"def split_array(array: np.ndarray, parts: int):\n\n if parts == -1:\n parts = array.size\n shape = array.shape\n possible_chunk_sizes = []\n # Generate all possible chunk sizes for the given array shape\n for chunk_size in product(*[range(1, shape[i] + 1) for i in range(len(shape))]):\n # Check if the number of chunks generated by the current chunk size is equal to the desired number of parts\n if np.prod(\n [shape[i] // chunk_size[i] + int(shape[i] % chunk_size[i] != 0) for i in range(len(shape))]) == parts:\n possible_chunk_sizes.append(chunk_size)\n # Sort the possible chunk sizes in ascending order of the sum of the squares of their dimensions\n possible_chunk_sizes.sort(key=lambda x: np.sum(np.array(x) ** 2)) # type: ignore\n if not possible_chunk_sizes:\n logging.warning(\"Could not divide the domain in %d parts. Trying with parts=%d.\", parts, parts - 1)\n return split_array(array=array, parts=parts - 1)\n selected_chunk_size = possible_chunk_sizes[0]\n\n chunks = []\n # Get the number of chunks for the first possible chunk size\n num_chunks = [shape[i] // selected_chunk_size[i] + int(shape[i] % selected_chunk_size[i] != 0) for i in\n range(len(shape))]\n indexes = [range(num_chunks[i]) for i in range(len(shape))]\n # Iterate over the chunks and append the corresponding slice of the array to the chunks list\n for indx in product(*indexes):\n current_slice = tuple(\n slice(selected_chunk_size[i] * indx[i], min(selected_chunk_size[i] * (indx[i] + 1), shape[i])) for i in\n range(len(shape)))\n chunks.append(array[current_slice])\n return chunks",
"def split(list):\n\n\tmid = len(list) // 2\n\tleft = list[:mid]\n\tright = list[mid:]\n\n\treturn left, right"
] | [
"0.7665456",
"0.7067053",
"0.68895733",
"0.6825084",
"0.6575467",
"0.6566739",
"0.656526",
"0.6551348",
"0.65445685",
"0.6517187",
"0.64508444",
"0.6435194",
"0.6357571",
"0.63475424",
"0.6298871",
"0.6265837",
"0.6259398",
"0.6205831",
"0.6199721",
"0.61938316",
"0.61762506",
"0.6161783",
"0.6144705",
"0.6133529",
"0.61235136",
"0.60693043",
"0.5963785",
"0.59274787",
"0.59078115",
"0.588165"
] | 0.77543867 | 0 |
Testing for proper beaker kwargs usage | def test_beaker_kwargs(self):
css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True)
from fixtures import beaker_container
self.assertEqual(beaker_container, beaker_kwargs)
css_source = stylesheet_link('/deep/a.css', '/b.css', combined=True, minified=True, beaker_kwargs={'foo': 'bar'})
from fixtures import beaker_container
beaker_kwargs.update({'foo': 'bar'})
self.assertEqual(beaker_container, beaker_kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))",
"def test_kwargs(self):\n kwargs = forge.kwargs\n assert isinstance(kwargs, forge._signature.VarKeyword)\n assert kwargs.name == 'kwargs'\n assert kwargs.converter is None\n assert kwargs.validator is None",
"def test_kwargs(self):\n user1 = User(email='[email protected]', password='1234', first_name='Jack', last_name='Off')\n self.assertTrue(hasattr(user1, \"email\"))\n self.assertTrue(hasattr(user1, \"password\"))\n self.assertTrue(hasattr(user1, \"first_name\"))\n self.assertTrue(hasattr(user1, \"last_name\"))",
"def test_star_kwargs():\n\n @type_checked\n def _run_test(nothing, special=None, going:int=12, on=\"here\", **kw:str):\n assert nothing == \"hello\"\n assert special == 50.12\n assert going == 1999\n assert on is True\n assert kw[\"other\"] == \"False\"\n assert kw[\"thing\"] == \"15\"\n\n _run_test(\"hello\", 50.12, going=\"1999\", on=True, other=False, thing=15)",
"def test_kwargs_not_false_positive(*args, **kwargs):\n 'Hello John Doe {0[0]}'.format(args)\n 'Hello {0[name]}'.format(kwargs)",
"def test_accepts_kwargs(self):\n self.Test.scope('foo', where='foo')\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def test_kwargs(self):\n def f(**kwargs):\n self.assertEqual(kwargs, {'spam': 'eggs'})\n\n kwargs = self.decode('\\n\\x0b\\x01\\tspam\\x06\\teggs\\x01')\n\n f(**kwargs)",
"def kwargs(kwargs):\n run_kwargs(kwargs)",
"def test_kwargs() -> None:\n\n @argcomb(a=\"b\")\n def f(**kwargs: Any) -> None:\n ...\n\n f(a=1, b=1)\n f(b=1, c=1)\n with pytest.raises(InvalidArgumentCombination):\n f(a=1)",
"def test_kwargs():\n client, server = make_queue_pairs('localhost')\n client.send_inputs(1, input_kwargs={'hello': 'world'})\n _, task = server.get_task()\n assert task.args == (1,)\n assert task.kwargs == {'hello': 'world'}",
"def _validate_kwargs(self, kwargs):\n pass",
"def test_kw_args_with_defaults():\n assert arguments.fun_opt_kw_params() == ('blue', 'red', 'yellow', 'orange')",
"def test_map_args_no_kwargs():\n pass",
"def test_kwargs(self):\n self.Test.default_scope(where='foo')\n self.assertEqual(self.Test.scoped().params['where'], ['foo'])",
"def test_single_keyword_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func(arg1='hello'), 'foo')",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def test_kwargs(self):\n\n @sync_performer\n def p(dispatcher, intent, extra):\n return extra\n\n dispatcher = lambda _: partial(p, extra=\"extra val\")\n result = sync_perform(dispatcher, Effect(\"foo\"))\n self.assertEqual(result, \"extra val\")",
"def test_defaults(self):\n vark = VarKeyword()\n name, fparam = self.assert_mapping_and_get_fparam(vark)\n assert name == 'kwargs'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata",
"def test_need_params(self):\n\n acme = ACMEAccount(client=self.client)\n # missing name, acme_server, org_id\n self.assertRaises(TypeError, acme.create)\n # missing acme_server, org_id\n self.assertRaises(TypeError, acme.create, \"name\")\n # missing org_id\n self.assertRaises(TypeError, acme.create, \"name\", \"acme_server\")",
"def __init__(self, **kwargs):\n self._kw = kwargs.pop('kw', None)\n super(Mocker, self).__init__(**kwargs)",
"def test_uparforvarg(self):",
"def test_arguments(arg=TestClass): # [used-before-assignment]\n return arg",
"def test_kwarg_nonbool():\n\n with pytest.raises(ValueError) as error:\n # because this happens in the wrap, but before the wrap, we don't need\n # a test function, we just have to not be None\n type_checked(func=False, debug=\"abc\")\n\n assert \"abc is not a valid config value.\" in error.value.args",
"def test_need_params(self):\n\n acme = ACMEAccount(client=self.client)\n # missing acme_id, name\n self.assertRaises(TypeError, acme.create)\n # missing name\n self.assertRaises(TypeError, acme.create, 1234)",
"def test_url_helper_kwarg():\n urlh = URLHelper()\n args = []\n kwargs = {\"foo\": \"bar\"}\n url = urlh.build_url(*args, **kwargs)\n assert url == \"https://archive.gemini.edu/jsonsummary/notengineering/NotFail/foo=bar\"",
"def test_validate_params(mocker, params):\n validate_params(**params)",
"def _check_kwargs(self):\n valid_kw = {\n 'hf_type': 'str',\n 'hierarchy': 'bool',\n 'smooth': 'bool',\n 'water_level': 'float',\n # Object modifier kw\n 'no_shadow': 'bool',\n 'no_image': 'bool',\n 'no_reflection': 'bool',\n 'inverse': 'bool',\n 'double_illuminate': 'bool',\n 'hollow': 'bool'\n }\n\n self._validate_kwargs(valid_kw)\n\n valid_types = [\n 'gif', 'tga', 'pot', 'png', 'pgm',\n 'ppm', 'jpeg', 'tiff', 'sys', 'function'\n ]\n self._checkKwargValue('hf_type', valid_types)",
"def test_vargs(self):",
"def test_ban_seed_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, seed=8)",
"def test_func(**kwargs: Dict[str, Any]) -> None:\n click.echo(json.dumps(kwargs))"
] | [
"0.7301403",
"0.72157216",
"0.69301623",
"0.6644783",
"0.65698075",
"0.65681475",
"0.65209633",
"0.6470704",
"0.6410706",
"0.6393279",
"0.6379091",
"0.6320997",
"0.6313386",
"0.6237101",
"0.6231931",
"0.621528",
"0.62060726",
"0.6202357",
"0.6099443",
"0.6098508",
"0.6097881",
"0.60786307",
"0.60528016",
"0.6035168",
"0.6012764",
"0.5983637",
"0.59618276",
"0.595829",
"0.5954338",
"0.59351844"
] | 0.7554935 | 0 |
Call first_move() after a good key was pressed. The good key will be saved in self.first_key | def wait_first_move(self):
self.env.keyboard.listen_once(self.catch_key_first, key_down) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_move(self):\n self.play_sound(self.first_key)\n self.make_blink()\n self.wait_second_move()",
"def key_handler(self, event):\n if event.type == pygame.KEYUP: \n self.done = True",
"def wait_second_move(self):\n self.qr_unregister()\n self.env.keyboard.listen_once(self.catch_key_second, key_down)",
"def handle_keys(self, maze, game_display, key):\n Drone.handle_keys(self, maze, game_display, key)\n if self.auto_flag:\n if self.state == DroneState.LAND and self.time_in_air > 0:\n self.state = DroneState.TAKE_OFF\n self.auto_move(maze=maze, game_display=game_display)\n if key[pygame.K_a]:\n self.auto_move(maze=maze, game_display=game_display)\n if key[pygame.K_d] and self.time_in_air > 0:\n self.auto_flag = True\n if key[pygame.K_s]:\n self.state = DroneState.LAND\n if key[pygame.K_w]:\n self.slam.show()\n return False",
"def second_move(self):\n self.play_sound(self.second_key)\n self.end_move()",
"def keydown(self, key):\n if key.keycode == KEY_MAP[\"up\"]:\n try:\n self._puzzle.update_puzzle(\"u\")\n self._current_moves += \"u\"\n except:\n print \"invalid move: up\"\n elif key.keycode == KEY_MAP[\"down\"]:\n try:\n self._puzzle.update_puzzle(\"d\")\n self._current_moves += \"d\"\n except:\n print \"invalid move: down\"\n elif key.keycode == KEY_MAP[\"left\"]:\n try:\n self._puzzle.update_puzzle(\"l\")\n self._current_moves += \"l\"\n except:\n print \"invalid move: left\"\n elif key.keycode == KEY_MAP[\"right\"]:\n try:\n self._puzzle.update_puzzle(\"r\")\n self._current_moves += \"r\"\n except:\n print \"invalid move: right\"\n self.draw()",
"def check_for_input(self):\n if self.state == 'resting':\n if self.keys[pg.K_UP]:\n self.begin_moving('up')\n elif self.keys[pg.K_DOWN]:\n self.begin_moving('down')\n elif self.keys[pg.K_LEFT]:\n self.begin_moving('left')\n elif self.keys[pg.K_RIGHT]:\n self.begin_moving('right')",
"def handle_key(self, key):\n direction = DIRECTIONS.get(key)\n if direction:\n self.move(direction)",
"def first_move(self, first_click_tile):\r\n\r\n self.is_new_game = False\r\n self.board.first_click(first_click_tile)\r\n self.timer.init_clock()",
"def check_for_input(self, keys):\n if keys[pg.K_SPACE]:\n if self.arrow.index == 0:\n self.next = c.TOWN\n self.game_data = pickle.load(open('save.p', 'rb'))\n elif self.arrow.index == 1:\n self.next = c.MAIN_MENU\n self.state = c.TRANSITION_OUT\n self.notify(c.CLICK2)",
"def handle_key(self, event):\n direction = DIRECTIONS.get(event.key)\n if direction:\n self.player.move(direction)\n self.check_collision()",
"def handle_keyboard_input(self):\n keys = pg.key.get_pressed()\n\n if (keys[K_UP]):\n self.grid.change_direction(Direction.up)\n if (keys[K_DOWN]):\n self.grid.change_direction(Direction.down)\n if (keys[K_LEFT]):\n self.grid.change_direction(Direction.left)\n if (keys[K_RIGHT]):\n self.grid.change_direction(Direction.right)\n if (keys[K_SPACE]):\n self.grid.snake.grow()\n if (keys[K_RIGHTBRACKET]):\n self.actions_per_second += 1\n if (keys[K_LEFTBRACKET]):\n self.actions_per_second -= 1\n if (keys[K_t]):\n self.is_training = True\n print(\"========================================================================\")\n print(\"Training: ON\")\n print(\"========================================================================\")\n if (keys[K_s]):\n self.is_training = False\n print(\"========================================================================\")\n print(\"Training: OFF\")\n print(\"========================================================================\")",
"def handle_movement_keydown(self, key):\n try:\n log.debug(f'pressed: {key}')\n if key == pygame.K_LEFT:\n self.walk_left()\n elif key == pygame.K_RIGHT:\n self.walk_right()\n elif key == pygame.K_DOWN:\n pass\n elif key == pygame.K_UP:\n pass\n elif key == pygame.K_SPACE:\n self.jump()\n self.keys_down[key] = True\n except AttributeError:\n log.info(\"you didn't pass a keyboard event!!\")",
"def check_keys(self):\n if self.holding_left:\n self.paddle.move_down()\n\n if self.holding_right:\n self.paddle.move_up()",
"def handle_movement_keyup(self, key):\n def _opposite_dir(key):\n return {pygame.K_LEFT: pygame.K_RIGHT,\n pygame.K_RIGHT: pygame.K_LEFT,\n pygame.K_UP: pygame.K_DOWN}[key]\n try:\n log.debug(f'released: {key}')\n self.keys_down[key] = False\n if key in {pygame.K_LEFT, pygame.K_RIGHT} and \\\n not(self.keys_down[_opposite_dir(key)]):\n self.stop_movement()\n log.debug(f'keys down: {self.keys_down}')\n except AttributeError:\n log.error(\"you didn't pass a keyboard event!!\")",
"def move(self, key):\n \n global last_time\n if (key == K_RIGHT):\n self.xMove = self.x_dist\n self.x_pos=self.xMove\n elif (key == K_LEFT):\n self.xMove = -self.x_dist\n self.x_pos+=self.xMove\n elif (key == K_UP):\n self.yMove = -self.y_dist\n self.y_pos+=self.yMove\n elif (key == K_DOWN):\n self.yMove = self.y_dist\n self.y_pos+=self.yMove\n self.rect = self.rect.move(self.xMove,self.yMove)",
"def getMove(player,first_move=False):\n while True: \n move = raw_input(\"MAKE YOUR MOVE: \").upper()\n\n # handle special commands\n if move == \"QUIT\" or move == \"Q\":\n wannaQuit()\n continue\n elif move == \"QUIT!\" or move == \"Q!\":\n if SAY_PROC:\n SAY_PROC.terminate()\n sys.exit()\n continue\n elif move == \"HELP\" or move == \"H\":\n help()\n continue\n elif move == \"HELP!\" or move == \"H!\":\n say(helpString())\n continue\n elif move == \"SHUTUP!\" or move == \"S!\":\n shutUp(fuck=True)\n continue\n elif move == \"SHUTUP\" or move == \"S\":\n shutUp()\n continue\n elif move == \"BOARD\" or move == \"B\":\n printBoard()\n continue\n elif move == \"CLEAR\" or move == \"C\": \n clearTerminal()\n printBoard()\n continue\n elif move == \"CLEAR!\" or move == \"C!\": # TODO board -> clear, you end up with a half drawn new board. clear again fixes this\n clearTerminalAndBuffer()\n printBoard()\n continue\n elif move == \"PASS\" or move == \"P\": \n if wannaPass():\n break\n else:\n continue\n elif move == \"PASS!\" or move == \"P!\": \n break\n \n # mostly used to catch blank lines or me typing ASFADS like an asshole\n if len(move) < 7:\n print \"That's too short to be a command.\"\n continue\n\n parts=move.split(\":\")\n if len(parts) != 3:\n print \"Can't find all the parts of the move command. Maybe you're missing/have too many \\\":\\\"?\"\n continue\n\n for item in parts:\n if len(item) == 0:\n print \"Found a blank command. Maybe you left in an extra \\\":\\\"?\"\n continue\n\n coords = parts[0].replace(\" \",\"\") # incase of space inbetween file and rank\n direction = parts[1].strip()\n word = parts[2].strip()\n\n if not coords[0].isalpha():\n print \"I don't know where to put your word (Bad file coord).\"\n continue\n\n if not coords[1:].isdigit():\n print \"I don't know where to put your word (Bad rank coord).\"\n continue\n\n x = gridCharToInt(coords[0])\n y = int(coords[1:]) - 1\n if 14 < x < 0 or 14 < y < 0:\n print \"Those aren't coords on the board. Valid Files are from A-O, valid Ranks are 1-15.\"\n continue\n\n if first_move:\n if x != 7 or y != 7:\n print \"The first move must start from the center (H8).\"\n continue\n\n #compact that command\n if direction == \"ACROSS\":\n direction = \"A\"\n elif direction == \"DOWN\":\n direction = \"D\"\n if direction != \"A\" and direction !=\"D\":\n print \"I don't know where to put your word (Across or Down?).\"\n continue\n \n score,placed_tiles = checkWords(x,y,direction,word,first_move)\n if not score: #error reporting is handling in check words\n continue\n else:\n for tile in placed_tiles:\n if not tile in player.rack:\n print \"You don't have the tiles to play that!\"\n continue\n player.rack.remove(tile)\n print player.name+\" scored \"+str(score)+\" on that last play!\"\n player.score+=score\n for tile in placed_tiles:\n player.rack.remove(tile)\n break #YAY",
"def updateKeys(self, _key):\n\t\tif _key == curses.KEY_UP or _key == curses.KEY_DOWN:\n\t\t\tself.switch()\n\t\telif _key == 261 or _key == 10: # Execute (Key RIGHT / ENTER)\n\t\t\treturn str(self.pointer.get())\n\t\treturn (50, _key)\t\t# send key back, to handle in main program",
"def perform_keyboard_actions(self):\n self.handle_keyboard_input()\n self.grid.next_frame()",
"def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False # moving right key released, stop moving\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False # moving left key released, stop moving",
"def keyPressed():\n global PLAY\n if (key == ' '):\n PLAY = not PLAY\n if (key == 'r'):\n init()",
"def handle_continuous_keys(self):\n shift = pygame.K_LSHIFT in self.held\n ctrl = pygame.K_LCTRL in self.held\n factor = 3 if shift else 1/3 if ctrl else 1\n for key in self.held:\n if not self.followmode:\n # if self.held_delay[key] == 0:\n if key in (pygame.K_w, pygame.K_UP): # up\n # self.canvas.move_offset(0, 5 * factor)\n self.canvas.move_focus(0, 5 * factor)\n elif key in (pygame.K_s, pygame.K_DOWN): # down\n # self.canvas.move_offset(0, -5 * factor)\n self.canvas.move_focus(0, -5 * factor)\n elif key in (pygame.K_d, pygame.K_RIGHT): # right\n # self.canvas.move_offset(-5 * factor, 0)\n self.canvas.move_focus(5 * factor, 0)\n elif key in (pygame.K_a, pygame.K_LEFT): # left\n # self.canvas.move_offset(5 * factor, 0)\n self.canvas.move_focus(-5 * factor, 0)\n if key in (pygame.K_e, pygame.K_KP_PLUS):\n self.canvas.zoom(2 * factor)\n elif key in (pygame.K_q, pygame.K_KP_MINUS):\n self.canvas.zoom(-2 * factor)\n for key in self.held:\n self.held_delay[key] = (self.held_delay[key] + 1) % 5",
"def _handle_keydown_event(self, key: int) -> (bool, bool):\n wants_to_quit = (True, True)\n finished_maze = (True, False)\n still_playing = (False, None)\n\n finished = False\n if key == K_UP:\n # Move the agent up\n finished = self._move(K_UP)\n elif key == K_DOWN:\n # Move the agent down\n finished = self._move(K_DOWN)\n elif key == K_LEFT:\n # Move the agent left\n finished = self._move(K_LEFT)\n elif key == K_RIGHT:\n # Move the agent right\n finished = self._move(K_RIGHT)\n elif key == K_ESCAPE:\n # User wants to quit\n return wants_to_quit\n\n if finished:\n return finished_maze\n else:\n return still_playing",
"def _check_keyup_event(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n if event.key == pygame.K_LEFT:\n self.ship.moving_left = False",
"def _move(self, direction) -> bool:\n if direction == K_UP:\n return self._move_up()\n elif direction == K_DOWN:\n return self._move_down()\n elif direction == K_LEFT:\n return self._move_left()\n elif direction == K_RIGHT:\n return self._move_right()\n else:\n raise ValueError(f\"This method is not equipped to handle the given key: {direction}\")",
"def handle_turn(cls, key):\n entered = str(key).replace(\"'\", \"\")\n\n if entered in ['a','s','d','w']:\n switcher = {\n 'w': cls.up,\n 's': cls.down,\n 'a': cls.left,\n 'd': cls.right,\n }\n switcher.get(entered)()\n cls.display_board(True)\n \n elif entered in cls.positions:\n cls.position = int(entered) - 1\n\n elif entered == 'Key.enter':\n row, col = cls.get_position_coords()\n if cls.board[row][col] == cls.empty:\n # Board will place an X or O on the number slot chosen\n cls.board[row][col] = cls.current_player\n\n # Check if the game has ended\n cls.is_game_over()\n\n # Flip to other player\n cls.flip_player()\n\n # Declare winner and clear board\n if(cls.winner):\n print(f'{cls.winner} wins!')\n input('Press enter to play again.')\n cls.clear_board()\n else:\n print(\"You can't go there. Asshole.\")",
"def move_player(self, pressed_keys):\n # Arrow-key movement\n if pressed_keys[K_UP]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_DOWN]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_LEFT]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_RIGHT]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n # WASD movement\n if pressed_keys[K_w]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_s]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_a]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_d]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n #Boundary\n if self.player.rect.left < 0:\n self.player.rect.left = 0\n if self.player.rect.right > self.board.screen_width:\n self.player.rect.right = self.board.screen_width\n if self.player.rect.top <= 0:\n self.player.rect.top = 0\n if self.player.rect.bottom >= self.board.screen_height:\n self.player.rect.bottom = self.board.screen_height",
"def update(self):\n keys = pygame.key.get_pressed() # Checks for an input by the user\n if keys[pygame.K_RIGHT]:\n king.move_right() # Moves right if the user presses the right key\n\n if keys[pygame.K_LEFT]:\n king.move_left() # Moves left if the user presses the left key",
"def handle_keys(self):\n handled = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n end_game()\n elif event.type == pygame.KEYDOWN:\n if handled and event.key in (pygame.K_DOWN, pygame.K_UP, pygame.K_LEFT, pygame.K_RIGHT):\n pygame.event.post(event)\n break\n if event.key == pygame.K_UP:\n self.snake.turn(self.snake.up)\n handled = True\n elif event.key == pygame.K_DOWN:\n self.snake.turn(self.snake.down)\n handled = True\n elif event.key == pygame.K_LEFT:\n self.snake.turn(self.snake.left)\n handled = True\n elif event.key == pygame.K_RIGHT:\n self.snake.turn(self.snake.right)\n handled = True",
"def keyCam(self, event):\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)"
] | [
"0.7691176",
"0.69349104",
"0.68719256",
"0.68288636",
"0.6766883",
"0.6606569",
"0.6452956",
"0.63939595",
"0.63457495",
"0.6345322",
"0.6335956",
"0.63320196",
"0.6313395",
"0.6303774",
"0.6273421",
"0.6273016",
"0.62193775",
"0.6212796",
"0.61779016",
"0.616101",
"0.61597776",
"0.6144229",
"0.613374",
"0.6130192",
"0.6117554",
"0.6105433",
"0.60978436",
"0.60915047",
"0.60716915",
"0.6060658"
] | 0.7749387 | 0 |
Call second_move() after a good key was pressed. The good key will be saved in self.second_key | def wait_second_move(self):
self.qr_unregister()
self.env.keyboard.listen_once(self.catch_key_second, key_down) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def second_move(self):\n self.play_sound(self.second_key)\n self.end_move()",
"def first_move(self):\n self.play_sound(self.first_key)\n self.make_blink()\n self.wait_second_move()",
"def wait_first_move(self):\n self.env.keyboard.listen_once(self.catch_key_first, key_down)",
"def handle_keys(self, maze, game_display, key):\n Drone.handle_keys(self, maze, game_display, key)\n if self.auto_flag:\n if self.state == DroneState.LAND and self.time_in_air > 0:\n self.state = DroneState.TAKE_OFF\n self.auto_move(maze=maze, game_display=game_display)\n if key[pygame.K_a]:\n self.auto_move(maze=maze, game_display=game_display)\n if key[pygame.K_d] and self.time_in_air > 0:\n self.auto_flag = True\n if key[pygame.K_s]:\n self.state = DroneState.LAND\n if key[pygame.K_w]:\n self.slam.show()\n return False",
"def key_handler(self, event):\n if event.type == pygame.KEYUP: \n self.done = True",
"def handle_movement_keyup(self, key):\n def _opposite_dir(key):\n return {pygame.K_LEFT: pygame.K_RIGHT,\n pygame.K_RIGHT: pygame.K_LEFT,\n pygame.K_UP: pygame.K_DOWN}[key]\n try:\n log.debug(f'released: {key}')\n self.keys_down[key] = False\n if key in {pygame.K_LEFT, pygame.K_RIGHT} and \\\n not(self.keys_down[_opposite_dir(key)]):\n self.stop_movement()\n log.debug(f'keys down: {self.keys_down}')\n except AttributeError:\n log.error(\"you didn't pass a keyboard event!!\")",
"def keydown(self, key):\n if key.keycode == KEY_MAP[\"up\"]:\n try:\n self._puzzle.update_puzzle(\"u\")\n self._current_moves += \"u\"\n except:\n print \"invalid move: up\"\n elif key.keycode == KEY_MAP[\"down\"]:\n try:\n self._puzzle.update_puzzle(\"d\")\n self._current_moves += \"d\"\n except:\n print \"invalid move: down\"\n elif key.keycode == KEY_MAP[\"left\"]:\n try:\n self._puzzle.update_puzzle(\"l\")\n self._current_moves += \"l\"\n except:\n print \"invalid move: left\"\n elif key.keycode == KEY_MAP[\"right\"]:\n try:\n self._puzzle.update_puzzle(\"r\")\n self._current_moves += \"r\"\n except:\n print \"invalid move: right\"\n self.draw()",
"def handle_key(self, event):\n direction = DIRECTIONS.get(event.key)\n if direction:\n self.player.move(direction)\n self.check_collision()",
"def handle_key(self, key):\n direction = DIRECTIONS.get(key)\n if direction:\n self.move(direction)",
"def move(self, key):\n \n global last_time\n if (key == K_RIGHT):\n self.xMove = self.x_dist\n self.x_pos=self.xMove\n elif (key == K_LEFT):\n self.xMove = -self.x_dist\n self.x_pos+=self.xMove\n elif (key == K_UP):\n self.yMove = -self.y_dist\n self.y_pos+=self.yMove\n elif (key == K_DOWN):\n self.yMove = self.y_dist\n self.y_pos+=self.yMove\n self.rect = self.rect.move(self.xMove,self.yMove)",
"def handle_movement_keydown(self, key):\n try:\n log.debug(f'pressed: {key}')\n if key == pygame.K_LEFT:\n self.walk_left()\n elif key == pygame.K_RIGHT:\n self.walk_right()\n elif key == pygame.K_DOWN:\n pass\n elif key == pygame.K_UP:\n pass\n elif key == pygame.K_SPACE:\n self.jump()\n self.keys_down[key] = True\n except AttributeError:\n log.info(\"you didn't pass a keyboard event!!\")",
"def check_for_input(self, keys):\n if keys[pg.K_SPACE]:\n if self.arrow.index == 0:\n self.next = c.TOWN\n self.game_data = pickle.load(open('save.p', 'rb'))\n elif self.arrow.index == 1:\n self.next = c.MAIN_MENU\n self.state = c.TRANSITION_OUT\n self.notify(c.CLICK2)",
"def updateKeys(self, _key):\n\t\tif _key == curses.KEY_UP or _key == curses.KEY_DOWN:\n\t\t\tself.switch()\n\t\telif _key == 261 or _key == 10: # Execute (Key RIGHT / ENTER)\n\t\t\treturn str(self.pointer.get())\n\t\treturn (50, _key)\t\t# send key back, to handle in main program",
"def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False # moving right key released, stop moving\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False # moving left key released, stop moving",
"def _check_keyup_event(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n if event.key == pygame.K_LEFT:\n self.ship.moving_left = False",
"def move_player(self, pressed_keys):\n # Arrow-key movement\n if pressed_keys[K_UP]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_DOWN]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_LEFT]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_RIGHT]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n # WASD movement\n if pressed_keys[K_w]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_s]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_a]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_d]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n #Boundary\n if self.player.rect.left < 0:\n self.player.rect.left = 0\n if self.player.rect.right > self.board.screen_width:\n self.player.rect.right = self.board.screen_width\n if self.player.rect.top <= 0:\n self.player.rect.top = 0\n if self.player.rect.bottom >= self.board.screen_height:\n self.player.rect.bottom = self.board.screen_height",
"def update_keypresses(self) -> bool:\n # Get all the number of keys\n keys = pygame.key.get_pressed()\n\n # If player 2 is not destroyed\n if not self.player2.is_destroyed():\n\n # Check player 2 keys\n if keys[K_LEFT]:\n self.player2.move_left()\n\n if keys[K_RIGHT]:\n self.player2.move_right()\n\n if keys[K_0]:\n self.player2.shoot()\n\n # Call the superclass update keypress\n return super().update_keypresses()",
"def test_o_second_move(self):\n board = [[None, None, None],\n [None, None, None],\n [None, x_char, None]]\n \n self.assertEqual(char_move_order(board), (o_char, x_char))",
"def key_down_char(self, key):\n # Used to check if Logic.[direction] worked\n done = None\n # Need to check if tuple or not\n # Python sometimes sends key as tuple or char\n if isinstance(key, tuple):\n if key[0] == '\\'a\\'':\n self.matrix, done = Logic.left(self.matrix)\n if key[0] == '\\'s\\'':\n self.matrix, done = Logic.down(self.matrix)\n if key[0] == '\\'d\\'':\n self.matrix, done = Logic.right(self.matrix)\n if key[0] == '\\'w\\'':\n self.matrix, done = Logic.up(self.matrix)\n else:\n if key == '\\'a\\'':\n self.matrix, done = Logic.left(self.matrix)\n if key == '\\'s\\'':\n self.matrix, done = Logic.down(self.matrix)\n if key == '\\'d\\'':\n self.matrix, done = Logic.right(self.matrix)\n if key == '\\'w\\'':\n self.matrix, done = Logic.up(self.matrix)\n\n if done:\n # Logic.[direction] worked = add new tile (game rules)\n self.matrix = Logic.add_tile(self.matrix)\n # NOT USED, record last move for potential back track\n # self.history_matrix.append(self.matrix)\n # ONLY used in UI\n # self.update_grid_cells()\n # done = False",
"def process_keychange(self):\n # Process up/down\n if self.up_pressed and not self.down_pressed:\n if self.physics_engine.is_on_ladder():\n self.player_sprite.change_y = PLAYER_MOVEMENT_SPEED\n elif (\n self.physics_engine.can_jump(y_distance=10)\n and not self.jump_needs_reset\n ):\n self.player_sprite.change_y = PLAYER_JUMP_SPEED\n self.jump_needs_reset = True\n arcade.play_sound(self.jump_sound)\n elif self.down_pressed and not self.up_pressed:\n if self.physics_engine.is_on_ladder():\n self.player_sprite.change_y = -PLAYER_MOVEMENT_SPEED\n\n # Process up/down when on a ladder and no movement\n if self.physics_engine.is_on_ladder():\n if not self.up_pressed and not self.down_pressed:\n self.player_sprite.change_y = 0\n elif self.up_pressed and self.down_pressed:\n self.player_sprite.change_y = 0\n\n # Process left/right\n if self.right_pressed and not self.left_pressed:\n self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED\n elif self.left_pressed and not self.right_pressed:\n self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED\n else:\n self.player_sprite.change_x = 0",
"def _check_keyup_events(self, event):\r\n if event.key == pg.K_RIGHT:\r\n self.ship.moving_right = False\r\n elif event.key == pg.K_LEFT:\r\n self.ship.moving_left = False",
"def _handle_keydown_event(self, key: int) -> (bool, bool):\n wants_to_quit = (True, True)\n finished_maze = (True, False)\n still_playing = (False, None)\n\n finished = False\n if key == K_UP:\n # Move the agent up\n finished = self._move(K_UP)\n elif key == K_DOWN:\n # Move the agent down\n finished = self._move(K_DOWN)\n elif key == K_LEFT:\n # Move the agent left\n finished = self._move(K_LEFT)\n elif key == K_RIGHT:\n # Move the agent right\n finished = self._move(K_RIGHT)\n elif key == K_ESCAPE:\n # User wants to quit\n return wants_to_quit\n\n if finished:\n return finished_maze\n else:\n return still_playing",
"def update(self):\n keys = pygame.key.get_pressed() # Checks for an input by the user\n if keys[pygame.K_RIGHT]:\n king.move_right() # Moves right if the user presses the right key\n\n if keys[pygame.K_LEFT]:\n king.move_left() # Moves left if the user presses the left key",
"def handleHeldKeys(self):\n self.keys = pygame.key.get_pressed()\n\n m.player1.runningForward = False\n m.player1.runningBackward = False\n\n if self.keys[pygame.K_RIGHT]: \n if m.player1.onGround:\n m.player1.moveForward()\n m.player1.runningForward = True\n self.flip = True\n elif self.keys[pygame.K_LEFT]:\n if m.player1.onGround:\n m.player1.moveBackward()\n m.player1.runningBackward = True\n self.flip = False\n\n\n if self.model.playernum == 2:\n m.player2.runningForward = False\n m.player2.runningBackward = False\n if self.keys[pygame.K_d]:\n if m.player2.onGround:\n m.player2.moveForward()\n m.player2.runningForward = True\n self.flip = False\n elif self.keys[pygame.K_a]:\n if m.player2.onGround:\n m.player2.moveBackward()\n m.player2.runningBackward= True\n self.flip = True",
"def handle_keyboard_input(self):\n keys = pg.key.get_pressed()\n\n if (keys[K_UP]):\n self.grid.change_direction(Direction.up)\n if (keys[K_DOWN]):\n self.grid.change_direction(Direction.down)\n if (keys[K_LEFT]):\n self.grid.change_direction(Direction.left)\n if (keys[K_RIGHT]):\n self.grid.change_direction(Direction.right)\n if (keys[K_SPACE]):\n self.grid.snake.grow()\n if (keys[K_RIGHTBRACKET]):\n self.actions_per_second += 1\n if (keys[K_LEFTBRACKET]):\n self.actions_per_second -= 1\n if (keys[K_t]):\n self.is_training = True\n print(\"========================================================================\")\n print(\"Training: ON\")\n print(\"========================================================================\")\n if (keys[K_s]):\n self.is_training = False\n print(\"========================================================================\")\n print(\"Training: OFF\")\n print(\"========================================================================\")",
"def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False",
"def check_keys(self):\n if self.holding_left:\n self.paddle.move_down()\n\n if self.holding_right:\n self.paddle.move_up()",
"def move_to_position2(self):",
"def _check_keydown_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.rocket.moving_right = True\n elif event.key == pygame.K_LEFT:\n self.rocket.moving_left = True\n elif event.key == pygame.K_UP:\n self.rocket.moving_up = True\n elif event.key == pygame.K_DOWN:\n self.rocket.moving_down = True\n elif event.key == pygame.K_q:\n sys.exit()",
"def handle_turn(cls, key):\n entered = str(key).replace(\"'\", \"\")\n\n if entered in ['a','s','d','w']:\n switcher = {\n 'w': cls.up,\n 's': cls.down,\n 'a': cls.left,\n 'd': cls.right,\n }\n switcher.get(entered)()\n cls.display_board(True)\n \n elif entered in cls.positions:\n cls.position = int(entered) - 1\n\n elif entered == 'Key.enter':\n row, col = cls.get_position_coords()\n if cls.board[row][col] == cls.empty:\n # Board will place an X or O on the number slot chosen\n cls.board[row][col] = cls.current_player\n\n # Check if the game has ended\n cls.is_game_over()\n\n # Flip to other player\n cls.flip_player()\n\n # Declare winner and clear board\n if(cls.winner):\n print(f'{cls.winner} wins!')\n input('Press enter to play again.')\n cls.clear_board()\n else:\n print(\"You can't go there. Asshole.\")"
] | [
"0.7825597",
"0.7296097",
"0.7028338",
"0.67003924",
"0.66919225",
"0.6691698",
"0.65923977",
"0.6526897",
"0.6478581",
"0.64533395",
"0.6391608",
"0.63787925",
"0.63162094",
"0.63137877",
"0.62750816",
"0.6266099",
"0.62504405",
"0.62090117",
"0.6191794",
"0.61795557",
"0.6166135",
"0.6165297",
"0.6164618",
"0.6157322",
"0.6155374",
"0.6148865",
"0.6125376",
"0.60809284",
"0.6080078",
"0.6067903"
] | 0.7450435 | 1 |
Respond to the second_key press. | def second_move(self):
self.play_sound(self.second_key)
self.end_move() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_second_move(self):\n self.qr_unregister()\n self.env.keyboard.listen_once(self.catch_key_second, key_down)",
"def goto_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the taken off message\n\tprint a2, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init2\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1",
"def takeoff_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the armed message\n\tprint a2, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init2[2]\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1",
"def whait_for_keys_press(prompt, key1, key2, key3, key4):\n print(prompt)\n while True:\n Key_pressed = curses.wrapper(main)\n #if Key_pressed != (-1): print(Key_pressed) # displays number of key\n if Key_pressed == key1:\n break\n if Key_pressed == key2:\n break\n if Key_pressed == key3:\n break \n if Key_pressed == key4:\n break \n time.sleep(0.1)\n return Key_pressed",
"def handle_key():\n \n digit_pressed = request.args.get('Digits', None)\n\n print \"handle-key. key: \" + str(digit_pressed)\n\n if digit_pressed == \"2\":\n resp = twilio.twiml.Response()\n # Dial (310) 555-1212 - connect that number to the incoming caller.\n resp.dial(\"12345678\")\n # If the dial fails:\n resp.say(\"The call failed, or the remote party hung up. Goodbye.\")\n \n return str(resp)\n \n elif digit_pressed == \"1\":\n resp = twilio.twiml.Response()\n resp.say(\"Record your shout after the tone. You have 3 seconds.\")\n resp.record(maxLength=\"3\", action=\"/handle-recording\")\n return str(resp)\n \n # If the caller pressed anything but 1, redirect them to the homepage.\n else:\n return redirect(\"/service\")",
"def handle_key(world: World, key: int):\r\n if key == ord(\"1\"):\r\n world[\"user choice\"] = \"santa\"\r\n elif key == ord(\"2\"):\r\n world[\"user choice\"] = \"reindeer\"\r\n elif key == ord(\"3\"):\r\n world[\"user choice\"] = \"snowman\"\r\n if key == ord(\"1\") or key == ord(\"2\") or key == ord(\"3\"):\r\n world[\"attempts\"] += 1\r\n assign_number(world, random_number())\r\n win_point(world)",
"def test_yankPopTwice(self):\n s = 'hello world'\n n = 5\n self.widget.buffer = s\n self.widget.cursor = n\n self.widget.killRing = ['last', 'second', 'first']\n self.widget.keystrokeReceived('\\x19', None)\n self.widget.keystrokeReceived('y', ServerProtocol.ALT)\n self.widget.keystrokeReceived('y', ServerProtocol.ALT)\n self.assertEqual(self.widget.buffer, s[:n] + 'last' + s[n:])\n self.assertEqual(self.widget.cursor, n + len('last'))\n self.assertEqual(self.widget.killRing, ['second', 'first', 'last'])",
"def keypress_callback():\n data = request.get_json()\n buttonid = data['buttonid']\n assert buttonid in pyautogui.KEYBOARD_KEYS, \"Unknown buttonid received\"\n pyautogui.press(buttonid)\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'}",
"def updateKeys(self, _key):\n\t\tif _key == curses.KEY_UP or _key == curses.KEY_DOWN:\n\t\t\tself.switch()\n\t\telif _key == 261 or _key == 10: # Execute (Key RIGHT / ENTER)\n\t\t\treturn str(self.pointer.get())\n\t\treturn (50, _key)\t\t# send key back, to handle in main program",
"def emulate_press(self, key_code, scan_code, value, timeval):\n scan_event = self.create_event_object(\n \"Misc\",\n 0x04,\n scan_code,\n timeval)\n key_event = self.create_event_object(\n \"Key\",\n key_code,\n value,\n timeval)\n return scan_event, key_event",
"def keypress(cls, _, key):\n return key",
"def slot_keypress(self, gox, (key)):\r\n pass",
"def _on_keyboard_down(self, keyboard, keycode, char, modifiers):\n\n print(f\"Keystroke: char={char}, code={keycode}, mods={modifiers}\")\n if keycode[0] == 27: # use the Escape key to toggle modes.\n self.toggle_speak_mode()\n elif self._speakmode == 'SAY_LETTERS':\n self.say_letter(keyboard, keycode, char, modifiers)\n else:\n self.say_word(keyboard, keycode, char, modifiers)\n return True",
"def handle_key():\n #Get the digit pressed by the user\n digit_pressed = request.values.get('Digits',None)\n if digit_pressed ==\"1\":\n resp = twilio.twiml.Response()\n resp.say(\"It's a trap!\")\n resp.play(\"http://demo.twilio.com/hellomonkey/monkey.mp3\")\n return str(resp)\n \n elif digit_pressed ==\"2\":\n resp = twilio.twiml.Response()\n resp.say(\"Record your howl after the tone for Claire please.\")\n resp.record(maxLength=\"30\",action = \"/handle-recording\")\n return str(resp)\n else: \n return redirect(\"/\")",
"def prepost_hook_two(self) -> None:\n self.poutput(\"two\")",
"def on_key_down(self, keyboard, keycode, text, modifiers):\n Logger.debug('KeyDown Event: Keycode[1] is \"{}\"'.format(keycode[1]))\n self.keysPressed.add(keycode[1])",
"def key_press(self):\n self.screen.nodelay(True)\n return self.screen.getch()",
"def test_wait_for_key_pressed(self, cpu):\n keys_to_test = [None] + range(0x0, 0xF+1)\n for key in keys_to_test:\n cpu.program_counter = 0\n cpu.opcode = 0xF00A\n cpu.V_register[0] = 0\n cpu.keyboard.key_down = key\n cpu.wait_for_key_pressed()\n cpu.program_counter += 2\n if key is None:\n assert(cpu.V_register[0] == 0)\n assert(cpu.program_counter == 0)\n else:\n assert(cpu.program_counter == 2)\n assert(cpu.V_register[0] == key)",
"def handle_keyrelease_event(event, labels):\n\tglobal current_user\n\tglobal current_mode\n\t\n\t(instruction_label, response_label, congrats_label) = labels\n\n\tif current_mode == \"number\":\n\t\tnum_char = str(event.char)\n\t\tif num_char in ['1','2','3','4','5','6','7']:\n\t\t\tpush_update(current_user, int(num_char))\n\t\t\tcongrats_label.temp_update(random.choice(messages), 1500)\n\t\t\tcurrent_mode = \"user\"\n\t\t\tinstruction_label.update(\"Please enter user character...\")",
"def keypress(key):\n k = PyKeyboard()\n if key == 'enter':\n key = k.return_key\n k.tap_key(key)",
"def keypress(self, event):\n events = {\n '1': lambda: self.slot.set(1),\n '2': lambda: self.slot.set(2),\n '6': lambda: self.digits.set(6),\n '8': lambda: self.digits.set(8),\n }\n try:\n events[event.keysym]()\n except KeyError:\n pass\n if event.keysym in ('1', '2', 'Return', 'Enter'):\n self.get_totp()\n self.root.wm_withdraw()",
"def on_key(self, _window, key, _scancode, action, _mods):\n is_press = action == glfw.PRESS or action == glfw.REPEAT\n if is_press and (key == glfw.KEY_ESCAPE or key == glfw.KEY_Q):\n glfw.set_window_should_close(self.window, True)\n\n if action != glfw.REPEAT:\n self.key_handler(key, is_press)",
"def key_wait():\n while 1:\n for event in get():\n if event.type == 'KEYDOWN':\n return event\n if event.type == 'QUIT':\n # convert QUIT into alt+F4\n return KeyDown('F4', '', True, False, True, False, False)\n _time.sleep(.001)",
"def _on_key_press(self, event):",
"def wait_first_move(self):\n self.env.keyboard.listen_once(self.catch_key_first, key_down)",
"def notify(self, sender, key, key2=b'\\x00'):\r\n\r\n EventListener.notify(self, sender, KeyPressEventArgs(key, key2))",
"def key_press_callback(data):\n global D\n message = data.data # that's the string\n D.last_keypress = message\n # we'll handle stuff here...\n k = D.last_keypress\n\n if k in ' ': \n D.robot_publisher.publish( \"toggle commands\" ) # Wow!\n if k in 'W': # 'W' goes to the waiting state\n D.robot_publisher.publish( \"D.tank(0,0)\" ) # Yay, Python!\n D.STATE = \"WAITING_TO_START\" # back to waiting to start",
"def cb_key_pressed(data, signal, signal_data):\n global last_signal_time\n last_signal_time = time.time()\n if signal_data == \"\\x01[\":\n # In 50ms, check if any other keys were pressed. If not, it's Esc!\n weechat.hook_timer(50, 0, 1, \"cb_check_esc\",\n \"{:f}\".format(last_signal_time))\n return weechat.WEECHAT_RC_OK",
"def first_move(self):\n self.play_sound(self.first_key)\n self.make_blink()\n self.wait_second_move()",
"def _on_key_release(self, event):"
] | [
"0.6041951",
"0.58219993",
"0.57078713",
"0.567378",
"0.55835176",
"0.55665016",
"0.55074126",
"0.55048704",
"0.5473432",
"0.5445047",
"0.5436347",
"0.54057235",
"0.5393213",
"0.5380113",
"0.53439146",
"0.5318712",
"0.5311537",
"0.52885514",
"0.5287942",
"0.5277704",
"0.5264641",
"0.52465343",
"0.5233179",
"0.52308685",
"0.5224725",
"0.5211793",
"0.5207816",
"0.5171054",
"0.5165578",
"0.5153062"
] | 0.63019663 | 0 |
Plot a histogram of the p-norms of the solutions | def plot_p_norm(p=2, bins=500):
plt.title(f"{p}-norms of solutions for lattice point quaternion polynomials")
plt.hist([sum(abs(x)**p for x in abcd)**(1/p) for abcd in solutions], bins=bins) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_imag_p_norm(p=2, bins=500):\n plt.title(f\"{p}-norms of imaginary parts of solutions to polynomials with quaternion coefficients\")\n plt.hist([sum(abs(x) ** p for x in abcd[1:]) ** (1 / p) for abcd in solutions], bins=bins)",
"def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def plot_histogram(self,**kwargs):\n axes = []\n for i in range(self.score_length):\n fig = plt.figure()\n scores = np.array([s[i] for s in self.scores_list])\n probs,bins,patches = plt.hist(scores,label=\"Sample {}\".format(self.labels[i]), **kwargs)\n plt.vlines(self.xhat,fig.get_axes().get_ylim(),label='Mean',color='r')\n plt.legend()\n axes.append(fig.get_axes())\n return axes",
"def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()",
"def plot_histogram(beta=3):\n m = 10 ** beta\n\n # generate m normal random variables of 100 points each\n X = np.random.randn(100, m)\n\n # take the maximum along the rows\n Z = np.max(X, axis=1)\n\n # plot the pdf with a gaussian kernel density estimate\n plt.subplot(121)\n sns.distplot(Z, kde=True)\n plt.title(r'Histogram of Z for $\\beta$ = {}'.format(beta))\n\n # plot the cdf and find t in relation with Q3)\n plt.subplot(122)\n plt.hist(Z, bins=25, normed=True, cumulative=True)\n plt.title(r'P[Z $\\leq$ t]$\\geq$0.9 for t$\\geq$%0.4f' % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the inverse cdf' % (norm.ppf(0.9 ** (1/m))))\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the Chernoff bounding method'\n % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n # save the plot to file & show the plot\n plt.savefig('histogram_beta_{}.png'.format(beta))\n\n plt.show()",
"def show_bryant(data_length=200000, bins=100):\n\n data = np.random.normal(0,1,data_length)\n plt.hist(data, bins)\n plt.show()",
"def test_uniform(self):\r\n\r\n s = np.random.uniform(-1.35, 0.5, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def plot_histogram(self,ax=None,**kwargs):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n probs,bins,patches = ax.hist(self.scores_list,normed=True,label=\"Sample\",**kwargs)\n ax.vlines(self.xhat,*ax.get_ylim(),label='Mean',color='r')\n ax.legend()\n return ax,probs,bins",
"def test_triangular(self):\r\n s = np.random.triangular(-1.65, 0.08, 0.08, 5000)\r\n plt.hist(s, bins=30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig",
"def display(self, bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()",
"def plot_sample_distribution(samples):\n plt.hist(samples, 50)\n plt.xlabel('Value of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample distribution')\n plt.show()",
"def show_histograms(self, n_samples=1000):\n\n ps = self.gen(n_samples)\n util.plot.plot_hist_marginals(ps, lims=get_disp_lims())\n plt.show()",
"def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def plot_hist(x, var_name, bins=100, path='hist.png'):\n (mu, sigma) = stats.norm.fit(x)\n fig, ax = plt.subplots()\n sns.distplot(x, bins=bins, kde=True, fit=stats.norm, \n hist_kws={'linewidth': 2, 'alpha': 0.6, 'color': 'b'},\n kde_kws={'linewidth': 2, 'alpha': 0.6, 'color': 'k'},\n fit_kws={'linewidth': 2, 'alpha': 0.6, 'color': 'r', 'label': f'norm fit: $\\mu$={mu:.2f}, $\\sigma$={sigma:.2f}'})\n plt.grid(True)\n plt.legend()\n plt.title(var_name + ' hist')\n plt.savefig(path, bbox_inches='tight')",
"def hist(self, bins):\n x = self.x\n plt.hist(x, bins)\n plt.xlabel('Observed Data')\n plt.ylabel('Frequency')\n plt.show()",
"def _normality_plot(ax, data, **kwargs):\n n, bins, patches = ax.hist(data, normed=True, **kwargs)\n data = np.ravel(data)\n\n # normal line\n mu = np.mean(data)\n sigma = np.std(data)\n y = mpl.mlab.normpdf(bins, mu, sigma)\n ax.plot(bins, y, 'r--', linewidth=1)\n\n # TESTS\n # test Anderson\n A2, thresh, sig = scipy.stats.morestats.anderson(data)\n index = sum(A2 >= thresh)\n if index > 0:\n ax.text(.95, .95, '$^{*}%s$' % str(sig[index - 1] / 100), color='r', size=11,\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes,)\n logging.debug(\" Anderson: %s, %s, %s\" % (A2, thresh, sig))\n # test Lilliefors\n n_test = test.lilliefors(data)\n ax.set_xlabel(r\"$D=%.3f$, $p_{est}=%.2f$\" % n_test) # \\chi ^{2}\n # make sure ticks display int values\n # ax.yaxis.set_major_formatter(ticker.MaxNLocator(nbins=8, integer=True))\n ticks = ax.get_yticks()\n ticks_int = [int(l) for l in ticks]\n ax.set_yticks(ticks_int)\n ax.set_yticklabels(ticks_int)",
"def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()",
"def not_pokemon_function():\n # Ironic I'm using random inside seed\n numpy.random.seed(random.randint(1, 1000))\n sample = numpy.random.normal(size=1000)\n counts, bin_edges = numpy.histogram(sample, bins=39)\n fig = tpl.figure()\n fig.hist(counts, bin_edges, grid=[15, 25], force_ascii=False)\n fig.show()\n print(\"Hopefully this random histogram(because I couldn't generate plot graphs) which is generated cheers you up\")",
"def histogram(x_points, output, title=\"Histogram\", xlabel=\"X\", color=\"blue\"):\n fig = plt.figure()\n # the histogram of the data\n n, bins, patches = plt.hist(x_points, bins=\"auto\", \n normed=False, facecolor=color, alpha=1.0)\n mean = np.mean(x_points)\n std_dev = np.std(x_points)\n # add a 'best fit' line\n y = mlab.normpdf(bins, mean, std_dev)\n plt.plot(bins, y, 'r--', linewidth=1)\n # generate plot\n plt.xlabel(xlabel)\n plt.ylabel(\"Occurrences\")\n plt.title(title)\n # Tweak spacing to prevent clipping of ylabel\n plt.subplots_adjust(left=0.15)\n fig.savefig(\"{}.pdf\".format(os.path.splitext(os.path.basename(output))[0]), \n format='pdf', dpi=300)",
"def show_histograms(n_samples=1000):\n\n true_ps, obs_xs = get_ground_truth()\n\n prior = Prior()\n model = Model()\n stats = Stats()\n\n ps = prior.gen(n_samples)\n data = stats.calc(model.sim(ps))\n cond_data = stats.calc(model.sim(np.tile(true_ps, [n_samples, 1])))\n\n # plot prior parameter histograms\n fig = util.plot.plot_hist_marginals(ps, lims=get_disp_lims(), gt=true_ps)\n fig.suptitle('p(thetas)')\n\n # plot stats histograms\n fig = util.plot.plot_hist_marginals(data, gt=obs_xs)\n fig.suptitle('p(stats)')\n\n # plot stats histograms, conditioned on true params\n fig = util.plot.plot_hist_marginals(cond_data, gt=obs_xs)\n fig.suptitle('p(stats|true thetas)')\n\n plt.show()",
"def plotPosteriors(posteriors):\n for i,p in enumerate(posteriors):\n plt.hist(p,bins=20,histtype='stepfilled',alpha=0.5,\n density=True,label='Bin {0}'.format(i))\n plt.legend()\n plt.ylabel(\"Probability\")\n plt.xlabel(\"Posterior\")\n\n return",
"def hist_pvalue(perms, ax, name):\n # Re-weight to obtain distribution\n pval = np.sum(perms >= perms[0]) / perms.shape[0]\n weights = np.ones(perms.shape[0]) / perms.shape[0]\n ax.hist([perms[perms >= perms[0]], perms], histtype='stepfilled',\n bins=100, label=\"p-val<%.3f\" % pval,\n weights=[weights[perms >= perms[0]], weights])\n ax.axvline(x=perms[0], color=\"k\", linewidth=2)#, label=\"observed statistic\")\n ax.set_ylabel(name)\n ax.legend()\n return ax",
"def get_gridpoint_histograms(self):\n\n ind_array = np.indices(self.results_array.shape)\n\n def results_array_histograms(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Num_zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n # hist, bin_edges = np.histogram(hist_arr, bins=20)\n colour_dict = {\"acceptor\": \"r\", \"donor\": \"b\", \"apolar\": \"y\"}\n hist_name = self.prot_name + '_' + self.probe + '_{}_{}_{}'.format(x, y, z)\n\n plt.figure(1)\n plt.hist(hist_arr, bins=20, color=colour_dict[self.probe])\n plt.figtext(0.6, 0.8, ('Number of zero values:' + str(num_zeros)))\n plt.title('Score distribution at point x:{}, y:{}, z:{}'.format(x, y, z))\n plt.xlabel('Fragment hotspot score')\n plt.ylabel('Frequency')\n plt.savefig(join(self.out_dir, hist_name))\n plt.close()\n\n print('Generating Histograms')\n vresults_array_histograms = np.vectorize(results_array_histograms)\n vresults_array_histograms(ind_array[0], ind_array[1], ind_array[2])",
"def PlotLogPHistogram(lag):\n (n,bins) = pylab.mlab.hist(P(lag), bins=100, normed=True)\n binCenters = bins + (bins[1]-bins[0])/2.\n pylab.plot(binCenters, scipy.log(n+1.e-4))",
"def convolute_plot(lam, mu, sigma, nEntries, randomState=None):\n np.random.seed(randomState) # to have the same starting point\n \n xb = np.arange(-30,500000, 5000)\n xp = np.arange(-30,30,0.2)\n \n # Plot the exponential curve\n plt.figure()\n plt.subplot(3,1,1)\n xf = stats.expon(0.,1./lam).rvs(nEntries)\n plt.hist(xf,xb, normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n # Plot the gaussian distribution\n plt.subplot(3,1,2) \n xg = stats.norm(mu, sigma).rvs(nEntries)\n plt.hist(xg,xp, normed=True)\n plt.plot(xp,stats.norm(mu,sigma).pdf(xp))\n \n # Plot the convolution of the two distributions\n plt.subplot(3,1,3)\n plt.hist(xf+xg,xb,normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n data_set = xf+xg\n return data_set",
"def PlotHist(self, label=None):\n ys, xs, patches = plt.hist(self.test_stats)\n plt.vlines(self.actual, 0, max(ys), linewidth=3, color='black')\n plt.xlabel('test statistic')\n plt.ylabel('count')\n plt.show()",
"def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()",
"def plotPValHistogram(lXs, lYs, out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\"):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.hist(lXs,lYs)\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def plot_histogram(self) -> None:\n\n if self.data:\n plt.hist(self.data)\n plt.title(\"Histogram of data\")\n plt.xlabel(\"data\")\n plt.ylabel(\"count\")\n else:\n raise ValueError(\"Histogram cannot be generated as no\\\n data has been provided\")"
] | [
"0.765192",
"0.68404233",
"0.66037804",
"0.65835",
"0.6533649",
"0.64928544",
"0.6461779",
"0.645999",
"0.6441155",
"0.6404364",
"0.6345734",
"0.6342641",
"0.6324178",
"0.63204044",
"0.62690324",
"0.62213695",
"0.61868656",
"0.6182514",
"0.6176014",
"0.61743295",
"0.6171994",
"0.61611557",
"0.6159493",
"0.6158188",
"0.6146853",
"0.6136799",
"0.6114616",
"0.6110518",
"0.6094443",
"0.6084317"
] | 0.8222845 | 0 |
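A minimal sketch of the p-norm computation used in the `plot_p_norm` document above, assuming (this is not shown in the row itself) that `solutions` is an iterable of (a, b, c, d) quaternion coefficient tuples; the sample data below is purely hypothetical:

```python
import matplotlib.pyplot as plt

# Hypothetical sample data: each entry is an (a, b, c, d) coefficient tuple.
solutions = [(1, 0, -2, 3), (2, 2, -1, 0), (-3, 1, 1, 1)]

p = 2
# p-norm of each solution: (|a|^p + |b|^p + |c|^p + |d|^p)^(1/p)
norms = [sum(abs(x) ** p for x in abcd) ** (1 / p) for abcd in solutions]

plt.hist(norms, bins=10)
plt.title(f"{p}-norms of sample solutions")
plt.show()
```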
Plot a histogram of the p-norms of the imaginary parts of the solutions | def plot_imag_p_norm(p=2, bins=500):
plt.title(f"{p}-norms of imaginary parts of solutions to polynomials with quaternion coefficients")
plt.hist([sum(abs(x) ** p for x in abcd[1:]) ** (1 / p) for abcd in solutions], bins=bins) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_p_norm(p=2, bins=500):\n plt.title(f\"{p}-norms of solutions for lattice point quaternion polynomials\")\n plt.hist([sum(abs(x)**p for x in abcd)**(1/p) for abcd in solutions], bins=bins)",
"def show_histogram(im):\n\n if im.ndim == 2:\n # Input image is single channel\n plt.hist(im.flatten(), 256, range=(0, 250), fc='k')\n plt.show()\n\n elif im.ndim == 3:\n # Input image is three channels\n fig = plt.figure()\n fig.add_subplot(311)\n plt.hist(im[..., 0].flatten(), 256, range=(0, 250), fc='b')\n fig.add_subplot(312)\n plt.hist(im[..., 1].flatten(), 256, range=(0, 250), fc='g')\n fig.add_subplot(313)\n plt.hist(im[..., 2].flatten(), 256, range=(0, 250), fc='r')\n plt.show()",
"def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def plot_histogram(beta=3):\n m = 10 ** beta\n\n # generate m normal random variables of 100 points each\n X = np.random.randn(100, m)\n\n # take the maximum along the rows\n Z = np.max(X, axis=1)\n\n # plot the pdf with a gaussian kernel density estimate\n plt.subplot(121)\n sns.distplot(Z, kde=True)\n plt.title(r'Histogram of Z for $\\beta$ = {}'.format(beta))\n\n # plot the cdf and find t in relation with Q3)\n plt.subplot(122)\n plt.hist(Z, bins=25, normed=True, cumulative=True)\n plt.title(r'P[Z $\\leq$ t]$\\geq$0.9 for t$\\geq$%0.4f' % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the inverse cdf' % (norm.ppf(0.9 ** (1/m))))\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the Chernoff bounding method'\n % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n # save the plot to file & show the plot\n plt.savefig('histogram_beta_{}.png'.format(beta))\n\n plt.show()",
"def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])",
"def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()",
"def test_triangular(self):\r\n s = np.random.triangular(-1.65, 0.08, 0.08, 5000)\r\n plt.hist(s, bins=30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def convolute_plot(lam, mu, sigma, nEntries, randomState=None):\n np.random.seed(randomState) # to have the same starting point\n \n xb = np.arange(-30,500000, 5000)\n xp = np.arange(-30,30,0.2)\n \n # Plot the exponential curve\n plt.figure()\n plt.subplot(3,1,1)\n xf = stats.expon(0.,1./lam).rvs(nEntries)\n plt.hist(xf,xb, normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n # Plot the gaussian distribution\n plt.subplot(3,1,2) \n xg = stats.norm(mu, sigma).rvs(nEntries)\n plt.hist(xg,xp, normed=True)\n plt.plot(xp,stats.norm(mu,sigma).pdf(xp))\n \n # Plot the convolution of the two distributions\n plt.subplot(3,1,3)\n plt.hist(xf+xg,xb,normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n data_set = xf+xg\n return data_set",
"def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);",
"def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);",
"def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);",
"def plot_histogram(self,**kwargs):\n axes = []\n for i in range(self.score_length):\n fig = plt.figure()\n scores = np.array([s[i] for s in self.scores_list])\n probs,bins,patches = plt.hist(scores,label=\"Sample {}\".format(self.labels[i]), **kwargs)\n plt.vlines(self.xhat,fig.get_axes().get_ylim(),label='Mean',color='r')\n plt.legend()\n axes.append(fig.get_axes())\n return axes",
"def PlotLogPHistogram(lag):\n (n,bins) = pylab.mlab.hist(P(lag), bins=100, normed=True)\n binCenters = bins + (bins[1]-bins[0])/2.\n pylab.plot(binCenters, scipy.log(n+1.e-4))",
"def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig",
"def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()",
"def show_bryant(data_length=200000, bins=100):\n\n data = np.random.normal(0,1,data_length)\n plt.hist(data, bins)\n plt.show()",
"def plot_histograms(p_hist, p_hbins, title, figure_path=None):\n\n base_fig_size = 7\n h_fig = base_fig_size\n w_fig = base_fig_size * 4\n\n fig = plt.figure(figsize=(w_fig, h_fig))\n fig.suptitle(title)\n iplot = 0\n\n p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1\n\n p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n if figure_path:\n plt.savefig(figure_path, format='png')\n\n return fig",
"def display(self, bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()",
"def plot_histogram(self,ax=None,**kwargs):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n probs,bins,patches = ax.hist(self.scores_list,normed=True,label=\"Sample\",**kwargs)\n ax.vlines(self.xhat,*ax.get_ylim(),label='Mean',color='r')\n ax.legend()\n return ax,probs,bins",
"def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))",
"def fn_photonflux_hist(file_name,folder,mean_photons_per_sec):\n import numpy as np\n import matplotlib.pyplot as plt\n from scipy.stats import lognorm\n from pylab import text\n \n n_molecules=len(mean_photons_per_sec)\n \n #Plot photon flux\n figure_name=file_name+'_photonsPerSecond'\n ax = plt.subplot(111)\n num_bins = np.linspace(int(min(mean_photons_per_sec)), int(max(mean_photons_per_sec)), int(np.sqrt(len(mean_photons_per_sec))*4))\n ax.hist(mean_photons_per_sec, bins=num_bins, density=True, color='darkorange',edgecolor='black')\n \n #Fit curve\n sigma,loc,mean = lognorm.fit(mean_photons_per_sec, floc=0)\n pdf = lognorm.pdf(num_bins, sigma, loc, mean) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n \n #Edit plot\n plt.xlabel('Photon flux ($s^{-1}$)', fontname='Arial', fontsize=12)\n plt.ylabel('Probability density', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial', fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n text(0.75, 0.95,'μ='+str(round(mean,2))+' photons $s^{-1}$',horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n text(0.40, 0.95,'N='+str(n_molecules),horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '.png', dpi=500)\n \n return (plt.show())",
"def test_uniform(self):\r\n\r\n s = np.random.uniform(-1.35, 0.5, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def compare_histograms(df, df_norm, fignum, fields, binns):\n fig = plt.figure(num=fignum, figsize=(18,18))\n fig.suptitle('Histogram before and after normalization', fontsize=22)\n ax1 = fig.add_subplot(421, axisbg='0.94')\n ax2 = fig.add_subplot(422, axisbg='0.94')\n ax3 = fig.add_subplot(423, axisbg='0.94')\n ax4 = fig.add_subplot(424, axisbg='0.94')\n ax5 = fig.add_subplot(425, axisbg='0.94')\n ax6 = fig.add_subplot(426, axisbg='0.94')\n ax7 = fig.add_subplot(427, axisbg='0.94')\n ax8 = fig.add_subplot(428, axisbg='0.94')\n alphas = [0.33, 0.33, 0.6, 0.6, 0.28, 0.28, 0.6, 0.6]\n hues = ['g','y','g','y','g','y','g','y']\n all_axes = plt.gcf().axes\n # print list(enumerate(fields))\n for i, ax in list(enumerate(all_axes)):\n ax.set_ylabel(\"count\", fontsize=10)\n for ticklabel in ax.get_xticklabels() + ax.get_yticklabels():\n ticklabel.set_fontsize(14)\n g = np.int(math.ceil(np.float(i)/2))\n \n if (len(fields)*2-1) >= i:\n if i in (0,2,4,6):\n ax.hist(df[fields[i-g]].dropna().values, bins=binns[i-g], color=hues[i],alpha=alphas[i])\n print \" plot \" + str(df[fields[i-g]].name)\n ax.set_title(df[fields[i-g]].name, fontsize=20)\n #if (len(fields)*2) >= i: \n if i in (1,3,5,7):\n #try:\n ax.hist(df_norm[fields[i-g]].dropna().values, bins=binns[i-g], color=hues[i],alpha=alphas[i])\n ax.set_title(\"As normalized:\", fontsize=20)\n \n try: # Save the figure as one file\n filename = \"data/vis/histogram_compare\" + \"_\" + str(fignum) + \".png\"\n plt.savefig(filename)\n print \"= Vis Output: \", filename\n print\n except IOError:\n print \"WARNING: Failed to write out file: \", filename\n print\n plt.close(fig)",
"def show_histograms(self, n_samples=1000):\n\n ps = self.gen(n_samples)\n util.plot.plot_hist_marginals(ps, lims=get_disp_lims())\n plt.show()",
"def _normality_plot(ax, data, **kwargs):\n n, bins, patches = ax.hist(data, normed=True, **kwargs)\n data = np.ravel(data)\n\n # normal line\n mu = np.mean(data)\n sigma = np.std(data)\n y = mpl.mlab.normpdf(bins, mu, sigma)\n ax.plot(bins, y, 'r--', linewidth=1)\n\n # TESTS\n # test Anderson\n A2, thresh, sig = scipy.stats.morestats.anderson(data)\n index = sum(A2 >= thresh)\n if index > 0:\n ax.text(.95, .95, '$^{*}%s$' % str(sig[index - 1] / 100), color='r', size=11,\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes,)\n logging.debug(\" Anderson: %s, %s, %s\" % (A2, thresh, sig))\n # test Lilliefors\n n_test = test.lilliefors(data)\n ax.set_xlabel(r\"$D=%.3f$, $p_{est}=%.2f$\" % n_test) # \\chi ^{2}\n # make sure ticks display int values\n # ax.yaxis.set_major_formatter(ticker.MaxNLocator(nbins=8, integer=True))\n ticks = ax.get_yticks()\n ticks_int = [int(l) for l in ticks]\n ax.set_yticks(ticks_int)\n ax.set_yticklabels(ticks_int)",
"def fn_total_photon_hist(file_name,folder,total_photon):\n import numpy as np\n import matplotlib.pyplot as plt\n from scipy.stats import lognorm\n from pylab import text\n \n n_molecules=len(total_photon)\n \n #Plot histogram\n figure_name=file_name+'_totalPhotons'\n ax = plt.subplot(111)\n num_bins = np.linspace(int(min(total_photon)), int(max(total_photon)), int(np.sqrt(len(total_photon))*3))\n ax.hist(total_photon, bins=num_bins, density=True,color='cornflowerblue',edgecolor='black')\n\n #Fit curve\n sigma,loc,mean = lognorm.fit(total_photon, floc=0)\n pdf = lognorm.pdf(num_bins, sigma, loc, mean) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n\n #Edit plot\n plt.xlabel('Total number of photons',fontname='Arial', fontsize=12)\n plt.ylabel('Probability density',fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n text(0.75, 0.95,'μ='+str(round(mean/10**6,2))+'$x10^6$ photons',horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n text(0.40, 0.95,'N='+str(n_molecules),horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '.png', dpi=300)\n\n return (plt.show())",
"def not_pokemon_function():\n # Ironic I'm using random inside seed\n numpy.random.seed(random.randint(1, 1000))\n sample = numpy.random.normal(size=1000)\n counts, bin_edges = numpy.histogram(sample, bins=39)\n fig = tpl.figure()\n fig.hist(counts, bin_edges, grid=[15, 25], force_ascii=False)\n fig.show()\n print(\"Hopefully this random histogram(because I couldn't generate plot graphs) which is generated cheers you up\")",
"def plot_features(data: np.array)->None:\n n_rows = np.size(data, 0)\n n_cols = np.size(data, 1)\n for i in range(n_cols):\n plt.hist(data[:,i])\n plt.show()",
"def plot_trait_histogram(params, traits):\n\n pylab.subplot(122).clear()\n pylab.xlabel(r\"$x$\")\n pylab.ylabel(r\"$I$\")\n pylab.hist(traits, 100, range = (0, params[\"max_trait\"]), normed = True, \n facecolor = \"black\")\n pylab.xlim(0, params[\"max_trait\"])\n pylab.ylim(0, params[\"population\"])\n ax = pylab.gca()\n ax.yaxis.major.formatter.set_powerlimits((0,0))\n pylab.draw()"
] | [
"0.777749",
"0.6434252",
"0.6400417",
"0.62809926",
"0.6274087",
"0.62183875",
"0.6199149",
"0.61816305",
"0.6179878",
"0.6179878",
"0.6179878",
"0.61471015",
"0.6140279",
"0.6139242",
"0.6112178",
"0.6104909",
"0.60622525",
"0.60274523",
"0.5973794",
"0.59647155",
"0.5960954",
"0.5921871",
"0.5905656",
"0.5905491",
"0.5904516",
"0.58987355",
"0.5887921",
"0.58456546",
"0.58440715",
"0.5843341"
] | 0.83347857 | 0 |
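The imaginary-part variant in the row above differs from `plot_p_norm` only in the slice it norms: `abcd[1:]` drops the real component before summing. A tiny, hypothetical check of that slice logic:

```python
p = 2
abcd = (1, 0, -2, 3)  # hypothetical (real, i, j, k) coefficients
imag_norm = sum(abs(x) ** p for x in abcd[1:]) ** (1 / p)
print(imag_norm)  # sqrt(0 + 4 + 9) ~= 3.606
```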
A first Python implementation of the Levenshtein distance between two strings | def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1  # j+1 instead of j since previous_row and current_row are one character longer than s2
            deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]",
"def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)",
"def levenshtein(str1, str2, normalise=False):\n\ttmp = Levenshtein.distance(str1, str2)\n\tif(normalise) and (len(str1) + len(str2)): tmp /= max(len(str1), len(str2))\n\treturn tmp",
"def levenshtein_normalised(str1, str2):\n\treturn levenshtein(str1, str2, normalise=True)",
"def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}",
"def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]",
"def levenshtein(w1, w2):\n\n if len(w1) < len(w2):\n # check if length of word1 is smaller than word2.\n # if so, call function and switch parameters\n return levenshtein(w2, w1)\n elif len(w1) == 0:\n # if the length of word1 equals 0, that means that\n # the Lev' distance is the length of word2\n return len(w2)\n elif len(w2) == 0:\n # if the length of word2 equals 0, that means that\n # the Lev' distance is the length of word1\n return len(w1)\n elif w1 == w2:\n # check if words are simply the same\n return 0\n\n # thanks to the check above, we can assume that w2 is the longest word\n # we use this information to determine the range of 'previous'\n previous = range(len(w2) + 1)\n\n # iterate over the characters of the first word\n for a, char1 in enumerate(w1):\n current = [a + 1]\n # iterate over the characters of the second word\n for b, char2 in enumerate(w2):\n inserts = previous[b + 1] + 1\n deletions = current[b] + 1\n subs = previous[b] + (char1 != char2)\n current.append(min(inserts, deletions, subs))\n previous = current\n return previous[-1]",
"def damerau_levenshtein_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n infinite = n1 + n2\n\n char_arr = defaultdict(int)\n dp = [[0] * (n2 + 2) for _ in range(n1 + 2)]\n\n dp[0][0] = infinite\n for i in range(0, n1 + 1):\n dp[i + 1][0] = infinite\n dp[i + 1][1] = i\n for i in range(0, n2 + 1):\n dp[0][i + 1] = infinite\n dp[1][i + 1] = i\n\n for i in range(1, n1 + 1):\n db = 0\n for j in range(1, n2 + 1):\n i1 = char_arr[s2[j - 1]]\n j1 = db\n cost = 1\n if s1[i - 1] == s2[j - 1]:\n cost = 0\n db = j\n\n dp[i + 1][j + 1] = min(dp[i][j] + cost,\n dp[i + 1][j] + 1,\n dp[i][j + 1] + 1,\n dp[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))\n char_arr[s1[i - 1]] = i\n\n return dp[n1 + 1][n2 + 1]",
"def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result",
"def lev_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the distance measure.\n return measure.get_raw_score(s1, s2)",
"def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost",
"def _levenshtein_distance(t1: Trace, t2: Trace):\n if t1.length > t2.length:\n t1, t2 = t2, t1\n\n distances = range(t1.length + 1)\n for i2, c2 in enumerate(t2.event_list):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(t1.event_list):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]",
"def iterative_levenshtein(s, t):\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n #for r in range(rows):\n #print(dist[r])\n \n \n return dist[row][col]",
"def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]",
"def compare_str(seq1, seq2):\n if seq1 == seq2:\n return 1\n ld = Levenshtein.distance(seq1, seq2)\n longest = len(seq1 if len(seq1) > len(seq2) else seq2)\n return (longest - ld) / longest",
"def levenshtein_distance(s, t, alphabet=string.printable, **weight_dict):\n if len(s) == 0 or len(t) == 0:\n return max([len(s), len(t)])\n\n rows = len(s) + 1\n cols = len(t) + 1\n\n w = dict((x, (1, 1, 1)) for x in alphabet + alphabet.upper())\n if weight_dict:\n w.update(weight_dict)\n\n dist = [[0 for _ in range(cols)] for _ in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for row in range(1, rows):\n dist[row][0] = dist[row - 1][0] + w[s[row - 1]][0]\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for col in range(1, cols):\n dist[0][col] = dist[0][col - 1] + w[t[col - 1]][1]\n\n for col in range(1, cols):\n for row in range(1, rows):\n deletes = w[s[row - 1]][0]\n inserts = w[t[col - 1]][1]\n subs = max((w[s[row - 1]][2], w[t[col - 1]][2]))\n if s[row - 1] == t[col - 1]:\n subs = 0\n else:\n subs = subs\n dist[row][col] = min(\n dist[row - 1][col] + deletes,\n dist[row][col - 1] + inserts,\n dist[row - 1][col - 1] + subs,\n ) # substitution\n\n return dist[row][col]",
"def levenshteinDistance(s, t, asRatioOfMax = False):\n\n if s == None:\n s = \"\"\n if t == None:\n t = \"\"\n\n if t == s:\n return 0\n if len(s) == 0:\n return len(t)\n if len(t) == 0:\n return len(s)\n\n v0 = [x for x in range(len(t)+1)]\n v1 = [0 for x in range(len(t)+1)]\n\n for i, si in enumerate(s):\n\n v1[0] = i + 1\n\n for j, tj in enumerate(t):\n\n if si == tj:\n cost = 0\n else:\n cost = 1\n\n j1 = v1[j] + 1\n j2 = v0[j + 1] + 1\n j3 = v0[j] + cost\n \n if j1 < j2 and j1 < j3:\n v1[j + 1] = j1\n continue\n \n if j2 < j3:\n v1[j+1] = j2\n else:\n v1[j+1] = j3\n \n v0 = v1.copy()\n\n if not asRatioOfMax:\n return v1[len(t)]\n\n return 1 - v1[len(t)]/max([len(t), len(s)])",
"def Levenshtein(a, b):\n v0 = list(range(len(b)+1))\n v1 = list(range(len(b)+1)) # Or whatever.\n\n for i in range(len(a)):\n v1[0] = i + 1\n\n for j in range(len(b)):\n deletionCost = v0[j + 1] + 1\n insertionCost = v1[j] + 1\n substitutionCost = v0[j] if a[i] == b[j] else v0[j]+1\n v1[j + 1] = min(deletionCost, insertionCost, substitutionCost)\n\n v1, v0 = v0, v1\n return v0[len(b)]",
"def levenshtein_distance(s: Union[bytes, str], t: Union[bytes, str]) -> int:\n if s is None or t is None:\n raise ValueError(\"Strings must not be None\")\n\n n = len(s)\n m = len(t)\n\n if n == 0:\n return m\n elif m == 0:\n return n\n\n if n > m:\n tmp = s\n s = t\n t = tmp\n n = m\n m = len(t)\n\n p = [0] * (n + 1)\n d = [0] * (n + 1)\n\n for i in range(0, n + 1):\n p[i] = i\n\n for j in range(1, m + 1):\n if DEBUG_DISTANCE:\n if j % 100 == 0:\n print(\"DEBUG:\", int(j / (m + 1.0) * 100), \"%\\r\", end=' ', file=sys.stderr)\n t_j = t[j - 1]\n d[0] = j\n\n for i in range(1, n + 1):\n cost = 0 if s[i - 1] == t_j else 1\n # minimum of cell to the left+1, to the top+1, diagonally left and up +cost\n d[i] = min(min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost)\n\n _d = p\n p = d\n d = _d\n\n if DEBUG_DISTANCE:\n print(\"\\n\", file=sys.stderr)\n return p[n]",
"def __levenshtein(a, b):\n\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a, b = b, a\n n, m = m, n\n\n current = list(range(n + 1))\n for i in range(1, m + 1):\n previous, current = current, [i] + [0] * n\n for j in range(1, n + 1):\n add, delete = previous[j] + 1, current[j - 1] + 1\n change = previous[j - 1]\n if a[j - 1] != b[i - 1]:\n change = change + 1\n current[j] = min(add, delete, change)\n\n return current[n]",
"def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))",
"def levenshtein_dist(s1, s2, dele, add, sub):\n m = np.zeros((len(s1)+1, len(s2)+1), dtype=np.int)\n p = np.zeros((len(s1)+1, len(s2)+1), dtype=np.float)\n for i in range(len(s1)+1):\n m[i, 0] = i\n # compute probability for deletion\n if i == 0:\n p[i, 0] = 1\n else:\n ind = alphabet.index('@')\n p[i, 0] = p[i-1, 0] * dele[ind, alphabet.index(s1[i-1])]\n for j in range(len(s2)+1):\n # compute probability for insertion\n if j == 0:\n p[0, j] = 1\n else:\n prev_char = '@' if j == 1 else s2[j-2]\n p[0, j] = p[0, j-1] * add[alphabet.index(prev_char),\n alphabet.index(s2[j-1])]\n m[0, j] = j\n for i in range(1, 1+len(s1)):\n for j in range(1, len(s2)+1):\n if s1[i-1] == s2[j-1]:\n k = np.argmin([m[i-1, j] + 1, m[i, j-1] + 1, m[i-1, j-1]])\n if k == 0:\n # deletion\n m[i, j] = m[i-1, j] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i-1, j] * dele[alphabet.index(prev_char), alphabet.index(s1[i-1])]\n elif k == 1:\n # insertion\n m[i, j] = m[i, j-1] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i, j-1] * add[alphabet.index(prev_char), alphabet.index(s2[j-1])]\n else:\n # no mistake\n m[i, j] = m[i-1, j-1]\n p[i, j] = p[i-1, j-1]\n else:\n k = np.argmin([m[i-1, j] + 1, m[i, j-1] + 1, m[i-1, j-1] + 1])\n if k == 0:\n # deletion\n m[i, j] = m[i-1, j] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i-1, j] * dele[alphabet.index(prev_char), alphabet.index(s1[i-1])]\n elif k == 1:\n # insertion\n m[i, j] = m[i, j-1] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i, j-1] * add[alphabet.index(prev_char), alphabet.index(s2[j-1])]\n else:\n # substitution\n m[i, j] = m[i-1, j-1] + 1\n p[i, j] = p[i-1, j-1] * sub[alphabet.index(s1[i-1]), alphabet.index(s2[j-1])]\n # recall that in sub[X, Y], Y is the correct word\n\n return m[len(s1), len(s2)], p[len(s1), len(s2)]",
"def levenshtein_similarity(s1, s2, insert=None, delete=None, substitute=None,\n insert_default=1, delete_default=1, substitute_default=1,\n lower_bound=None):\n insert = insert if isinstance(insert, dict) else {}\n delete = delete if isinstance(delete, dict) else {}\n substitute = substitute if isinstance(substitute, dict) else {}\n\n def compute_max_cost(s):\n return sum([\n max(\n insert[c] if c in insert else insert_default,\n delete[c] if c in delete else delete_default,\n substitute[c] if c in substitute else substitute_default\n ) for c in s\n ])\n\n def estimate_min_char_cost(s):\n return min([min(\n insert[c] if c in insert else insert_default,\n delete[c] if c in delete else delete_default,\n substitute[c] if c in substitute else substitute_default\n ) for c in s])\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n max_cost = max(compute_max_cost(s1), compute_max_cost(s2))\n\n if lower_bound:\n diff = abs(len(s1) - len(s2))\n if len(s1) == 0 and len(s2) == 0:\n return 1.0\n elif len(s1) == 0:\n min_lev = float(diff * estimate_min_char_cost(s2))\n elif len(s2) == 0:\n min_lev = float(diff * estimate_min_char_cost(s1))\n else:\n min_lev = float(diff * min(estimate_min_char_cost(s1), estimate_min_char_cost(s2)))\n est_sim = 1.0 - min_lev / max_cost\n if est_sim < lower_bound:\n return 0.0\n\n lev = levenshtein_distance(s1, s2, insert, delete, substitute,\n insert_default, delete_default, substitute_default)\n\n if max_cost < lev:\n raise ValueError('Illegal value of operation cost')\n\n if max_cost == 0:\n return 1.0\n\n lev_sim = 1.0 - float(lev) / max_cost\n if lower_bound and lev_sim < lower_bound:\n return 0.0\n return lev_sim",
"def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))",
"def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()",
"def test_string_similarity_constraint():\n f = SimilarityConstraint(func=LevenshteinDistance(), pred=GreaterThan(0.5))\n assert f('BROOKLYN', 'BROKLYN')\n assert not f('BROOKLYN', 'QUEENS')",
"def edit_distance(str1, str2, reconstruct_answer=False, method=alignments.Levinshtein(),\n swap_case_on_mismatch=True):\n method = alignments.Levinshtein() if method is None else method\n return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)",
"def string_distance(s1, s2):\n if len(s1) != len(s2):\n return 1\n diff_count = 0\n for c1, c2, in zip(s1, s2):\n if c1 != c2:\n diff_count += 1\n return diff_count",
"def edit_distance(str1, str2):\r\n pass",
"def iterative_levenshtein(self, w1, d1, w2, d2):\n rows = len(w1) + 1\n cols = len(w2) + 1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if w1[row - 1] == w2[col - 1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n return dist[row][col] < 5"
] | [
"0.81522393",
"0.79750335",
"0.7954744",
"0.7949555",
"0.78139377",
"0.7758564",
"0.7698071",
"0.7673589",
"0.7605972",
"0.7590628",
"0.7568366",
"0.7530191",
"0.74639803",
"0.7419249",
"0.739178",
"0.7367446",
"0.7348473",
"0.7338631",
"0.73206055",
"0.7319021",
"0.7293537",
"0.7266428",
"0.72638476",
"0.7211292",
"0.712276",
"0.71086013",
"0.7087208",
"0.70858216",
"0.70790136",
"0.70734185"
] | 0.80752313 | 1 |
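The negatives above are all edit-distance variants. For reference, a minimal two-row Levenshtein sketch plus a normalised similarity ratio, matching the behaviour most of those snippets implement; the names `levenshtein` and `similarity` are illustrative and not drawn from any entry.

```python
# A minimal two-row Levenshtein sketch (illustrative names, not taken from any entry above).
def levenshtein(a: str, b: str) -> int:
    # Keep the shorter string in `a` so the rows stay O(min(len(a), len(b))) in space.
    if len(a) > len(b):
        a, b = b, a
    prev = list(range(len(a) + 1))
    for i, cb in enumerate(b, start=1):
        curr = [i] + [0] * len(a)
        for j, ca in enumerate(a, start=1):
            curr[j] = min(
                prev[j] + 1,               # deletion
                curr[j - 1] + 1,           # insertion
                prev[j - 1] + (ca != cb),  # substitution (free on a match)
            )
        prev = curr
    return prev[len(a)]


def similarity(a: str, b: str) -> float:
    # Normalised similarity in [0, 1], as several of the snippets above compute.
    if not a and not b:
        return 1.0
    return 1.0 - levenshtein(a, b) / max(len(a), len(b))


if __name__ == "__main__":
    assert levenshtein("kitten", "sitting") == 3
    print(similarity("BROOKLYN", "BROKLYN"))  # 0.875
```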
It is recommended that preprocessing has taken place before loading the corpus directly into the object class in iterable format, to avoid inconsistencies in document delimitation caused by punctuation. Upon calling the method `.fit()` on the corpus, collections of entities and PoS tags will be identified to harmonise the extracted relation triplets. This may take some computation time. Note that this uses spaCy's pretrained models, which are fitted on OntoNotes 5 (access to the raw training data is chargeable!) | def fit(self,
corpus: Union[str, Iterable[str]],
sent_delim: str='\.\s+|\r|\n',
preferred_spacy_core: str='en_core_web_sm'
) -> None:
# Initialise corpus
if type(corpus) == str:
self.__corpus__ = [sent+'.' if ('\.' in sent_delim and sent[-1] != '.') else sent
for sent in re.split(sent_delim, corpus) ]
elif isinstance(corpus, Iterable):
self.__corpus__ = corpus
else:
raise Exception('[WARN] Invalid corpus input supplied!')
        ## Collect pronoun-variants and determiners
nlp = spacy.load(preferred_spacy_core)
self.__pron_det_pos_words__ = {token.text for doc in nlp.pipe(self.__corpus__, disable=['parser', 'ner'])
for token in doc if token.pos_ in ['PRON', 'DET']}
## Collect recognised entities
        # Default NER scheme: OntoNotes 5
        # TODO: integration of pre/re-training modules for larger set of recognised entities
# N.B.: temp. disabled functionality to clean triplets via NER
self.__entities_in_doc__ = {(ent.text, ent.label_)
for doc in nlp.pipe(self.__corpus__, disable=['tagger', 'parser'])
for ent in doc.ents}
self.__entities_in_doc__ = pd.DataFrame(self.__entities_in_doc__, columns=['entities', 'ner_label'])
self.__entities_in_doc__['xjoin'] = 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()",
"def preproc_doc(document):\n\n # Each document is a list of lines\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n # set a random seed for reproducability\n # since this function is run in parallel, if we hardcode a seed, all\n # documents will have the same permutations. Instead we use the hash of the\n # first sentence as the seed so it is different for each document and it\n # is still reproducible.\n hash_object = hashlib.md5(document[0])\n rng = random.Random(int(hash_object.hexdigest(), 16) % (10**8))\n\n # Each document is composed of a list of sentences. We create paragraphs\n # by keeping together sentences on the same line and adding adjacent sentences\n # if there are fewer than 5 to form the paragraph.\n # The utility functions below expect the document to be split by paragraphs.\n list_of_paragraphs = []\n paragraph = []\n for line in document:\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n for sent in sents:\n tokens = tokenizer.tokenize(sent)\n if tokens:\n paragraph.append(tokens)\n if len(paragraph) > 5:\n list_of_paragraphs.append(paragraph)\n paragraph = []\n\n # In case of any empty paragraphs, remove them.\n list_of_paragraphs = [x for x in list_of_paragraphs if x]\n\n # Convert the list of paragraphs into TrainingInstance object\n # See preprocessing_utils.py for definition\n if FLAGS.format == FORMAT_BINARY:\n instances = create_instances_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n elif FLAGS.format == FORMAT_PARAGRAPH:\n instances = create_paragraph_order_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n\n # Convert token lists into ids and add any needed tokens and padding for BERT\n tf_examples = [\n convert_instance_to_tf_example(tokenizer, instance,\n FLAGS.max_seq_length)[0]\n for instance in instances\n ]\n\n # Serialize TFExample for writing to file.\n tf_examples = [example.SerializeToString() for example in tf_examples]\n\n return tf_examples",
"def __init__(self):\n # Initialise class attributes (visibility ease)\n self.__corpus__ = None\n self.__pron_det_pos_words__ = None\n self.__triples_corpus__ = None\n self.__entities_in_doc__ = None\n self.__wvmodel__ = None\n \n # For purpose of parsing relation triplets later\n # Load pretrained embedding model\n #plog('Loading pretrained word embeddings. This will take some time to load...')\n #self.__wvmodel__ = api.load('fasttext-wiki-news-subwords-300')\n #plog('Pretrained word embeddings loaded!')",
"def fit_transform(self,corpus):\n\n\t\tassert self.fitted == False, \"CandidateBuilder has already been fit to corpus\"\n\t\tassert isinstance(corpus,kindred.Corpus)\n\t\tassert not self.entityCount in corpus.candidateRelationsEntityCounts, \"Candidates for relations with entityCount=%d already exist in corpus.\" % self.entityCount\n\n\t\tif not corpus.parsed:\n\t\t\tparser = kindred.Parser()\n\t\t\tparser.parse(corpus)\n\t\t\n\t\tself.relTypes = set()\n\t\n\t\tfor doc in corpus.documents:\n\t\t\tknownRelations = doc.getRelations()\n\t\t\tfor r in knownRelations:\n\t\t\t\tassert isinstance(r,kindred.Relation)\n\t\t\t\n\t\t\ttmpRelTypesAndArgCount = [ tuple([r.relationType] + r.argNames) for r in knownRelations ]\n\t\t\tself.relTypes.update(tmpRelTypesAndArgCount)\n\t\t\t\n\t\tself.relTypes = sorted(list(self.relTypes))\n\t\tself.relClasses = { relType:(i+1) for i,relType in enumerate(self.relTypes) }\n\t\t\t\n\t\tself.fitted = True\n\t\n\t\tself.transform(corpus)",
"def fit(self, pnos, texts = None, from_loaded = False):\n self.pnos = pnos\n assert((texts is not None) or from_loaded)\n if texts is not None:\n self._process_texts(texts)\n else:\n assert(self.has_vocab and self.has_corpus)\n self._lda_model = ldamodel.LdaModel(\n corpus=self.corpus, \n id2word=self.vocab,\n num_topics=self.K\n\n )\n self.is_trained = True\n _ = self.parse_topics()",
"def transform(self,corpus):\n\t\tassert self.fitted == True, \"CandidateBuilder must be fit to corpus first\"\n\t\tassert isinstance(corpus,kindred.Corpus)\n\t\tassert not self.entityCount in corpus.candidateRelationsEntityCounts, \"Candidates for relations with entityCount=%d already exist in corpus.\" % self.entityCount\n\n\t\tif not corpus.parsed:\n\t\t\tparser = kindred.Parser()\n\t\t\tparser.parse(corpus)\n\n\t\tfor doc in corpus.documents:\n\t\t\texistingRelations = defaultdict(list)\n\t\t\tfor r in doc.getRelations():\n\t\t\t\tassert isinstance(r,kindred.Relation)\n\t\t\t\t\n\t\t\t\tentityIDs = tuple(r.entityIDs)\n\t\t\t\t\n\t\t\t\trelKey = tuple([r.relationType] + r.argNames)\n\t\t\t\tif relKey in self.relClasses:\n\t\t\t\t\trelationClass = self.relClasses[relKey]\n\t\t\t\t\texistingRelations[entityIDs].append(relationClass)\n\n\t\t\tfor sentence in doc.sentences:\n\t\t\t\tentitiesInSentence = sentence.getEntityIDs()\n\t\t\t\t\t\t\t\n\t\t\t\tfor entitiesInRelation in itertools.permutations(entitiesInSentence, self.entityCount):\n\t\t\t\t\tcandidateRelation = kindred.Relation(entityIDs=list(entitiesInRelation))\n\t\t\t\t\tcandidateClass = [0]\n\t\t\t\t\trelKey = tuple(entitiesInRelation)\n\t\t\t\t\tif relKey in existingRelations:\n\t\t\t\t\t\tcandidateClass = existingRelations[relKey]\n\n\t\t\t\t\tincludeCandidate = True\n\t\t\t\t\tif not self.acceptedEntityTypes is None:\n\t\t\t\t\t\ttypesInRelation = tuple([ sentence.getEntityType(eID) for eID in entitiesInRelation ])\n\t\t\t\t\t\tincludeCandidate = (typesInRelation in self.acceptedEntityTypes)\n\n\t\t\t\t\tif includeCandidate:\n\t\t\t\t\t\tsentence.addCandidateRelation(candidateRelation,candidateClass)\n\n\t\t\t\tsentence.candidateRelationsEntityCounts.add(self.entityCount)\n\t\t\t\t\t\n\t\tcorpus.addRelationTypes(self.relTypes)\n\t\tcorpus.candidateRelationsEntityCounts.add(self.entityCount)",
"def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER",
"def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n 
y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats",
"def preprocess(args: argparse.Namespace) -> None:\n data_dir = os.path.join(args.data_dir, args.corpus)\n train_file = os.path.join(data_dir, 'train.jsonl')\n train_instances = load_jsonl(train_file, max_instances=args.max_instances)\n precompute_ngrams(train_instances)\n text1_gram1 = compute_most_freq_ngrams(train_instances, max_number=args.max_1gram,\n length=1, target=True)\n text1_gram2 = compute_most_freq_ngrams(train_instances, max_number=args.max_2gram,\n length=2, target=True)\n text1_gram3 = compute_most_freq_ngrams(train_instances, max_number=args.max_3gram,\n length=3, target=True)\n text2_gram1 = compute_most_freq_ngrams(train_instances, max_number=args.max_1gram,\n length=1, target=False)\n text2_gram2 = compute_most_freq_ngrams(train_instances, max_number=args.max_2gram,\n length=2, target=False)\n text2_gram3 = compute_most_freq_ngrams(train_instances, max_number=args.max_3gram,\n length=3, target=False)\n all_ngrams = list(set(text1_gram1 + text1_gram2 + text1_gram3 + text2_gram1 + text2_gram2 +\n text2_gram3))\n gram_to_dim_mapping = {ng: i for i, ng in enumerate(all_ngrams)}\n label_to_dim_mapping = map_labels_to_dims(train_instances)\n save_to_pickle(data=train_instances, fpath_out=os.path.join(\n args.serialization_dir, 'train_instances.pickle'))\n save_dict(data=gram_to_dim_mapping, fpath_out=os.path.join(args.serialization_dir,\n 'gram_mapping.json'))\n save_dict(data=label_to_dim_mapping, fpath_out=os.path.join(args.serialization_dir,\n 'label_mapping.json'))\n # save_dict(data=gram1, fpath_out=os.path.join(args.serialization_dir, '1grams.json'))\n # save_dict(data=gram2, fpath_out=os.path.join(args.serialization_dir, '2grams.json'))\n # save_dict(data=gram3, fpath_out=os.path.join(args.serialization_dir, '3grams.json'))",
"def load_all(self):\n if os.path.isfile(self.vocab_path):\n self.vocab_processor = self.load_vocab()\n else:\n self.vocab_processor = self.train_vocab()\n if self.data_path:\n self.x, self.y = self.load_data(self.need_shuffle)\n print(\"Max document length: {}\".format(self.max_doc))",
"def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass",
"def __init__(self, corpus):\n self.train(corpus)",
"def fit(self):\n if not self.paragraphs or len(self.paragraphs) == 0:\n self.paragraphs = self._get_all_paragraphs()\n if not self.paragraphs or len(self.paragraphs) == 0:\n logger.warning(\"Fit method called with empty document store\")\n return\n\n self.df = pd.DataFrame.from_dict(self.paragraphs)\n self.df[\"text\"] = self.df[\"text\"].apply(lambda x: \" \".join(x))\n self.tfidf_matrix = self.vectorizer.fit_transform(self.df[\"text\"])",
"def learn_models(self):\n\n influencers = self.influencers.infGroup\n\n self.complete_model = LanguageModel()\n self.influencer_models = { influencer: LanguageModel() for influencer in influencers }\n\n all_tweets = []\n # for influencer in tqdm(influencers, desc='Learning Models'):\n for influencer in influencers:\n tweets = [tweet for tweet in self.get_saved_tweets(influencer)]\n self.influencer_models[influencer].add_documents(tweets)\n all_tweets += tweets\n\n self.complete_model.add_documents(all_tweets)",
"def load_model(self):\n self._logger.debug(f\"Loading Spacy Data Model : {self._model}... Could take time.\")\n self._nlp = spacy.load(self._model)\n self._logger.debug(\"Successfully loaded Spacy Data !\")\n\n # === Load entities ===\n if PIPE_ENTITY not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_ENTITY, last=True)\n\n entity_pipe = self._nlp.get_pipe(PIPE_ENTITY)\n for entity in self._entities:\n entity_pipe.add_label(entity)\n\n # === Load categories ===\n if PIPE_INTENT not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_INTENT, last=True)\n\n intent_pipe = self._nlp.get_pipe(PIPE_INTENT)\n for intent in self._intents:\n intent_pipe.add_label(intent)",
"def _fit(self, clean, entity_map):\n raise NotImplementedError",
"def train_pipeline(nlp: spacy.language.Language) -> None:\n if TEXTCAT not in nlp.pipe_names:\n textcat = nlp.create_pipe(TEXTCAT, config={\"exclusive_classes\": False})\n nlp.add_pipe(textcat, last=True)\n else:\n textcat = nlp.get_pipe(TEXTCAT)\n\n for category in CATEGORIES:\n textcat.add_label(category.value)\n\n pipe_exceptions = {TEXTCAT, \"trf_wordpiecer\", \"trf_tok2vec\"}\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n with nlp.disable_pipes(*other_pipes): # only train textcat\n all_data = list(get_classification_training_data())\n random.shuffle(all_data)\n\n training_data = all_data[: len(all_data) - 2]\n validation_data = all_data[len(all_data) - 2 :]\n\n optimizer = nlp.begin_training()\n for itn in range(20):\n losses: Dict[str, Any] = {}\n random.shuffle(training_data)\n batches = minibatch(training_data, size=compounding(4.0, 32.0, 1.001))\n\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)",
"def fit(self, documents):\n n_words_trained = 0\n tokens, self.vocab, data, self._frequencies, self.diction, self.reverse_diction = self._build_dataset(\n documents)\n n_tokens = len(tokens)\n n_vocab = len(self.vocab)\n words_per_epoch = n_vocab / self.n_epochs\n self._cum_dist = self._build_cum_dist()",
"def preprocess_corpus(train_sents):\n #lexicon_dict['stop_words'] = set(open('stop_words').read().split())\n lexicon_dict['people_name']=set(open('data\\\\lexicon\\\\firstname.5k').read().title().split())\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\lastname.5000').read().title().split()))\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\people.family_name').read().title().split()))\n lexicon_dict['people_person']=set(open('data\\\\lexicon\\\\people.person').read().title().split())\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\people.person.lastnames').read().title().split()))\n \n lexicon_dict['product']=set(open('data\\\\lexicon\\\\product').read().title().split())\n lexicon_dict['business_products']=set(open('data\\\\lexicon\\\\business.consumer_product').read().title().split())\n\n lexicon_dict['sports_team']=set(open('data\\\\lexicon\\\\sports.sports_team').read().title().split())\n\n lexicon_dict['tvprog']=set(open('data\\\\lexicon\\\\tv.tv_program').read().title().split())\n \n lexicon_dict['museum'] = set(open('data\\\\lexicon\\\\architecture.museum').read().title().split())\n lexicon_dict['auto_make']=set(open('data\\\\lexicon\\\\automotive.make').read().title().split())\n lexicon_dict['auto_model']=set(open('data\\\\lexicon\\\\automotive.model').read().title().split())\n lexicon_dict['award']=set(open('data\\\\lexicon\\\\award.award').read().title().split())\n lexicon_dict['fest_ser']=set(open('data\\\\lexicon\\\\base.events.festival_series').read().title().split())\n lexicon_dict['reg_name']=set(open('data\\\\lexicon\\\\bigdict').read().title().split())\n lexicon_dict['newspaper']=set(open('data\\\\lexicon\\\\book.newspaper').read().title().split())\n lexicon_dict['tv_channels']=set(open('data\\\\lexicon\\\\broadcast.tv_channel').read().title().split())\n lexicon_dict['business_brand']=set(open('data\\\\lexicon\\\\business.brand').read().title().split())\n lexicon_dict['business_company']=set(open('data\\\\lexicon\\\\business.brand').read().title().split())\n lexicon_dict['business_brand']=set(open('data\\\\lexicon\\\\business.consumer_company').read().title().split())\n\n lexicon_dict['business_sponsor']=set(open('data\\\\lexicon\\\\business.sponsor').read().title().split())\n lexicon_dict['top10']=set(open('data\\\\lexicon\\\\cap.10').read().title().split())\n lexicon_dict['top100']=set(open('data\\\\lexicon\\\\cap.100').read().title().split())\n lexicon_dict['cap500']=set(open('data\\\\lexicon\\\\cap.500').read().title().split())\n lexicon_dict['cap1000']=set(open('data\\\\lexicon\\\\cap.1000').read().title().split())\n lexicon_dict['video_game']=set(open('data\\\\lexicon\\\\cvg.computer_videogame').read().title().split())\n lexicon_dict['cvg_developer']=set(open('data\\\\lexicon\\\\cvg.cvg_developer').read().title().split())\n lexicon_dict['cvg_platform']=set(open('data\\\\lexicon\\\\cvg.cvg_platform').read().title().split())\n #leaving out dictionaries.conf,english.stop,lower.100,lower.500,lower.1000,lower.5000,lower.10000\n lexicon_dict['dictionaries_conf']=set(open('data\\\\lexicon\\\\dictionaries.conf').read().title().split())\n lexicon_dict['english_stop']=set(open('data\\\\lexicon\\\\english.stop').read().title().split())\n lexicon_dict['lower_10000']=set(open('data\\\\lexicon\\\\lower.10000').read().title().split())\n #lexicon_dict['cvg_platform']=set(open('data\\\\lexicon\\\\cvg.cvg_platform').read().title().split())\n \n 
lexicon_dict['university']=set(open('data\\\\lexicon\\\\education.university').read().title().split())\n lexicon_dict['gov_agency']=set(open('data\\\\lexicon\\\\government.government_agency').read().title().split())\n\n\n lexicon_dict['location']=set(open('data\\\\lexicon\\\\location').read().title().split())\n lexicon_dict['location'].update(set(open('data\\\\lexicon\\\\location.country').read().title().split()))\n lexicon_dict['sports_league']=set(open('data\\\\lexicon\\\\sports.sports_league').read().title().split())\n\n\n lexicon_dict['time_holiday']=set(open('data\\\\lexicon\\\\time.holiday').read().title().split())\n lexicon_dict['time_rec_event']=set(open('data\\\\lexicon\\\\time.recurring_event').read().title().split())\n lexicon_dict['roads']=set(open('data\\\\lexicon\\\\transportation.road').read().title().split())\n lexicon_dict['tvnet']=set(open('data\\\\lexicon\\\\tv.tv_network').read().title().split())\n\n lexicon_dict['ven_company']=set(open('data\\\\lexicon\\\\venture_capital.venture_funded_company').read().title().split())\n lexicon_dict['venues']=set(open('data\\\\lexicon\\\\venues').read().title().split())",
"def main():\n # Read data for train set\n print('loading training data')\n train = read_datafile('../data/tsd_train.csv')\n\n # Read trial data for validation set\n validation = read_datafile('../data/tsd_trial.csv')\n\n # Read data for test set\n print('loading test data')\n test = read_datafile('../data/tsd_test.csv')\n\n # Convert training data to Spacy Entities\n nlp = spacy.load(\"en_core_web_sm\")\n print('preparing training data')\n training_data = []\n for n, (spans, text) in enumerate(train):\n doc = nlp(text)\n ents = spans_to_ents(doc, set(spans), 'TOXIC')\n training_data.append((doc.text, {'entities': ents}))\n\n toxic_tagging = spacy.blank('en')\n toxic_tagging.vocab.strings.add('TOXIC')\n ner = nlp.create_pipe(\"ner\")\n toxic_tagging.add_pipe(ner, last=True)\n ner.add_label('TOXIC')\n\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n unaffected_pipes = [\n pipe for pipe in toxic_tagging.pipe_names\n if pipe not in pipe_exceptions]\n\n\n print('Training!')\n with toxic_tagging.disable_pipes(*unaffected_pipes):\n \n toxic_tagging.begin_training()\n for iteration in range(30):\n random.shuffle(training_data)\n losses = {}\n batches = spacy.util.minibatch(\n training_data, size=spacy.util.compounding(\n 4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n toxic_tagging.update(texts, annotations, drop=0.5, losses=losses)\n print(\"Losses\", losses)\n\n\n # Define helper function for evaluating datasets\n def evaluate(dateset):\n precision_recall_f1_scores = []\n for spans, text in dateset:\n pred_spans = []\n doc = toxic_tagging(text)\n for ent in doc.ents:\n pred_spans.extend(range(ent.start_char, ent.start_char + len(ent.text)))\n \n # score = semeval2021.f1(pred_spans, spans)\n precision_recall_f1_scores.append(per_post_precision_recall_f1(pred_spans, spans))\n\n # compute average precision, recall and f1 score of all posts\n return np.array(precision_recall_f1_scores).mean(axis=0)\n\n # Evaluate on dev and test sets\n print('Evaluation:')\n eval_precision, eval_recall, eval_f1 = evaluate(validation)\n test_precision, test_recall, test_f1 = evaluate(test)\n \n print(f'Dev set: Precision = {eval_precision}, Recall = {eval_recall}, F1 = {eval_f1}')\n print(f'Test set: Precision = {test_precision}, Recall = {test_recall}, F1 = {test_f1}')",
"def preprocess(self):\n self.word_to_id, self.unk_word_list = self.build_vocab(mode=\"word\")\n self.word_vocab_size = len(self.word_to_id)\n self.max_word_len = self.get_max_word_length(self.word_to_id)\n # Do not write the same file again\n if not os.path.exists(self.words_vocab_file):\n with open(self.words_vocab_file, 'wb') as f:\n pickle.dump((self.word_to_id, self.unk_word_list), f)\n if self.unit != \"word\":\n self.preprocess_sub_units()",
"def Train(self):\n\n lem = lemmatization()\n # Get Mongo client\n client = MongoClient()\n db = client['IR']\n collection = db['Movies']\n print(\"collection: \", collection)\n host = '127.0.0.1' # or localhost\n port = 27017\n client = MongoClient(host, port)\n # # 创建数据库dialog\n db = client['allMovies']\n # # 创建集合scene\n collection = db[\"Movie\"]\n print(collection.__sizeof__())\n print(collection.find_one({\"content.genres.name\": \"Drama\"}))\n\n # Path to folder to store trained data set\n path = self.path\n\n query_results = []\n for i in (collection.find({\"name\": \"183.txt\"})):\n query_results.append(i)\n print(\"queryyy\", query_results)\n\n # Dictionary to store the terms appearing in the genres\n dictionary = []\n\n # List to store category of each record\n categories = []\n\n training_data = []\n # Document ids of records to be trained\n doc_ids = []\n a = 0\n i=0\n movie=query_results[0]\n tsv_file = open(\n \"/home/do/PycharmProjects/pythonProject/information-retrival-search-engine/informationRetrival/classification/test_data.tsv\")\n read_tsv = csv.reader(tsv_file, delimiter=\"\\t\")\n for row in read_tsv:\n training_data.append(row[1])\n categories.append(row[2])\n dict_rec = row[1].lower()\n # table = maketrans(string.punctuation, \" \")\n for s in string.punctuation:\n dict_rec = dict_rec.replace(s, \"\")\n # dict_rec = str(dict_rec).translate(string.punctuation)\n dict_rec = lem.removeStopWords(dict_rec.split(\" \"))\n\n # Add to dictionary\n if dict_rec not in dictionary:\n dictionary.extend(dict_rec)\n\n # print(row[2])\n # while i<=99:\n #\n # training_data.append(movie['content'][i]['overview'])\n # doc_ids.append(movie['_id'])\n # # for genre in movie['content'][i]['genres']:\n # # print(\"genre \", genre['name'])\n # # a = a + 1\n # #\n # # if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or genre[\n # # 'name'] == 'Comedy') and a <= 160:\n # # categories.append(genre['name'])\n #\n # # Convert to lower case and remove stop words from overview\n # dict_rec = movie['content'][i]['overview'].lower()\n # # table = maketrans(string.punctuation, \" \")\n # for s in string.punctuation:\n # dict_rec = dict_rec.replace(s, \"\")\n # # dict_rec = str(dict_rec).translate(string.punctuation)\n # dict_rec = lem.removeStopWords(dict_rec.split(\" \"))\n #\n # # Add to dictionary\n # if dict_rec not in dictionary:\n # dictionary.extend(dict_rec)\n # i=i+1\n print(\"Dictionary\", dictionary)\n print(\"shape\", len(dictionary))\n dictionary = filter(None, list(set(dictionary)))\n\n # Store dictionary in a file\n joblib.dump(dictionary, path + \"_Genre_Dictionary\")\n\n # Store doc ids of trained data in a file\n myfile = open(r'doc_ids.pkl', 'wb')\n #pickle.dump(doc_ids, myfile)\n #myfile.close()\n\n # Initialize training models\n mod_1 = SVC(kernel='linear', C=1, gamma=1)\n mod_2 = LogisticRegression()\n mod_3 = GaussianNB()\n mod_4 = MultinomialNB()\n mod_5 = BernoulliNB()\n\n # Ensemble classifiers\n mod_6 = RandomForestClassifier(n_estimators=50)\n mod_7 = BaggingClassifier(mod_2, n_estimators=50)\n mod_8 = GradientBoostingClassifier(loss='deviance', n_estimators=100)\n\n mod_9 = VotingClassifier(\n estimators=[(\"SVM\", mod_1), (\"LR\", mod_2), (\"Gauss\", mod_3), (\"Multinom\", mod_4), (\"Bernoulli\", mod_5),\n (\"RandomForest\", mod_6), (\"Bagging\", mod_7), (\"GB\", mod_8)], voting='hard')\n mod_10 = VotingClassifier(\n estimators=[(\"SVM\", mod_1), (\"LR\", mod_2), (\"Multinom\", mod_4), (\"Bernoulli\", mod_5), (\"Bagging\", 
mod_7)],\n voting='hard', weights=[1, 2, 3, 2, 1])\n\n # Vectorizers for feature extraction\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n\n vec_list = [vec_1, vec_2]\n vec_list = [vec_1]\n # List of training models\n model_list = [mod_1, mod_2, mod_3, mod_4, mod_5, mod_6, mod_7, mod_8, mod_9, mod_10]\n\n models_used = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vec_used = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n print(\"Starting training. This might take a while...\")\n b = 1\n # Start training\n for model in range(0, len(model_list)):\n a = 1\n for vec in range(0, len(vec_list)):\n mod = model_list[model]\n vector = vec_list[vec]\n print(\"tour\", a, b)\n print(\"taille training : \", (np.shape(training_data)))\n print(training_data)\n print(vector)\n # print(\"fit_tarnsform\", vector.fit_transform(training_data))\n X = vector.fit_transform(training_data).toarray()\n print(\"la matrice x\",1 in X)\n print(\"shape X\", np.shape(X))\n print(np.shape(categories))\n # categories.reshape((80, 2))\n # l=[]\n # l.append([categories[0:79],categories[79:,159]])\n # print(l)\n print(\"categories\", categories)\n\n print(np.unique(categories))\n print(np.unique(X))\n mod.fit(X, categories)\n print(\"fiit\", mod.fit(X, categories))\n\n # Store in a file\n joblib.dump(mod, path + models_used[model] + \"_\" + vec_used[vec] + \".pkl\")\n\n print(models_used[model] + \" \" + vec_used[vec] + \" finished!\")\n a = a + 1\n b = b + 1\n break\n print(\"All Done!!\")",
"def process_spacy(self):\n\n def prevent_sentence_boundary_detection(doc):\n for token in doc:\n # This will entirely disable spaCy's sentence detection\n token.is_sent_start = False\n return doc\n\n def process_sentence(sen_tokens):\n doc = spacy.tokens.Doc(nlp.vocab, words=sen_tokens)\n tagger(doc)\n prevent_sbd(doc)\n ner(doc)\n parser(doc)\n return doc\n\n # setup spacy nlp pipeline\n nlp = spacy.load(\"en_core_web_lg\")\n parser = nlp.get_pipe(\"parser\")\n nlp.add_pipe(\n prevent_sentence_boundary_detection, name=\"prevent-sbd\", before=\"parser\"\n )\n\n tagger = nlp.get_pipe(\"tagger\")\n prevent_sbd = nlp.get_pipe(\"prevent-sbd\")\n parser = nlp.get_pipe(\"parser\")\n ner = nlp.get_pipe(\"ner\")\n\n for doc in self.annotation_documents:\n doc.sentences_processed = []\n for sen in doc.sentences:\n sen_tokens = [t.text for t in sen.tokens]\n sen_proc = process_sentence(sen_tokens)\n # add processed sentence to doc\n doc.sentences_processed.append(sen_proc)\n\n print(f\"Processed with Spacy: {doc.document_id}\")",
"def load_preprocessed(self):\n with open(self.words_vocab_file, 'rb') as f:\n self.word_to_id, self.unk_word_list = pickle.load(f)\n self.word_vocab_size = len(self.word_to_id)\n\n if self.unit != \"word\":\n with open(self.sub_vocab_file, 'rb') as f:\n if self.unit == \"char\":\n self.max_word_len = self.get_max_word_length(self.word_to_id) + 2\n self.char_to_id, self.unk_char_list, self.max_word_len = pickle.load(f)\n self.subword_vocab_size = len(self.char_to_id)\n elif self.unit == \"char-ngram\":\n self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, \\\n self.max_ngram_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.ngram_to_id)\n elif self.unit == \"morpheme\":\n self.morpheme_to_id, self.unk_char_list, self.unk_morph_list, \\\n self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n elif self.unit == \"oracle\":\n self.morpheme_to_id, self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n else:\n sys.exit(\"Unknown unit\")",
"def test_pyt_preprocess_train(self):\n # Second, check that the model will train\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train'\n defaults['pytorch_preprocess'] = True\n str_output, _, _ = testing_utils.train_model(defaults)\n self.assertTrue(\n solved_task(str_output),\n 'Teacher could not teach seq2seq with preprocessed obs, output: {}'\n .format(str_output)\n )",
"def predict(self, documents):\n raise NotImplementedError()",
"def train_model_cross_validation(model, train_docs, test_docs, nb_iter, output_dir, spacy_type = True, nb_folds = 5):\n\n print(output_dir)\n os.mkdir(output_dir) # creating the output directory\n print(\" ============= TRAINING MODEL ===========================\")\n\n\n # tuple conversion (the tuple type is lost when dataframe -> excel -> dataframe)\n\n #docs['annotations'] = [[tuple(ann) for ann in annotations] for annotations in docs['annotations'].to_numpy()]\n\n\n # cross validation :\n\n models = []\n all_scores = []\n\n kf = KFold(n_splits=nb_folds)\n c = 0\n for train_index, val_index in kf.split(train_docs):\n\n train_data = train_docs.iloc[train_index, :]\n val_data = train_docs.iloc[val_index, :]\n\n # spacy_format\n TRAIN_DATA = [(text, {'entities': entities}) for [text, entities] in train_data[['text', 'annotations']].to_numpy()]\n\n # trim entities : leading whitespace make the model bug\n TRAIN_DATA = trim_entity_spans(TRAIN_DATA)\n\n # loading of the model\n nlp = model\n\n optimizer = nlp.begin_training()\n\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\" ] #\"trf_wordpiecer\", \"trf_tok2vec\"\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\n scores = []\n\n # training\n with nlp.disable_pipes(*other_pipes): # only train NER\n\n if not spacy_type : # add the other labels\n ner = nlp.get_pipe(\"ner\")\n ner.add_label('AGE_RELATED')\n ner.add_label('DURATION')\n ner.add_label('FREQUENCY')\n ner.add_label('OTHER')\n\n for i in range(nb_iter):\n\n print('Iteration ', i)\n print()\n losses = {}\n random.shuffle(TRAIN_DATA) # ??\n\n path = ''\n if spacy_type:\n path = 'spacy_model_' + str(c) + '_fold'\n else:\n path = 'all_types_model_' + str(c) + '_fold'\n\n batches = minibatch(TRAIN_DATA, size=1) #compounding(4.0, 20.0, 1.001)\n\n for batch in batches:\n texts, annotations = zip(*batch)\n try:\n nlp.update(texts, annotations, sgd = optimizer, drop=0.5, losses = losses)\n print(\"Losses\", losses)\n except Exception as e:\n print(e)\n #print(text)\n\n tp_g, fp_g, fn_g, p, r, f, pt, rt, ft, type_dict = test_model(test_docs, nlp)\n scores += [(p, r, r, pt, rt, ft)]\n print()\n print()\n\n # test the trained model\n test_model(val_data, nlp)\n\n df_scores = pd.DataFrame(scores, columns = ['span_precision', 'span_recall', 'span_f1', 'type_precision', 'type_recall', 'type_f1'])\n df_scores.to_excel(output_dir + '/' + path + '.xlsx')\n\n\n models += [nlp]\n all_scores += [scores]\n # save model to output directory\n if output_dir is not None:\n nlp.to_disk(output_dir + '/' + path)\n print(\"Saved model to\", output_dir + '/' + path)\n\n c += 1\n\n return models, all_scores",
"def train(self, corpus): \n # TODO your code here\n \n for sentence in corpus.corpus:\n for i,dotum in enumerate(sentence.data[1:]):\n self.vocab[dotum.word][sentence.data[i].word] +=1\n self.word_counts[sentence.data[i].word] +=1\n self.total +=1\n self.v = len(self.vocab.keys())",
"def fit(self, X, y=None):\n for input_data in X:\n self._node_vocab.add_node(input_data[0])\n self._word_vocab.add_document(input_data[1])\n if self._use_char:\n self._char_vocab.add_documents(input_data[1])\n for data in input_data[2]:\n self._word_vocab.add_document(data)\n if self._use_char:\n self._char_vocab.add_documents(data)\n # self._label_vocab.add_node(' '.join(data)) # this results in a very big lable space (90K) \n self._label_vocab.add_document(data) # Use word indexing instead, drawbacks: BOW\n\n self._node_vocab.build()\n self._word_vocab.build()\n self._char_vocab.build()\n self._label_vocab.build()\n\n return self",
"def preprocess_corpus(train_sents):\n global lookupLexiconDict\n lookupLexiconDict = {}\n \n lexiconDir = getcwd()+'\\\\data\\\\lexicon'\n filesList = [hfile for hfile in listdir(lexiconDir) if path.isfile(lexiconDir+'\\\\'+hfile) ]\n \n decision_tags = ['facility','product','musicartist']\n fileMappingDict = \\\n {\n 'architecture.museum':'facility',\n 'automotive.make':'product',\n 'automotive.model':'product',\n 'award.award':'musicartist',\n 'base.events.festival_series':'geo-loc',\n #'bigdict':'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',\n 'book.newspaper':'company',\n 'broadcast.tv_channel':'tvshow',\n 'business.brand':'company',\n 'business.consumer_company':'company',\n 'business.consumer_product':'product',\n 'business.sponsor':'company',\n 'cap.1000':'geo-loc',\n 'cvg.computer_videogame':'product',\n 'cvg.cvg_developer':'company',\n 'cvg.cvg_platform':'product',\n 'education.university':'facility',\n 'english.stop':'O',\n 'firstname.5k':'person',\n 'government.government_agency':'company',\n 'internet.website':'company',\n 'lastname.5000':'person',\n 'location':'geo-loc',\n 'location.country':'geo-loc',\n 'lower.5000':'O',\n 'people.family_name':'person',\n 'people.person':'person',\n 'people.person.lastnames':'person', # <-----------------------------\n 'product':'product',\n 'sports.sports_league':'sportsteam',\n 'sports.sports_team':'sportsteam',\n 'time.holiday':'O',\n 'time.recurring_event':'O',\n 'transportation.road':'geo-loc',\n 'tv.tv_network':'tvshow',\n 'tv.tv_program':'tvshow',\n 'venture_capital.venture_funded_company':'company',\n 'venues':'geo-loc'\n }\n\n for lexFile in filesList:\n if lexFile not in fileMappingDict: continue\n print 'Processing ', lexFile\n \n with open(lexiconDir+'\\\\'+lexFile) as f:\n for line in f:\n line = line.lower().split()\n if len(line) == 1: low=0\n else:low=1\n for i in range(low,len(line)):\n key = tuple(line[:i+1])\n if key not in lookupLexiconDict:\n lookupLexiconDict[key] = [fileMappingDict[lexFile]]\n else:\n lookupLexiconDict[key].append(fileMappingDict[lexFile]) \n\n \n #pass "
] | [
"0.6566442",
"0.64008427",
"0.6272306",
"0.6270147",
"0.6244757",
"0.62283623",
"0.61422414",
"0.6133686",
"0.6117981",
"0.6093613",
"0.60885054",
"0.60684276",
"0.6040109",
"0.60277957",
"0.6005362",
"0.59342384",
"0.59225696",
"0.59128237",
"0.5904542",
"0.58896387",
"0.588681",
"0.5847864",
"0.58343595",
"0.58297306",
"0.58150595",
"0.5796023",
"0.5779246",
"0.57647604",
"0.5763493",
"0.5761737"
] | 0.64138544 | 1 |
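The `.fit()` row above gathers pronoun/determiner tokens and OntoNotes-style entities with spaCy before harmonising triplets. Below is a standalone sketch of just that collection step, assuming the `en_core_web_sm` model is installed; the corpus sentences are invented for illustration.

```python
# Sketch of the entity / PoS collection performed inside .fit(); assumes the
# en_core_web_sm model is installed. The two sentences below are invented examples.
import spacy

corpus = [
    "Acme Corp acquired Widget Ltd in 2019.",
    "She said the deal closed in London.",
]

nlp = spacy.load("en_core_web_sm")

# Pronouns and determiners, later used to drop weak subjects/objects from triplets.
pron_det_words = {
    tok.text
    for doc in nlp.pipe(corpus, disable=["parser", "ner"])
    for tok in doc
    if tok.pos_ in ("PRON", "DET")
}

# Recognised entities under spaCy's default (OntoNotes 5) NER scheme.
entities = {
    (ent.text, ent.label_)
    for doc in nlp.pipe(corpus, disable=["parser"])
    for ent in doc.ents
}

print(pron_det_words)  # e.g. {'She', 'the'}
print(entities)        # e.g. {('Acme Corp', 'ORG'), ('2019', 'DATE'), ('London', 'GPE')}
```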
Parse relation triplets under the following conditions: 1. Remove triplets with pronouns or determiners in the subject/object, e.g. "we", "she", "I", "their", etc. 2. Harmonise duplicated triplets and return only the superset triplet; semantic comparison uses Word Mover's Distance and agglomerative clustering over FastText embeddings loaded via Gensim's W2V KeyedVectors format, which partially overcomes OOV issues. 3. Remove triplets with no entities in either the subject or object // [N.B.! KIV; function temporarily removed] | def parse_triplets(self,
levenshtein_thold: float=20.,
coph_scr: float=2.) -> Iterable[dict]:
        # Remove pronouns and determiners
parse_triples = [triple for triple in self.__triples_corpus__
if (triple['subject'] not in self.__pron_det_pos_words__ and
triple['object'] not in self.__pron_det_pos_words__ ) ]
# Harmonise potentially duplicative triplets by constructing matrix of Word Mover Distances
stg_triples_idx, wmd_array = wmd_matrix(parse_triples, self.__wvmodel__)
stg_triples_idx_grp = get_similarity_repr(wmd_array, cophenetic_dist=coph_scr, grouped_idx=True)
# list of triplets' indices-lists
# i.e. [ [1,5,7], [2], [9,4,3] ]
## Retrieve longest relation-triplet strings in each group
stg_triples_idx_len = [
[len(re.sub('\s+', ' ', ' '.join([stg_triples_idx[trip]['subject'],
stg_triples_idx[trip]['relation'],
stg_triples_idx[trip]['object']])
).strip()
)
for trip in trip_grp]
for trip_grp in stg_triples_idx_grp]
stg_triples_selected = []
for trip_idx, trip_len in zip(stg_triples_idx_grp, stg_triples_idx_len):
group_max_len = max(trip_len)
idx_max_len = [trip_idx[pxtn_idx] for pxtn_idx, str_len in enumerate(trip_len)
                          if str_len==group_max_len][0] # first position is retrieved if tied
stg_triples_selected.append(idx_max_len)
parse_triples = [triple for idx, triple in stg_triples_idx.items() if idx in stg_triples_selected]
# # Find triples of subject/object near matching identified collection of NER
# stg_triples = [(triple['subject'], triple['relation'], triple['object']) for triple in parse_triples]
# stg_triples = pd.DataFrame(stg_triples, columns=['subject', 'relation', 'object'])
# stg_triples['xjoin'] = 1
# stg_triples = stg_triples.merge(self.__entities_in_doc__, on='xjoin').drop(columns='xjoin')
# stg_triples['subj_ent_leven'] = stg_triples[['subject', 'entities']]\
# .apply(lambda row: levenshtein(row['subject'], row['entities']), axis=1)
# stg_triples['obj_ent_leven'] = stg_triples[['object', 'entities']]\
# .apply(lambda row: levenshtein(row['object'], row['entities']), axis=1) # consider subj/obj in ent over leven.
# subj_ent_leven_thold_ptile = np.percentile(stg_triples['subj_ent_leven'].values, levenshtein_thold)
# obj_ent_leven_thold_ptile = np.percentile(stg_triples['obj_ent_leven'].values, levenshtein_thold)
# subj_obj_similar_ent_mask = (stg_triples['subj_ent_leven'] <= subj_ent_leven_thold_ptile) & \
# (stg_triples['obj_ent_leven'] <= obj_ent_leven_thold_ptile)
# stg_triples = stg_triples.loc[subj_obj_similar_ent_mask, ['subject', 'relation', 'object']].values.T
# parse_triples = [{'subject':subj, 'relation':rel, 'object':obj}
# for subj, rel, obj in zip(stg_triples[0], stg_triples[1], stg_triples[2])] # revert to original list[dict]
return parse_triples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_data_from_input_file(triplet):\n\n sentence = triplet.subject + ' ' + triplet.predicate + ' ' + triplet.object\n doc = nlp(unicode(sentence))\n root = doc[0]\n for t in doc:\n if t.pos_ == 'VERB' and t.head == t:\n root = t\n # elif t.pos_ == 'NOUN'\n\n # also, if only one sentence\n # root = doc[:].root\n\n\n \"\"\"\n CURRENT ASSUMPTIONS:\n - People's names are unique (i.e. there only exists one person with a certain name).\n - Pet's names are unique\n - The only pets are dogs and cats\n - Only one person can own a specific pet\n - A person can own only one pet\n \"\"\"\n\n\n # Process (PERSON, likes, PERSON) relations\n if root.lemma_ == 'like':\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and triplet.object in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and \"n't\" not in triplet.predicate:\n s = add_person(triplet.subject)\n o = add_person(triplet.object)\n s.likes.append(o)\n\n if root.lemma_ == 'be' and triplet.object.startswith('friends with'):\n fw_doc = nlp(unicode(triplet.object))\n with_token = [t for t in fw_doc if t.text == 'with'][0]\n # get text after with\n after_with = fw_doc.text.split(with_token.text+ ' ')[1]\n people = []\n for p in after_with.split(' '):\n if nlp(p)[0].tag_ == 'NNP':\n people.append(nlp(p)[0].text)\n # fw_who = [t for t in with_token.children if t.dep_ == 'pobj'][0].text\n # fw_who = [e for e in fw_doc.ents if e.label_ == 'PERSON'][0].text\n for p in people:\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(triplet.subject)\n o = add_person(p)\n s.likes.append(o)\n o.likes.append(s)\n if root.lemma_ == 'be' and triplet.object == 'friends':\n fw_doc = nlp(unicode(triplet.subject))\n and_token = [t for t in fw_doc if t.text == 'and']\n if and_token:\n and_token = and_token[0].text\n if and_token == 'and' and fw_doc[0].text in [e.text for e in doc.ents if e.label_ == 'PERSON'] and fw_doc[2].text in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(fw_doc[0].text)\n o = add_person(fw_doc[2].text)\n s.likes.append(o)\n o.likes.append(s)\n\n # Process (PET, has, NAME) Mary's dog's name is Rover\n if triplet.subject.endswith('name') and ('dog' in triplet.subject or 'cat' in triplet.subject):\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n\n # handle single names, but what about compound names? 
Noun chunks might help.\n if (len(obj_span) == 1 or len(obj_span) == 2) and obj_span[-1].pos_ == 'PROPN':\n name = triplet.object\n subj_start = sentence.find(triplet.subject)\n subj_doc = doc.char_span(subj_start, subj_start + len(triplet.subject))\n\n s_people = [token.text for token in subj_doc if token.ent_type_ == 'PERSON']\n assert len(s_people) == 1\n s_person = select_person(s_people[0])\n\n pet = get_persons_pet(s_person.name)\n\n pet.name = name\n s_person.has.append(pet)\n\n # Process (Who has dog)\n if root.lemma_ == 'have'and ('dog' in triplet.object or 'cat' in triplet.object):\n # find pets name and instantiate name empty str\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n name = ''\n\n if obj_span[-1].pos_ == 'PROPN':\n name = obj_span[-1].text\n s = add_person(triplet.subject)\n s_pet_type = 'dog' if 'dog' in triplet.object else 'cat'\n pet = add_pet(s_pet_type, name)\n s.has.append(pet)\n\n date = [e.text for e in doc.ents if e.label_ == 'DATE']\n gpe = [e.text for e in doc.ents if e.label_ == 'GPE']\n person = [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG']\n # if person and GPE exists, we add it into trip(departs_on, departs_to)\n if person and (gpe or date):\n s = add_person(triplet.subject)\n o = add_trip(date, gpe)\n s.travels.append(o)",
"def pp_pipeline(output_dir, model_name):\n # load triples file\n df_triples = pd.read_csv(\n Path(output_dir, \"{:s}.csv\".format(model_name))\n )\n\n df_triples['e1'] = df_triples['triple'].apply(lambda x: ast.literal_eval(x)[0])\n df_triples['r'] = df_triples['triple'].apply(lambda x: ast.literal_eval(x)[1])\n df_triples['e2'] = df_triples['triple'].apply(lambda x: ast.literal_eval(x)[2])\n\n lemmatizer = nltk.WordNetLemmatizer()\n\n def process_entity(value):\n return re.sub(\"[^\\s'_A-Za-z]\", \"\",\n remove_stops(fix_entities(extract_entities_spacy(value).lower().strip()))).lstrip().rstrip()\n def process_relation(value):\n return re.sub(\"[^\\s'_A-Za-z]\", \"\", lemmatizer.lemmatize(value.lower().strip(), pos='v')).lstrip().rstrip()\n\n # Clean entities and relations\n\n df_triples[\"l1\"] = df_triples[\"e1\"].progress_apply(process_entity)\n df_triples[\"l2\"] = df_triples[\"e2\"].progress_apply(process_entity)\n df_triples[\"rel\"] = df_triples[\"r\"].progress_apply(process_relation)\n\n total_entities = pd.concat([df_triples[\"l1\"], df_triples[\"l2\"]])\n c = Counter(total_entities)\n unique_entities = pd.Series(list(c.keys()))\n\n total_relations = pd.Series(df_triples[\"rel\"])\n rc = Counter(total_relations)\n unique_relations = pd.Series(list(rc.keys()))\n\n # Create dicts with unique relations and entities\n\n d = {\n k: v for k, v in zip(unique_entities, [i for i in range(len(unique_entities))])\n }\n d_sorted = sorted(d.items(), key=lambda kv: kv[1])\n\n r = {\n k: v\n for k, v in zip(unique_relations, [i for i in range(len(unique_relations))])\n }\n r_sorted = sorted(r.items(), key=lambda kv: kv[1])\n\n # Write dicts to files\n\n with open(Path(output_dir, \"relation2id.txt\"), \"w\") as f:\n f.write(\"{:d}\\n\".format(len(r.items())))\n for i in range(len(r_sorted)):\n f.write(\"{:s}\\t{:d}\\n\".format(r_sorted[i][0], i))\n\n with open(Path(output_dir, \"entity2id.txt\"), \"w\") as f:\n f.write(\"{:d}\\n\".format(len(d.items())))\n for i in range(len(d_sorted)):\n f.write(\"/m/{:s}\\t{:d}\\n\".format(d_sorted[i][0], i))\n\n # Splitting up dataset for training, validation and testing (note that some parameters are hard-coded)\n\n df_triples = df_triples.sort_values(by=\"fake\").replace(\"\", np.nan).dropna()\n\n df_fake = df_triples[df_triples[\"fake\"] == 'fake'].reset_index()\n df_true = df_triples[df_triples[\"fake\"] == 'true'].reset_index()\n\n fake_ids = sorted([int(i) for i in list(set(df_fake.index))])\n true_ids = sorted([int(i) for i in list(set(df_true.index))])\n\n np.random.seed(42)\n np.random.shuffle(fake_ids)\n np.random.shuffle(true_ids)\n\n train = df_fake[df_fake.index.astype(int).isin(fake_ids[:500])]\n\n validation = train\n\n test = df_true[(df_true.index.astype(int).isin(true_ids[500:]))].append(\n df_fake[(df_fake.index.astype(int).isin(fake_ids[500:]))]\n )\n\n with open(Path(output_dir, \"entities.txt\"), \"w\") as file:\n file.write(json.dumps(d_sorted))\n\n with open(Path(output_dir, \"relations.txt\"), \"w\") as file:\n file.write(json.dumps(r_sorted))\n\n train.to_csv(Path(output_path, \"train_set.csv\"))\n validation.to_csv(Path(output_path, \"validation_set.csv\"))\n test.to_csv(Path(output_path, \"test_set.csv\"))\n\n t = test\n v = validation\n\n with open(Path(output_dir, \"train2id.txt\"), \"w\") as f:\n f.write(\"{:d}\\n\".format(len(train)))\n for a, b, c in zip(train[\"l1\"], train[\"l2\"], train[\"rel\"]):\n try:\n f.write(\"{:d} {:d} {:d}\\n\".format(d[a], d[b], r[c]))\n except KeyError:\n pass\n\n rem_ind_v = []\n\n with 
open(Path(output_dir, \"valid2id.txt\"), \"w\") as f:\n f.write(\"{:d}\\n\".format(len(validation)))\n for i, a, b, c in zip(\n validation.index, validation[\"l1\"], validation[\"l2\"], validation[\"rel\"]\n ):\n try:\n f.write(\"{:d} {:d} {:d}\\n\".format(d[a], d[b], r[c]))\n except KeyError:\n rem_ind_v.append(i)\n\n v.drop(v.index[list(set(rem_ind_v))], inplace=True)\n\n rem_ind = []\n\n with open(Path(output_dir, \"test2id.txt\"), \"w\") as f:\n f.write(\"{:d}\\n\".format(len(test)))\n for i, a, b, c in zip(test.index, test[\"l1\"], test[\"l2\"], test[\"rel\"]):\n try:\n f.write(\"{:d} {:d} {:d}\\n\".format(d[a], d[b], r[c]))\n except KeyError:\n rem_ind.append(i)\n\n t.drop(t.index[list(set(rem_ind))], inplace=True)\n\n # Fix first lines of text files\n with open(Path(output_dir, \"train2id.txt\")) as f:\n lines = f.readlines()\n lines[0] = str(len(lines) - 1) + \"\\n\"\n with open(Path(output_dir, \"train2id.txt\"), \"w\") as f:\n f.writelines(lines)\n\n with open(Path(output_dir, \"valid2id.txt\")) as f:\n lines = f.readlines()\n lines[0] = str(len(lines) - 1) + \"\\n\"\n with open(Path(output_dir, \"valid2id.txt\"), \"w\") as f:\n f.writelines(lines)\n\n with open(Path(output_dir,\"test2id.txt\")) as f:\n lines = f.readlines()\n lines[0] = str(len(lines) - 1) + \"\\n\"\n with open(Path(output_dir, \"test2id.txt\"), \"w\") as f:\n f.writelines(lines)\n\n v.to_csv(Path(output_dir, \"validation_set.csv\"))\n t.to_csv(Path(output_dir, \"test_set.csv\"))",
"def post_process_triples(triple_type, doc):\n def extract_date_string(sentence):\n date_finder = DateFinder()\n matches = list(date_finder.extract_date_strings(sentence))\n date_string = matches[0][0]\n extra_tokens = matches[0][2]['extra_tokens']\n\n for et in extra_tokens:\n date_string = date_string.replace(et, '')\n\n date_string = date_string.strip()\n return date_string\n\n Args = namedtuple(\"Arguments\", [\"arg1\", \"arg2\", \"arg3\"])\n\n ents = ARGUMENT_ENTITY_TYPES[triple_type]\n for index, sent in enumerate(doc.sents):\n templates = []\n svotriples = sent[f'{triple_type}_svotriples']\n for triple in svotriples:\n arg1 = triple.subject[0]\n\n if arg1.ent_type_ in ents[0]:\n for verb in triple.verb:\n arg2, arg3 = None, None\n for child in verb.children:\n if child.ent_type_ in ents[1] and child != arg1:\n arg2 = child\n # Extracting date for third argument in relation\n # using regex.\n try:\n date_string = extract_date_string(sent['sent'])\n except:\n date_string = None\n arg3 = date_string\n\n if not(arg2 is None and arg3 is None):\n templates.append(Args(arg1, arg2, arg3))\n doc.sents[index][f'{triple_type}_templates'] = templates\n del doc.sents[index][f'{triple_type}_svotriples']\n return doc",
"def __extract_patterns_and_spaces(self):\n\n def __decorate_nodes(nodes, space):\n \"\"\"\n Performs a backward search from a list of pattern nodes and assigns a set of search spaces\n to all encountered nodes.\n :param nodes: List of pattern nodes that belongs to a search space\n :param space: List of search space id\n :return:\n \"\"\"\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)\n\n # Extract all search spaces in the plan and build a dictionary of subjects-to-ignore per each of them.\n # Ignored subjects are those that won't be dereferenced due to a explicit graph pattern (object) filter,\n # e.g. ?s doap:name \"jenkins\" -> All ?s that don't match the filter will be ignored.\n self.__spaces = set(self.__plan_graph.subjects(RDF.type, AGORA.SearchSpace))\n self.__subjects_to_ignore = dict([(sp, set([])) for sp in self.__spaces])\n\n patterns = list(self.__plan_graph.subjects(RDF.type, AGORA.TriplePattern))\n for tp in patterns:\n # A triple pattern belongs to a UNIQUE search space\n space = list(self.__plan_graph.subjects(AGORA.definedBy, tp)).pop()\n self.__patterns[tp] = {'space': space}\n\n # Depending on the format of each triple pattern (either '?s a Concept' or '?s prop O'),\n # it is required to extract different properties.\n tp_pred = list(self.__plan_graph.objects(tp, predicate=AGORA.predicate)).pop()\n\n if tp_pred == RDF.type: # ?s a Concept\n self.__patterns[tp]['type'] = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n try:\n check_type = list(self.__plan_graph.objects(tp, predicate=AGORA.checkType)).pop().toPython()\n except IndexError:\n check_type = True\n self.__patterns[tp]['check'] = check_type\n else: # ?s prop O\n self.__patterns[tp]['property'] = tp_pred\n tp_obj = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n if (tp_obj, RDF.type, AGORA.Literal) in self.__plan_graph: # In case O is a Literal\n self.__patterns[tp]['filter_object'] = list(self.__plan_graph.objects(tp_obj, AGORA.value)).pop()\n elif isinstance(tp_obj, URIRef):\n self.__patterns[tp]['filter_object'] = tp_obj\n\n tp_sub = list(self.__plan_graph.objects(tp, predicate=AGORA.subject)).pop()\n if isinstance(tp_sub, URIRef):\n self.__patterns[tp]['filter_subject'] = tp_sub\n\n # Get all pattern nodes (those that have a byPattern properties) of the search plan and search backwards\n # in order to set the scope of each search space.\n nodes = list(self.__plan_graph.subjects(AGORA.byPattern, tp))\n for n in nodes:\n if n not in self.__node_patterns:\n self.__node_patterns[n] = set([])\n self.__node_patterns[n].add(tp)\n __decorate_nodes(nodes, space)",
"def _parse_relation(self, tag):\n chunk, relation, role = None, [], []\n if \";\" in tag:\n # NP-SBJ;NP-OBJ-1 => 1 relates to both SBJ and OBJ.\n id = tag.split(\"*\")[0][-2:]\n id = id if id.startswith(\"-\") else \"\"\n tag = tag.replace(\";\", id + \"*\")\n if \"*\" in tag:\n tag = tag.split(\"*\")\n else:\n tag = [tag]\n for s in tag:\n s = s.split(\"-\")\n n = len(s)\n if n == 1: \n chunk = s[0]\n if n == 2: \n chunk = s[0]; relation.append(s[1]); role.append(None)\n if n >= 3: \n chunk = s[0]; relation.append(s[2]); role.append(s[1])\n if n > 1:\n id = relation[-1]\n if id.isdigit():\n relation[-1] = int(id)\n else:\n # Correct \"ADJP-PRD\":\n # (ADJP, [PRD], [None]) => (ADJP, [None], [PRD])\n relation[-1], role[-1] = None, id\n return chunk, relation, role",
"def preprocess(self, data, vocab, opt):\n processed = []\n rule_counts = defaultdict(int)\n with open(self.mappings) as f:\n mappings = f.readlines()\n with open('tacred/rules.json') as f:\n rules = json.load(f)\n for c, d in enumerate(data):\n tokens = d['token']\n if opt['lower']:\n tokens = [t.lower() for t in tokens]\n l = len(tokens)\n # anonymize tokens\n ss, se = d['subj_start'], d['subj_end']\n os, oe = d['obj_start'], d['obj_end']\n tokens[ss:se+1] = ['SUBJ-'+d['subj_type']] * (se-ss+1)\n tokens[os:oe+1] = ['OBJ-'+d['obj_type']] * (oe-os+1)\n tokens = map_to_ids(tokens, vocab.word2id)\n pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID)\n ner = map_to_ids(d['stanford_ner'], constant.NER_TO_ID)\n if self.opt['gat']:\n deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID)\n else:\n deprel = map_to_ids([d for d in d['stanford_deprel'] if d!='ROOT' and d!='root'], constant.DEPREL_TO_ID)\n \n if opt['prune_k'] < 0:\n edge_index = [[h-1 for h in d['stanford_head'] if h != 0], \n [i for i, h in enumerate(d['stanford_head']) if h != 0]]\n else:\n edge_index = prune_tree(l, d['stanford_head'], opt['prune_k'], list(range(ss, se+1)), list(range(os, oe+1)))\n deprel = map_to_ids([d['stanford_deprel'][i] for i in edge_index[1]], constant.DEPREL_TO_ID)\n if deprel[-1] == 2:\n deprel = deprel[:-1]\n edge_index = [edge_index[0][:-1], edge_index[1][:-1]]\n edge_index = [edge_index[0]+edge_index[1], edge_index[1]+edge_index[0]]\n edge_mask = [1 if i in edge_index[1] else 0 for i in range(l)]\n relation = constant.LABEL_TO_ID[d['relation']]\n\n if opt['pattn']:\n subj_positions = get_positions(d['subj_start'], d['subj_end'], l)\n obj_positions = get_positions(d['obj_start'], d['obj_end'], l)\n if 't_' in mappings[c] or 's_' in mappings[c]:\n rule = helper.word_tokenize(rules[eval(mappings[c])[0][1]])\n rule = map_to_ids(rule, vocab.rule2id) \n rule = [constant.SOS_ID] + rule + [constant.EOS_ID]\n processed += [(tokens, pos, ner, deprel, subj_positions, obj_positions, relation, edge_index, rule)]\n else:\n processed += [(tokens, pos, ner, deprel, subj_positions, obj_positions, relation, edge_index, [])]\n else:\n subj_mask = [1 if (i in range(ss, se+1) and i in edge_index[0]+edge_index[1]) else 0 for i in range(len(tokens))]\n obj_mask = [1 if (i in range(os, oe+1) and i in edge_index[0]+edge_index[1]) else 0 for i in range(len(tokens))]\n \n if 't_' in mappings[c] or 's_' in mappings[c]:\n rule_counts[rules[eval(mappings[c])[0][1]]] += 1\n rule = helper.word_tokenize(rules[eval(mappings[c])[0][1]])\n rule = map_to_ids(rule, vocab.rule2id) \n rule = [constant.SOS_ID] + rule + [constant.EOS_ID]\n # processed_rule += [(tokens, pos, ner, deprel, subj_mask, obj_mask, relation, edge_index, edge_mask, rule)]\n else:\n rule = []\n processed += [(tokens, pos, ner, deprel, subj_mask, obj_mask, relation, edge_index, edge_mask, rule)]\n return processed",
"def check_nouns(elem_dictionary: dict, key: str, alet_dict: dict, last_nouns: list,\n last_events: list, turtle: list, ext_sources: bool) -> list:\n nouns = []\n for elem in elem_dictionary[key]: # The subject or object nouns\n elem_key = key[0:-1] # Create dictionary key = 'subject' or 'object'\n elem_type = elem[f'{elem_key}_type']\n elem_text = elem[f'{elem_key}_text']\n # Get rid of titles (such as Ms, Miss, Mr, ...)\n if 'FEMALE' in elem_type:\n elem_text = _remove_title_from_name(female_titles, elem_text)\n elif 'MALE' in elem_type:\n elem_text = _remove_title_from_name(male_titles, elem_text)\n head_lemma, head_text = get_head_word(elem_text)\n # poss_dict = Dictionary of nouns (keys) with their possessive modifiers (values)\n # Revised elem_text = noun text with possessives removed\n poss_dict, elem_text = _separate_possessives(elem_text)\n new_tuple = tuple()\n possible_name = empty_string # For a proper name, may contain shortened form = given + surname (any order)\n if elem_type == 'CARDINAL': # For example, 'one' in 'he has one' or in 'one of the band'\n if 'preps' in elem:\n new_tuple = _account_for_cardinal_noun(elem, elem_text, head_lemma,\n alet_dict, last_nouns, last_events, turtle, ext_sources)\n else:\n iri = re.sub(r'[^:a-zA-Z0-9_]', '_', f':{elem_text}_{str(uuid.uuid4())[:13]}').replace('__', '_')\n new_tuple = (elem_text, 'CARDINAL', [owl_thing2], iri)\n turtle.extend([f'{iri} a owl:Thing .',\n f'{iri} rdfs:label \"{elem_text}\" .'])\n elif elem_text.lower() in personal_pronouns:\n # Array of tuples of matched text, type, mappings and IRIs\n new_tuples = _check_personal_pronouns(elem_text, last_nouns)\n nouns.extend(new_tuples)\n last_nouns.extend(new_tuples)\n continue # More than 1 new tuple, so handled specifically in this code block; No need to 'drop through'\n # Not a pronoun; Check for a match in instances of the ontology\n elif ('PERSON' in elem_type or elem_type.endswith('GPE') or\n elem_type.endswith('ORG') or elem_type.endswith('NORP')):\n if space in head_lemma:\n # Get last two words in the name (for given+surname or surname+given name, Eastern or Western ordering)\n names = head_lemma.split(space)\n possible_name = f'{names[-2]} {names[-1]}'\n match_iri, match_type = check_specific_match(head_lemma, elem_type)\n if not match_iri and possible_name:\n match_iri, match_type = check_specific_match(possible_name, elem_type)\n if match_iri:\n new_tuple = (elem_text, elem_type, match_type, match_iri)\n else:\n # Check for family role and match to a name\n new_tuple = _process_family_role(head_text, elem_text, elem_type, alet_dict)\n if not new_tuple:\n # No match - Try to match text and type in last_nouns\n match_noun_tuples = _check_last_nouns(elem_text, elem_type, last_nouns)\n if match_noun_tuples:\n new_tuple = (elem_text, elem_type, match_noun_tuples[0][0], match_noun_tuples[0][1])\n elif possible_name:\n # Also check given + surname\n match_noun_tuples = _check_last_nouns(possible_name, elem_type, last_nouns)\n if match_noun_tuples:\n new_tuple = (possible_name, elem_type, match_noun_tuples[0][0], match_noun_tuples[0][1])\n if not new_tuple:\n # No match - Try to match text and type in alet_dict\n match_maps, match_iri = _check_alet_dict(elem_text, elem_type, alet_dict, last_nouns) # Updates last nouns\n if match_iri:\n new_tuple = (elem_text, elem_type, match_maps, match_iri)\n elif possible_name:\n # Also check given + surname\n match_maps, match_iri = _check_alet_dict(possible_name, elem_type, alet_dict, last_nouns)\n if match_iri:\n new_tuple = 
(possible_name, elem_type, match_maps, match_iri)\n if not new_tuple:\n # No match - Check if the noun is aligned with an event that has already been described\n event_classes, event_iri = check_event(elem_text, last_events)\n if event_iri:\n new_tuple = (elem_text, elem_type, event_classes, event_iri)\n if not new_tuple:\n # No match - Create new entity\n iri = re.sub(r'[^:a-zA-Z0-9_]', underscore, f':{elem_text.lower()}_{str(uuid.uuid4())[:13]}').\\\n replace('__', '_')\n noun_mappings, noun_turtle = create_noun_ttl(iri, elem_text, elem_type, alet_dict, ext_sources)\n new_tuple = (elem_text, elem_type, noun_mappings, iri)\n turtle.extend(noun_turtle)\n nouns.append(new_tuple)\n last_nouns.append(new_tuple)\n return nouns",
"def get_triplets_visualphrase(self):\n vocab = self.vocab['sro']\n triplets = torch.zeros(len(vocab), 3)\n for j in range(len(vocab)):\n subjname, relname, objname = vocab.idx2word[j].split('-')\n triplets[j, 0] = self.vocab['all'].wordpos2idx[subjname + '_noun']\n triplets[j, 1] = self.vocab['all'].wordpos2idx[objname + '_noun']\n triplets[j, 2] = self.vocab['all'].wordpos2idx[relname + '_verb']\n\n triplets = triplets.long()\n return triplets",
"def find_triples(self, tokens,\n left_dependency_label= ['NSUBJ', 'NSUBPASS'],\n head_part_of_speech='VERB',\n right_dependency_label=['DOBJ','POBJ']):\n for head, token in enumerate(tokens):\n if token.part_of_speech == head_part_of_speech:\n children = self.dependents(tokens, head)\n left_deps = []\n right_deps = []\n for child in children:\n child_token = tokens[child]\n child_dep_label = child_token.edge_label\n if child_dep_label in left_dependency_label:\n left_deps.append(child)\n elif child_dep_label in right_dependency_label:\n right_deps.append(child)\n for left_dep in left_deps:\n for right_dep in right_deps:\n yield (left_dep, head, right_dep)",
"def _comp_het_pair_pattern(self,\n gt_types1, gt_nums1,\n gt_types2, gt_nums2,\n gt_phases1, gt_phases2):\n\n # already phased before sending here.\n ret = {'candidates': [], 'priority': 4}\n for kid in self.samples_with_parent:\n if gt_nums1[kid._i] == gt_nums2[kid._i]: continue\n if not (gt_types1[kid._i] == HET and gt_types2[kid._i] == HET): continue\n #if not (gt_phases1[kid._i] and gt_phases2[kid._i]): continue\n if gt_types1[kid.mom._i] == HOM_ALT or gt_types2[kid.dad._i] == HOM_ALT: continue\n mom, dad = kid.mom, kid.dad\n\n kid_phased = gt_phases1[kid._i] and gt_phases2[kid._i]\n dad_phased = gt_phases1[dad._i] and gt_phases2[dad._i]\n mom_phased = gt_phases1[mom._i] and gt_phases2[mom._i]\n\n if kid_phased and dad_phased and (gt_nums1[dad._i] == gt_nums1[kid._i]) and (gt_nums2[dad._i] == gt_nums2[kid._i]):\n continue\n if kid_phased and mom_phased and (gt_nums1[mom._i] == gt_nums1[kid._i]) and (gt_nums2[mom._i] == gt_nums2[kid._i]):\n continue\n\n if kid_phased and dad_phased and mom_phased and gt_types1[dad._i] != gt_types2[dad._i] and gt_types1[mom._i] != gt_types2[mom._i]:\n priority = 1\n\n elif kid_phased and gt_types1[dad._i] != gt_types1[mom._i] and gt_types2[dad._i] != gt_types2[mom._i]:\n # parents are unphased hets at different sites.\n priority = 1\n else:\n priority = 2\n for parent in (kid.mom, kid.dad):\n # unphased het\n if gt_types2[parent._i] == gt_types1[parent._i] == HET:\n priority += 1\n\n ret['candidates'].append(kid)\n ret['priority'] = min(ret['priority'], priority)\n ret['candidate'] = len(ret['candidates']) > 0\n return ret",
"def fetchphrases(query):\n results=searchphrases(query)\n parents=OrderedDict()\n children=OrderedDict()\n grand=OrderedDict()\n categories=[]\n unigrams={}\n bigrams={}\n trigrams={}\n dups=[]\n for cat in results:\n categories.append(cat[0])\n for cat in results:\n try:\n phrase=str(cat[0]).split()\n if(len(phrase)==1):\n categories.remove(cat[0])\n unigrams[phrase[0]]=cat[1]\n elif(len(phrase)==2):\n phrase=\" \".join(phrase)\n categories.remove(cat[0])\n bigrams[phrase]=cat[1]\n elif(len(phrase)==3):\n phrase=\" \".join(phrase)\n categories.remove(cat[0])\n trigrams[phrase]=cat[1]\n else:\n print \"Rest in categories\"\n except:\n print traceback.format_exc()\n if(len(unigrams)!=0):\n parents=unigrams\n if(len(bigrams)!=0):\n for unigram in unigrams.keys():\n for bigram,freq in bigrams.items():\n if(unigram in bigram):\n dups.append(bigram)\n try:\n children[unigram].append((bigram,freq))\n except:\n children[unigram]=[(bigram,freq)]\n\n else:\n parents[bigram]=freq\n if(len(trigrams)!=0):\n for bigram in bigrams.keys():\n for trigram,freq in trigrams.items():\n if(bigram in trigram):\n dups.append(trigram)\n try:\n grand[bigram].append((trigram,freq))\n except:\n grand[bigram]=[(trigram,freq)]\n else:\n try:\n children[bigram].append((trigram,freq))\n except:\n children[bigram]=[(trigram,freq)]\n elif(len(trigrams)!=0):\n for unigram in unigrams.keys():\n for trigram,freq in trigrams.items():\n if(unigram in trigram):\n dups.append(trigram)\n try:\n children[unigram].append((trigram,freq))\n except:\n children[unigram]=[(trigram,freq)]\n del trigrams[trigram]\n else:\n parents[trigram]=freq\n elif(len(bigrams)!=0):\n parents=bigrams\n if(len(trigrams)!=0):\n for bigram in bigrams.keys():\n for trigram,freq in trigrams.items():\n if(bigram in trigram):\n dups.append(trigram)\n try:\n children[bigram].append((trigram,freq))\n except:\n children[bigram]=[(trigram,freq)]\n del trigrams[trigram]\n else:\n parents[trigram]=freq\n elif(len(trigrams)!=0):\n parents=trigrams\n else:\n parents={}\n\n for d in dups:\n try:\n del parents[d]\n except:\n continue\n\n for key,values in children.items():\n sorted_child=sorted(values,key=lambda x:x[1],reverse=True)\n children[key]=sorted_child\n\n for key,values in grand.items():\n sorted_gchild=sorted(values,key=lambda x:x[1],reverse=True)\n grand[key]=sorted_gchild\n return parents,children,grand",
"def main(self, data, quant_factor=None):\n\n if not data[\"linestrings\"]:\n data[\"junctions\"] = self.junctions\n return data\n\n # quantize linestrings before comparing\n # if set to None or a value < 1 (True equals 1) no quantizing is applied.\n if quant_factor is not None:\n if quant_factor > 1:\n kx, ky, x0, y0 = self.prequantize(data[\"linestrings\"], quant_factor)\n data[\"transform\"] = {\"scale\": [kx, ky], \"translate\": [x0, y0]}\n\n # create list with unique combinations of lines using a rdtree\n line_combs = select_unique_combs(data[\"linestrings\"])\n\n # iterate over index combinations\n for i1, i2 in line_combs:\n g1 = data[\"linestrings\"][i1]\n g2 = data[\"linestrings\"][i2]\n\n # check if geometry are equal\n # being equal meaning the geometry object coincide with each other.\n # a rotated polygon or reversed linestring are both considered equal.\n if not g1.equals(g2):\n # geoms are unique, let's find junctions\n self.shared_segs(g1, g2)\n\n # self.segments are nested lists of LineStrings, get coordinates of each nest\n s_coords = []\n for segment in self.segments:\n s_coords.extend(\n [\n [\n (x.xy[0][y], x.xy[1][y])\n for x in segment\n for y in range(len(x.xy[0]))\n ]\n ]\n )\n # s_coords.extend([[y for x in segment for y in list(x.coords)]])\n\n # only keep junctions that appear only once in each segment (nested list)\n # coordinates that appear multiple times are not junctions\n for coords in s_coords:\n self.junctions.extend(\n [geometry.Point(i) for i in coords if coords.count(i) is 1]\n )\n\n # junctions can appear multiple times in multiple segments, remove duplicates\n self.junctions = [\n loads(xy) for xy in list(set([x.wkb for x in self.junctions]))\n ]\n\n # prepare to return object\n data[\"junctions\"] = self.junctions\n\n return data",
"def extract(\n conn: Connection,\n terms: dict,\n predicates: list,\n fmt: str = \"ttl\",\n imported_from: str = None,\n imported_from_property: str = \"IAO:0000412\",\n intermediates: str = \"all\",\n no_hierarchy: bool = False,\n statements: str = \"statements\",\n) -> str:\n if fmt.lower() not in [\"ttl\", \"json-ld\"]:\n raise Exception(\"Unknown format: \" + fmt)\n\n intermediates = intermediates.lower()\n if intermediates not in [\"all\", \"none\"]:\n raise Exception(\"Unknown 'intermediates' option: \" + intermediates)\n\n # Pre-clean up\n clean(conn)\n\n # Create a temp labels table\n add_labels(conn, statements=statements)\n\n # First pass on terms, get all related entities\n ignore = []\n more_terms = set()\n for term_id, details in terms.items():\n # Confirm that this term exists\n query = sql_text(f\"SELECT * FROM {statements} WHERE stanza = :term_id LIMIT 1\")\n res = conn.execute(query, term_id=term_id).fetchone()\n if not res:\n logging.warning(f\"'{term_id}' does not exist in database\")\n ignore.append(term_id)\n continue\n\n # Check for related entities & add them\n related = details.get(\"Related\")\n if not related:\n continue\n related = related.strip().lower().split(\" \")\n for r in related:\n if r == \"ancestors\":\n if intermediates == \"none\":\n # Find first ancestor/s that is/are either:\n # - in the set of input terms\n # - a top level term (below owl:Thing)\n ancestors = get_top_ancestors(\n conn,\n term_id,\n statements=statements,\n top_terms=list(terms.keys()),\n )\n else:\n # Otherwise get a set of ancestors, stopping at terms that are either:\n # - in the set of input terms\n # - a top level term (below owl:Thing)\n ancestors = get_ancestors_capped(\n conn, set(terms.keys()), term_id, statements=statements\n )\n more_terms.update(ancestors)\n elif r == \"children\":\n # Just add the direct children\n more_terms.update(get_children(conn, term_id, statements=statements))\n elif r == \"descendants\":\n if intermediates == \"none\":\n # Find all bottom-level descendants (do not have children)\n descendants = get_bottom_descendants(conn, term_id, statements=statements)\n more_terms.update(descendants)\n else:\n # Get a set of all descendants, including intermediates\n more_terms.update(get_descendants(conn, term_id, statements=statements))\n elif r == \"parents\":\n # Just add the direct parents\n more_terms.update(get_parents(conn, term_id, statements=statements))\n else:\n # TODO: should this just warn and continue?\n raise Exception(f\"unknown 'Related' keyword for '{term_id}': \" + r)\n\n # Add those extra terms from related entities to our terms dict\n for mt in more_terms:\n if mt not in terms:\n # Don't worry about the parent ID because hierarchy will be maintained ...\n # ... 
based on the first ancestor in the full set of terms\n terms[mt] = {}\n\n predicate_ids = None\n if predicates:\n # Current predicates are IDs or labels - make sure we get all the IDs\n predicate_ids = get_ids(conn, predicates)\n\n # Create the terms table containing parent -> child relationships\n conn.execute(\"CREATE TABLE tmp_terms(child TEXT, parent TEXT)\")\n for term_id in terms.keys():\n query = sql_text(\"INSERT INTO tmp_terms VALUES (:term_id, NULL)\")\n conn.execute(query, term_id=term_id)\n\n # Create tmp predicates table containing all predicates to include\n conn.execute(\"CREATE TABLE tmp_predicates(predicate TEXT PRIMARY KEY NOT NULL)\")\n if predicate_ids:\n for predicate_id in predicate_ids:\n if str(conn.engine.url).startswith(\"sqlite\"):\n query = sql_text(\"INSERT OR IGNORE INTO tmp_predicates VALUES (:predicate_id)\")\n conn.execute(query, predicate_id=predicate_id)\n else:\n query = sql_text(\n \"\"\"INSERT INTO tmp_predicates VALUES (:predicate_id)\n ON CONFLICT (predicate) DO NOTHING\"\"\"\n )\n conn.execute(query, predicate_id=predicate_id)\n else:\n # Insert all predicates\n if str(conn.engine.url).startswith(\"sqlite\"):\n conn.execute(\n f\"\"\"INSERT OR IGNORE INTO tmp_predicates\n SELECT DISTINCT predicate\n FROM {statements} WHERE predicate NOT IN\n ('rdfs:subClassOf', 'rdfs:subPropertyOf', 'rdf:type')\"\"\"\n )\n else:\n conn.execute(\n f\"\"\"INSERT INTO tmp_predicates\n SELECT DISTINCT predicate\n FROM {statements} WHERE predicate NOT IN\n ('rdfs:subClassOf', 'rdfs:subPropertyOf', 'rdf:type')\n ON CONFLICT (predicate) DO NOTHING\"\"\"\n )\n\n # Add subclass/subproperty/type relationships to terms table\n for term_id, details in terms.items():\n # Check for overrides, regardless of no-hierarchy\n override_parent = details.get(\"Parent ID\")\n if override_parent:\n # Just assert this as parent and don't worry about existing parent(s)\n query = sql_text(\"INSERT INTO tmp_terms VALUES (:term_id, :override_parent)\")\n conn.execute(query, term_id=term_id, override_parent=override_parent)\n continue\n if no_hierarchy:\n continue\n\n # Otherwise only add the parent if we want a hierarchy\n # Check for the first ancestor we can find with all terms considered \"top level\"\n # In many cases, this is just the direct parent\n parents = get_top_ancestors(\n conn, term_id, statements=statements, top_terms=list(terms.keys())\n )\n parents = parents.intersection(set(terms.keys()))\n if parents:\n # Maintain these relationships in the import module\n for p in parents:\n if p == term_id:\n continue\n query = sql_text(\"INSERT INTO tmp_terms VALUES (:term_id, :p)\")\n conn.execute(query, term_id=term_id, p=p)\n\n # Create our extract table to hold the actual triples\n conn.execute(\n \"\"\"CREATE TABLE tmp_extract(\n stanza TEXT,\n subject TEXT,\n predicate TEXT,\n object TEXT,\n value TEXT,\n datatype TEXT,\n language TEXT\n )\"\"\"\n )\n\n # Insert rdf:type declarations - only for OWL entities\n conn.execute(\n f\"\"\"INSERT INTO tmp_extract\n SELECT * FROM {statements}\n WHERE subject IN (SELECT DISTINCT child FROM tmp_terms)\n AND predicate = 'rdf:type'\n AND object IN\n ('owl:Class',\n 'owl:AnnotationProperty',\n 'owl:DataProperty',\n 'owl:ObjectProperty',\n 'owl:NamedIndividual')\"\"\"\n )\n\n # Insert subproperty statements for any property types\n conn.execute(\n f\"\"\"INSERT INTO tmp_extract (stanza, subject, predicate, object)\n SELECT DISTINCT child, child, 'rdfs:subPropertyOf', parent\n FROM tmp_terms WHERE parent IS NOT NULL AND child IN\n (SELECT subject 
FROM {statements} WHERE predicate = 'rdf:type'\n AND object IN ('owl:AnnotationProperty', 'owl:DataProperty', 'owl:ObjectProperty')\n AND subject NOT LIKE '_:%%')\"\"\"\n )\n\n # Insert subclass statements for any class types\n conn.execute(\n f\"\"\"INSERT INTO tmp_extract (stanza, subject, predicate, object)\n SELECT DISTINCT child, child, 'rdfs:subClassOf', parent\n FROM tmp_terms WHERE parent IS NOT NULL AND child IN\n (SELECT subject FROM {statements} WHERE predicate = 'rdf:type'\n AND object = 'owl:Class' AND subject NOT LIKE '_:%%')\"\"\"\n )\n\n # Everything else is an instance\n # TODO: or datatype?\n conn.execute(\n \"\"\"INSERT INTO tmp_extract (stanza, subject, predicate, object)\n SELECT DISTINCT child, child, 'rdf:type', parent\n FROM tmp_terms WHERE parent IS NOT NULL AND child NOT IN\n (SELECT stanza from tmp_extract\n WHERE predicate IN ('rdfs:subClassOf', 'rdfs:subPropertyOf'))\"\"\"\n )\n\n # Insert literal annotations\n conn.execute(\n f\"\"\"INSERT INTO tmp_extract\n SELECT * FROM {statements}\n WHERE subject IN (SELECT DISTINCT child FROM tmp_terms)\n AND predicate IN (SELECT predicate FROM tmp_predicates)\n AND value IS NOT NULL\"\"\"\n )\n\n # Insert logical relationships (object must be in set of input terms)\n conn.execute(\n f\"\"\"INSERT INTO tmp_extract\n SELECT * FROM {statements}\n WHERE subject IN (SELECT DISTINCT child FROM tmp_terms)\n AND predicate IN (SELECT predicate FROM tmp_predicates)\n AND object IN (SELECT DISTINCT child FROM tmp_terms)\"\"\"\n )\n\n # Insert IRI annotations (object does not have to be in input terms)\n conn.execute(\n f\"\"\"INSERT INTO tmp_extract (stanza, subject, predicate, object)\n SELECT s1.stanza, s1.subject, s1.predicate, s1.object FROM {statements} s1\n JOIN {statements} s2 ON s1.predicate = s2.subject\n WHERE s1.subject IN (SELECT DISTINCT child FROM tmp_terms)\n AND s1.predicate IN (SELECT predicate FROM tmp_predicates)\n AND s2.object = 'owl:AnnotationProperty'\n AND s1.object IS NOT NULL\"\"\"\n )\n\n # Finally, if imported_from IRI is included, add this to add terms\n if imported_from:\n query = sql_text(\n \"\"\"INSERT INTO tmp_extract (stanza, subject, predicate, object)\n SELECT DISTINCT child, child, :imported_from_property, :imported_from FROM tmp_terms\"\"\"\n )\n conn.execute(\n query, imported_from_property=imported_from_property, imported_from=f\"<{imported_from}>\"\n )\n\n # Escape QNames\n escape_qnames(conn, \"tmp_extract\")\n\n ttl = get_ttl(conn, \"tmp_extract\")\n if fmt.lower() == \"ttl\":\n return ttl\n\n # Otherwise the format is JSON\n return ttl_to_json(conn, ttl)",
"def test05_extract_containement_triples(self):\n uri = URIRef('http://ex.org/klm')\n c1 = (uri, LDP.contains, URIRef('http://ex.org/c1'))\n c2 = (uri, LDP.contains, URIRef('http://ex.org/c2'))\n g = Graph()\n g.add(c1)\n g.add(c2)\n g.add((uri, RDF.type, URIRef('http://ex.org/some_type')))\n r = LDPRS(content=g)\n cg = r.extract_containment_triples()\n self.assertEqual(len(r.content), 1)\n self.assertEqual(len(cg), 2)\n self.assertIn(c1, cg)\n self.assertIn(c2, cg)",
"def meshRelationships(Objects):\r\n # Create some variables to be used to store objects\r\n foreheadVariable = []\r\n noseBridgeVariable = []\r\n noseVariable = []\r\n eyeVariable = []\r\n mouthLoopVariable = []\r\n mouthVariable = []\r\n cheekVariable = []\r\n chinVariable = []\r\n earVariable = []\r\n backHeadVariable = []\r\n lowerBackHeadVariable = []\r\n\r\n # Create the relationshipList\r\n relationshipList = []\r\n\r\n for forehead in Objects:\r\n if \"TubxForehead_geo_\" in forehead:\r\n foreheadVariable.append(forehead)\r\n\r\n for noseBridge in Objects:\r\n if \"TubxNoseBridge_geo_\" in noseBridge:\r\n noseBridgeVariable.append(noseBridge)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, noseBridge, forehead)\r\n\r\n for eye in Objects:\r\n if \"TubxEye_geo_\" in eye:\r\n eyeVariable.append(eye)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, eye, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, eye, noseBridge)\r\n\r\n for nose in Objects:\r\n if \"TubxNose_geo_\" in nose:\r\n noseVariable.append(nose)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, nose, noseBridge)\r\n\r\n for mouthLoop in Objects:\r\n if \"TubxMouthLoop_geo_\" in mouthLoop:\r\n mouthLoopVariable.append(mouthLoop)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList, mouthLoop, nose)\r\n\r\n for mouth in Objects:\r\n if \"TubxMouth_geo_\" in mouth:\r\n mouthVariable.append(mouth)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, mouth, mouthLoop)\r\n\r\n for cheek in Objects:\r\n if \"TubxCheek_geo_\" in cheek:\r\n cheekVariable.append(cheek)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, cheek, mouthLoop)\r\n\r\n for chin in Objects:\r\n if \"TubxChin_geo_\" in chin:\r\n chinVariable.append(chin)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, chin, mouthLoop)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, chin, cheek)\r\n\r\n for ear in Objects:\r\n if \"TubxEar_geo_\" in ear:\r\n earVariable.append(ear)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, ear, forehead)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, ear, cheek)\r\n\r\n for backhead in Objects:\r\n if \"TubxBackHead_geo_\" in backhead:\r\n backHeadVariable.append(backhead)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, backhead, forehead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, backhead, ear)\r\n\r\n for lowerbackhead in Objects:\r\n if \"TubxLowerBackHead_geo_\" in lowerbackhead:\r\n lowerBackHeadVariable.append(lowerbackhead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, lowerbackhead, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, lowerbackhead, backhead)\r\n\r\n for default in Objects:\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, default, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, default, noseBridge)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList,default,nose)\r\n for eye in eyeVariable:\r\n createRelationships(relationshipList, default, eye)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, default, mouthLoop)\r\n for mouth in 
mouthVariable:\r\n createRelationships(relationshipList, default, mouth)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, default, cheek)\r\n for chin in chinVariable:\r\n createRelationships(relationshipList, default, chin)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, default, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, default, backhead)\r\n for lowerbackhead in lowerBackHeadVariable:\r\n createRelationships(relationshipList, default, lowerbackhead)\r\n\r\n return relationshipList",
"def remove_cryptic_relations(relations_list, relations_dot=None, relations_fig=None, threshold=None, verbose=False):\n # construct a graph of relations from given iterable of tuples of node ids,\n # then remove individuals corresponding to exact minimum vertex cover,\n # which has exponential-time complexity, but most subgraphs are small\n # unless size of subgraph is greater than the argument 'threshold',\n # then remove individuals corresponding to approximate minimum vertex cover,\n # which has linear-time complexity, and guarantees a factor-2 approximation\n # can handle loop edges, for inbred individuals, where haplotypes are related\n G = nx.Graph()\n for i, j in relations_list:\n G.add_edge(i, j)\n # iterate over each connected component\n removed_list = set()\n if verbose:\n print 'Removing cryptic relations...'\n for g in list(connected_component_subgraphs(G)):\n if verbose:\n print 'Size of current connected component is ' + str(len(g))\n # approximate solution\n if threshold is not None and g.number_of_nodes() >= threshold:\n for i in min_weighted_vertex_cover(g):\n removed_list.add(i)\n # exact solution to mvc\n else:\n for i in exact_min_vertex_cover(g):\n removed_list.add(i)\n # color nodes that were removed\n for i in removed_list:\n G.node[i]['color'] = 'red'\n # save dot file and pdf file of graph\n if relations_dot is not None and relations_fig is not None:\n write_dot(G, relations_dot)\n call('dot -Tpdf {0} -o {1}'.format(relations_dot, relations_fig), shell=True)\n return removed_list",
"def _build_init(self, kg_dir = \"./related_triples_by_relation/\"):\r\n if self.training:\r\n idx = np.random.choice(range(len(self.train_data))) \r\n self.entry = self.train_data[idx]\r\n else:\r\n idx = self.idx_to_test\r\n self.entry = self.test_data[idx]\r\n self.query = self.entry['s'] + ' ' + self.entry['p']\r\n self.text_list = self.entry['corpus']\r\n ######################################################\r\n ## obtain the answer from extraction system output ###\r\n ######################################################\r\n if self.training:\r\n self.answer_list = self.pred_train[self.entry['id']] \r\n else:\r\n self.answer_list = self.pred_test[self.entry['id']]\r\n\r\n assert len(self.text_list) == len(self.answer_list), \"Wrong, text length %d, answer length %d\" %(len(self.text_list), len(self.answer_list))\r\n\r\n self.text_answer = [[self.text_list[i], self.answer_list[i]] for i in range(len(self.text_list))]\r\n \r\n self.max_index = len(self.text_list)\r\n ### #####################################################################\r\n ## initialize the index of current/new candidate as 0/1 respectively. ###\r\n #########################################################################\r\n self.cur_index = 0\r\n self.new_index = 1\r\n self.cur = self.text_answer[self.cur_index]\r\n try:\r\n self.new = self.text_answer[self.new_index]\r\n except:\r\n ####################################################################\r\n ## exception would happen when size of raw text is less than 2. ####\r\n ## which cannot happen in preprocessed data ########################\r\n ####################################################################\r\n self.new = self.cur\r\n self.curans = self.cur[1][0]\r\n self.newans = self.new[1][0]\r\n self.answer_seen = self.cur[1][0]\r\n self.truth = \"\".join(self.entry['o'])\r\n\r\n #################################################################\r\n ## if do bert, we need to squeeze the space #####################\r\n #################################################################\r\n if self.do_bert:\r\n self.truth = token_word(self.truth)\r\n # get reference values\r\n #os.chdir('/content/drive/My Drive/Knowledge Extraction/related_triples_by_relation')\r\n filename = \"%s.csv\" % self.entry['p']\r\n related_triples_to_use = pd.read_csv(kg_dir + filename, sep='\\t', header = None)\r\n self.reference_values = related_triples_to_use[2].values",
"def get_triples(self, ignore_rel=True, filter_predicate=None,\n ignore_grammatical=False, minimal=False):\n result = []\n if isinstance(filter_predicate, (str, unicode)):\n filter_predicate = [filter_predicate]\n nodes = {}\n for s, p, o in self.graph:\n s = unicode(s)\n child = nodes.setdefault(s, Node(uri=s))\n\n pred = str(p).replace(AMCAT, \"\")\n if isinstance(o, Literal):\n if hasattr(child, pred):\n o = getattr(child, pred) + \"; \" + o\n setattr(child, pred, unicode(o))\n else:\n o = unicode(o)\n if not ((ignore_rel and pred == \"rel\")\n or (ignore_grammatical and pred.startswith(\"rel_\"))\n or (filter_predicate and pred not in filter_predicate)\n or (pred == RDF_TYPE)):\n parent = nodes.setdefault(o, Node(uri=o))\n result.append(Triple(child, pred, parent))\n\n if minimal:\n return [{\"subject\": s.id,\"predicate\": p, \"object\": o.id}\n for (s, p, o) in result]\n return result",
"def antecedents_patterns(self,\n graph: Graph,\n subject_uri: URIRef,\n relation_uri: URIRef,\n object_uri: URIRef) -> Tuple[str, Optional[Literal]]:\n # contains the concatenated SPARQL patterns of the literals, i.e. the SPARQL filter to match nodes that conform\n # with all literals in the premise\n patterns = \"\"\n\n # subject of a matching literal\n matched_literal_subject = None\n\n # object of a matching literal\n matched_literal_object = None\n\n # the literal that matches the new fact\n matched_literal = None\n\n # test if a literal in the premise handles the same relation that is in the new fact\n # save the literal and its subject and object if such an literal exists\n for antecedent in self.antecedents:\n antecedent_relation_uri = antecedent.relation.uri\n if antecedent_relation_uri == relation_uri:\n matched_literal_subject = f\"?{antecedent.literal_subject}\"\n matched_literal_object = f\"?{antecedent.literal_object}\"\n matched_literal = antecedent\n break\n\n # concatenate the SPARQL pattern fo every literal to query nodes matching all literals\n # exclude the literal with a matching relation type since it is already satisfied by the new fact that will be\n # added\n for antecedent in self.antecedents:\n if antecedent.relation != relation_uri:\n patterns += antecedent.sparql_patterns()\n\n subject_entity = f\"<{subject_uri}>\"\n object_entity = f\"<{object_uri}>\"\n\n if matched_literal_subject is not None:\n patterns = patterns.replace(matched_literal_subject, subject_entity)\n\n if matched_literal_object is not None:\n patterns = patterns.replace(matched_literal_object, object_entity)\n\n return patterns, matched_literal",
"def removeLongCompounds(Config, comps, wordToLemma):\n if not len(comps): return Counter(comps)\n thres = max(Config.minCompoundCount//2, comps.most_common(50)[-1][1] // 10) # use count of 10th most common word\n if thres > Config.minCompoundCount//2: print(\"Thres\",thres)\n\n #get all possible compound candidates, ie. all compounds split into parts of length 1, 2,3\n splitcomp = [] #only lower case\n for k in comps:\n parts = k.lower().split() #single words\n splitcomp += [parts[k] + \" \" + parts[k+1] for k in range(len(parts)-2)] #2 parts\n splitcomp += [parts[k] + \" \" + parts[k + 1] + \" \" + parts[k+2] for k in range(len(parts)-3)] #3 parts\n countsSplit = Counter(splitcomp) + comps\n\n allcomp = {}\n rem = 0\n inv_map = getInv(wordToLemma)\n acomps = [w for w in wordToLemma if len(w.split()) > 1]\n for k in acomps: #go through all current compounds\n if k in wordToLemma: parts = wordToLemma[k].split()\n else: parts = k.split()\n remove = False\n replace = []\n if len(parts) == 3: #check if should remove compound, ie. if not very frequent or subcompound much more frequent\n w0l = (parts[0] + \" \" + parts[1]).lower()\n w1l = (parts[1] + \" \" + parts[2]).lower()\n nw0 = countsSplit[w0l] if w0l in countsSplit else -1\n nw1 = countsSplit[w1l] if w1l in countsSplit else -1\n if nw0 > 3*countsSplit[k] and nw0 > nw1 and nw0 > thres:# and countsSplit[k]<5*minCompoundCount:\n replace = [(parts[0] + \" \" + parts[1], w0l), (parts[2], parts[2].lower())]\n remove = True\n elif nw1 > 3*countsSplit[k] and nw1 > thres:# and countsSplit[k]<5*minCompoundCount:\n replace = [(parts[0], parts[0].lower()), (parts[1] + \" \" + parts[2], w1l)]\n remove = True\n elif comps[k] <= thres:\n replace = zip(parts, [p.lower() for p in parts])\n remove = True\n elif len(parts) > 3: #extremely long compound, Stanford parser creates them often; mainly wrong; replace by most frequent biterm, if non-exists, add single words\n remove = True\n partw = [(parts[k] + \" \" + parts[k+1], k) for k in range(len(parts)-2)] #bi-terms\n freq = [comps[p.lower()] if p.lower() in comps else -1 for p, k in partw]\n ipart = np.argmax(freq)\n if freq[ipart] > thres:\n singlewords = set(np.arange(len(parts))) - set([partw[ipart][1], partw[ipart][1]+1]) #get indexes of single words\n #upartw = [parts[k] + \" \" + parts[k+1] for k in range(len(parts)-2)]\n replace = [(partw[ipart][0], partw[ipart][0].lower())] + [(parts[iw], parts[iw].lower()) for iw in singlewords]\n else:\n replace = zip(parts, [p.lower() for p in parts])\n\n if not remove: allcomp[k] = allcomp.get(k, 0) + comps[k]\n else:\n for t in replace: #replace compound by subterms\n w, tow = t\n addWord(wordToLemma, w, tow)\n if len(w.split()) > 1 and k in wordToLemma: #if a part is itself a compound\n mcomp = wordToLemma[k]\n if mcomp in comps:\n allcomp[mcomp] = allcomp.get(mcomp, 0) + comps[mcomp]\n del comps[mcomp] #avoid double counting\n if k in inv_map: #we delete the compound, ie. all values that map to it, that's why we need the inverse map\n for todel in inv_map[k]+[k]:\n if todel in wordToLemma:\n del wordToLemma[todel]\n rem += 1\n print(\"Removed\", rem)\n\n return Counter(allcomp)",
"def extract_triplets(self) -> Iterable[dict]:\n stg_corpus = [txt.strip()+\".\" if txt.strip()[-1]!=\".\" else txt.strip() for txt in self.__corpus__]\n stg_corpus = ' '.join(self.__corpus__)\n\n with StanfordOpenIE() as client:\n triples_corpus = client.annotate(stg_corpus)\n\n self.__triples_corpus__ = triples_corpus\n\n return triples_corpus",
"def extract_relations(self, ne_tagged_line, dependency_tree, pos_tagged_line):\n # Normalize resources\n aligned_ne_tagged_line = self._align_tagged_sentence(ne_tagged_line)\n aligned_pos_tagged_line = self._align_tagged_sentence(pos_tagged_line)\n normalized_dependency_tree = self._normalize_node_addresses(dependency_tree)\n\n verb_nodes = self._extract_verb_nodes(normalized_dependency_tree, aligned_pos_tagged_line)\n extracted_relations = []\n\n for verb_node in verb_nodes:\n subj_node, obj_node = self._get_subj_and_obj(verb_node, normalized_dependency_tree)\n\n expanded_subj_node = self._expand_node(subj_node, normalized_dependency_tree)\n expanded_obj_node = self._expand_node(obj_node, normalized_dependency_tree)\n\n # TODO (FEATURE): Extend definition of verb nodes? (Allow more patterns) [DU 18.04.17]\n # At the moment, the simple extraction heuristic ist just the following:\n # 1.) Find all verb nodes in a dependency tree\n # 2.) Find subject and object of that verb\n # 3.) Check if they are tagged with a Named Entity Tag\n # 4.) If one of them is tagged, extract the hold phrase as a relation triplet\n #\n # Possible improvements\n # - Use Machine Learning to learn patterns from pre-annotated corpus\n # - Alternatively, come up with more sophisticated rules manually\n # - Only extract relevant relationships\n # - Only extract the relevant parts of a relationship\n\n if self._expanded_node_is_ne_tagged(expanded_subj_node, aligned_ne_tagged_line) or \\\n self._expanded_node_is_ne_tagged(expanded_obj_node, aligned_ne_tagged_line):\n subj_phrase = self._join_expanded_node(expanded_subj_node)\n obj_phrase = self._join_expanded_node(expanded_obj_node)\n extracted_relations.append((subj_phrase, verb_node[\"word\"], obj_phrase))\n\n return extracted_relations",
"def main():\n\n for line in sys.stdin:\n # retrieve term(s)\n [q1] = line.rstrip().lower().split()\n\n # print('1 => ' + str(q1))\n # print('2 => ' + str(q2))\n\n if q1 not in postings:\n c1 = sorted(replace_by_lev(q1).items(), key=itemgetter(1))\n # print(\"C1 => \" + str(c1))\n alternatives = build_alternatives(c1)\n # print(\"alternatives => \" + str(alternatives))\n\n if args.all or args.jaccard:\n union = set()\n\n for word in alternatives:\n union = union | set(postings[word])\n\n for tweet in union:\n for word in alternatives:\n if(word, tweet) in db:\n print_tweet(tweet)\n else:\n p1 = set(postings[alternatives])\n for tweet in p1:\n if(alternatives, tweet) in db:\n print_tweet(tweet)\n else:\n\n # print(c1)\n # if q2 not in postings:\n # # print('!')\n # c2 = replace_by_lev(q2)\n # print(c2)\n #\n # sorted_lev = sorted(c2)\n #\n # print(str(sorted_lev))\n # print(str(sorted_occur))\n\n # for term in c2:\n # if c2[term][0] <= 2 and\n # p2.add(postings[term])\n\n p1 = set(postings[q1])\n # p2 = set(postings[q2])\n # # set with ID's of tweets in which both terms occur\n # intersection = p1 & p2\n\n # print(intersection)\n\n for tweet in p1: # formerly 'intersection'\n if (q1, tweet) in db: # and (q2, tweet) in db:\n # if bigram(db[(q1, tweet)], db[(q2, tweet)]):\n print_tweet(tweet)",
"def prune_influence_map_subj_obj(self):\n def get_rule_info(r):\n result = {}\n for ann in self.model.annotations:\n if ann.subject == r:\n if ann.predicate == 'rule_has_subject':\n result['subject'] = ann.object\n elif ann.predicate == 'rule_has_object':\n result['object'] = ann.object\n return result\n im = self.get_im()\n rules = im.nodes()\n edges_to_prune = []\n for r1, r2 in itertools.permutations(rules, 2):\n if (r1, r2) not in im.edges():\n continue\n r1_info = get_rule_info(r1)\n r2_info = get_rule_info(r2)\n if 'object' not in r1_info or 'subject' not in r2_info:\n continue\n if r1_info['object'] != r2_info['subject']:\n logger.info(\"Removing edge %s --> %s\" % (r1, r2))\n edges_to_prune.append((r1, r2))\n logger.info('Removing %d edges from influence map' %\n len(edges_to_prune))\n im.remove_edges_from(edges_to_prune)",
"def _extract_terms(self, obj):\r\n terms = set()\r\n if 'paths' in obj:\r\n for path in obj['paths']:\r\n segs = re.split('[/{}]', path)\r\n for seg in segs:\r\n terms.add(seg.lower())\r\n self.terms = terms",
"def produce(self,\n graph: Graph,\n subject_uri: URIRef,\n relation_uri: URIRef,\n object_uri: URIRef) -> List[Tuple[URIRef, URIRef, URIRef]]:\n # contains the facts produced by this rule\n new_facts: List[Tuple[URIRef, URIRef, URIRef]] = []\n\n # QUESTION: apparently AMIE rules can only have one triple in their conclusion. Is this actually the case?\n\n # if there is only one literal in the premise, simply check if it matches\n # a new fact is only produced if both subject and object of the input fact also appear in the premise literal\n if len(self.antecedents) == 1:\n\n # relation of the (only) literal in the conclusion\n new_relation = self.consequents[0].relation\n if isinstance(new_relation, URIRelation):\n new_relation_uri = new_relation.uri\n else:\n new_relation_uri = URIRelation(new_relation).uri\n\n # if the subject and object of the premise and the conclusion are the same entities\n if (\n self.antecedents[0].literal_subject_id == self.consequents[0].literal_subject_id\n and self.antecedents[0].literal_object_id == self.consequents[0].literal_object_id\n ):\n new_facts.append((subject_uri, new_relation_uri, object_uri))\n\n # if the subject and object of the premise are swapped in the conclusion\n if (\n self.antecedents[0].literal_subject_id == self.consequents[0].literal_object_id\n and self.antecedents[0].literal_object_id == self.consequents[0].literal_subject_id\n ):\n new_facts.append((object_uri, new_relation_uri, subject_uri))\n\n return new_facts\n\n else:\n # there are multiple literals in the premise\n # to check for triples matching every literal, a sparql query is built from them\n\n # build the where part of the sparql query and find the literal matching the relation type of the input fact\n # if such a literal exists\n query_patterns, new_literal = self.antecedents_patterns(graph, subject_uri, relation_uri, object_uri)\n\n # if the patterns of the sparql query do not contain either the subject or the object, only query for\n # possible solutions to the query\n # an ask query only queries if the pattern has a solution, i.e. 
do any nodes match the pattern\n # it will return a yes/no answer\n if \"?b\" not in query_patterns and \"?a\" not in query_patterns:\n query_projection = \"ask \"\n else:\n # insert the selectors for subject and object into the select query if they exist in the query pattern\n query_projection = \"select where \"\n\n # the resulting query would look like \"select ?a ?b ...\" if both cases are true\n if \"?b\" in query_patterns:\n query_projection = query_projection.replace(\"select \", \"select ?b \")\n if \"?a\" in query_patterns:\n query_projection = query_projection.replace(\"select \", \"select ?a \")\n\n # build remaining part of the query and execute it\n query_patterns = \"{\" + query_patterns + \"}\"\n sparql_query = query_projection + query_patterns\n query_result = graph.query(sparql_query)\n\n # relation type of the resulting triple\n new_relation = self.consequents[0].relation\n if isinstance(new_relation, URIRelation):\n new_relation_uri = self.consequents[0].relation.uri\n else:\n new_relation_uri = URIRelation(self.consequents[0].relation).uri\n\n # handle every possible projection of the query\n if \"?a\" in query_projection and \"?b\" in query_projection:\n # both subject and object for each of the new facts were queried\n\n # add every result tuple as a new fact with the relation of the conclusion\n for new_subject, new_object in query_result:\n new_facts.append((new_subject, new_relation_uri, new_object))\n\n elif \"?a\" in query_projection:\n # only the subject for each of the new facts was queried\n\n # select the subject or the object of the premise as object for new fact depending on the naming\n # i.e., a subject_id == 2 represents a \"b\", therefore the subject would be the new object\n if new_literal.literal_subject_id == 2:\n new_object = subject_uri\n else:\n # the object in the premise was named \"b\"\n new_object = object_uri\n\n # add every result subject with the previously determined object as new fact with the relation of the\n # conclusion\n for new_subject, in query_result:\n new_facts.append((new_subject, new_relation_uri, new_object))\n\n elif \"?b\" in query_projection:\n # only the object for each of the new facts was queried\n\n # select the subject or the object of the premise as subject for new fact depending on the naming\n # i.e., a subject_id == 1 represents an \"a\", therefore the subject would be the new subject\n if new_literal.literal_subject_id == 1:\n new_subject = subject_uri\n else:\n # the object in the premise was named \"a\"\n new_subject = object_uri\n\n # add every result object with the previously determined subject as new fact with the relation of the\n # conclusion\n for new_object, in query_result:\n new_facts.append((new_subject, new_relation_uri, new_object))\n\n elif bool(query_result):\n # if the result is non empty, or an ask query response is yes\n\n # if the subject was named \"a\" and the object named \"b\", the new fact will have the same subject and\n # object. otherwise they are swapped\n if new_literal.literal_subject_id == 1:\n new_subject = subject_uri\n else:\n new_subject = object_uri\n\n if new_literal.literal_object_id == 2:\n new_object = object_uri\n else:\n new_object = subject_uri\n\n # add the new fact with the original subject and object (possibly swapped) and the relation of the\n # conclusion\n new_facts.append((new_subject, new_relation_uri, new_object))\n\n return new_facts",
"def execute(self, triple_map, output, **kwargs):\n subjects = []\n logical_src_iterator = str(triple_map.logicalSource.iterator)\n json_object = kwargs.get('obj', self.source)\n # Removes '.' as a generic iterator, replace with '@'\n if logical_src_iterator == \".\":\n results = [None,]\n else:\n json_path_exp = jsonpath_ng.parse(logical_src_iterator)\n results = [r.value for r in json_path_exp.find(json_object)][0]\n for row in results:\n subject = self.generate_term(term_map=triple_map.subjectMap,\n **kwargs)\n for pred_obj_map in triple_map.predicateObjectMap:\n predicate = pred_obj_map.predicate\n if pred_obj_map.template is not None:\n output.add((\n subject,\n predicate,\n self.generate_term(term_map=pred_obj_map, **kwargs)))\n\n if pred_obj_map.parentTriplesMap is not None:\n self.__handle_parents__(\n output,\n parent_map=pred_obj_map.parentTriplesMap,\n subject=subject,\n predicate=predicate,\n obj=row,\n **kwargs)\n if pred_obj_map.reference is not None:\n ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))\n found_objects = [r.value for r in ref_exp.find(row)]\n for obj in found_objects:\n if rdflib.term._is_valid_uri(obj):\n rdf_obj = rdflib.URIRef(str(obj))\n else:\n rdf_obj = rdflib.Literal(str(obj))\n output.add((subject, predicate, rdf_obj))\n if pred_obj_map.constant is not None:\n output.add((subject,\n predicate,\n pred_obj_map.constant))\n subjects.append(subject)\n return subjects",
"def process_sample(\n sample: Dict[str, Any],\n relation_vocab: Dict[str, int],\n spacy_model: Any,\n tokenizer: Any,\n) -> Tuple[Optional[Dict[str, Any]], Dict[str, int]]:\n\n processed_sample = {}\n\n if sample['num_pos_raters'] < 2:\n relation = NO_RELATION\n else:\n relation = sample['relation']\n if relation not in relation_vocab:\n relation_vocab[relation] = len(relation_vocab)\n label = relation_vocab[relation]\n processed_sample['target'] = [label]\n\n text = sample['annotated_text']\n\n # Remove subj and obj annotations from text and store position\n def find_span(input_text: str, pattern: Any,\n prefix_len: int) -> Tuple[int, int]:\n \"\"\"Find span corresponding to actual subj or obj strings.\"\"\"\n match = pattern.search(input_text)\n span_start = match.start() + prefix_len + 1\n # We want inclusive spans, hence -2 instead of -1\n span_end = match.end() - 2\n return (span_start, span_end)\n\n def replace_and_adjust(\n input_text: str, match: Any, prefix_len: int,\n inverted_mapping: np.ndarray) -> Tuple[str, np.ndarray]:\n \"\"\"Remove subj/obj annotations and adjust token mapping accordingly.\"\"\"\n\n original_span_start = match.start() + prefix_len + 1\n original_span_end = match.end() - 1\n actual_string = input_text[original_span_start:original_span_end]\n new_text = input_text[:match.start()] + actual_string + input_text[match\n .end():]\n\n # Inverted mapping maps from remaining tokens to positions in original text\n new_inverted_mapping = np.zeros(len(new_text), dtype=np.int32)\n new_inverted_mapping[:match.start()] = inverted_mapping[:match.start()]\n\n new_span_start = match.start()\n new_span_end = match.start() + len(actual_string)\n new_inverted_mapping[new_span_start:new_span_end] = inverted_mapping[\n original_span_start:original_span_end]\n new_inverted_mapping[new_span_end:] = inverted_mapping[original_span_end +\n 1:]\n\n return new_text, new_inverted_mapping\n\n inverted_mapping = np.arange(len(text))\n subj_pattern = re.compile('SUBJ{[^}]+}')\n subj_span = find_span(text, subj_pattern, len('SUBJ'))\n obj_pattern = re.compile('OBJ{[^}]+}')\n obj_span = find_span(text, obj_pattern, len('OBJ'))\n\n # Remove subj/obj annotations from text\n while True:\n subj_match = subj_pattern.search(text)\n if subj_match is None:\n break\n text, inverted_mapping = replace_and_adjust(text, subj_match, len('SUBJ'),\n inverted_mapping)\n\n while True:\n obj_match = obj_pattern.search(text)\n if obj_match is None:\n break\n text, inverted_mapping = replace_and_adjust(text, obj_match, len('OBJ'),\n inverted_mapping)\n\n # Adjust spans for removed tokens\n mapping = np.zeros(len(sample['annotated_text']), dtype=np.int32) - 1\n mapping[inverted_mapping] = np.arange(len(inverted_mapping))\n subj_span = (mapping[subj_span[0]], mapping[subj_span[1]])\n assert subj_span[0] != -1 and subj_span[1] != -1\n obj_span = (mapping[obj_span[0]], mapping[obj_span[1]])\n assert obj_span[0] != -1 and obj_span[1] != -1\n\n parsed_text = spacy_model(text)\n\n # We use spacy to parse text, identify noun chunks\n mention_char_spans = []\n mention_char_spans.append(subj_span)\n mention_char_spans.append(obj_span)\n\n def overlaps(first_span: Tuple[int, int], second_span: Tuple[int,\n int]) -> bool:\n\n def point_inside_span(point: int, span: Tuple[int, int]) -> bool:\n return span[0] >= point and point <= span[1]\n\n spans_overlap = (\n point_inside_span(first_span[0], second_span) or\n point_inside_span(first_span[1], second_span) or\n point_inside_span(second_span[0], first_span) or\n 
point_inside_span(second_span[1], first_span))\n\n return spans_overlap\n\n for chunk in parsed_text.noun_chunks:\n span_start_char = parsed_text[chunk.start].idx\n span_last_token = parsed_text[chunk.end - 1]\n span_end_char = span_last_token.idx + len(span_last_token.text) - 1\n char_span = (span_start_char, span_end_char)\n # Append only if does not overlap with subj or obj spans. In case spacy\n # mention annotation disagrees with tacred annotation, we want to favor\n # tacred.\n\n if not overlaps(char_span, subj_span) and not overlaps(char_span, obj_span):\n mention_char_spans.append(char_span)\n\n # Sort spans by start char\n start_chars = np.array([span[0] for span in mention_char_spans])\n sorted_indices = np.argsort(start_chars)\n sorted_positions = np.zeros_like(start_chars)\n sorted_positions[sorted_indices] = np.arange(len(sorted_positions))\n sorted_spans = [mention_char_spans[idx] for idx in sorted_indices]\n\n # Tokenize and get aligned mention positions\n _, text_ids, text_mask, mention_spans, span_indices = tokenization_utils.tokenize_with_mention_spans(\n tokenizer=tokenizer,\n sentence=text,\n spans=sorted_spans,\n max_length=FLAGS.max_length,\n add_bert_tokens=True,\n allow_truncated_spans=True,\n )\n\n processed_sample['text_ids'] = text_ids\n processed_sample['text_mask'] = text_mask\n\n # Subj and obj are the first elements of mention spans.\n subj_index = sorted_positions[0]\n obj_index = sorted_positions[1]\n\n # Some spans may be dropped by the BERT tokenizer. Here we map indices in the\n # original list of spans to the one returned by the tokenizer.\n reverse_span_indices = {\n original_idx: tokenized_idx\n for tokenized_idx, original_idx in enumerate(span_indices)\n }\n\n # Skip if subj or obj dropped.\n if (subj_index not in reverse_span_indices or\n obj_index not in reverse_span_indices):\n return None, relation_vocab\n\n subj_index = reverse_span_indices[subj_index]\n obj_index = reverse_span_indices[obj_index]\n\n # Make sure we don't discard subj or obj\n assert max(subj_index, obj_index) < FLAGS.max_mentions\n\n processed_sample['subject_mention_indices'] = [subj_index]\n processed_sample['object_mention_indices'] = [obj_index]\n\n mention_spans = np.array(mention_spans)\n mention_start_positions = mention_spans[:, 0]\n mention_end_positions = mention_spans[:, 1]\n\n mention_start_positions = mention_start_positions[:FLAGS.max_mentions]\n mention_end_positions = mention_end_positions[:FLAGS.max_mentions]\n\n mention_pad_shape = (0, FLAGS.max_mentions - len(mention_start_positions))\n\n mention_mask = np.ones(len(mention_start_positions), dtype=np.int64)\n mention_mask = np.pad(mention_mask, mention_pad_shape, mode='constant')\n mention_start_positions = np.pad(\n mention_start_positions, mention_pad_shape, mode='constant')\n mention_end_positions = np.pad(\n mention_end_positions, mention_pad_shape, mode='constant')\n\n processed_sample['mention_start_positions'] = mention_start_positions\n processed_sample['mention_end_positions'] = mention_end_positions\n processed_sample['mention_mask'] = mention_mask\n\n return processed_sample, relation_vocab",
"def triples():",
"def parse_relation(self, term):\n pass"
] | [
"0.61057407",
"0.56719995",
"0.55161005",
"0.5455312",
"0.5447199",
"0.5435746",
"0.5421998",
"0.5421532",
"0.53770727",
"0.5327237",
"0.53150713",
"0.5306053",
"0.52893126",
"0.52765054",
"0.52570033",
"0.5234349",
"0.5214712",
"0.5208659",
"0.5204619",
"0.5195088",
"0.518262",
"0.5163499",
"0.5138381",
"0.5131887",
"0.51300335",
"0.5117049",
"0.5097253",
"0.50595134",
"0.5052819",
"0.5046693"
] | 0.61369044 | 0 |
Set default config for the app | def configure_app(self, defaults: t.Optional[DefaultConfig]) -> None:
self.config = Config(defaults) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_config(app):\n # set config from config.py\n app.config.from_object('config')\n\n # override config from secret conf files\n pi_home = os.path.dirname(app.config['ENVPATH']) # /home/pi\n secret_conf_dir = os.path.join(pi_home, 'CONFIG_CHAUDIERE') # /home/pi/CONFIG_CHAUDIERE\n secret_conf_com_file = 'chaudiere_secret_config.py'\n secret_conf_com = secret_conf_dir+'/'+secret_conf_com_file\n try:\n with open(secret_conf_com) as f:\n json_config = json.load(f)\n for conf in ['Common', app.config['ENVNAME']]:\n app.config.update(json_config[conf])\n except IOError as e:\n print('IOError loading conf file (file not existing?): ' + secret_conf_com + str(e))\n except ValueError as e:\n print('ValueError loading JSON : ' + secret_conf_com + ' ' + str(e))\n\n #app.config['USERS_EMAILS'] = list(map(lambda x: x+'@gmail.com', app.config['USERS'])) \n # app.logger.error('test error') # <-- This works !!! ",
"def setup_app_config(self, app: Flask) -> Flask:\n app.secret_key = config.get_setting('SECRET_KEY')\n return app",
"def builder_inited_handler(app): # noqa\n log.debug('Sphinx overridden Plantweb defaults:')\n log.debug(app.config.plantweb_defaults)\n\n # Set overrides in provider\n defaults_provider.overrides = app.config.plantweb_defaults\n\n # Register provider with the highest priority\n provider = 'python://plantweb.directive.defaults_provider'\n if provider not in defaults.DEFAULTS_PROVIDERS:\n defaults.DEFAULTS_PROVIDERS.append(provider)\n\n # Force defaults reload\n from .defaults import read_defaults\n if hasattr(read_defaults, 'cache'):\n del read_defaults.cache",
"def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }",
"def config_skeleton():\n config = Config()\n config.set_to_default()\n config.save()",
"def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)",
"def configure_app(flask_app):\n flask_app.config['RESTPLUS_SWAGGER_UI_DOC_EXPANSION'] = \\\n settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION\n flask_app.config['RESTPLUS_VALIDATE'] = \\\n settings.RESTPLUS_VALIDATE\n\n flask_app.config['SQLALCHEMY_DATABASE_URI'] = \\\n settings.SQLALCHEMY_DATABASE_URI\n flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = \\\n settings.SQLALCHEMY_TRACK_MODIFICATIONS",
"def configure(self, conf):\n return",
"def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)",
"def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')",
"def setup_app(app):\n try:\n config_key = ndb.Key('WordListConfig', os.environ['CONFIG_MODEL_ID'])\n app.wordlist_config = config_key.get()\n except:\n print('Cannot load config from Datastore', file=sys.stderr)\n sys.exit(1)",
"def set_conf_files(application):\n example_dir = \"./docs/examples/configs/example_4\"\n application.config['GROUPS_FILE'] = example_dir + \"/groups.yml\"\n application.config['POLICIES_FILE'] = example_dir + \"/policies.yml\"",
"def _init_config_(self):\n self._config= {}",
"def init_app(self, app, config=None):\n if not (config is None or isinstance(config, dict)):\n raise ValueError(\"`config` must be an instance of dict or None\")\n\n base_config = app.config.copy()\n if self.config:\n base_config.update(self.config)\n if config:\n base_config.update(config)\n\n config = base_config\n\n config.setdefault(k_log_path, None)\n config.setdefault(k_log_name, \"\")\n config.setdefault(k_log_rotation, 60 * 60)\n config.setdefault(k_log_format, \"\")\n config.setdefault(k_log_enqueue, True)\n config.setdefault(k_log_serialize, True)\n\n self._set_loguru(app, config)",
"def handle_config_inited(app, config):\n\n def handle_legacy(new, orig):\n if getattr(config, new) is None and getattr(config, orig) is not None:\n config[new] = config[orig]\n\n # copy over deprecated configuration names to new names (if any)\n handle_legacy('confluence_publish_allowlist', 'confluence_publish_subset')\n handle_legacy('confluence_purge_from_root', 'confluence_purge_from_master')\n handle_legacy('confluence_root_homepage', 'confluence_master_homepage')\n handle_legacy('confluence_space_key', 'confluence_space_name')",
"def init_config(self):\n pass",
"def config():",
"def config():",
"def set_default_configs(self):\n\n raise Exception(\"Child classes must override set_default_configs().\")",
"def configure_app(app, config_dict, config_file_folder):\n # write the config dict to app config as a read-only proxy of a mutable dict\n app.config[APP_CONFIG_JSON] = MappingProxyType(config_dict)\n config_file_folder = config_file_folder\n app.config[CONFIG_FILE_FOLDER] = config_file_folder\n app.config[AVAILABLE_PAGES_DICT] = make_pages_dict(\n config_dict.get(AVAILABLE_PAGES, []), app.config[CONFIG_FILE_FOLDER]\n )\n configure_backend(app)\n return app",
"def config():\n config_django()\n config_svisor()",
"def spread_default_parameters(config, dev_cfg):\n def_cfg = config.get('DEFAULT')\n if def_cfg is None:\n return\n\n for (key, value) in def_cfg.items():\n if key not in dev_cfg:\n dev_cfg[key] = value",
"def configure(api_key=None):\n configuration = {'api_key': api_key}\n global _default_configuration\n _default_configuration = configuration",
"def configure(api_key=None):\n configuration = {\"api_key\": api_key}\n global _default_configuration\n _default_configuration = configuration",
"def _setConfig(self,config):\n if config:\n self.config = config\n else:\n from layman import config\n self.config = config",
"def init_app(app: object = None) -> None:\n config = get_application_config(app)\n config.setdefault(\n \"FULLTEXT_ENDPOINT\", \"https://fulltext.arxiv.org/fulltext/\"\n )",
"def _setup_app_config():\n try:\n file = _app_config_file()\n conf = pyhocon.ConfigFactory.parse_file(file)\n except FileNotFoundError:\n conf = pyhocon.ConfigFactory.parse_file(_generate_global_config())\n # test if since_version is deprecated and regenerate a newer config\n return conf",
"def load_configuration(app, environment):\n environment_configuration = ('config/settings_%s.py') % (environment)\n\n app.config.from_object(__name__)\n app.config.from_pyfile('config/settings_default.py')\n app.config.from_pyfile(environment_configuration)",
"def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()",
"def __init__(self, app: Sanic):\n self.configurations = app.config\n\n Configs.__instance = self"
] | [
"0.7135407",
"0.6767803",
"0.67038256",
"0.6664277",
"0.65583634",
"0.6471732",
"0.6440063",
"0.64349705",
"0.64288706",
"0.63906646",
"0.6379789",
"0.63641423",
"0.63625246",
"0.6274701",
"0.627201",
"0.6265376",
"0.6263423",
"0.6263423",
"0.62554973",
"0.6252563",
"0.6247459",
"0.62470716",
"0.6245415",
"0.6237602",
"0.6227884",
"0.6225162",
"0.62127316",
"0.618326",
"0.61712146",
"0.6162807"
] | 0.7263818 | 0 |
Validating ProductsDataViewSet by giving invalid data | def test_ProductsDataViewSet_with_post_Invalid_data(self):
payload = {
"name": "1234"
}
# Request the data by API call.
response = self.client.post('/api/productsdata/',
data=json.dumps(payload),
content_type=self.content_type)
# Checking the response
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()['name'][0],
'Name `{0}` must contain atleast one letter'.format(
payload.get('name'))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_dataset(self):\n pass",
"def is_valid(self, dataset):\n pass",
"def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')",
"def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))",
"def validate(self, value):\n\n current_values = dict(self.queryset.values_list('id', 'quantity'))\n for product_id in self.product_fields.keys():\n self.product_fields[product_id]['quantity'] = current_values[product_id]\n\n errors = []\n for (product_id, product_data), chosen_value in zip(self.product_fields.items(), value):\n name = product_data['name']\n int_chosen_val = int(chosen_value)\n if product_data['quantity'] == 0:\n errors.append(\n ValidationError(self.error_messages['out_of_stock'].format(name))\n )\n continue\n if int_chosen_val <= 0:\n errors.append(\n ValidationError(self.error_messages['incorrect_quantity'].format(name))\n )\n continue\n\n if product_data['quantity'] < int_chosen_val:\n errors.append(\n ValidationError(self.error_messages['less_quantity'].format(product_data['quantity'], name))\n )\n continue\n\n if len(errors) > 0:\n raise ValidationError(errors)",
"def validate_dataset(self):\n if np.all(self.L_bpe == self.bpe_l):\n pass\n\n super(StandardDataset, self).validate_dataset()",
"def validate(self):\r\n # Check KeyError\r\n try:\r\n self.fields[\"product_name_fr\"]\r\n self.fields[\"generic_name\"]\r\n self.fields[\"url\"]\r\n self.fields[\"nutrition_grade_fr\"]\r\n self.fields[\"categories\"]\r\n self.fields[\"stores\"]\r\n self.fields[\"brands\"]\r\n except KeyError:\r\n return False\r\n\r\n # Check empty field and lenght of generic_name\r\n for key, value in self.fields.items():\r\n if value == '':\r\n return False\r\n break\r\n if key == \"generic_name\":\r\n if len(value) > 255:\r\n return False\r\n\r\n try:\r\n self.categories = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"categories\"], 100)\r\n self.stores = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"stores\"], 45)\r\n self.brands = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"brands\"], 45)\r\n self.category_index = self.categories.index(self.category)\r\n except KeyError:\r\n return False\r\n except ValueError:\r\n return False\r\n except AttributeError:\r\n self.errors += 1\r\n print(self.errors)\r\n return False",
"def is_valid(self, data_model: DataModel) -> bool:",
"def validate_fields_for_magento(self,data):\n for field in data:\n if data[field] == None :\n del data[field]\n if data[field] == True:\n data[field] = 1\n if data[field] == False :\n data[field] = 0",
"def validate(self, data):\n raise NotImplementedError(\"Inherit this class and override this method.\")",
"def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)",
"def validate(cls, data, errors):",
"def _validate(self, queryset):\n values_distinct = queryset.values(\n *self._invoice_report_common_fields\n ).distinct()\n if values_distinct.count() != 1:\n raise ValidationError(self._get_non_unique_error(queryset))\n if not all(values_distinct[0].values()):\n raise ValidationError(\"None of {} can't be empty\".format(', '.join(\n self._invoice_report_common_fields\n )))",
"def _check_data_valid(self):\n\n is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")",
"def test_invalid_value_sales(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '2145', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def _validate_update_data(self, data):\n return",
"def test_validate_field_data(self, **test_dict):\n test_data = TestData()\n test_data.weight = test_dict['weight']\n test_data.max_attempts = test_dict['max_attempts']\n validation = set()\n self.xblock.validate_field_data(validation, test_data)\n validation_list = list(validation)\n # Only one validation error should be in set\n self.assertEquals(1, len(validation_list))\n self.assertEquals(\n test_dict['result'],\n validation_list[0].text,\n )",
"def validate(self, row):\n raise NotImplementedError",
"def validate(self, data):\n\t\t#vectors must be comma-separated strings of integers\n\t\ttry:\n\t\t\tv1_list = [int(x.strip()) for x in data['vector1'].split(',')]\n\t\t\tv2_list = [int(i.strip()) for i in data['vector2'].split(',')]\n\t\texcept:\n\t\t\traise serializers.ValidationError(\"Must provide only comma-separated integers\")\n\n\t\t#vectors must be of equal length\n\t\tif not len(v1_list) == len(v2_list):\n\t\t\traise serializers.ValidationError(\"Vectors to be multiplied must be of equal length\")\n\n\t\ttry:\n\t\t\tdata['result'] = ','.join([str(x) for x in numpy.cross(v1_list, v2_list)])\n\t\texcept Exception as e: #pragma: no cover\n\t\t\t#This could represent a data leak in an enterprise application, but \n\t\t\t#for since this is a code challenge it makes sense to expose\n\t\t\t#the text of any error we hit here for transparency's sake\n\t\t\traise serializers.ValidationError(e)\n\n\t\treturn data",
"def testInvalidValues(self):\n pretest_data = self.test_data.copy()\n pretest_data['revenue'] = [1, 2, 3, 4, 5, 6, 7, 'nan']\n with self.assertRaisesRegex(\n ValueError,\n r'Unable to convert column revenue to numeric.'):\n _ = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=pretest_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n response='revenue',\n matching_metrics={'response': 1.0})",
"def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")",
"def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)",
"def _validate_create_data(self, data):\n return",
"def validate(self, data: Dict):\n for key in self.__dict__.keys():\n if not key.startswith('__') and key != 'id':\n if data[key] == '' or data[key] is None:\n raise ValidationError(\n message=f'{key} should not be \"{data[key]}\"'\n )",
"def test_devicetype_bulk_import_invalid(self):\n form = DeviceTypeBulkImportForm(data={\"pk\": \"\"})\n\n self.assertFalse(form.is_valid())",
"def __call__(self, data):\n if sum(item_data['amount'] for item_data in data) < self.order.total_cost:\n raise ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: self.message,\n })",
"def test_validation_class(self):\n\n for data in ('tbldata', 'dihedraldata', 'rdcdata', 'danidata', 'tensordata', 'pcsdata'):\n v = self.web.query_nodes(key=data)\n\n if not v.empty():\n self.assertTrue(v.validate())",
"def validate_data(self, row, col, value):\n\n return True",
"def test_filter_remove_only_bad_products(self):\n list_of_products = [self.good_product, self.bad_product]\n self.assertEqual(\n ProductValidator().filter(list_of_products),\n [self.good_product])",
"def test_importing_invalid_data_for_collections(self):\n self.prepare()\n\n def _check(file_name, table_name, expected_results):\n # import the CSV file with COPY FROM\n logger.debug('Importing from csv file: {}'.format(file_name))\n out, err, _ = self.run_cqlsh(cmds=\"COPY ks.{} FROM '{}'\".format(table_name, file_name))\n logger.debug(out)\n\n assert 'ParseError - Failed to parse' in err\n\n results = rows_to_list(self.session.execute(\"SELECT * FROM {}\".format(table_name)))\n logger.debug(results)\n assert expected_results == results\n\n def _test_invalid_data_for_sets():\n logger.debug('Testing invalid data for sets')\n self.session.execute(\"\"\"\n CREATE TABLE testinvaliddataforsets (\n key text,\n value frozen<set<text>>,\n PRIMARY KEY (key)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write('key1,\"{\\'test1\\', \\'test2\\'}\"\\n')\n f.write('key2,\"{\\'test1\\', \\'test2\\']\"\\n')\n f.write('key3,not_a_set\\n')\n f.write('key4,\"not_a_set\"\\n')\n f.write(\"key5,'not_a_set'\\n\")\n\n expected_results = [['key1', SortedSet(['test1', 'test2'])]]\n _check(tempfile.name, 'testinvaliddataforsets', expected_results)\n\n def _test_invalid_data_for_lists():\n logger.debug('Testing invalid data for lists')\n self.session.execute(\"\"\"\n CREATE TABLE testinvaliddataforlists (\n key text,\n value list<text>,\n PRIMARY KEY (key)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write('key1,\"[\\'test1\\', \\'test2\\']\"\\n')\n f.write('key2,\"[\\'test1\\', \\'test2\\'}\"\\n')\n f.write('key3,not_a_list\\n')\n f.write('key4,\"not_a_list\"\\n')\n f.write(\"key5,'not_a_list'\\n\")\n\n expected_results = [['key1', list(['test1', 'test2'])]]\n _check(tempfile.name, 'testinvaliddataforlists', expected_results)\n\n def _test_invalid_data_for_maps():\n logger.debug('Testing invalid data for maps')\n self.session.execute(\"\"\"\n CREATE TABLE testinvaliddataformaps (\n key text,\n value map<text, text>,\n PRIMARY KEY (key)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write('key1,\"{\\'key1\\': \\'test1\\', \\'key2\\': \\'test2\\'}\"\\n')\n f.write('key2,\"{\\'key1\\': \\'test1\\', \\'key2\\': \\'test2\\']\"\\n')\n f.write('key3,not_a_map\\n')\n f.write('key4,\"not_a_map\"\\n')\n f.write(\"key5,'not_a_map'\\n\")\n\n expected_results = [['key1', dict([('key1', 'test1'), ('key2', 'test2')])]]\n _check(tempfile.name, 'testinvaliddataformaps', expected_results)\n\n _test_invalid_data_for_sets()\n _test_invalid_data_for_lists()\n _test_invalid_data_for_maps()"
] | [
"0.6795187",
"0.6527712",
"0.62331474",
"0.62078047",
"0.6093575",
"0.6072888",
"0.5991253",
"0.5977534",
"0.59747654",
"0.5947358",
"0.5937604",
"0.5932143",
"0.59246784",
"0.5910832",
"0.58964",
"0.5830329",
"0.5815245",
"0.5773554",
"0.5771487",
"0.57573295",
"0.57554066",
"0.5739656",
"0.5731069",
"0.5719245",
"0.5714371",
"0.5713894",
"0.57100135",
"0.5705277",
"0.57025254",
"0.56989706"
] | 0.73469806 | 0 |
Validating ProductsDataViewSet using the GET request method | def test_ProductsDataViewSet_with_get_request(self):
# Request the data by API call.
response = self.client.get('/api/productsdata/')
# Checking the response
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['count'], 1)
self.assertEqual(response.json()['next'], None)
self.assertEqual(response.json()['previous'], None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ProductsDataViewSet_with_get_request_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(\n self.product_id))\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.json()['name'])",
"def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')",
"def test_ProductsDataViewSet_with_post_Invalid_data(self):\n payload = {\n \"name\": \"1234\"\n }\n\n # Request the data by API call.\n response = self.client.post('/api/productsdata/',\n data=json.dumps(payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json()['name'][0],\n 'Name `{0}` must contain atleast one letter'.format(\n payload.get('name')))",
"def validate(self, data):\n choices = data['answer']\n question = Question.objects.get(id=data['question'])\n if question.qtype != 'select-multiple' and len(choices) > 1:\n raise serializers.ValidationError('This is a question with single choice')\n queryset = Choice.objects.filter(question_id=data['question'])\n for choice in choices:\n get_object_or_404(queryset, id=choice)\n return data",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def post(self, request, *args, **kwargs): # pylint: disable=unused-argument\n formset = self.get_formset()\n if formset.is_valid():\n return self.formset_valid(formset)\n return self.formset_invalid(formset)",
"def product_list(request):\n if request.method == 'GET':\n products = Product.objects.all()\n serializer = ProductSerializer(products, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = ProductSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)",
"def test_get_all_products(self):\n response=self.get_all_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(response.status_code, 200,result['Available Products'] )",
"def test_filter_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'nutella',\r\n 'category': '1',\r\n 'nutriscore': 'd'\r\n })\r\n self.assertTrue(response.context['product_list'])",
"def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))",
"def get_queryset(self):\n queryset = super(ProductUpdateView, self).get_queryset()\n return queryset",
"def validate(self, view, data):\n\n with self._lock:\n return view and (data.get('id') in self.request_ids[view.id()])",
"def Product_list(request):\n if request.method == 'GET':\n tasks = Product.objects.all()\n serializer = ProductSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = ProductSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def test_get_all_objects(self):\n url = '/product/xml/'\n response = self.client.get(url, **self.headers)\n # Request should not be validated by a 401\n self.failUnlessEqual(response.status_code, 401)\n response = self.client.get(url, **self.advancedheaders)\n # Request should be validated by a 200\n self.failUnlessEqual(response.status_code, 200)\n xml_response = parseString(response.content)\n\n product_tags =[elt for elt in xml_response.getElementsByTagName('object') if elt.getAttribute('model') == 'product.product']\n # check that all product are displayed\n self.failUnlessEqual(len(product_tags), Product.objects.count())",
"def product_list(request, format=None):\n if request.method == 'GET':\n products = Product.objects.all()\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n if 'id' in request.DATA:\n del request.DATA['id']\n if 'ordered' in request.DATA:\n del request.DATA['ordered']\n serializer = ProductSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def validate(self, data):\n if data.has_key('site'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, site=data['site']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n elif data.has_key('project'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, project=data['project']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n return data",
"def test_list_products(self):\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n\n res = self.client.get(PRODUCTS_URL)\n\n products = Product.objects.all().order_by('-name')\n serializer = ProductSerializer(products, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(len(res.data), 3)",
"def post(self):\n data = request.get_json()\n prod_name = data.get('product_name')\n prod_cat = data.get('category')\n prod_price = data.get('unit_price')\n prod_qty = data.get('quantity')\n prod_meas = data.get('measure')\n \n valprod = validate_product(product_name=prod_name, \n category=prod_cat, \n unit_price=prod_price, \n quantity=prod_qty, \n measure=prod_meas)\n if valprod:\n return valprod\n\n obj_products = Products(prod_name, prod_cat, prod_price, prod_qty, prod_meas)\n database.insert_data_products(obj_products)\n return jsonify({\"Success\": \"you have added a product\"}), 201",
"def get_queryset(self):\n queryset = super(ProductDeleteView, self).get_queryset()\n return queryset",
"def post(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)",
"def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)",
"def product_list(request):\n if request.method == 'GET':\n _products = Product.objects.all()\n serializer = ProductSerializer(_products, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)",
"def validate(self):\r\n # Check KeyError\r\n try:\r\n self.fields[\"product_name_fr\"]\r\n self.fields[\"generic_name\"]\r\n self.fields[\"url\"]\r\n self.fields[\"nutrition_grade_fr\"]\r\n self.fields[\"categories\"]\r\n self.fields[\"stores\"]\r\n self.fields[\"brands\"]\r\n except KeyError:\r\n return False\r\n\r\n # Check empty field and lenght of generic_name\r\n for key, value in self.fields.items():\r\n if value == '':\r\n return False\r\n break\r\n if key == \"generic_name\":\r\n if len(value) > 255:\r\n return False\r\n\r\n try:\r\n self.categories = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"categories\"], 100)\r\n self.stores = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"stores\"], 45)\r\n self.brands = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"brands\"], 45)\r\n self.category_index = self.categories.index(self.category)\r\n except KeyError:\r\n return False\r\n except ValueError:\r\n return False\r\n except AttributeError:\r\n self.errors += 1\r\n print(self.errors)\r\n return False",
"def test_get_product_list(self):\n self._create_products(5)\n resp = self.app.get(\"/products\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)",
"def validated_view(self, context, request):\n print(\"validated_view\")\n self.__request = request\n\n if self._params_schema:\n request.set_property(self.validate_params, self._valid_params_attr,\n reify=self._reify_params)\n if self._match_schema:\n request.set_property(self.validate_match, self._valid_match_attr,\n reify=self._reify_params)\n\n return self._view_callable(context, request)",
"def validate_dataset(self):\n pass",
"def test_get(self):\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('form' in response.context)\n self.assertFalse(response.context['form'].is_bound)\n self.assertEquals(self.model.objects.count(), 0)",
"def test_view_product_detail(self):\n product = sample_product(supplier_id=self.user)\n\n url = detail_url(product.id)\n res = self.client.get(url)\n\n serializer = ProductSerializer(product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def validate(self, request):\n\n validated_parameter_set = self.__class__()\n\n # Inspects the attributes of a parameter set and tries to validate the input\n for attribute_name, type_instance in self.__class__.__dict__.iteritems():\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n # Ignore parameters with __ and if they are methods\n continue\n\n #: Must be one of the following types\n if not isinstance(type_instance, prestans.types.String) and \\\n not isinstance(type_instance, prestans.types.Float) and \\\n not isinstance(type_instance, prestans.types.Integer) and \\\n not isinstance(type_instance, prestans.types.Date) and \\\n not isinstance(type_instance, prestans.types.DateTime) and \\\n not isinstance(type_instance, prestans.types.Array):\n raise TypeError(\"%s should be of type \\\n prestans.types.String/Integer/Float/Date/DateTime/Array\" % attribute_name)\n\n if issubclass(type_instance.__class__, prestans.types.Array):\n\n if not isinstance(type_instance.element_template, prestans.types.String) and \\\n not isinstance(type_instance.element_template, prestans.types.Float) and \\\n not isinstance(type_instance.element_template, prestans.types.Integer):\n raise TypeError(\"%s elements should be of \\\n type prestans.types.String/Integer/Float\" % attribute_name)\n\n try:\n\n #: Get input from parameters\n #: Empty list returned if key is missing for getall\n if issubclass(type_instance.__class__, prestans.types.Array):\n validation_input = request.params.getall(attribute_name)\n #: Key error thrown if key is missing for getone\n else:\n try:\n validation_input = request.params.getone(attribute_name)\n except KeyError:\n validation_input = None\n\n #: Validate input based on data type rules,\n #: raises DataTypeValidationException if validation fails\n validation_result = type_instance.validate(validation_input)\n\n setattr(validated_parameter_set, attribute_name, validation_result)\n\n except prestans.exception.DataValidationException, exp:\n raise prestans.exception.ValidationError(\n message=str(exp),\n attribute_name=attribute_name,\n value=validation_input,\n blueprint=type_instance.blueprint())\n\n return validated_parameter_set",
"def test_get_product(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.data['name'], 'Producto 1')\n self.assertEqual(response.data['description'], 'Descripcion producto 1')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '24.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser1')\n self.assertEqual(response.data['category']['name'], 'general')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get('/api/1.0/products/2/')\n self.assertEqual(response.data['name'], 'Producto 2')\n self.assertEqual(response.data['description'], 'Descripcion producto 2')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '312.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser2')\n self.assertEqual(response.data['category']['name'], 'deportes')\n self.assertEqual(response.status_code, status.HTTP_200_OK)"
] | [
"0.7251704",
"0.6607075",
"0.6369387",
"0.5741582",
"0.5719708",
"0.5598621",
"0.55916286",
"0.55769634",
"0.5567388",
"0.5546251",
"0.55135393",
"0.5474931",
"0.5474357",
"0.5402799",
"0.5396396",
"0.5390451",
"0.5390293",
"0.5374702",
"0.5366535",
"0.53604525",
"0.5345702",
"0.5331715",
"0.53172165",
"0.5300519",
"0.5298091",
"0.5284147",
"0.52570385",
"0.5231589",
"0.52252823",
"0.52199334"
] | 0.7067096 | 1 |
Validating ProductsDataViewSet using the GET request method with an ID | def test_ProductsDataViewSet_with_get_request_id(self):
# Request the data by API call.
response = self.client.get('/api/productsdata/{}/'.format(
self.product_id))
# Checking the response
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.json()['name']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')",
"def test_ProductsDataViewSet_with_get_request(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/')\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()['count'], 1)\n self.assertEqual(response.json()['next'], None)\n self.assertEqual(response.json()['previous'], None)",
"def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)",
"def test_ProductsDataViewSet_with_post_Invalid_data(self):\n payload = {\n \"name\": \"1234\"\n }\n\n # Request the data by API call.\n response = self.client.post('/api/productsdata/',\n data=json.dumps(payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json()['name'][0],\n 'Name `{0}` must contain atleast one letter'.format(\n payload.get('name')))",
"def validate(self, view, data):\n\n with self._lock:\n return view and (data.get('id') in self.request_ids[view.id()])",
"def validate(self, data):\n choices = data['answer']\n question = Question.objects.get(id=data['question'])\n if question.qtype != 'select-multiple' and len(choices) > 1:\n raise serializers.ValidationError('This is a question with single choice')\n queryset = Choice.objects.filter(question_id=data['question'])\n for choice in choices:\n get_object_or_404(queryset, id=choice)\n return data",
"def test_view_product_detail(self):\n product = sample_product(supplier_id=self.user)\n\n url = detail_url(product.id)\n res = self.client.get(url)\n\n serializer = ProductSerializer(product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_datatransformationsetups_id_get(self):\n pass",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def Product_detail(request, pk):\n try:\n task = Product.objects.get(pk=pk)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = ProductSerializer(task)\n print(serializer.data)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = ProductSerializer(task, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def view_products():\n min_id = (Product.select().order_by(Product.product_id.asc()).get()).product_id\n max_id = (Product.select().order_by(Product.product_id.desc()).get()).product_id\n print(f\"\\nPlease select id between {min_id} & {max_id}\")\n id = int(input(\"Select product id: \"))\n while id not in range(min_id, max_id+1):\n print(\"Your selection must be between {} and {}\".format(min_id, max_id))\n id = int(input(\"Select product id: \"))\n print(f\"\"\"\\n-Product: {Product.get_by_id(id).product_name}\n-Quantity: {Product.get_by_id(id).product_quantity}\n-Price: {Product.get_by_id(id).product_price} cents\n-Date updated: {Product.get_by_id(id).date_updated}\\n\"\"\")\n input(\"\\nPress ENTER to continue\")\n clear()",
"def get_queryset(self):\n queryset = super(ProductUpdateView, self).get_queryset()\n return queryset",
"def retrieve(self, request, pk=None):\n try:\n order_product = Order_Products.objects.get(pk=pk)\n serializer = Order_Products_Serializer(\n order_product, context={'request': request}\n )\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)",
"def products_view(request, product_id):\n if not product_id:\n return JsonResponse({\"error\": \"Product id is not provided\"}, 400)\n if request.method == \"GET\":\n response, status_code = get_products(request, product_id)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n else:\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=200, safe=False)\n else:\n response, status_code = update_product(request, product_id)\n if status_code != 204:\n return JsonResponse(response, status=status_code)\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=status_code, safe=False)",
"def test_retrieve_product(self):\n product_pk = 1\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['id'], product_pk)",
"def get(self, _id):",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def product(request, product_id):\n\n u = request.user\n try:\n p = Product.objects.get(id=product_id)\n request.POST['sku'] = p.sku\n result = item(u, p.sku)\n except Product.DoesNotExist:\n result = {'result':'0'}\n return JSONHttpResponse( result )",
"def get(id: str) -> DataSet:\n pass",
"def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result",
"def get_queryset(self):\n queryset = super(ProductDeleteView, self).get_queryset()\n return queryset",
"def test_installments_id_get(self):\n pass",
"def test_poets_id_get(self):\n pass",
"def product_list(request, format=None):\n if request.method == 'GET':\n products = Product.objects.all()\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n if 'id' in request.DATA:\n del request.DATA['id']\n if 'ordered' in request.DATA:\n del request.DATA['ordered']\n serializer = ProductSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def get(request, pk=None):\n calls = Price.objects.filter(pk=int(pk))\n serializer = PriceSerializer(calls, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)",
"async def validation_of_method_parameters(\n self, resource_group_name: str, id: int, **kwargs: Any\n ) -> _models.Product:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version: str = kwargs.pop(\"api_version\", _params.pop(\"apiVersion\", self._config.api_version))\n cls: ClsType[_models.Product] = kwargs.pop(\"cls\", None)\n\n request = build_validation_of_method_parameters_request(\n resource_group_name=resource_group_name,\n id=id,\n subscription_id=self._config.subscription_id,\n api_version=api_version,\n template_url=self.validation_of_method_parameters.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n deserialized = self._deserialize(\"Product\", pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def Product_list(request):\n if request.method == 'GET':\n tasks = Product.objects.all()\n serializer = ProductSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = ProductSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def test_get_product_detail(self):\n ProductCategory.objects.create(name='Test Product Category #1', description='Test Description #1')\n ProductCategory.objects.create(name='Test Product Category #2', description='Test Description #1')\n ProductCategory.objects.create(name='Test Product Category #3', description='Test Description #1')\n test_key = ProductCategory.objects.values()[1].get('id')\n \n Product.objects.create(product_category_id=test_key, name='Test Product Category #1', description='Test Description #1', unit_price=12, quantity=15)\n pk = Product.objects.values()[0].get('id')\n\n PRODUCTS_DETAIL_URL = reverse('product:product_details', args=(pk,))\n res = self.client.get(PRODUCTS_DETAIL_URL)\n # print(res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def product_detail(request, pk):\n try:\n product = Product.objects.get(pk=pk)\n except Product.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = ProductSerializer(product)\n return JSONResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = ProductSerializer(product, data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data)\n return JSONResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n product.delete()\n return HttpResponse(status=204)",
"def test_get_product_rate_plan_by_id(self):\n pass"
] | [
"0.7191686",
"0.673298",
"0.59211195",
"0.585185",
"0.58161724",
"0.5667275",
"0.5570321",
"0.5554077",
"0.5522598",
"0.5515945",
"0.5448646",
"0.5404771",
"0.53977585",
"0.53932184",
"0.5391353",
"0.53493273",
"0.53440976",
"0.5339966",
"0.533051",
"0.53057694",
"0.5302475",
"0.5301827",
"0.5295905",
"0.52842474",
"0.5281153",
"0.5268674",
"0.52296567",
"0.5228575",
"0.5226694",
"0.52265614"
] | 0.7739606 | 0 |
Validating ProductsDataViewSet using the GET request method with an invalid ID | def test_ProductsDataViewSet_with_get_request_Invalid_id(self):
# Request the data by API call.
response = self.client.get('/api/productsdata/{}/'.format(-1))
# Checking the response
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json()['detail'], 'Not found.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ProductsDataViewSet_with_get_request_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(\n self.product_id))\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.json()['name'])",
"def test_ProductsDataViewSet_with_post_Invalid_data(self):\n payload = {\n \"name\": \"1234\"\n }\n\n # Request the data by API call.\n response = self.client.post('/api/productsdata/',\n data=json.dumps(payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json()['name'][0],\n 'Name `{0}` must contain atleast one letter'.format(\n payload.get('name')))",
"def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)",
"def test_ProductsDataViewSet_with_get_request(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/')\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()['count'], 1)\n self.assertEqual(response.json()['next'], None)\n self.assertEqual(response.json()['previous'], None)",
"def validate(self, data):\n choices = data['answer']\n question = Question.objects.get(id=data['question'])\n if question.qtype != 'select-multiple' and len(choices) > 1:\n raise serializers.ValidationError('This is a question with single choice')\n queryset = Choice.objects.filter(question_id=data['question'])\n for choice in choices:\n get_object_or_404(queryset, id=choice)\n return data",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.dataset.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def validate(self, view, data):\n\n with self._lock:\n return view and (data.get('id') in self.request_ids[view.id()])",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def validate(self, data):\n draft_group_id = data['draft_group']\n if draft_group_id is None:\n raise serializers.ValidationError(\"invalid draft_group id\")\n try:\n draftgroup.models.DraftGroup.objects.get(pk=draft_group_id)\n except draftgroup.models.DraftGroup.DoesNotExist:\n raise serializers.ValidationError('invalid draft_group id')\n\n return data",
"def test_datatransformationsetups_id_get(self):\n pass",
"def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result",
"def validate(self, data: Dict):\n for key in self.__dict__.keys():\n if not key.startswith('__') and key != 'id':\n if data[key] == '' or data[key] is None:\n raise ValidationError(\n message=f'{key} should not be \"{data[key]}\"'\n )",
"def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)",
"def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)",
"def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)",
"def check_id(self, id):",
"def sanity_check(cls, data): # no version with ID, since PUT (update) isn't allowed\n data = _dict_sanity_check(data,\n mandatory_keys = [\n (\"user_id\", User.exists),\n (\"customer_id\", Customer.exists)\n ],\n optional_keys = [])\n return data, None",
"def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)",
"def validate(self, data):\n if data.has_key('site'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, site=data['site']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n elif data.has_key('project'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, project=data['project']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n return data",
"def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)",
"def test_get_restaurant_by_id_not_number(self):\n resp = self.test_client.get(self.API_BASE + '/hello', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 400)",
"def test_details_nonnum_id(self):\n self.check_response(\n '/attributes/xyz',\n ('Please enter an integer value for Attribute ID',))",
"def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)",
"async def validation_of_method_parameters(\n self, resource_group_name: str, id: int, **kwargs: Any\n ) -> _models.Product:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version: str = kwargs.pop(\"api_version\", _params.pop(\"apiVersion\", self._config.api_version))\n cls: ClsType[_models.Product] = kwargs.pop(\"cls\", None)\n\n request = build_validation_of_method_parameters_request(\n resource_group_name=resource_group_name,\n id=id,\n subscription_id=self._config.subscription_id,\n api_version=api_version,\n template_url=self.validation_of_method_parameters.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n deserialized = self._deserialize(\"Product\", pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def test_installments_id_get(self):\n pass",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400"
] | [
"0.73838174",
"0.650162",
"0.6430197",
"0.6365987",
"0.59624434",
"0.5900515",
"0.5785338",
"0.5644582",
"0.5601564",
"0.55595815",
"0.5494436",
"0.5453852",
"0.5401711",
"0.5401711",
"0.5401711",
"0.53924847",
"0.5382707",
"0.537352",
"0.5360401",
"0.5338085",
"0.53339374",
"0.53284323",
"0.5309664",
"0.52894914",
"0.528726",
"0.5281324",
"0.52691746",
"0.52691746",
"0.52691746",
"0.5266107"
] | 0.77829665 | 0 |
Map towers to 4**12 integer Examples >>> state([[1], [3], [5, 4], [2]]) 668 = 0*4**0 + 3*4**1 + 1*4**2 + 2*4**3 + 2*4**4 | def state(towers):
ret = 0
for i, row in enumerate(towers):
for val in row:
ret += i * 4**(val-1)
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def state_to_locations(state: list) -> list:\n\n locations = []\n for i in range(0, 16):\n locations.append((0, 0))\n # Each tuple represents a location on the board as (row, column)\n\n \"\"\" \"locations\" keeps track of all fifteen numbers in the given state and the goal \n state. The location of the blank in the state is stored as the tuple at locations[0], \n the location of the number 1 is stored as locations[1], so on and so forth.\"\"\"\n\n \"\"\" Due to the nature of indices on a list, when a location is stored as a tuple \n (row, column), the four rows and four columns are represented as indices from 0 \n to 3, even though the numbers 1 through 15 are represented as indices from 1 to \n 15 on the list.\"\"\"\n\n for i in range(0, 4):\n for j in range(0, 4):\n \"\"\" The loop scans the given state and reads the integer at [i][j]. The number \n is stored at its corresponding index in the list \"locations\". By the time the \n loop finishes, the locations of all fifteen numbers as well as the blank in \n the given state will have been stored in the list.\"\"\"\n num = state[i][j]\n locations[num] = (i, j)\n\n return locations",
"def eval(self, state):\n valueOfPlayers = 0\n valueOfRebelAdvancments = 0\n valueOfLocations = 0\n\n\n\n for coordinate in state.gameState:\n if state.gameState[coordinate]==state.blank:\n continue\n elif state.gameState[coordinate]==state.rebel:\n valueOfRebelAdvancments = -coordinate[0]\n elif state.gameState[coordinate]==state.jedi:\n continue\n elif state.gameState[coordinate]==state.sith:\n continue\n \n valueOfLocations += valueOfRebelAdvancments\n\n \n valueOfPlayers = state.numRebels + 4*state.numJedi - 4*state.numSith\n \n return valueOfPlayers*4 + valueOfLocations",
"def locations_to_state(locations: list) -> list:\n\n state = []\n for i in range(0, 4):\n state.append([])\n # The first layer of the list consists of the four rows of a state\n for j in range(0, 4):\n state[i].append(-1)\n \"\"\" The second layer consists of the four tiles of a row (one of them could be \n the blank).\"\"\"\n\n for i in range(0, 16):\n state[locations[i][0]][locations[i][1]] = i\n \"\"\" locations[i][0] stores the row number, locations[i][0] stores the column \n number, and i is the number on the tile.\"\"\"\n\n return state",
"def state_from_id(index, dims_state_grid):\n\n entries = [index] * len(dims_state_grid)\n for i in range(1, len(dims_state_grid)):\n value = 1\n for j in range(i, len(dims_state_grid)):\n value *= dims_state_grid[j]\n for k in range(i - 1, len(dims_state_grid)):\n if k == i - 1:\n entries[k] //= value\n else:\n entries[k] %= value\n\n out = np.array(object=entries)\n\n return out",
"def convert_state(self, x, v):\n \n\n #print(self.offset[0] * self.tiling_displacement[len(self.tiling_displacement)-1][0] / self.tile_width[0])\n\n #state = 0\n n_features = self.total_tiles[0] * self.total_tiles[1] * self.n_tilings\n state = np.zeros(n_features, dtype=int)\n print(np.shape(state))\n\n for i in range(self.n_tilings):\n # Finds the index of the tile in both dimensions\n x_tile = (x - self.offset[0] * self.tiling_displacement[i][0] - self.x_range[0] + self.extra_tiles[0] * self.tile_width[0]) // self.tile_width[0]\n v_tile = (v - self.offset[1] * self.tiling_displacement[i][1] - self.v_range[0] + self.extra_tiles[1] * self.tile_width[1]) // self.tile_width[1]\n \n #x_tile = (x - self.offset[0] * self.tiling_displacement[i][0] - self.x_range[0] + self.extra_tiles[0] * self.tile_width[0]) // self.tile_width[0]\n #v_tile = (v - self.offset[1] * self.tiling_displacement[i][1] - self.v_range[0] + self.extra_tiles[1] * self.tile_width[1]) // self.tile_width[1]\n\n index = int(i * (self.total_tiles[0]*self.total_tiles[1]) + x_tile * self.total_tiles[0] + v_tile)\n print(\"INDEX\" , index)\n state[index] = 1\n\n\n\n\n \"\"\"\n # adds the correct bit (corresponding to the state of the tiling) to the state integer\n state += 2 ** (i * self.n_tiles**2 + x_tile * self.n_tiles + v_tile)\n \"\"\"\n print (\"Tiling %s: (%s,%s)\" % (i, x_tile, v_tile))\n\n return state",
"def tuple_map(x):\n return x * 2",
"def value(state: Dict) -> int:\n return sum(state.values())",
"def make_state_appliable_4ch(state):\n size = len(state)\n st_appl = np.zeros((size,)*4, dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n for p3 in range(size):\n for p4 in range(size):\n st_appl[p1, p2, p3, p4] = state[p1, p2, p3, p4] * sqrt(factorial(p1) * factorial(p2) * factorial(p3) * factorial(p4))\n return st_appl",
"def state_(state):\n return tuple( [ tuple( row ) for row in state ] )",
"def encode_state(player, players=\"\", apples=\"\", board_size=(15,15)):\n player_location = players[player-1].get('location')\n dx = 8-player_location[0]\n dy = 8-player_location[1]\n\n # One-Hot mapping dict\n oh_mapping = {'empty': np.array([1, 0, 0, 0, 0, 0]),\n 'apple': np.array([0, 1, 0, 0, 0, 0]),\n 'up': np.array([0, 0, 1, 0, 0, 0]),\n 'down': np.array([0, 0, 0, 1, 0, 0]),\n 'left': np.array([0, 0, 0, 0, 1, 0]),\n 'right': np.array([0, 0, 0, 0, 0, 1])}\n\n # Initialise an empty board_state\n board_state = [[oh_mapping[\"empty\"] for i in range(board_size[0])] for i in range(board_size[1])]\n # Load apples into board\n for location in apples:\n x,y = location\n x = (x+dx)%15\n y = (y+dy)%15\n board_state[x][y] = oh_mapping[\"apple\"]\n # Load other players into board\n for worm in players:\n location = worm[\"location\"]\n\n if location == [\"?\",\"?\"]:\n newlocation=[\"?\",\"?\"]\n\n else:\n newlocation=[]\n newlocation.append((location[0] + dx)%15)\n newlocation.append((location[1] + dy)%15)\n board_state[newlocation[0]][newlocation[1]] = oh_mapping[worm[\"orientation\"]]\n return board_state",
"def make_ad(state):\n return [adnumber(val) for val in state]",
"def actions(self, state):\n\n\t\t# Returns the possible numbers we can use.\n\t\treturn [(str)(x + 1) for x in range (0,self.size)]",
"def stateToCode(self, state):\n\n multiplier = 1\n code = \"\"\n for i in range(self.num_joints-1, -1, -1):\n num_angles = len(self.angles[i])\n code += str(int((state / multiplier ) % num_angles))\n multiplier *= len(self.angles[i])\n\n # Return the reversed code\n return code [::-1]",
"def map():",
"def state_transl(state):\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])",
"def state_transl(state):\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])",
"def naked_twins(self,state:dict):\n for unit in self.unitlist:\n\n #find any twins in unit and save as counter object\n all_pairs = Counter([state[box] for box in unit if len(state[box])==2])\n twins = [key for key,val in all_pairs.items() if val == 2]\n\n #loop through twins and replace number in the other boxes\n for twin in twins:\n for num in twin:\n for box in unit:\n if twin != state[box]:\n self.assign_value(state,box,state[box].replace(num,''))\n \n return state",
"def sw(n):\n return 4*n*n + 2*n + 1",
"def state_rep (self):\n\n # Computing dealer_card\n dealer_card = self.dealer[0]\n\n # Compute player_max\n player_max = self.max_safe_sum()\n\n # State should not be bust\n assert (1 <= dealer_card <= 10)\n assert (0 <= player_max <= 31)\n\n # Compute table number\n possibilities = get_full_state (self.me)\n # possibilities = [p for p in possibilities if 0 <= p <= 31]\n\n table_no = 0\n for idx, p in enumerate(possibilities):\n if 0 <= p <= 31:\n table_no = idx\n assert 0 <= table_no <= 3\n\n # print (possibilities)\n return (table_no, dealer_card, player_max)",
"def score(state, first_n):\n\n return sum([(i + first_n) for i, letter in enumerate(state) if letter == \"#\"])",
"def T(self,state,action):\n result = NumMap()\n s_p = action.apply(state)\n if not self.is_legal(s_p) or s_p.__eq__(state):\n result[state] = 1\n else: \n result[s_p] = 1 - self._p_fail\n result[state] = self._p_fail\n\n return result",
"def num_func_mapper(nums, funs):\n pass",
"def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n occupancy_map.append(current_occupancy_map)\r\n return occupancy_map",
"def observation(state):\n return state[:4]",
"def split_state(U):\n return U[0], U[1], U[2], U[3], U[4], U[5], r(U)",
"def hash_value(board_state):\n res = \"\"\n for i in range(1,10):\n res = res + board_state[i]\n return res",
"def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders",
"def computePosition(self, state):\n d = 0\n if state[5] == \"East\":\n d = 0\n elif state[5] == \"West\":\n d = 1\n elif state[5] == \"North\":\n d = 2\n else:\n d = 3\n return state[0]*64+state[1]*32+state[2]*16+state[3]*8+state[4]*4+d",
"def applyAction(state, action):\r\n if action == 'N':\r\n return (state[0] - 1, state[1])\r\n\r\n if action == 'E':\r\n return (state[0], state[1] + 1)\r\n\r\n if action == 'W':\r\n return (state[0], state[1] - 1)\r\n\r\n if action == 'S':\r\n return (state[0] + 1, state[1])",
"def map_view(state):\n string_rows = []\n\n for row in state:\n string_row1 = []\n string_row2 = []\n for cell in row:\n if \"grass\" not in cell and \"lapis_block\" not in cell:\n string_row1.append(\"XXX\")\n string_row2.append(\"XXX\")\n else:\n bottom_corners = \"E\" if \"lapis_block\" in cell else \" \"\n string_row1.append((\"A\" if \"Agent_2\" in cell else \" \") + \" \" +\n (\"P\" if \"Pig\" in cell else \" \"))\n string_row2.append(bottom_corners + (\"C\" if \"Agent_1\" in cell else \" \") + bottom_corners)\n string_rows.append(\"\".join(string_row1))\n string_rows.append(\"\".join(string_row2))\n\n return \"\\n\".join(string_rows)"
] | [
"0.5918972",
"0.58628327",
"0.5638546",
"0.56378",
"0.55551183",
"0.55406606",
"0.5532196",
"0.5530488",
"0.55285954",
"0.5523649",
"0.5497859",
"0.54556745",
"0.5416261",
"0.5404722",
"0.53601176",
"0.53601176",
"0.53426325",
"0.53384787",
"0.53346574",
"0.53269804",
"0.5321113",
"0.5320926",
"0.52901834",
"0.5289204",
"0.52871555",
"0.52763605",
"0.52584326",
"0.52529496",
"0.5238596",
"0.52335495"
] | 0.70966303 | 0 |
Builds a CoverMultiWaySearchTree of n nodes with all the currency codes in the standard | def build_tree(n=None) -> CoverMultiWaySearchTree:
tree = CoverMultiWaySearchTree()
codes = [currency.code for currency in cur]
shuffle(codes)
currencies = [Currency(code) for code in codes]
if n is None:
n = len(currencies)
for currency in currencies[:n]:
tree[currency._code] = currency
return tree | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_tree(n, d, name=defaultname):\n return build_tree_helper(1, n, 1, d, name)",
"def build(n):\n if n not in memo:\n res = []\n if n % 2 != 0:\n for i in range(1, n - 1):\n left = i\n right = n - 1 - i\n if left % 2 == 1 and right % 2 == 1:\n left_ = build(left)\n right_ = build(right)\n for l in left_:\n for r in right_:\n new_tree = TreeNode(0)\n new_tree.left, new_tree.right = l, r\n res.append(new_tree)\n memo[n] = res\n return memo[n]",
"def make_nodes(n):\n return [\n protein(namespace='NS', name=str(i))\n for i in range(1, n)\n ]",
"def generate_tree_general(node_lst, root_index):\n\n def generate_huffman(note):\n \"\"\" Return a new tree based on the given ReadNode node.\n\n @param ReadNode note: a given ReadNode\n @rtype: HuffmanNode\n\n >>> t = generate_huffman(ReadNode(0, 5, 0, 7))\n >>> t\n HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None))\n >>> t = generate_huffman(ReadNode(1, 1, 1, 0))\n >>> t\n HuffmanNode(None, HuffmanNode(None, None, None), HuffmanNode(None, None, None))\n >>> t.left.number\n 1\n >>> t.right.number\n 0\n \"\"\"\n\n if note.l_type == 0 and note.r_type == 0:\n return HuffmanNode(None, HuffmanNode(note.l_data),\n HuffmanNode(note.r_data))\n elif note.l_type == 0 and note.r_type == 1:\n k = HuffmanNode(None, HuffmanNode(note.l_data), HuffmanNode())\n k.right.number = note.r_data\n return k\n elif note.l_type == 1 and note.r_type == 0:\n k = HuffmanNode(None, HuffmanNode(), HuffmanNode(note.r_data))\n k.left.number = note.l_data\n return k\n else:\n k = HuffmanNode(None, HuffmanNode(), HuffmanNode())\n k.left.number, k.right.number = note.l_data, note.r_data\n return k\n\n def combine_trees(trees_, index):\n \"\"\" Return a new tree based on the list of frame trees and take the\n HuffmanNode tree at int index as the root tree.\n\n @param list[HuffmanNode] trees_:\n @param int index:\n @rtype: HuffmanNode\n\n >>> t = [ReadNode(0, 5, 0, 7), ReadNode(0, 10, 0, 12), ReadNode(1, 1, 1, 0)]\n >>> huff_list = [generate_huffman(x) for x in t]\n >>> a = combine_trees(huff_list, 2)\n >>> a == HuffmanNode(None, HuffmanNode(None, HuffmanNode(10, None, None), HuffmanNode(12, None, None)), HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None)))\n True\n >>> a.left.number\n 1\n >>> a.right.number\n 0\n >>> combine_trees(huff_list, 0)\n HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None))\n \"\"\"\n\n root = trees_[index]\n if root.left.number is None and root.right.number is None:\n return root\n elif root.left.number is None and root.right.number is not None:\n right = combine_trees(trees_, root.right.number)\n number = root.right.number\n root.right = right\n root.right.number = number\n return root\n elif root.left.number is not None and root.right.number is None:\n left = combine_trees(trees_, root.left.number)\n number = root.left.number\n root.left = left\n root.left.number = number\n return root\n else:\n left = combine_trees(trees_, root.left.number)\n num_l = root.left.number\n right = combine_trees(trees_, root.right.number)\n num_r = root.right.number\n root.left = left\n root.right = right\n root.left.number = num_l\n root.right.number = num_r\n return root\n\n trees = []\n for node in node_lst:\n trees.append(generate_huffman(node))\n root_tree = combine_trees(trees, root_index)\n\n return root_tree",
"def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree",
"def construct_trees(self, nodes):\n trees = {}\n for root in tqdm.tqdm(nodes):\n # note that nodes is an uniquely ordered set\n # tree = {0: {0 : [nb_1, nb_2, ..., nb_k], nb_1: [0, ...]}, 1 : {1: [nb_1,...], nb_1 : [..]},...}\n trees[root] = {}\n trees[root][root] = [root]\n # print('test...', trees[root][root])\n used_nodes = set()\n # queue has the form as following queue([root] for root in tqdm.tqdm(nodes)\n # with each node, we construct the tree rooted at that node, denoted as queue(['root'])\n queue = collections.deque([root]) # deque([0]) -> deque([0,1])\n while len(queue) > 0:\n cur_node = queue.popleft()\n used_nodes.add(cur_node)\n for sub_node in self.graph[cur_node]:\n # sub_node is not ordered\n if sub_node not in used_nodes:\n trees[root][cur_node].append(sub_node)\n trees[root][sub_node] = [cur_node]\n queue.append(sub_node)\n used_nodes.add(sub_node)\n return trees",
"def generate_tree_general(node_lst: List[ReadNode],\n root_index: int) -> HuffmanTree:\n tree = HuffmanTree(None)\n tree.left = _gen_tree_helper(node_lst, root_index, True)\n tree.right = _gen_tree_helper(node_lst, root_index, False)\n return tree",
"def get_n_random_itrees(self, n, subs_size):\n random_itrees = np.empty(n, dtype=object) # Allocate list for storing the trees.\n # TODO: parallelize!\n for k in np.arange(n):\n # Get a random sample of training examples to build next random itree.\n data_sub = self.data[np.random.choice(self.data.shape[0], subs_size, replace=False), :]\n random_itrees[k] = self.get_random_itree(data_sub) # Get next random itree \n self.random_itrees = random_itrees\n self.subs_size = subs_size",
"def build_tree_helper(x, n, d, max_d, name=defaultname):\n ret = {}\n ret['name'] = name(x)\n if d == max_d:\n return ret\n children = collatz.children(x, n)\n if x == 1:\n children = children[1:]\n if children:\n ret['children'] = [build_tree_helper(x, n, d + 1, max_d, name) for x in children]\n return ret",
"def create_Treeby_level(root, levelor, i, n):\n if i < n:\n temp = BinaryTreeNode(levelor[i])\n root = temp\n\n root.left = create_Treeby_level(root.left, levelor, 2 * i + 1, n)\n root.right = create_Treeby_level(root.right, levelor, 2 * i + 2, n)\n return root",
"def nocache_create_equal_size_subtrees():\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost",
"def constructTree(n, board, player, action, possible_position=None):\r\n max_branch_num = 7\r\n # 限制分支因子,不考虑过多局面\r\n node = Node(player=player, action=action)\r\n successors = []\r\n if possible_position == None:\r\n # 没有给定可选位置,我们自己现场计算\r\n logDebug(\"Point 1.\")\r\n possible_position = possible_pos(board)\r\n logDebug(\"Point 2.\")\r\n if possible_position == None:\r\n # 真的没有可选位置,😅\r\n return None\r\n\r\n is_critical = critical(board)\r\n new_board = copy.deepcopy(board)\r\n if is_critical == '4_1':\r\n # 我方四子连珠\r\n for pos in possible_position:\r\n new_board[pos[0]][pos[1]] = player\r\n new_special_pattern = is_special_pattern(board = new_board, player = player)\r\n old_special_pattern = is_special_pattern(board = board, player = player)\r\n if new_special_pattern[\"H4\"] != old_special_pattern[\"H4\"] or new_special_pattern[\"C4\"] != old_special_pattern[\"C4\"]:\r\n node = Node(player=player, action=action)\r\n successors = []\r\n successors.append(Node(player = 3-player, isLeaf = True, value = 1000000000, action = pos))\r\n # action 是到达这个节点,我们落子的位置\r\n node.successor = successors\r\n return node\r\n \r\n top_position = []\r\n\r\n if n == 1:\r\n # 树的深度只有一层\r\n if len(possible_position) < max_branch_num:\r\n for pos in possible_position:\r\n # :pos: 坐标 (x, y)\r\n # :prob_position: 可选位置,坐标的列表\r\n copy_board = copy.deepcopy(board)\r\n # 棋盘当前状态的拷贝(或许可以直接用深拷贝拷贝列表,不用一个一个位置去循环)\r\n copy_board[pos[0]][pos[1]] = player\r\n # 在当前位置放置当前棋手的棋子\r\n # player == 1 or 2\r\n temp_value = board_score(copy_board)\r\n # :util::board_evaluation:返回当前整个棋局的评分\r\n # 大评分对我们好,小评分对对方好\r\n # print temp_value\r\n # successors.append(Node(player=3-player, isLeaf=True, value=board_evaluation(board_copy), action=pos))\r\n successors.append(Node(player=3 - player, isLeaf=True, value=temp_value, action=pos))\r\n # 一层搜索树,下一个节点就是叶节点\r\n # player = 3 - player 完成棋手轮换\r\n # TODO: need to delete\r\n else:\r\n # 如果分支因子过大,只考虑落子后局面最好的前k个\r\n for pos in possible_position:\r\n board_copy = copy.deepcopy(board)\r\n board_copy[pos[0]][pos[1]] = player\r\n temp_value = board_score(board_copy)\r\n # :util::board_evaluation: 返回当前整个棋局的评分\r\n top_position.append(temp_value)\r\n temp = copy.deepcopy(top_position[:])\r\n # deepcopy\r\n temp.sort(reverse=True)\r\n # 从大到小排列\r\n for v in temp[0:max_branch_num]:\r\n pos = possible_position[top_position.index(v)]\r\n successors.append(Node(player=3 - player, isLeaf=True, value=v, action=pos))\r\n # 一层,后继节点是叶节点\r\n\r\n else:\r\n # 多层搜索树🌲\r\n if len(possible_position) < max_branch_num:\r\n # i = 0\r\n for pos in possible_position:\r\n # i += 1\r\n # print pos, 'else called', i\r\n copy_board = copy.deepcopy(board)\r\n copy_board[pos[0]][pos[1]] = player\r\n # print board_copy\r\n successors.append(constructTree(n-1, copy_board, 3-player, pos, update_possible_pos(pos, possible_position)))\r\n # 递归的调用\r\n else:\r\n for pos in possible_position:\r\n board_copy = copy.deepcopy(board)\r\n board_copy[pos[0]][pos[1]] = player\r\n top_position.append(board_score(board_copy))\r\n temp = copy.deepcopy(top_position[:])\r\n temp.sort(reverse=True)\r\n for v in temp[0:max_branch_num]:\r\n pos = possible_position[top_position.index(v)]\r\n copy_board = copy.deepcopy(board)\r\n copy_board[pos[0]][pos[1]] = player\r\n successors.append(constructTree(n - 1, copy_board, 3 - player, pos, update_possible_pos(pos, possible_position)))\r\n node.successor = successors\r\n return node",
"def fn(n):\n if n == 1: return [TreeNode()]\n ans = []\n for nn in range(1, n, 2): \n for left in fn(nn):\n for right in fn(n-1-nn): \n ans.append(TreeNode(left=left, right=right))\n return ans",
"def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n j=self.research_index(self.nodes,x_min)\n self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))",
"def MakeNodesforScen(model, BFs, scennum):\n ndn = \"ROOT_\"+str((scennum-1) // BFs[0]) # scennum is one-based\n retval = [scenario_tree.ScenarioNode(\"ROOT\",\n 1.0,\n 1,\n model.StageCost[1],\n None,\n [model.Pgt[1],\n model.Pgh[1],\n model.PDns[1],\n model.Vol[1]],\n model),\n scenario_tree.ScenarioNode(ndn,\n 1.0/BFs[0],\n 2,\n model.StageCost[2],\n None,\n [model.Pgt[2],\n model.Pgh[2],\n model.PDns[2],\n model.Vol[2]],\n model, parent_name=\"ROOT\")\n ]\n return retval",
"def __init__(self, n):\n self._count = n\n self._parent = list(range(n))\n self._rank = [0]*n\n\n \"\"\"\n Added a 'binary' list to keep track of sites that have been\n unioned, as well as an integer that counts the number of\n isolated sites. Also a list to keep track of the roots'\n tree sizes, as well as an integer that holds the maximum\n tree size (maximum component in the graph)\n \"\"\"\n self._nodes = [1]*n\n self._iso = n\n self._size = [1]*n\n self._max = 0",
"def _build_tree(self, index):\n\n children = []\n to_string = '({0}/{1}'.format(self.tokens[index], self.labels[index])\n\n for i in range(1, len(self.tokens)):\n\n if i not in self._visited and self.heads[i] == index:\n self._visited.append(i)\n child_tree = {}\n c, s = self._build_tree(i)\n child_tree[(self.spans[i][0], self.spans[i][1], self.tokens[i], self.labels[i])] = c\n children.append(child_tree)\n to_string += ' {0}'.format(s)\n\n if len(children) > 0:\n to_string += ')'\n return children, to_string\n else:\n return children, to_string[1:]",
"def make_trees(self):\n self.trees = build_recursively_from_cells(self.cells, container=self)\n# self.trees = []\n# for cell in self.cells:\n# if cell.bpointer is None: # test whether cell is root\n# tree = Colony(container=self)\n# tree.add_cell_recursive(cell)\n# self.trees.append(tree)\n return",
"def MakeAllScenarioTreeNodes(model, bf):\n TreeNodes = dict()\n TreeNodes[\"ROOT\"] = scenario_tree.ScenarioNode(\"ROOT\",\n 1.0,\n 1,\n model.StageCost[1],\n None,\n [model.Pgt[1],\n model.Pgh[1],\n model.PDns[1],\n model.Vol[1]],\n model)\n for b in range(bf):\n ndn = \"ROOT_\"+str(b)\n TreeNodes[ndn] = scenario_tree.ScenarioNode(ndn,\n 1.0/bf,\n 2,\n model.StageCost[2],\n None,\n [model.Pgt[2],\n model.Pgh[2],\n model.PDns[2],\n model.Vol[2]],\n model,\n parent_name=\"ROOT\")",
"def partial_tree(s, n):\n if n == 1:\n return (Tree(s.first), s.rest)\n elif n == 2:\n return (Tree(s.first, [Tree(s.rest.first)]), s.rest.rest)\n else:\n left_size = (n-1)//2\n right_size = n - left_size - 1\n \"*** YOUR CODE HERE ***\"",
"def fetchNodes(tree):\n if tree.results is None: #Check if the node is a branch\n condItems = {} #Initialize a container for the node conditions from lower branches\n v = [\"true\", \"false\"] #\"Veracity values\"\n for branch, veracity in [(tree.tb, v[0]), (tree.fb, v[1])]: #iterate over this node's true and false child nodes\n lower_results = fetchNodes(branch)\n if len(lower_results) == 1: #Check if child node is actually a leaf. If so,\n lower_results.insert(0, (tree.col, tree.value, veracity))\n condItems[veracity] = [lower_results] #Initialize the condition needed to reach that leaf\n else:\n condItems[veracity] = [] #If the child is not a leaf, initialize an empty list to contain its updated conditions\n for item in lower_results: #Iterate over each set of node conditions that stem from this branch\n new_descriptor = deepcopy(item) #make a deep copy of the list of node conditions from the lower level nodes\n #insert this node's condition at the beginning of each of the node conditions from the lower levels\n new_descriptor.insert(0, (tree.col, tree.value, veracity)) \n condItems[veracity].append(new_descriptor) #append the updated set of node conditions to the branches items\n node_conditions = deepcopy(condItems[v[0]]) #Initialize the complete list of node conditions that stem from this node\n node_conditions.extend(deepcopy(condItems[v[1]])) #Add the node conditions from the second branch of this node\n return node_conditions #Send the full set of node conditions from this node up to the higher nodes.\n else: #If the node is a leaf, return the dictionary of results\n return [tree.results]",
"def buildTree(self):\n self.initClusters()\n while len(self.centroids) > 1:\n print \"Iteration %d\" % self.labelcount\n for i in self.centroids:\n \tprint self.centroids[i], i\n \n l1, l2 = self.closestClusters()\n n1, n2 = self.getNumPoints(l1, l2)\n\n self.centroids[('Cl_%d' % self.labelcount, l1, l2)] = \\\n (n1*self.centroids[l1] + n2*self.centroids[l2])/(n1 + n2)\n \n del self.centroids[l1]\n del self.centroids[l2]\n self.labelcount += 1\n\n\n return ('Cl_%d' % (self.labelcount-1), l1, l2)",
"def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree",
"def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree",
"def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree",
"def crootnxtest(n):\n b = bina(n)\n for bi in b:\n print(bi)\n q = QuantumRegister(n, 'q')\n circ = QuantumCircuit(q)\n for i in bi:\n circ.x(q[i])\n for _ in range(2 ** (n)):\n crootnx(circ, q, q[0], q[n - 1], 2 ** n, False)\n circ.barrier(q)\n launch2(circ)\n\n circ_m = measure(circ, q, [i for i in range(n)])\n\n return circ_m",
"def __generate_central_nodes(self,k=3):\n if k < 3:\n k = 3\n \n self.__logger.info(\"CENTRAL_NODES: Try to seek {} nodes which are currently central\".format(k)) \n res = [n for n,_ in sorted(nx.betweenness_centrality(self.G).items(),key=itemgetter(1),reverse=True)[:4*k]]\n self.__logger.info(\"CENTRAL_NODES: Generated top {} central nodes (according to betweeness centrality)\".format(len(res)))\n \n self.__logger.info(\"CENTRAL_NODES: Sample {} items from the candidates as was requested\".format(k))\n tmp = list(res)\n random.shuffle(tmp)\n return tmp[0:k]",
"def cuckoo_search(n=None, nd=None, Lb=None, Ub=None, pa=None):\n\tif n is None:\n\t\tn =25\n\n\tif nd is None:\n\t\tnd=21\n\n\tif Lb is None:\n\t\tLb = np.ones(nd)*0\n\tif Ub is None:\n\t\tUb = np.ones(nd)*5\n\n\tif pa is None:\n\t\tpa = 0.25\n\n\t# creation of the list for parameter pairs \n\t\n\tstep = 1\n\n # initialization of the nests\n\tnests = np.zeros((n,nd))\n\tfor i in range(n):\n\t\tnests[i,:] = Lb + (Ub-Lb)*np.random.rand(len(Lb))\n\n\tfitness = 10**10 * np.ones((n,1))\n\tbest_nest, fmin, nest, fitness, N_iter = single_cuckoo_search(nests,fitness,Lb,Ub,pa,step) \n\n\treturn best_nest, fmin, nest, fitness, N_iter",
"def __init__(self, n: int, identity_element_func, binary_operation_func):\n self.n = n\n self.identity = identity_element_func\n self.binary = binary_operation_func\n n2 = 1 # n2はnより大きい2の冪数\n while n2 < n:\n n2 <<= 1\n self.n2 = n2\n self.tree = [identity_element_func() for _ in range(n2 << 1)]",
"def make_kmer_tree(self, k, nums):\n nodes = [(np.array([]), [])]\n for it in range(k):\n new_nodes = []\n count = 0\n for i, node in enumerate(nodes):\n n, e = node\n if len(n) < it:\n continue\n for a in nums:\n count += 1\n new_node = (np.append(n, a), [])\n new_nodes.append(new_node)\n nodes[i][1].append(len(nodes) + count - 1)\n nodes += new_nodes\n return nodes"
] | [
"0.5964563",
"0.5795323",
"0.5791167",
"0.5771873",
"0.55232745",
"0.5508132",
"0.5490907",
"0.5467991",
"0.5449655",
"0.53897923",
"0.53540474",
"0.5349767",
"0.53464985",
"0.53308815",
"0.5326568",
"0.5323885",
"0.5319227",
"0.5293522",
"0.5292058",
"0.5261324",
"0.52417105",
"0.523069",
"0.5225194",
"0.5205418",
"0.520145",
"0.5183176",
"0.5179739",
"0.51769155",
"0.5175807",
"0.5175123"
] | 0.8640555 | 0 |
Find the number of items in nodes in range [a, b] | def get_number_of_useful_items(nodes, a: str, b: str) -> int:
return sum(int(a <= item.key <= b) for node in nodes for item in node.elements) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def f02_03_countElemBetween(l, a, b):\n return sum([a < x < b for x in l])",
"def numberOfNodes(i):\n \n if (i == 1 or i == 2):\n i = RANGE\n else:\n i = i + RANGE\n return i",
"def countNodes(epr):\n result = 1\n argLst = epr.args\n for arg in argLst:\n result += countNodes(arg)\n return result",
"def count_entries(numbers):\n nodes = numbers[0]\n mt_entries = numbers[1]\n\n total = 0\n offset = 2\n for _ in range(nodes):\n entries, value = count_entries(numbers[offset:])\n offset += entries\n total += value\n\n for entry in numbers[offset:offset+mt_entries]:\n total += entry\n offset += 1\n return offset, total",
"def count_nodes(self, term=None, labels: istr = None) -> int:",
"def count(self):\n\t\treturn len(list(self.nodes))",
"def count_nodes(self, term=None, labels: istr = None):",
"def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res",
"def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])",
"def count_in_range(start, end, check):\n count = 0\n for val in range(start, end):\n if check(val):\n count += 1\n\n return count",
"def howmany_within_range(row, minimum, maximum):\n count = 0\n for n in row:\n if minimum <= n <= maximum:\n count = count + 1\n return count",
"def count_nodes(self):\n\t\treturn self.__count_nodes(self)",
"def overlap(g, node_1, node_2):\n inter = len(set(nx.neighbors(g, node_1)).intersection(set(nx.neighbors(g, node_2))))\n return float(inter)",
"def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt",
"def subs_at_point(a,b,x,y):\n\tmax_i = x + y\n\tsubs = 0\n\t# Counts left -> right, top -> bottom\n\tfor j in range(b, x+y):\n\t\t# stops when it can't go down anymore\n\t\tif not valid(a,j,x,y): return subs\n\t\tfor i in range(a,max_i):\n\t\t\tif valid(i,j,x,y): subs += 1\n\t\t\telse:\n\t\t\t\tmax_i = min(max_i, i)\n\t\t\t\tbreak",
"def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res",
"def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )",
"def count_all(a, b):\n return len([1 for w in b if w == a])",
"def hypergraph_common_edges(u, v, hypergraph):\n total = 0\n for e in hypergraph.edges():\n if u in e.elements and v in e.elements:\n total += 1\n return total",
"def countElements(self, nums):\n import sys\n max_n = -sys.maxint\n min_n = sys.maxint\n\n for n in nums:\n max_n = max(n, max_n)\n min_n = min(n, min_n)\n\n count = 0\n for n in nums:\n if min_n < n < max_n:\n count += 1\n return count",
"def n(self):\n return sum(list(self.nodes.values()))",
"def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)",
"def count_fillin(graph, nodes):\n count = 0\n for v1 in nodes:\n for v2 in nodes:\n if v1 != v2 and v2 not in graph[v1]:\n count += 1\n return count / 2",
"def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten",
"def query_range(tree, start_y, start_x, end_y, end_x):\n res = 0\n start_y -= 1\n\n while end_y > start_y:\n res += bit.query_range(tree[end_y], start_x, end_x)\n end_y -= (end_y & -end_y)\n\n while start_y > end_y:\n res -= bit.query_range(tree[start_y], start_x, end_x)\n start_y -= (start_y & -start_y)\n\n return res",
"def get_node_count(self) -> Iterable:\n return self._g.V().count().toList()[0]",
"def __len__(self):\n return self.count_of(CUBA.NODE)",
"def common_count(self, node_1, node_2):\n return int(len(set(nx.neighbors(self.graph, node_1)).intersection(set(nx.neighbors(self.graph, node_2)))))",
"def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count",
"def count(self):\r\n return self.count_helper(self.top_node)"
] | [
"0.69374925",
"0.6823391",
"0.65383244",
"0.6350672",
"0.6334986",
"0.6127555",
"0.6124622",
"0.6086067",
"0.6057941",
"0.6051697",
"0.6039297",
"0.5993141",
"0.59556144",
"0.59417903",
"0.59389144",
"0.5936251",
"0.59148836",
"0.5914569",
"0.5904718",
"0.5901973",
"0.58777267",
"0.58339524",
"0.58128494",
"0.5792805",
"0.5790075",
"0.5773259",
"0.5751932",
"0.5748499",
"0.5736957",
"0.573489"
] | 0.8006855 | 0 |
Tries to compute the (k, c1, c2)-cover of the tree with the minimum number of nodes. It follows a greedy-like approach. | def compute_cover(tree: CoverMultiWaySearchTree,
k: int, c1: str, c2: str) -> Optional[Set[CoverMultiWaySearchTree.Position.Node]]:
# Step 1: Find nodes useful for the (k, c1, c2)-cover
nodes = tree.find_nodes_in_range(c1, c2)
# Step 2: Count number of items in range [c1, c2]
n = get_number_of_useful_items(nodes, c1, c2)
# Step 3: Compare with k
if not n >= k:
return None
# Step 4: Sort nodes by number of useful items
pq = HeapPriorityQueue(contents=[(get_number_of_useful_items([node], c1, c2), node) for node in nodes])
# Step 5: Greedy approach - Use the node with the maximum number of useful items
cover = set()
while k > 0:
useful_items, node = pq.remove_max()
k -= useful_items
cover.add(node)
return cover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Trees__LCA_LowestCommonDenominator():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:# URL:https://www.hackerrank.com/challenges/binary-search-tree-lowest-common-ancestor/problem\n '''\n class Node:\n def __init__(self,info): \n self.info = info \n self.left = None \n self.right = None \n // this is a node of the tree , which contains info as data, left , right\n '''\n def lca(root, v1, v2):\n # Find a and b. Link child nodes to parent to be able to backtrack.\n # (1) Note, we add 'parent' attribute to node dynamically via node.parent = ...\n root.parent = None\n node_stack = []\n node_stack.append(root)\n v1_node, v2_node = None, None\n while node_stack:\n node = node_stack.pop()\n if not v1_node and node.info == v1:\n v1_node = node\n if not v2_node and node.info == v2:\n v2_node = node\n for child_node in [node.left, node.right]:\n if child_node:\n child_node.parent = node # (1)\n node_stack.append(child_node)\n\n # Generate path from A to root.\n curr = v1_node\n a_to_root = set()\n while curr:\n a_to_root.add(curr.info)\n curr = curr.parent\n\n # traverse up b until you come across an element in a's path to parent.\n curr = v2_node\n while curr:\n if curr.info in a_to_root:\n return curr\n else:\n curr = curr.parent\n\n print(\"Shouldn't be here, Something went wrong\")\n\n # # Recursive. (Iterative is better, but did recursive for practice.) ~15 min.\n # # Main idea is that we count the number of v1/v2's found of the subnodes.\n # # If a node has sum of 2, we know it's the lca.\n # def lca(root, v1, v2):\n # def lca_helper(node):\n # ret_node = None\n # if not node:\n # return 0, None\n # v_match_counter = 0\n # if node.info in [v1, v2]:\n # v_match_counter += 1\n # left_count, left_node_ret = lca_helper(node.left)\n # right_count, right_node_ret = lca_helper(node.right)\n # v_match_counter += left_count + right_count\n # if v_match_counter == 2:\n # ret_node = node\n # if left_node_ret:\n # ret_node = left_node_ret\n # if right_node_ret:\n # ret_node = right_node_ret\n # return v_match_counter, ret_node\n\n # _, node = lca_helper(root)\n # return node",
"def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree",
"def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))",
"def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:\n\n @lru_cache(None)\n def dp(status, take, avaliable):\n if status == target: # all taken\n return 0\n bin_take = bin(take)[2:][::-1]\n for i,v in enumerate(bin_take):\n if v == '1':\n for j in edges[i]: # the indegree number changed during recursion\n indegree[j] -= 1\n if indegree[j] == 0:\n avaliable |= (1 << j)\n status |= (1 << i)\n # print('i, status', i, v, bin(status))\n # take -= (1 << i)\n\n lst = [i for i,v in enumerate(bin(avaliable)[2:][::-1]) if v == '1']\n # print(indegree)\n # print(lst)\n if not lst:\n res = 0\n # print('lst', lst, k)\n elif len(lst) <= k:\n res = dp(status, avaliable, 0)\n else:\n res = float('inf')\n for comb in combinations(lst, k):\n # print(comb)\n t, a = 0, avaliable\n for d in comb:\n t |= (1 << d)\n a -= (1 << d)\n res = min(res, dp(status, t, a))\n for i,v in enumerate(bin_take):\n if v == '1':\n for j in edges[i]: \n indegree[j] += 1\n return 1 + res\n\n self.counts = 0\n edges = defaultdict(list)\n indegree = Counter()\n for i,j in dependencies:\n edges[i].append(j)\n indegree[j] += 1\n\n courses = set(range(1, n+1))\n start = courses - indegree.keys()\n target = 2**(n+1) - 1\n avaliable = 0\n for i in start:\n avaliable |= (1 << i)\n\n return dp(1, 0, avaliable) - 1# first dp not take courses",
"def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts",
"def __generate_central_nodes(self,k=3):\n if k < 3:\n k = 3\n \n self.__logger.info(\"CENTRAL_NODES: Try to seek {} nodes which are currently central\".format(k)) \n res = [n for n,_ in sorted(nx.betweenness_centrality(self.G).items(),key=itemgetter(1),reverse=True)[:4*k]]\n self.__logger.info(\"CENTRAL_NODES: Generated top {} central nodes (according to betweeness centrality)\".format(len(res)))\n \n self.__logger.info(\"CENTRAL_NODES: Sample {} items from the candidates as was requested\".format(k))\n tmp = list(res)\n random.shuffle(tmp)\n return tmp[0:k]",
"def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = ((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T",
"def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 1e10",
"def quickbb(graph, fast=True):\n\n \"\"\"Given a permutation of the nodes (called an elimination ordering),\n for each node, remove the node and make its neighbors into a clique.\n The maximum degree of the nodes at the time of their elimination is\n the width of the tree decomposition corresponding to that ordering.\n The treewidth of the graph is the minimum over all possible\n permutations.\n \"\"\"\n\n best = Solution() # this gets around the lack of nonlocal in Python 2\n best.count = 0\n\n def bb(graph, order, f, g):\n best.count += 1\n if len(graph) < 2:\n if f < best.ub:\n assert f == g\n best.ub = f\n best.order = list(order) + list(graph)\n\n else:\n vs = []\n for v in graph:\n # very important pruning rule\n if simplicial(graph, v) or almost_simplicial(graph, v) and len(graph[v]) <= lb:\n vs = [v]\n break\n else:\n vs.append(v)\n\n for v in vs:\n graph1 = copy_graph(graph)\n eliminate_node(graph1, v)\n order1 = order + [v]\n # treewidth for current order so far\n g1 = max(g, len(graph[v]))\n # lower bound given where we are\n f1 = max(g, lower_bound(graph1))\n if f1 < best.ub:\n bb(graph1, order1, f1, g1)\n return\n\n graph = {u: set(graph[u]) for u in graph}\n\n order = []\n best.ub, best.order = upper_bound(graph)\n lb = lower_bound(graph)\n\n # This turns on the branch and bound algorithm that\n # gets better treewidth results, but takes a lot\n # longer to process\n if not fast:\n if lb < best.ub:\n bb(graph, order, lb, 0)\n\n # Build the tree decomposition\n tree = defaultdict(set)\n\n def build(order):\n if len(order) < 2:\n bag = frozenset(order)\n tree[bag] = set()\n return\n v = order[0]\n clique = graph[v]\n eliminate_node(graph, v)\n build(order[1:])\n for tv in tree:\n if clique.issubset(tv):\n break\n bag = frozenset(clique | {v})\n tree[bag].add(tv)\n tree[tv].add(bag)\n\n build(best.order)\n return tree",
"def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R",
"def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def make_set_cover_nr(gRNA_hits, num_sets = 1, target_ids = [], low_coverage_penalty = 0,\n num_lengths_to_track = None, prioritise_3prime = False, optimal_depth = 5,\n suppress_warning = False):\n collapsed_grnas = gRNA_hits.collapse()\n if not target_ids:\n target_ids = set().union(*[set(cg) for cg in collapsed_grnas])\n else:\n target_ids = set(target_ids)\n ## function to regenerate set cover solutions from collapsed_grna object\n collapsed_grnas_original = collapsed_grnas.copy()\n def generate_sc_solutions():\n ## sort in order of smallest set cover size, smallest redundancy, and size of largest set in set cover\n minweight_sc = limited_minweight_SC(collapsed_grnas, num_sets, targets = target_ids,\n low_coverage_penalty = low_coverage_penalty,\n num_lengths_to_track = num_lengths_to_track)\n ## optimal solutions\n max_depth = min(optimal_depth, max(map(len, minweight_sc)))\n max_redundancy = max(map(lambda C:C.redundancy, minweight_sc))/len(target_ids)\n print(max_depth, max_redundancy)\n optimal_sc = limited_optimal_SC(target_ids, collapsed_grnas_original,\n size = max_depth, redundancy = max_redundancy)\n print(\"num unfiltered optimal sc:\", len(optimal_sc))\n ## remove duplicates\n optimal_sc = [C for C in optimal_sc\n if all(map(lambda minweight_C:(len(C) != minweight_C\n and C != minweight_C),\n minweight_sc))]\n print(\"num filtered optimal sc:\", len(optimal_sc))\n return sorted(minweight_sc + optimal_sc,\n key = lambda C:(len(C), C.redundancy, -C.max_coverage))\n sc_solutions = []\n sc_solutions.extend(generate_sc_solutions())\n eliminated_grna = []\n ## function to generate set covers\n def make_set_cover(restore = []):\n ## restore only works if gRNA belonged in the current set cover\n curr_sc = sc_solutions[0]\n for grna in restore:\n curr_sc.add_grna(grna)\n eliminated_grna.remove(grna)\n ## if current set cover solution has at least one CollapsedgRNA with no gRNA left\n while not curr_sc.all_not_empty():\n sink = sc_solutions.pop(0) ## remove set cover solution\n ## generate more possible gRNA sets if no pre-generated set covers are left\n if not sc_solutions:\n collapsed_grnas.remove_grna(*eliminated_grna)\n collapsed_grnas.remove_empty()\n sc_solutions.extend(generate_sc_solutions())\n if not sc_solutions:\n if not suppress_warning:\n print((\"\\nError: The provided gRNA sequences cannot cover all\"\n \" target sequences at least once.\\n\"))\n return []\n ## select next solution\n curr_sc = sc_solutions[0]\n ## consume=True -> remove selected gRNA from CollapsedgRNA\n output = curr_sc.generate_grna_set(prioritise_3prime = prioritise_3prime, consume = True)\n eliminated_grna.extend(output)\n return output\n return make_set_cover",
"def cheapCycling(SList,CList):\n N = len(CList)\n nodes= list(range(N))\n Udict = dict(zip(nodes,CList)) #dictionary nodes:neighbours\n queue = deque() #things to check\n checked = {} #checked nodes to avoid double checking\n stations=np.zeros((N,2))#node:arrival output\n\n min_arr = 1000000 #initialise fat value\n arr_node=-1\n \n min_dep = 1000000\n dep_node=-1\n\n while Udict:\n #node = Udict.pop(next(iter(Udict))) #extract first node\n node = next(iter(Udict)) #select first node\n queue.append(node) #add node to queue\n while queue: #while queue is non empty\n node = queue.popleft() #set/extract node to element of queue\n Udict.pop(node) #make sure is also removed from overarching dict\n for nb in CList[node]: #neighbours of node\n if nb not in checked:\n if SList[nb][0] <min_arr: #check if new minimum\n min_arr = SList[nb][0]\n arr_node = nb\n if SList[nb][1] <min_dep: #same but for departure\n min_dep = SList[nb][1]\n dep_node= nb\n queue.append(nb)\n checked[nb] = 1\n checked[node]=1\n \n stations[list(checked.keys()),0] = arr_node #dropping in the cheapest arr and dep nodes for all nodes in connected part\n stations[list(checked.keys()),1] = dep_node\n checked={} #reset checked to empty for new connected part IMPORTANT!\n min_arr=1000000 #reset minimum values\n min_dep=1000000 #reset min dep values\n\n return stations",
"def gcd1(n1, n2):\n\n best = 1\n\n for i in range(1,n2+1):\n if n1 % i == 0 and n2 % i == 0 and i > best:\n best = i \n \n return best",
"def _fetch(tree, impurity_crit, dataSet, saved):\n\t\tif tree.cut_off is None:\n\t\t\treturn len(dataSet)*impurity_crit(dataSet), 1\n\n\t\telse:\n\t\t\tD1, D2 = DecisionTree._binarySplit(dataSet, *tree.cut_off)\n\t\t\tleft_impurity, left_leaves = DecisionTree._fetch(tree.left, impurity_crit, D1, saved)\n\t\t\tright_impurity, right_leaves = DecisionTree._fetch(tree.right, impurity_crit, D2, saved)\n\n\t\t\t# find node and set\n\t\t\tsaved.setdefault('node',[]).append(tree)\n\t\t\tsaved.setdefault('set', []).append(dataSet)\n\t\t\t# calculate g(t) for current TreeNode\n\t\t\tg = (len(dataSet)*impurity_crit(dataSet)-left_impurity-right_impurity) / \\\n\t\t\t\t(left_leaves + right_leaves - 1)\n\t\t\tsaved.setdefault('G',[]).append(g)\n\t\t\t\n\t\treturn left_impurity+right_impurity, left_leaves+right_leaves",
"def buildTreePandas(rows, res, min_ppl = None, maxDepth=None, scoref=entropy, depth=0):\n minimum_ppl = deepcopy(min_ppl)\n num_ppl = len(rows)\n \n if min_ppl is not None and num_ppl <= min_ppl:\n #Extra protection to stop the recursion\n return decisionNode(results=__uniqueCountsPandas(rows, res)) \n if num_ppl==0: \n return decisionNode( )\n newDepth = depth + 1\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth):\n #print \"Hooray I got here.\"\n return decisionNode(results=__uniqueCountsPandas(rows, res))\n current_score=scoref(rows, resCol = res)\n # Set up some variables to track the best criteria\n best_gain=0.0\n best_criteria=None\n best_sets=None\n \n featColumns=rows.columns.tolist()\n featColumns.remove(res)\n for col in featColumns:\n # Generate the list of different values in\n # this column\n column_values=rows.loc[:,col].unique()\n # Now try dividing the rows up for each value\n # in this column\n copy = rows.sort(columns = col)\n for value in column_values:\n (set1,set2)=__dividePandas(copy,col,value)\n # Information gain\n p=float(len(set1))/len(rows)\n gain=current_score-p*scoref(set1, resCol = res)-(1-p)*scoref(set2, resCol = res)\n size_min = 0 if minimum_ppl is None else minimum_ppl - 1\n if gain>best_gain and len(set1)>size_min and len(set2)>size_min:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Create the subbranches\n if best_gain>0:\n trueBranch=buildTreePandas(best_sets[0], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n falseBranch=buildTreePandas(best_sets[1], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCountsPandas(rows, res))",
"def select(self):\n best_qsa_star_add = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n qsa_star_add = qsa_star + 0.2 * self.c * sqrt(log(self.visits) / c.visits)\n if qsa_star_add > best_qsa_star_add:\n best_qsa_star_add = qsa_star_add\n best_node = c\n return best_node",
"def question4(T,r,n1,n2):\n\n\tif(len(T)<=1):\t\t\t\t\t\t\t\t# Edge case : If the Tree only consists of a root and no children\n\t\treturn -1\n\n\tif(n1==None or n2==None):\t\t\t\t\t# Edge case : If n1 and n2 are not actually numbers\n\t\treturn -1\n\n\tlen_T = len(T)\n\tif(not n1 < len_T or not n2 < len_T):\t\t# Edge case : If the nodes gives in parameters do not actually exist in the tree\n\t\treturn -1\n\n\tn1_list = []\t\t\t\t\t\t\n\tn2_list = []\n\n\tfor i in range(len(T)):\t\t\t\t\t\t# Traverse the list and append all the parents of node1 if found in O(N)\n\t\tif T[i][n1]==1:\n\t\t\tn1_list.append(i)\n\n\tfor i in range(len(T)):\t\t\t\t\t\t# Traverse the list and append all the parents of node2 is found in O(N)\n\t\tif T[i][n2]:\n\t\t\tn2_list.append(i)\n\n\t\t\t\t\t\t\t\t\t\t\t\t# The root is a common ancestor of every node in the tree\n\tif not r in n1_list:\t\t\t\t\t\t# check if the root is in the list, if not, add it\n\t\tn1_list.append(r)\n\n\tif not r in n2_list:\t\t\t\t\t\t# check if the root is in the list, if not, add it\n\t\tn2_list.append(r)\n\n\tn1_list = reversed(n1_list)\t\t\t\t\t# Since we are operating on a binary tree, we sort\n\tfor i in n1_list:\t\t\t\t\t\t\t# in decending order to operate on the latest nodes\n\t\tif i in n2_list:\t\t\t\t\t\t# if a match is found, we know that it is the lowest common ancestor\n\t\t\treturn i \t\t\t\t\t\t\t# If nothing is found, the root node is bound to be returned. And it correct.",
"def test_cover():\n # D corresponds to the items and the transactions in which they appear, it is the standard code table\n D = {\n \"B\": Bitmap([0, 1, 2]),\n \"A\": Bitmap([0, 1]),\n \"C\": Bitmap([0, 2]),\n \"D\": Bitmap([2])\n }\n # ct corresponds to the itemsets on which we want to calculate the cover\n ct = [\n frozenset(\"ABC\"),\n frozenset(\"AB\"),\n frozenset(\"BC\"),\n frozenset(\"A\"),\n frozenset(\"B\"),\n frozenset(\"C\"),\n frozenset(\"D\")\n ]\n CTc = cover(D, ct)\n\n assert CTc[frozenset(\"ABC\")] == Bitmap([0])\n assert CTc[frozenset(\"AB\")] == Bitmap([1]) # AB appears only in tid_1 for usage because ABC is placed before in ct\n # so the AB of the first transaction has been covered by ABC\n assert CTc[frozenset(\"BC\")] == Bitmap([2])\n assert CTc[frozenset(\"A\")] == Bitmap()\n assert CTc[frozenset(\"B\")] == Bitmap()\n assert CTc[frozenset(\"C\")] == Bitmap()\n assert CTc[frozenset(\"D\")] == Bitmap([2])",
"def bk(g,r,p,x, depth=0):\n # if p and x are empty:\n if not p and not x:\n print('Maximal Clique found: ', r)\n\n while p:\n # choose and remove a node from p\n node = p.pop()\n neighbors = list(g.neighbors(node))\n bk(g, r.union([node]), p.intersection(neighbors), x.intersection(neighbors), depth=depth+1)\n x = x.union([node])",
"def fetchNodes(tree):\n if tree.results is None: #Check if the node is a branch\n condItems = {} #Initialize a container for the node conditions from lower branches\n v = [\"true\", \"false\"] #\"Veracity values\"\n for branch, veracity in [(tree.tb, v[0]), (tree.fb, v[1])]: #iterate over this node's true and false child nodes\n lower_results = fetchNodes(branch)\n if len(lower_results) == 1: #Check if child node is actually a leaf. If so,\n lower_results.insert(0, (tree.col, tree.value, veracity))\n condItems[veracity] = [lower_results] #Initialize the condition needed to reach that leaf\n else:\n condItems[veracity] = [] #If the child is not a leaf, initialize an empty list to contain its updated conditions\n for item in lower_results: #Iterate over each set of node conditions that stem from this branch\n new_descriptor = deepcopy(item) #make a deep copy of the list of node conditions from the lower level nodes\n #insert this node's condition at the beginning of each of the node conditions from the lower levels\n new_descriptor.insert(0, (tree.col, tree.value, veracity)) \n condItems[veracity].append(new_descriptor) #append the updated set of node conditions to the branches items\n node_conditions = deepcopy(condItems[v[0]]) #Initialize the complete list of node conditions that stem from this node\n node_conditions.extend(deepcopy(condItems[v[1]])) #Add the node conditions from the second branch of this node\n return node_conditions #Send the full set of node conditions from this node up to the higher nodes.\n else: #If the node is a leaf, return the dictionary of results\n return [tree.results]",
"def build_knn(coords, k=6, **kwargs):\n \n tree = BallTree(coords, **kwargs)\n _, ind = tree.query(coords, k=k+1) # the first k is \"oneself\"\n pairs = pairs_from_knn(ind)\n return pairs",
"def exact_min_vertex_cover(graph):\n for N in range(1,len(graph.nodes())+1):\n for graph_sub in it.combinations(sorted(graph.nodes(), reverse=True), N):\n graph_temp = graph.copy()\n graph_temp.remove_nodes_from(graph_sub)\n if len(graph_temp.edges()) == 0:\n return list(graph_sub)",
"def get_initial_candidates(self, comment_tree):\n initial_candidates = comment_tree.tree.get(None, [])\n if initial_candidates:\n offset_depth = min(comment_tree.depth[comment_id]\n for comment_id in initial_candidates)\n else:\n offset_depth = 0\n return initial_candidates, offset_depth",
"def LCA(node1,node2,root):\n counter_node = root\n l = node1.key\n h = node2.key\n while (not counter_node.key >= l) or (not counter_node.key <= h):\n if counter_node.key < l:\n counter_node = counter_node.right\n else:\n counter_node = counter_node.left\n return counter_node",
"def _recursive_cutting(g, p, res=[]):\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res",
"def connected((e,r)):\n \n # Deal with the middle case so we don't divide by zero\n if r==0: return [(1,1),(2,1),(3,1),(4,1),(5,1),(0,1)]\n # If the input is impossible, return nothing to prune the branch (shouldn't\n # happen)\n if e>=6*r: return []\n connected=[]\n mult=e//r\n rem=e % r\n #Going sideways\n toAdd=((6*r-1,r) if e==0 else (e-1,r))\n connected.append(toAdd)\n toAdd=((0,r) if e==6*r-1 else (e+1,r))\n connected.append(toAdd)\n #Going inward\n toAdd=( (0,r-1)if mult==5 and rem==r-1 else (mult*(r-1)+rem,r-1) )\n connected.append(toAdd)\n if rem!=0:\n connected.append((mult*(r-1)+rem-1,r-1))\n\n #Going outward\n if r<nLayers-1:\n connected.append((mult*(r+1)+rem,r+1))\n connected.append((mult*(r+1)+rem+1,r+1))\n if rem==0: # only case where negatives could result\n if mult>0: connected.append( (mult*(r+1)-1,r+1))\n else: connected.append( (6*(r+1)-1,r+1))\n \n return connected",
"def bipartite_vertex_cover(bigraph, algo=\"Hopcroft-Karp\"):\n if algo == \"Hopcroft-Karp\":\n coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]\n coord = np.array(coord)\n graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))\n matchV = maximum_bipartite_matching(graph, perm_type='row')\n matchV = [None if x==-1 else x for x in matchV]\n nU, nV = graph.shape\n assert len(matchV) == nV\n elif algo == \"Hungarian\":\n matchV = max_bipartite_matching2(bigraph)\n nU, nV = len(bigraph), len(matchV)\n else:\n assert False\n\n matchU = [None] * nU\n \n for v in range(nV): # -- build the mapping from U to V\n if matchV[v] is not None:\n matchU[matchV[v]] = v\n \n def old_konig():\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n for u in range(nU):\n if matchU[u] is None: # -- starting with free vertices in U\n _alternate(u, bigraph, visitU, visitV, matchV)\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n def new_konig():\n # solve the limitation of huge number of recursive calls\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n wait_u = set(range(nU)) - set(matchV) \n while len(wait_u) > 0:\n u = wait_u.pop()\n visitU[u] = True\n for v in bigraph[u]:\n if not visitV[v]:\n visitV[v] = True\n assert matchV[v] is not None # otherwise match is not maximum\n assert matchV[v] not in wait_u\n wait_u.add(matchV[v])\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n #res_old = old_konig()\n res_new = new_konig()\n #assert res_old == res_new\n return res_new",
"def dealing_covers(high_series,low_series):\n #dealing k\n #initialize\n pre_calculated=0\n rates_total=len(high_series)\n valid_high=high_series.copy()\n valid_low=low_series.copy()\n valid_k_line_mark=np.zeros(rates_total)\n \"\"\"\n start point use up contains dealing\n \"\"\"\n start_index=0\n pre_high=valid_high[start_index]\n pre_low=valid_low[start_index]\n pre_idx=start_index\n cur_idx=pre_idx\n is_found=False\n while(not is_found):\n cur_idx=cur_idx+1\n cur_high=valid_high[cur_idx]\n cur_low=valid_low[cur_idx]\n if (cur_high>pre_high and cur_low>pre_low)or (cur_high<pre_high and cur_low<pre_low):\n is_found=True\n valid_high[pre_idx]=pre_high\n valid_low[pre_idx]=pre_low\n valid_k_line_mark[pre_idx]=1\n else:\n if pre_high>cur_high:\n # first k cover second k\n pre_low=cur_low\n elif pre_high<cur_high:\n\n # first k be convered by second k\n pre_high=cur_high\n pre_idx=cur_idx\n else:\n # high value is equal\n pre_low=cur_low\n pre_idx=cur_idx\n\n # no start point dealing\n begin_idx=cur_idx+1\n for i in range(begin_idx,rates_total):\n post_high=valid_high[i]\n post_low=valid_low[i]\n post_idx=i\n #first classification: no contains\n if (cur_high>post_high and cur_low>post_low)or (cur_high<post_high and cur_low<post_low):\n valid_high[cur_idx]=cur_high\n valid_low[cur_idx]=cur_low\n valid_k_line_mark[cur_idx]=1\n pre_high=cur_high\n pre_low=cur_low\n pre_idx=cur_idx\n cur_high=post_high\n cur_low=post_low\n cur_idx=post_idx\n else:\n if pre_high<cur_high:#up contains\n if cur_high>post_high:\n #post be coverd by cur\n cur_low=post_low\n elif cur_high<post_high:\n #post cover cur\n cur_high=post_high\n cur_idx=post_idx\n else:\n #high be equal\n if cur_low>post_low:\n #cur be covered by post\n cur_idx=post_idx\n else:\n #cur covers post\n cur_low=post_low\n else:#down contains\n if cur_low>post_low:\n #cur be covered by post\n cur_low=post_low\n cur_idx=post_idx\n elif cur_low<post_low:\n #cur covers post\n cur_high=post_high\n else:\n # two low is equal\n if cur_high>post_high:\n cur_high=post_high\n else:\n cur_idx=post_idx\n cur_high=post_high#I think the words can be deleted\n return valid_k_line_mark,valid_high,valid_low",
"def coalesce_within_root(self, host_node):\n height = host_node.height\n while len(self.extant_p) > 1 and len(self.choices) >= 1:\n if self.verbose:\n print self\n self.get_pairs()\n if len(self.choices) == 0:\n #\n return\n height += random.expovariate(len(self.choices)*self.c_rate)\n cpaths = random.choice(self.choices.keys())\n self.coalesce_paths(cpaths, t0=height)\n if self.verbose:\n print self"
] | [
"0.58355105",
"0.5734315",
"0.5723906",
"0.56961703",
"0.55900025",
"0.55862707",
"0.5409351",
"0.53967494",
"0.53904384",
"0.53811276",
"0.5372247",
"0.53711116",
"0.536589",
"0.5354127",
"0.53164554",
"0.52926135",
"0.529009",
"0.5275919",
"0.5232045",
"0.52311265",
"0.5228785",
"0.52283573",
"0.5216952",
"0.5215996",
"0.5214411",
"0.5210189",
"0.52007204",
"0.51993954",
"0.51987153",
"0.5179737"
] | 0.7809844 | 0 |
given a 2d image position, the lidar and camera models, as well as the current scan message, localizes the pixel against the lidar data | def localize_pixel(img_pos, camera: Camera, lidar: Lidar, scan: LaserScan) -> tuple:
# ---OBJ--
# x r1 /\ r2 x
# / \
#cam_ray / \ average_ray
# / \
# / \
# CAM ----> LID
#
# has to be 2d
assert (img_pos.size == 2)
cam_ray = camera.get_ray_through_image(img_pos)
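    # (note; assumption about Camera's internals) for a standard pinhole model the
    # back-projected direction for pixel (u, v) is proportional to K^-1 @ [u, v, 1],
    # with K the intrinsic matrix; the exact construction is delegated to Camera here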
cam_ray_robot = camera.get_ray_in_robot_frame(cam_ray)
cam_ray_lidar = lidar.get_ray_in_lidar_frame(cam_ray_robot)
# flatten camera ray
cam_ray_lidar_flat = lidar.get_ray_projection(cam_ray_lidar)
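    # (presumably) "flattening" projects the ray onto the lidar's horizontal scan plane,
    # i.e. the z-component of the direction is dropped in the lidar frame so the ray can
    # be compared against the planar scan rays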
# figure out which lidar rays correspond to the camera ray
(ray1,ray2) = lidar.get_corresponding_lidar_rays(cam_ray_lidar_flat,scan)
# if no rays found corresponding to scan data
if ray1 is None or ray2 is None:
return (None,None)
# get the normal to the lidar hit
intersection_normal = lidar.get_normal_to_plane(ray1,ray2)
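    # (presumably) the normal is the in-plane direction perpendicular to the segment
    # joining the two ray endpoints, i.e. the facing direction of the hit surface patch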
    # get the distance in the horizontal plane from the lidar to the object
lidar_to_target_length = lidar.get_camera_ray_length(cam_ray_lidar_flat,ray1,ray2)
# get the vector from camera to lidar (flattened to lidar plane)
# i.e. origin of lidar frame in camera frame
lidar_to_cam_vec = cam_ray_lidar_flat.origin
cam_to_lidar_flat = Ray(lidar_to_cam_vec,-lidar_to_cam_vec,np.linalg.norm(lidar_to_cam_vec))
    # now work out the lidar-to-object ray, i.e. interpolate between ray1's and ray2's tips
lidar_to_object_flat = interpolated_ray(ray1,ray2,0.5,lidar_to_target_length)
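    # (assumption about the helper) interpolated_ray(ray1, ray2, 0.5, L) is taken to
    # return a Ray whose direction lies halfway between ray1 and ray2 and whose length
    # is L, approximating the hit point between the two neighbouring scan returns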
    # now finally work out the vector from camera to object (flattened)
# this lets us access the true z-distance in the camera
cam_to_object_flat = lidar_to_object_flat.get_vec() + cam_to_lidar_flat.get_vec()
cam_to_object_flat_length = np.linalg.norm(cam_to_object_flat)
    # elevation angle of the camera ray relative to the horizontal (lidar) plane
cam_ray_theta = angle_between(cam_ray_lidar.get_vec(),cam_to_object_flat)
# length of original camera ray (knowing the length of its projection)
    # will fail (cos(cam_ray_theta) == 0) if the ray points straight up or down
cam_ray_robot.length = cam_to_object_flat_length / math.cos(cam_ray_theta)
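    # e.g. (illustrative numbers) with cam_to_object_flat_length = 2.0 m and
    # cam_ray_theta = 30 degrees: length = 2.0 / cos(30 deg) ~= 2.0 / 0.866 ~= 2.31 m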
object_robot = cam_ray_robot.get_vec()+cam_ray_robot.origin
return (object_robot,intersection_normal) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def localize(image):\n\n # Call the vision function in order to have the grid with the obstacle and the goal coordinate\n object_grid, occupancy_grid, world = vision(image)\n\n # Correction of the goal coordinate in order to fit the A* coordinate\n goal_x = object_grid[0][1]\n goal_y = WIDTH_G - object_grid[0][0]\n goal_coor = (goal_x, goal_y)\n\n return occupancy_grid, goal_coor",
"def LKTrackerImageToImage(imageOld, pixelCoordsOld, imageNew,\n pixelCoordsNew, windowSize):\n # imageOld = cv2.cvtColor(imageOld, cv2.COLOR_BGR2GRAY)\n # imageNew = cv2.cvtColor(imageNew, cv2.COLOR_BGR2GRAY)\n\n # Get top left corner of window.\n\n topLeftX1, topLeftY1 = pixelCoordsOld - windowSize // 2\n topLeftX2, topLeftY2 = pixelCoordsNew - windowSize // 2\n\n # Compute horizontal and vertical gradients for the original frame.\n gx = utils.pixelDiffImages(imageOld,\n topLeftX1,\n topLeftY1,\n imageOld,\n topLeftX1,\n topLeftY1 - 1,\n windowSize,\n windowSize)\n \n gy = utils.pixelDiffImages(imageOld,\n topLeftX1,\n topLeftY1,\n imageOld,\n topLeftX1 - 1,\n topLeftY1,\n windowSize,\n windowSize)\n \n # Compute difference between original and new frames.\n diff = utils.pixelDiffImages(imageOld,\n topLeftX1,\n topLeftY1,\n imageNew,\n topLeftX2,\n topLeftY2,\n windowSize,\n windowSize)\n\n # Compute components of Harris matrix.\n Ixx = gx ** 2\n Iyy = gy ** 2\n Ixy = gx * gy\n\n # Compute Gaussian kernel for weighting pixels in the window.\n gkern = np.outer(signal.gaussian(windowSize, 2.5),\n signal.gaussian(windowSize, 2.5))\n\n # Construct matrices and solve the matrix-vector equation to get the\n # movement of the pixel.\n Z = np.array([[np.sum(Ixx * gkern), np.sum(Ixy * gkern)],\n [np.sum(Ixy * gkern), np.sum(Iyy * gkern)]])\n b = np.array([np.sum(diff * gx * gkern), np.sum(diff * gy * gkern)])\n d = np.linalg.solve(Z, b)\n\n # Compute new position of pixel\n return pixelCoordsNew + d[: : -1]",
"def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()",
"def process_scan(self, msg):\n if len(msg.ranges) <= 330:\n # throw out scans that don't have more than 90% of the data\n return\n # get pose according to the odometry\n p = PoseStamped(header=Header(stamp=msg.header.stamp, frame_id=\"base_link\"), pose=Pose())\n self.odom_pose = self.tf_listener.transformPose(\"odom\", p)\n self.base_pose = self.tf_listener.transformPose(\"base_laser_link\", p)\n # convert the odom pose to the tuple (x,y,theta)\n self.odom_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)\n #(-0.0069918, 0.000338577, 0.048387097)\n #(1.0208817, 0.04827240, 0.048387)\n self.base_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.base_pose.pose)\n for i in range(len(msg.ranges)):\n if 0.0 < msg.ranges[i] < 5.0: #for any reding within 5 meters\n #Using the pose and the measurement nd the angle, find it in the world\n map_x = self.odom_pose[0] + msg.ranges[i] * cos(i * pi / 180.0 + self.odom_pose[2])\n map_y = self.odom_pose[1] + msg.ranges[i] * -sin(i * pi / 180.0 + self.odom_pose[2])\n\n #Relate that map measure with a place in the picture\n x_detect = int((map_x - self.origin[0]) / self.resolution)\n y_detect = int((map_y - self.origin[1]) / self.resolution)\n\n\n #Determine how to mark the location in the map, along with the stuff inbetween\n u = (map_x - self.odom_pose[0], map_y - self.odom_pose[1])\n magnitude = sqrt(u[0] ** 2 + u[1] ** 2)\n n_steps = max([1, int(ceil(magnitude / self.resolution))])\n u_step = (u[0] / (n_steps - 1), u[1] / (n_steps - 1))\n marked = set()\n for i in range(n_steps):\n curr_x = self.odom_pose[0] + i * u_step[0]\n curr_y = self.odom_pose[1] + i * u_step[1]\n if not (self.is_in_map(curr_x, curr_y)):\n break\n\n x_ind = int((curr_x - self.origin[0]) / self.resolution)\n y_ind = int((curr_y - self.origin[1]) / self.resolution)\n if x_ind == x_detect and y_ind == y_detect:\n break\n if not ((x_ind, y_ind) in marked):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_ind, y_ind] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_miss\n marked.add((x_ind, y_ind))\n if self.is_in_map(map_x, map_y):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_detect, y_detect] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_hit\n\n self.seq += 1\n # to save time, only publish the map every 10 scans that we process\n if self.seq % 10 == 0:\n # make occupancy grid\n map = OccupancyGrid()\n map.header.seq = self.seq\n self.seq += 1\n map.header.stamp = msg.header.stamp\n map.header.frame_id = \"map\" # the name of the coordinate frame of the map\n map.info.origin.position.x = self.origin[0]\n map.info.origin.position.y = self.origin[1]\n map.info.width = self.n\n map.info.height = self.n\n map.info.resolution = self.resolution\n map.data = [0] * self.n ** 2 # map.data stores the n by n grid in row-major order\n for i in range(self.n):\n for j in range(self.n):\n idx = i + self.n * j # this implements row major order\n if self.odds_ratios[i, j] < 1 / 5.0: # consider a cell free if odds ratio is low enough\n map.data[idx] = 0\n elif self.odds_ratios[i, j] > 5.0: # consider a cell occupied if odds ratio is high enough\n map.data[idx] = 100\n else: # otherwise cell is unknown\n map.data[idx] = -1\n self.pub.publish(map)\n\n # create the image from the probabilities so we can visualize using opencv\n im = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))\n for i in range(im.shape[0]):\n for j in range(im.shape[1]):\n if self.odds_ratios[i, 
j] < 1 / 5.0:\n im[i, j, :] = 1.0\n elif self.odds_ratios[i, j] > 5.0:\n im[i, j, :] = 0.0\n else:\n im[i, j, :] = 0.5\n\n # compute the index of the odometry pose so we can mark it with a circle\n x_odom_index = int((self.odom_pose[0] - self.origin[0]) / self.resolution)\n y_odom_index = int((self.odom_pose[1] - self.origin[1]) / self.resolution)\n\n x_base_index = int((self.base_pose[0] - self.origin[0] - 1) / self.resolution)\n y_base_index = int((self.base_pose[1] - self.origin[1]) / self.resolution)\n\n\n # computer the ball locations so we can mark with a colored circle\n #TODO Track and relate the robot's angle pose for accuracy\n\n if self.depth_red > 0:\n self.y_camera_red = int(x_odom_index - self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_red = int(y_odom_index - self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n real_red_y = self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])\n real_red_x = self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])\n\n self.rcoor_pub.publish(Vector3(-real_red_x, -real_red_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n if self.depth_blue > 0:\n self.y_camera_blue = int(x_odom_index - self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_blue = int(y_odom_index - self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n real_blue_y = self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])\n real_blue_x = self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])\n\n self.bcoor_pub.publish(Vector3(-real_blue_x, -real_blue_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n if self.depth_green > 0:\n self.y_camera_green = int(x_odom_index - self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_green = int(y_odom_index - self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_green, self.y_camera_green), 1, self.green)\n \n real_green_y = self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])\n real_green_x = self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])\n\n self.gcoor_pub.publish(Vector3(-real_green_x, -real_green_y/2, 0))\n\n if self.depth_yellow > 0:\n self.y_camera_yellow = int(x_odom_index - self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_yellow = int(y_odom_index - self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n \n real_yellow_y = self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])\n real_yellow_x = self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])\n\n self.ycoor_pub.publish(Vector3(-real_yellow_x, -real_yellow_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n\n # draw the robot\n cv2.circle(im, (y_odom_index, x_odom_index), 2, (255, 0, 0))\n \n # display the image resized\n cv2.imshow(\"map\", cv2.resize(im, (500, 500)))\n cv2.waitKey(20)",
"def on_image_update(self, message_data):\n # Get the image\n try:\n # The image should be already encoded as rgb8, we pass through to avoid costly recomputing\n image_array = self.bridge.compressed_imgmsg_to_cv2(message_data, desired_encoding=\"passthrough\")\n image_array = cv2.rotate(image_array, cv2.ROTATE_90_CLOCKWISE)\n image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)\n image_array_gray = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)\n corners, ids, rejectedImgPoints = aruco.detectMarkers(image_array_gray, self.aruco_dict, parameters=self.aruco_parameters)\n self.corners = corners\n # For some reason the cv2 transformation rotates the image, haven't figured out why yet\n self.last_image = aruco.drawDetectedMarkers(image_array, corners)\n except CvBridgeError as err:\n print err\n\n # Calculate the frame rate\n self.image_counter += 1\n now = time.time()\n frame_duration = now - self.last_time\n framerate = 1./frame_duration\n # Calculate the average frame rate from the latest update\n self.average_framerate = self.average_framerate + float(framerate - self.average_framerate)/(self.image_counter + 1)\n # End of this frame\n self.last_time = now",
"def find_components_old(image,deltaPix,lens_rad_arcsec = 5.0,lens_rad_ratio = None, gal_rad_ratio = 0.1,min_size_arcsec=0.3,thresh=0.4, show_locations=False):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n filtered = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.)\n \n# print(filtered.min(),filtered.max(),filtered.min() + thresh * np.abs(filtered.min()))\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n \n if show_locations:\n plt.figure(figsize = (8,8))\n plt.subplot(1,2,1)\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n plt.title('Image')\n\n plt.subplot(1,2,2)\n plt.imshow(filtered, origin='lower', norm=SymLogNorm(5))\n plt.title('Filtered Image')\n plt.show()\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0)\n max_idx_2d_large = peak_local_max(filtered, min_distance=1)\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2.\n \n R = np.sqrt((x_list_large - im_center_x)**2 + (y_list_large - im_center_y)**2)\n new_center_x, new_center_y = x_list_large[R < gal_rad], y_list_large[R < gal_rad]\n \n if (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) ==1 ): \n new_center_x, new_center_y = x_list_large[R == R.min()], y_list_large[R == R.min()]\n elif (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) > 1 ): \n new_center_x, new_center_y = im_center_x, im_center_y\n elif len(new_center_x) == 0: \n new_center_x, new_center_y = im_center_x, im_center_y\n \n \n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2)\n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n # show maxima on image for debug\n if show_locations:\n fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n plt.scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n plt.scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n plt.gcf().gca().add_artist(draw_lens_circle)\n 
plt.gcf().gca().add_artist(draw_gal_circle)\n plt.title('Detected Components')\n plt.text(1, 1, \"detected components\", color='red')\n fig.axes[0].get_xaxis().set_visible(True); fig.axes[0].get_yaxis().set_visible(True)\n plt.show()\n return (x_sats, y_sats), (new_center_x, new_center_y)",
"def lidar_callback(self, lidar_data):\n\n self.raw_data = np.array(lidar_data.ranges)\n f = self.r_front\n l = self.r_left\n r = self.r_right\n\n # Distance Detection\n self.lidar[FRONT] = min(\n min(min(self.raw_data[f[0][0]:f[0][1]]), min(self.raw_data[f[1][0]:f[1][1]])), 10) # front 36 degrees\n \n self.lidar[LEFT] = min(\n min(self.raw_data[l[0]:l[1]]), 10)\n \n self.lidar[RIGHT] = min(\n min(self.raw_data[r[0]:r[1]]), 10)",
"def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)",
"def process_image(image):\n undist = calibrator.undistort(image)\n binarized = binarizer.process(undist)\n warped = warper.warp(binarized)\n\n lane.detect_lane(warped)\n\n debug_image = lane.get_debug_image(warped)\n\n visualizer.draw_debugging_output(undist, binarized, warped, debug_image)\n visualizer.draw_text_info(undist, lane.center_curvature, lane.center_offset)\n result = visualizer.draw_lane_on_road(undist, lane)\n\n return result",
"def update():\n\n scan = np.clip(\n rc.lidar.get_samples() * LIDAR_OFFSET, 0, None\n ) # smooth(rc.lidar.get_samples())\n\n scan_xy = None\n\n color_image = rc.camera.get_color_image()\n depth_image = cv.bilateralFilter(rc.camera.get_depth_image(), 9, 75, 75)\n vis_image = np.zeros((2 * VIS_RADIUS, 2 * VIS_RADIUS, 3), np.uint8, \"C\")\n hsv_image = cv.cvtColor(color_image, cv.COLOR_BGR2HSV)\n\n # FSM\n\n speed = 0\n angle = 0\n global currentChallenge\n global oldState\n global colorPriority\n\n if currentChallenge == Challenge.ManualControl:\n speed, angle = manualControl()\n if rc.controller.was_pressed(rc.controller.Button.A):\n currentChallenge = oldState\n else:\n if rc.controller.was_pressed(rc.controller.Button.A):\n oldState = currentChallenge\n currentChallenge = Challenge.ManualControl\n\n curve = None\n path = None\n if currentChallenge == Challenge.Line:\n if colorPriority == None:\n # scan AR tags\n colorPriority = [\n Color.Red,\n Color.Green,\n Color.Blue,\n ] # {Color.Red: 1, Color.Blue: 2, Color.Green: 3}\n\n hsv_image[0 : height // 2, :] = [0, 0, 0] # crop out top half\n\n if colorPriority[2] == Color.Blue:\n blue_r = np.array([])\n blue_t = np.array([])\n else:\n mask = cv.inRange(hsv_image, BLUE[0], BLUE[1])\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n blue_r, blue_t = camera2Polar(points, depths)\n\n if colorPriority[2] == Color.Red:\n red_r = np.array([])\n red_t = np.array([])\n else:\n mask = cv.bitwise_or(\n cv.inRange(hsv_image, RED1[0], RED1[1]),\n cv.inRange(hsv_image, RED2[0], RED2[1]),\n )\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n red_r, red_t = camera2Polar(points, depths)\n\n if colorPriority[2] == Color.Green:\n green_r = np.array([])\n green_t = np.array([])\n else:\n mask = cv.inRange(hsv_image, GREEN[0], GREEN[1])\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n green_r, green_t = camera2Polar(points, depths)\n\n depths = np.concatenate([blue_r, red_r, green_r])\n sort = np.argsort(depths)\n\n points = np.array(\n [\n depths[sort],\n np.concatenate([blue_t, red_t, green_t])[sort],\n np.concatenate(\n [\n np.full_like(blue_r, Color.Blue),\n np.full_like(red_r, Color.Red),\n np.full_like(green_r, Color.Green),\n ]\n )[sort],\n ]\n )\n\n path = None\n\n if len(depths) > 5:\n final_r = np.array([])\n final_t = np.array([])\n\n oldt = -1\n\n for i in range(LINE_RADIUS // 5): # increments of 10 units\n p = points[\n :,\n np.logical_and(points[0, :] >= i * 5, points[0, :] < (i + 1) * 5),\n ]\n l = p.shape[1]\n if l > 0:\n unique = np.unique(p[2])\n # d = dict(zip(unique, counts))\n for c in colorPriority:\n if c in unique:\n args = np.argwhere(p[2] == c)\n tlist = p[1, args]\n c_t = np.mean(tlist)\n if oldt == -1 or abs(c_t - oldt) < 0.4: # radians\n final_r = np.append(final_r, p[0, args])\n final_t = np.append(final_t, tlist)\n oldt = c_t\n break\n # else:\n # final_r = np.append(final_r, p[0])\n # final_t = np.append(final_t, p[1])\n\n path = polar2TopDown(final_r, final_t)\n path[:, 1] -= 25\n curve = fitCurveToPath(path, vis_image)\n if currentChallenge == Challenge.Lane:\n mask = cv.bitwise_or(\n cv.inRange(hsv_image, PURPLE[0], PURPLE[1]),\n cv.inRange(hsv_image, ORANGE[0], ORANGE[1]),\n )\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n r, t = 
camera2Polar(points, depths)\n path = polar2TopDown(r, t)\n\n if len(path) > 0:\n final_x = np.array([])\n final_y = np.array([])\n\n # path = path[np.absolute(path[:, 1]) < LANE_HORIZ_RADIUS]\n\n for i in range(VIS_RADIUS // 5):\n p = path[np.logical_and(path[:, 1] >= i * 5, path[:, 1] < (i + 1) * 5)]\n if len(p) > 0:\n y = p[:, 1]\n p = p[:, 0]\n m = np.mean(p)\n left_mean = np.mean(p[p < m])\n right_mean = np.mean(p[p > m])\n if (\n abs(left_mean - right_mean) < 5\n or abs(left_mean - right_mean) > 100\n ):\n continue # throw out this data, one side is not visible\n m = (left_mean + right_mean) / 2\n p[p > m] += m - right_mean\n p[p < m] += m - left_mean\n final_x = np.append(final_x, p)\n final_y = np.append(final_y, y)\n\n path = np.transpose([final_x, final_y])\n curve = fitCurveToPath(path, vis_image)\n\n if curve is not None: # line or lane gave valid result\n # speed = 0.5\n slope = npp.polyval(5, np.polyder(curve))\n error = slope / 50\n # angleError = TARGET_ANGLE - (rightWallAngle + leftWallAngle) / 2\n # distError = npp.polyval(0, curve)\n\n # if abs(angleError) < ANGLE_THRESHHOLD:\n # error = distError\n # else:\n # error = distError / 10 + np.sin(np.radians(angleError)) * 2 # angleError / 30\n\n # angle = rc_utils.clamp(Angle_PID.update(error, rc.get_delta_time()), -1, 1)\n\n if True: # currentChallenge == Challenge.Cones:\n blue_image = np.zeros((VIS_RADIUS * 2, VIS_RADIUS * 2), dtype=np.uint8)\n red_image = np.zeros((VIS_RADIUS * 2, VIS_RADIUS * 2), dtype=np.uint8)\n\n visualizeColor(blue_image, hsv_image, depth_image, BLUE, 255)\n visualizeColor(red_image, hsv_image, depth_image, RED1, 255)\n visualizeColor(red_image, hsv_image, depth_image, RED2, 255)\n\n cones = []\n\n keypoints = conedetector.detect(blue_image)\n # vis_image = cv.drawKeypoints(\n # vis_image,\n # keypoints,\n # np.array([]),\n # (0, 255, 255),\n # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,\n # )\n\n cones.append([[k.pt[0], k.pt[1], WaypointType.Blue] for k in keypoints])\n\n keypoints = conedetector.detect(red_image)\n # vis_image = cv.drawKeypoints(\n # vis_image,\n # keypoints,\n # np.array([]),\n # (0, 255, 255),\n # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,\n # )\n\n cones.append([[k.pt[0], k.pt[1], WaypointType.Red] for k in keypoints])\n\n scan_xy = lidar2TopDown(scan) # , 30, 330)\n scan_xy[:, 1] -= 15 # lidar offset\n scan_xy = scan_xy[\n (np.absolute(scan_xy) < 60).all(axis=1)\n ] # scipy.cluster.vq.whiten(scan_xy)\n\n centroids = None\n if scan_xy.size > 5:\n try:\n centroids, distortion = scipy.cluster.vq.kmeans(scan_xy, 3)\n except:\n print(\"K means error\")\n\n # print(c)\n\n if centroids is not None:\n v = topDown2Vis(centroids)\n for i in range(len(v[0])):\n cones.append([v[1][i], v[0][i], WaypointType.Unknown])\n # x = v[1][i]\n # y = v[0][i]\n # if x < VIS_RADIUS: # and y > VIS_RADIUS:\n # red_cones.append([x, y])\n # else:\n # blue_cones.append([x, y])\n # if c is not None:\n # dist = min(np.minimum(c[:, 0] - x), np.minimum(c[:, 1] - y))\n # if dist > 10:\n # rc_utils.draw_circle(vis_image, (x, y), (0, 255, 255), 5)\n # else:\n # rc_utils.draw_circle(vis_image, (x, y), (0, 255, 255), 5)\n\n if len(cones) > 0:\n for i in range(len(waypoints)):\n\n x = int(rc_utils.clamp(waypoints[c][0], 0, VIS_RADIUS * 2))\n y = int(rc_utils.clamp(waypoints[c][1], 0, VIS_RADIUS * 2))\n for i in waypoints[c + 1 :]:\n d = (i[0] - x) ** 2 + (i[1] - y) ** 2\n if d < 100:\n break\n\n waypoints = []\n gate_forming_cones = []\n\n for i in red_cones:\n for j in blue_cones:\n if abs(j[1] - i[1]) < 40 and 20 < 
abs(j[0] - i[0]) < 100: # found gate\n x = abs(j[0] - i[0]) / 2\n # y = (j[1] + i[1]) / 2\n # if y >= 20:\n waypoints.append([j[0] - x, j[1], WaypointType.Gate])\n waypoints.append([i[0] + x, i[1], WaypointType.Gate])\n gate_forming_cones.append(i)\n gate_forming_cones.append(j)\n # break\n\n # print(gate_forming_cones)\n\n # print(waypoints)\n firstrun = True\n for a in red_cones:\n x = int(a[1])\n y = int(a[0])\n rc_utils.draw_circle(vis_image, (x, y), (0, 127, 255), 5)\n for b in blue_cones:\n xb = int(b[1])\n yb = int(b[0])\n if firstrun:\n rc_utils.draw_circle(vis_image, (xb, yb), (255, 255, 0), 5)\n d = (xb - x) ** 2 + (yb - y) ** 2\n if d < 100:\n gate_forming_cones.append(a)\n firstrun = False\n\n for i in red_cones:\n if i not in gate_forming_cones:\n waypoints.append([i[0] + 30, i[1], WaypointType.Red])\n waypoints.append([i[0] + 20, i[1] - 20, WaypointType.Red])\n waypoints.append([i[0] + 20, i[1] + 20, WaypointType.Red])\n\n for i in blue_cones:\n if i not in gate_forming_cones:\n waypoints.append([i[0] - 30, i[1], WaypointType.Blue])\n waypoints.append([i[0] - 20, i[1] - 20, WaypointType.Blue])\n waypoints.append([i[0] - 20, i[1] + 20, WaypointType.Blue])\n\n if len(waypoints) > 0:\n w = []\n\n for c in range(len(waypoints)):\n x = int(rc_utils.clamp(waypoints[c][0], 0, VIS_RADIUS * 2))\n y = int(rc_utils.clamp(waypoints[c][1], 0, VIS_RADIUS * 2))\n for i in waypoints[c + 1 :]:\n d = (i[0] - x) ** 2 + (i[1] - y) ** 2\n if d < 100:\n break\n else:\n w.append(waypoints[c])\n rc_utils.draw_circle(vis_image, (y, x), (0, 255, 0), 5)\n\n # waypoints.append([VIS_RADIUS, VIS_RADIUS, WaypointType.Self])\n waypoints = np.array(w)\n\n waypoints[:, 0:2] -= VIS_RADIUS\n waypoints[:, 1] = np.negative(waypoints[:, 1])\n # print(waypoints)\n\n # fit curve to path\n try:\n curve = scipy.interpolate.interp1d(\n waypoints[:, 1],\n waypoints[:, 0],\n # type=\"cubic\",\n fill_value=\"extrapolate\",\n )\n except:\n print(\"Spline curve error\")\n curve = None\n\n if curve is not None:\n i = topDown2Vis(\n np.transpose(\n [\n curve(np.arange(-VIS_RADIUS, VIS_RADIUS)),\n np.arange(-VIS_RADIUS, VIS_RADIUS),\n ]\n )\n )\n if i is not None:\n vis_image[i] = [255, 255, 0] # add pixels in color image\n\n speed = 0.2\n slope = (curve(0.1) - curve(0)) / 0.1\n error = curve(0) / 10 # + slope * 5 # angleError / 30\n if np.isfinite(error):\n angle = rc_utils.clamp(Angle_PID.update(error, rc.get_delta_time()), -1, 1)\n global last_waypoint_type\n last_waypoint_type = waypoints[np.argmin(np.absolute(waypoints[:, 1])), 2]\n else:\n if last_waypoint_type == WaypointType.Blue:\n speed = 0.2\n angle = 1\n elif last_waypoint_type == WaypointType.Red:\n speed = 0.2\n angle = -1\n\n # print(\"centroids : \", centroids)\n # print(\"distortion :\", distortion)\n\n # pass\n # if currentChallenge == Challenge.Gate:\n # pass\n if currentChallenge == Challenge.Wall:\n pass\n\n # green dot in middle for car\n rc_utils.draw_circle(vis_image, (VIS_RADIUS, VIS_RADIUS), (0, 255, 255), 2)\n\n if scan_xy is not None:\n i = topDown2Vis(scan_xy)\n if i is not None:\n vis_image[i] = [255, 255, 255]\n\n visualizeColor(vis_image, hsv_image, depth_image, BLUE, (255, 127, 0))\n visualizeColor(vis_image, hsv_image, depth_image, RED1, (0, 0, 255))\n visualizeColor(vis_image, hsv_image, depth_image, RED2, (0, 0, 255))\n # visualizeColor(vis_image, hsv_image, depth_image, GREEN, (0, 255, 0))\n\n if path is not None:\n i = topDown2Vis(path)\n if i is not None:\n vis_image[i] = [0, 255, 255] # add pixels in color image\n\n # mask[points[:, 0], 
points[:, 1]] = depths\n # rc.display.show_depth_image(mask)\n\n # red = (255, 0, 0)\n # blue = (255, 127, 0)\n # green = (0, 255, 0)\n # orange = (0, 127, 255)\n # purple = (255, 0, 127)\n # visualizeColor(vis_image, hsv_image, depth_image, PURPLE, (255, 0, 127))\n # visualizeColor(vis_image, hsv_image, depth_image, ORANGE, (0, 127, 255))\n\n rc.display.show_color_image(vis_image)\n # rc.display.show_depth_image(depth_image)\n\n rc.drive.set_speed_angle(speed, angle)",
"def image_local_autolevel(image: np.ndarray):\n # Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n image = filters.rank.autolevel(image, morphology.disk(4))\n\n # Resize the iamge back to a shape of (2304, )\n return image_as_array(image)",
"def localizeLP(img):\n\t# preprocess\n\tgray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# using cascade classifier to detect license plate\n\t# the trained classifier is not good, retrain to get better results.\n\t# pre-trained classifier at: https://github.com/openalpr/openalpr/tree/master/runtime_data/region\n\tcascade = cv2.CascadeClassifier(\"localization/cascade_model.xml\")\n\trects = cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30),flags=cv2.CASCADE_SCALE_IMAGE)\n\tif len(rects)==0:\n\t\treturn None,None\n\n\trect_list=[(rect[0],rect[1],rect[2],rect[3]) for rect in rects]\n\timg_height=gray_img.shape[0]\n\timg_width=gray_img.shape[1]\n\tmin_height, max_height, min_width, max_width = (0.1*img_height, 0.7*img_height, 0.1*img_width, 0.7*img_width)\n\thorizontal_min, horizontal_max = (0.1*img_width, 0.9*img_width)\n\tfor obj_rect in rect_list:\n\t\tmin_col, min_row, w, h=obj_rect\n\t\tmax_col = min_col+w\n\t\tmax_row=min_row+h\n\t\tif h >= min_height and h <= max_height and w >= min_width and w <= max_width and min_col>horizontal_min and max_col<horizontal_max:\n\t\t\tplate=gray_img[min_row:max_row,min_col:max_col]\n\t\t\treturn plate,(min_row,min_col,max_row,max_col)",
"def pipeline(self,img,debug=0):\n\t\timg = self.cam.undist(img)\n\t\t#get warped binary image\n\t\tbinary_warped = self.cam.warp(Image(img).binary_th())\n\t\tbw_shape = binary_warped.shape\n\t\t\n\t\tif (self.leftLine.detected == True and self.rightLine.detected == True):\n\t\t\tself.quick_search(binary_warped,debug)\n\t\telse:\n\t\t\tself.blind_search(binary_warped,debug)\n\t\n\t\tif (self.leftLine.fit!=None and self.rightLine.fit!=None):\n\t\t\tpolygon = self.fill_lane(bw_shape)\n\t\t\tunwarped_polygon = self.cam.unwarp(polygon)\n\t\t\t# calculate position of lane's center \n\t\t\ttemp = np.nonzero(unwarped_polygon[-1,:,1])[0]\n\t\t\tleft, right = temp[0], temp[-1]\n\t\t\tself.center = (int(bw_shape[1]/2) - (int((right-left)/2)+int(left)))*7.4/1280\n\t\t\timg_lines = weighted_img(unwarped_polygon,img, α=1, β=0.5, λ=0.)\n\t\t\t# write text on image\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\ttext1 = 'Radius of Curvature: {:.0f}m'.format(np.mean((self.leftLine.radius, self.rightLine.radius)))\n\t\t\ttext2 = 'Distance is {:.2f}m {} of center'.format(abs(self.center), 'left' if self.center<0 else 'right')\n\n\t\t\tcv2.putText(img_lines, text1, (100,100), font, 1,(255,255,255),2)\n\t\t\tcv2.putText(img_lines, text2 ,(100,140), font, 1,(255,255,255),2)\n\t\t\t\n\t\t\tif (debug==1):\n\t\t\t\tshow_2gr(polygon, unwarped_polygon)\n\t\t\t\tshow_2gr(binary_warped, unwarped_polygon)\n\n\t\t\treturn img_lines\n\n\t\telse:\n\t\t\t# no lines detected and not fit available: return original image\n\t\t\t# without lines\n\t\t\treturn img",
"def laser_detector(out_l_x, out_l_y):\n global screen_x_long\n global screen_y_long\n while True:\n ret, frame2 = cap.read()\n time.sleep(0.5)\n crop_img2 = frame2[SCREEN_Y_TOP:SCREEN_Y_BOT, SCREEN_X_TOP:SCREEN_X_BOT]\n hsv_image2 = cv2.cvtColor(crop_img2, cv2.COLOR_BGR2HSV)\n laser(hsv_image2)\n laser_str_el = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))\n laser_str_el_2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n laser_close_morphed = cv2.morphologyEx(channels['laser'],\n cv2.MORPH_CLOSE,\n laser_str_el\n )\n laser_morphed = cv2.morphologyEx(laser_close_morphed,\n cv2.MORPH_OPEN,\n laser_str_el_2\n )\n\n blur = cv2.GaussianBlur(laser_morphed, (7, 7), 4, 4)\n\n lasers = cv2.HoughCircles(blur, cv.CV_HOUGH_GRADIENT, 2.5, 720 / 2,\n param1=10, param2=4, minRadius=4,\n maxRadius=10\n )\n if lasers is not None:\n lasers = np.uint16(np.around(lasers))\n for i in lasers[0, :]:\n print \"lasers!\"\n # draw the outer circle\n cv2.circle(crop_img, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # draw the center of the circle\n cv2.circle(crop_img, (i[0], i[1]), 2, (0, 0, 255), 3)\n x_l = ((i[0]) / screen_x_long) * WIDTH\n y_l = HEIGHT - (((i[1]) / screen_y_long) * HEIGHT)\n if laserT:\n out_l_x.put(x_l)\n out_l_y.put(y_l)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n print \"Exiting Background Thread: Laser detector\"",
"def leftArmImageCallback(msg):\n global armCamImage\n\n # extract arm cam image and update globally\n bridge = CvBridge()\n armCamImage = bridge.imgmsg_to_cv2(msg, \"bgr8\")",
"def __init__(self):\n DetectLandmarks.__init__(self)\n self.red_l = 0\n self.green_l = 0\n self.blue_l = 0\n self.red_e = 0\n self.green_e = 0\n self.blue_e = 0\n self.debug = 0\n self.image = 0\n self.width = 0\n self.height = 0\n self.im_copy = 0\n self.lip_x = []\n self.lip_y = []",
"def align_flir_visible_thermal(image_path, save=False, colorscale=\"plasma\"):\n thermal_image = read_flir_thermal_image(image_path)\n visible_image = read_flir_visible_image(image_path)\n metadata = read_flir_image_metadata(image_path)\n if save:\n metadata_path = os.path.join(os.path.dirname(image_path), \"metadata.csv\")\n metadata.to_csv(metadata_path)\n Real2IR, OffsetX, OffsetY = [\n metadata.loc[metadata[\"Tag\"] == tag, \"Value\"].squeeze() for tag in [\"Real2IR\", \"OffsetX\", \"OffsetY\"]]\n cropY, cropX = (np.array(visible_image.shape[:2]) * (1 - 1 / Real2IR)).astype(\"int\")\n if -cropY // 2 + OffsetY == 0:\n pass\n elif -cropY // 2 + OffsetY < 0:\n visible_image = visible_image[cropY // 2 + OffsetY:-cropY // 2 + OffsetY, :]\n else:\n raise NotImplementedError()\n if -cropX // 2 + OffsetX == 0:\n pass\n elif -cropX // 2 + OffsetX < 0:\n visible_image = visible_image[:, cropX // 2 + OffsetX: -cropX // 2 + OffsetX]\n else:\n raise NotImplementedError()\n _ = get_ratio(visible_image, thermal_image)\n visible_image = cv2.resize(visible_image, tuple(reversed(thermal_image.shape[:2])), interpolation=cv2.INTER_AREA)\n if save:\n flir_image = np.array(Image.open(image_path))\n flir_image = cv2.resize(flir_image, tuple(reversed(thermal_image.shape)), interpolation=cv2.INTER_AREA)\n plasma = cm.get_cmap(colorscale, 255)\n thermal_image = plasma(\n (thermal_image - np.min(thermal_image)) / (np.max(thermal_image) - np.min(thermal_image)))[:, :, :3]\n thermal_image = (thermal_image * 255).astype(\"uint8\")\n visible_path = os.path.join(os.path.dirname(image_path), \"visible.jpg\")\n thermal_path = os.path.join(os.path.dirname(image_path), \"thermal.jpg\")\n flir_path = os.path.join(os.path.dirname(image_path), \"flir.jpg\")\n Image.fromarray(visible_image).save(visible_path)\n Image.fromarray(thermal_image).save(thermal_path)\n Image.fromarray(flir_image).save(flir_path)\n return thermal_image, visible_image",
"def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) \n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources",
"def __init__(self, chessboard_img_fnames, chessboard_size, lane_shape, scale_correction=(30 / 720, 3.7 / 700)):\n # Get image size\n example_img = cv2.imread(chessboard_img_fnames[0])\n self.img_size = example_img.shape[0:2]\n self.img_height = self.img_size[0]\n self.img_width = self.img_size[1]\n\n # Calibrate\n self.camera_matrix, self.distortion_coeffs = self.calibrate(chessboard_img_fnames, chessboard_size)\n\n # Define overhead transform and its inverse\n top_left, top_right, bottom_left, bottom_right = lane_shape\n source = np.float32([top_left, top_right, bottom_right, bottom_left])\n destination = np.float32([(bottom_left[0], 0), (bottom_right[0], 0),\n (bottom_right[0], self.img_height - 1), (bottom_left[0], self.img_height - 1)])\n self.overhead_transform = cv2.getPerspectiveTransform(source, destination)\n self.inverse_overhead_transform = cv2.getPerspectiveTransform(destination, source)\n self.y_m_per_pix = scale_correction[0]\n self.x_m_per_pix = scale_correction[1]",
"def lidarScan(self):\n\n # Get cones seen by lidar\n lidar_coords = []\n for point in self.gui_points:\n # Convert from gui frame to lidar frame\n x = (point[0] - self.lidar_pos[0])*scaling_factor\n y = (self.lidar_pos[1] - point[1])*scaling_factor\n # Convert points to polar form and filter\n dist = math.hypot(x,y)\n angle = math.degrees(math.atan2(x,y))\n if dist <= LIDAR_RANGE and abs(angle) < LIDAR_FOV/2:\n lidar_coords.append(((float(x), float(y)), dist, angle, point))\n\n # Sort cones by angle\n self.detected_cones = sorted(lidar_coords,key=itemgetter(2))\n cones = []\n for c in self.detected_cones:\n cones.append(c[0])\n return cones",
"def scan_callback(self, scan):\n # Fill some cells in the map just so we can see that something is\n # being published.\n Lresol = 1 / myRes\n r = scan.ranges[0]\n xt = [self.position[0] + 1, self.position[1] + 1, self.position[2]]\n # for k in range(0,len(scan.ranges)-1):\n scanAngles = np.linspace(scan.angle_max, scan.angle_min, len(scan.ranges))\n lidar_local = np.array(\n [xt[0] + scan.ranges * np.cos(scanAngles + xt[2]), xt[1] - (scan.ranges * np.sin(scanAngles + xt[2]))])\n\n # print len(lidar_local[1])\n xtg = [int(np.ceil(xt[0] * Lresol)), int(np.ceil(xt[1] * Lresol))]\n self._map.grid[xtg[1], xtg[0]] = 0 # set the robot position grid as empty\n\n for k in range(0, len(scan.ranges) - 1):\n if scan.ranges[k] < scan.range_max:\n rtl = np.ceil(lidar_local[:, k] * Lresol)\n rtli = [0, 0]\n rtli[0] = int(rtl[0])\n rtli[1] = int(rtl[1])\n l = bresenham(xtg, rtli)\n self.EISM(l.path, scan.ranges[k])\n # Now that the map is updated, publish it!\n rospy.loginfo(\"Scan is processed, publishing updated map.\")\n self.publish_map()",
"def image_cb(self, msg):\n if self.waypoints is None:\n return\n if self.state_count >= self.state_count_threshold and time.time() - self.last_detection_time < self.traffic_light_detection_interval:\n return\n if time.time() - self.last_tl_off_time < self.traffic_light_off_idle_interval:\n if self.loglevel >= 5:\n rospy.logdebug(\"No detection %f %f %f\", time.time(), self.last_tl_off_time, self.traffic_light_off_idle_interval)\n return\n\n self.last_detection_time = time.time()\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 1\n self.state = state\n else:\n self.state_count += 1\n if self.state_count >= self.state_count_threshold:\n if state == TrafficLight.GREEN and self.last_state in (TrafficLight.RED, TrafficLight.YELLOW):\n self.last_tl_off_time = time.time()\n self.last_state = self.state\n self.last_wp = light_wp\n self.last_msg = state_msg = TrafficLightStatus()\n state_msg.tlwpidx = light_wp\n state_msg.state = state\n self.upcoming_red_light_pub.publish(state_msg)\n elif self.last_msg: # have not reached the threshold\n if self.car_wpidx < self.last_msg.tlwpidx + self.traffic_light_over_waypoints: \n # keep sending previous message when we are still close to the current traffic light\n self.upcoming_red_light_pub.publish(self.last_msg)\n else: # for other locations, clear traffic light status\n self.last_msg.tlwpidx = -1\n self.last_msg.state = TrafficLight.UNKNOWN\n self.upcoming_red_light_pub.publish(self.last_msg)\n self.last_msg = None\n if self.loglevel >= 4:\n rospy.loginfo(\"Curr Light_wp: %d, state: %d, global state: %d, last Light_wp: %d, state count: %d\", light_wp, state, self.state, self.last_wp, self.state_count)",
"def oscanSub(img):\n oscanL = img[:,10:50]\n oscanR = img[:,2110:2150]\n mdL=np.median(oscanL,axis=1)\n mdR=np.median(oscanR,axis=1)\n #rowL=np.arange(0,mdL.shape[0])\n #rowR=np.arange(0,mdR.shape[0])\n #(aL,bL,sda,sdb,se)=linefit(rowL,mdL)\n #(aR,bR,sda,sdb,se)=linefit(rowR,mdR)\n #oscanLfit=rowL*bL+aL\n #oscanRfit=rowR*bR+aR\n for i in range(1080):\n img[:,i] = img[:,i] - mdL #oscanLfit\n img[:,1080+i] = img[:,1080+i] - mdR #oscanRfit\n return img",
"def read_next_image(m, lcr, X_center, X_left, X_right, Y_train):\n offset = 1.0\n dist = 20.0\n steering = Y_train[m]\n\n if lcr == 0:\n image = plt.imread(normalize_path(X_left[m]))\n dsteering = offset / dist * 360 / (2 * np.pi) / 25.0\n steering += dsteering\n elif lcr == 1:\n image = plt.imread(normalize_path(X_center[m]))\n elif lcr == 2:\n image = plt.imread(normalize_path(X_right[m]))\n dsteering = -offset / dist * 360 / (2 * np.pi) / 25.0\n steering += dsteering\n else:\n print('Invalid lcr value :', lcr)\n\n return image, steering",
"def main():\n\n \"\"\"\n Initialize Parameters\n \"\"\"\n src_path_map = '../data/map/wean.dat'\n src_path_log = '../data/log/robotdata1.log'\n\n map_obj = MapReader(src_path_map)\n occupancy_map = map_obj.get_map()\n logfile = open(src_path_log, 'r')\n\n motion_model = MotionModel()\n params = {\n 'z_max': 8000,\n 'lambda_short': 0.1,\n 'sigma_hit': 20,\n\n 'z_pHit': 0.95,\n 'z_pShort': 0.01,\n 'z_pMax': 0.05,\n 'z_pRand': 0.05,\n\n 'laser_sensor_offset': 25.0,\n 'ray_step_size': 2,\n 'grid_size': 10,\n 'occ_thrsh': 0.1,\n 'laser_subsample': 30,\n\n 'rayCast_vis': False,\n 'map_vis': True\n }\n sensor_model = SensorModel(occupancy_map, params)\n resampler = Resampling()\n\n num_particles = 1500\n X_bar = init_particles_freespace(num_particles, occupancy_map)\n vis_flag = 1\n\n \"\"\"\n Monte Carlo Localization Algorithm : Main Loop\n \"\"\"\n if vis_flag:\n visualize_map(occupancy_map)\n\n first_time_idx = True\n for time_idx, line in enumerate(logfile):\n\n # Read a single 'line' from the log file (can be either odometry or laser measurement)\n meas_type = line[0] # L : laser scan measurement, O : odometry measurement\n meas_vals = np.fromstring(line[2:], dtype=np.float64,\n sep=' ') # convert measurement values from string to double\n\n odometry_robot = meas_vals[0:3] # odometry reading [x, y, theta] in odometry frame\n time_stamp = meas_vals[-1]\n\n # if ((time_stamp <= 0.0) | (meas_type == \"O\")): # ignore pure odometry measurements for now (faster debugging) \n # continue\n\n if (meas_type == \"L\"):\n odometry_laser = meas_vals[3:6] # [x, y, theta] coordinates of laser in odometry frame\n ranges = meas_vals[6:-1] # 180 range measurement values from single laser scan\n\n print(\"Processing time step \" + str(time_idx) + \" at time \" + str(time_stamp) + \"s\")\n\n if (first_time_idx):\n u_t0 = odometry_robot\n first_time_idx = False\n continue\n\n X_bar_new = np.zeros((num_particles, 4), dtype=np.float64)\n u_t1 = odometry_robot\n for m in range(0, num_particles):\n\n \"\"\"\n MOTION MODEL\n \"\"\"\n x_t0 = X_bar[m, 0:3]\n x_t1 = motion_model.update(u_t0, u_t1, x_t0)\n\n \"\"\"\n SENSOR MODEL\n \"\"\"\n if (meas_type == \"L\"):\n z_t = ranges\n w_t = sensor_model.beam_range_finder_model(z_t, x_t1)\n # w_t = 1/num_particles\n X_bar_new[m, :] = np.hstack((x_t1, w_t))\n else:\n X_bar_new[m, :] = np.hstack((x_t1, X_bar[m, 3]))\n\n X_bar = X_bar_new\n u_t0 = u_t1\n\n \"\"\"\n RESAMPLING\n \"\"\"\n X_bar = resampler.low_variance_sampler(X_bar)\n if vis_flag:\n visualize_timestep(X_bar)",
"def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return",
"def apply(self, image):\n\n bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n # Convert to float image\n float_im = bgr.copy().astype('float32') / 255\n blurred = cv2.GaussianBlur(float_im, ksize=(9, 9), sigmaX=1, sigmaY=9)\n cplanes = colors.bgr2cpaces(blurred)\n lanes, py, pw = finder.find_lane_pixels(cplanes, self.pfilter, gamma=0.4)\n\n binary = lanes\n\n # Find lanes and fit curves\n if not self.curve:\n self.sw.find(binary)\n self.curve= CurveSearch(self.sw.left_fit, self.sw.right_fit,\n image_size=self.warped_image_size, margin=20)\n lane = self.sw.visualize_lane()\n curve_rad = self.measure_curvature(self.sw.left_fit, self.sw.right_fit)\n offset = self.measure_offset(self.sw.left_fit, self.sw.right_fit)\n else:\n self.curve.find(binary)\n lane = self.curve.visualize_lane()\n curve_rad = self.measure_curvature(self.curve.left_fit, self.curve.right_fit)\n offset = self.measure_offset(self.curve.left_fit, self.curve.right_fit)\n\n non_warped_lane = self.warp_inverse(lane)\n\n result = cv2.addWeighted(image, 1, non_warped_lane, 0.3, 0)\n cv2.putText(result, \"Curve Radius: {:.0f}m\".format(curve_rad), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))\n cv2.putText(result, \"Off Center: {:.2f}m\".format(offset), (50, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))\n\n return result",
"def localize(self, new_data, gps_guess=False):\r\n if gps_guess: \r\n mapping_img, _ = self.mapdata.extract_from_map(new_data.gps_pos, new_data.attitude, np.shape(new_data.img))\r\n gps_pos, attitude = projection(self.mapdata.gps_pos, self.mapdata.attitude, new_data.gps_pos, new_data.attitude)\r\n mapping_data = RadarData(None, mapping_img, gps_pos, attitude) \r\n else:\r\n mapping_img, _ = self.mapdata.extract_from_map(self.position, self.attitude, np.shape(new_data.img))\r\n mapping_data = RadarData(None, mapping_img, self.position, self.attitude) \r\n\r\n self.position, self.attitude = new_data.image_position_from(mapping_data)\r\n self.last_data = RadarData(new_data.id, new_data.img, self.position, self.attitude)\r\n\r\n if self.mapping: \r\n self.mapdata.add_data(self.last_data)\r\n \r\n return deepcopy(self.position), deepcopy(self.attitude)",
"def image_cb(self, msg):\n light_wp = self.last_wp\n state = self.state\n self.has_image = True\n self.camera_image = msg\n \n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n colorVals = (\"Red\", \"Yellow\", \"Green\", \"Unspecified\", \"Unknown\")\n rospy.loginfo(\"tl_detector detects light change from {0} to {1}\"\n .format(colorVals[self.state], colorVals[state]))\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1",
"def lensedImage(self, source, scale, xl=8., yl=4., gamma=0.):\n image, magMap = lens(self.ltype, self.dist, source, self.x01, self.x02, xl, yl, gamma)\n\n return image, magMap"
] | [
"0.5971704",
"0.59564734",
"0.5860076",
"0.57806957",
"0.57335377",
"0.56992394",
"0.5685931",
"0.56329805",
"0.5621531",
"0.5617687",
"0.5521019",
"0.5454433",
"0.54460114",
"0.5421576",
"0.5418019",
"0.53956425",
"0.5381104",
"0.53777224",
"0.53671235",
"0.53640884",
"0.5341299",
"0.53159845",
"0.5314532",
"0.5308407",
"0.52924705",
"0.5257394",
"0.52481043",
"0.524702",
"0.52344626",
"0.52089685"
] | 0.80712897 | 0 |
Check that the no_cache cache control header is set on the response. | def test_no_cache(self):
content = self.unique()
self.assertViewBehavior(
{"cache_control_no_cache": True, "get": content},
status_code=200,
content=content,
headers_exact={"Cache-Control": "no-cache"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response",
"def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)",
"def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache",
"def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag",
"def test_cache_control_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Cache-Control') == 'no-cache, no-store, must-revalidate, max-age=0'\n assert headers.get('Pragma') == 'no-cache'",
"def disable_cache(response):\n\n response.headers['Cache-Control'] = 'max-age=0, no-cache, no-store, must-revalidate, private'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n return response",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())",
"def add_header(response):\n response.cache_control.public = True\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"0\"\n return response",
"def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})",
"def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 'no-store'\n return r\n\n self.after_request(after_request)",
"def test_precedence(self):\n self.assertViewBehavior(\n {\"cache_control_public\": True},\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers[\"Cache-Control\"] = \"public, max-age=0\"\n return r",
"def cache_is_valid():\n if ARGV.get(NOCACHE_OPT):\n return False\n if not CACHE['last-request'] \\\n or not CACHE['max-age'] \\\n or not CACHE['feed']:\n return False\n current_time = float(time.time())\n last_request = float(CACHE['last-request'])\n max_age = float(CACHE['max-age'])\n return bool(current_time - last_request < max_age)",
"def _check_cache(self):\n return os.path.exists(self._cache_key)",
"def add_header(r):\n\tr.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n\tr.headers[\"Pragma\"] = \"no-cache\"\n\tr.headers[\"Expires\"] = \"0\"\n\tr.headers['Cache-Control'] = 'public, max-age=0'\n\treturn r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r"
] | [
"0.7414972",
"0.6805398",
"0.67788416",
"0.67497075",
"0.6657216",
"0.6639991",
"0.6516709",
"0.6489518",
"0.6489176",
"0.64887327",
"0.644979",
"0.6448329",
"0.6426001",
"0.63928515",
"0.63785326",
"0.6366645",
"0.63470674",
"0.63470674",
"0.63428575",
"0.6316757",
"0.62844247",
"0.6257525",
"0.6218411",
"0.6204501",
"0.6204501",
"0.6204501",
"0.6204501",
"0.6204501",
"0.6204501",
"0.6204501"
] | 0.7025476 | 1 |
Check that the no_transform cache control header is set on the response. | def test_no_transform(self):
content = self.unique()
self.assertViewBehavior(
{"cache_control_no_transform": True, "get": content},
status_code=200,
content=content,
headers_exact={"Cache-Control": "no-transform"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response",
"def surrogate_control(self):\n def on_update(surrogate_control):\n if not surrogate_control and \"surrogate-control\" in self.headers:\n del self.headers[\"surrogate-control\"]\n elif surrogate_control: # pragma: no cover\n self.headers[\"Surrogate-Control\"] = \\\n surrogate_control.to_header()\n return parse_cache_control_header(\n self.headers.get(\"surrogate-control\"),\n on_update,\n ResponseCacheControl,\n )",
"def add_header(response):\n response.cache_control.public = True\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"0\"\n return response",
"def test_precedence(self):\n self.assertViewBehavior(\n {\"cache_control_public\": True},\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def test_no_cache(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_cache\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-cache\"})",
"def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)",
"def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response",
"def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})",
"def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response",
"def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response",
"def test_no_unsafe(self):\n\n def get_200_response(req):\n return HttpResponse(status=200)\n\n response = ConditionalGetMiddleware(self.get_response)(self.req)\n etag = response.headers[\"ETag\"]\n put_request = self.request_factory.put(\"/\", headers={\"if-match\": etag})\n conditional_get_response = ConditionalGetMiddleware(get_200_response)(\n put_request\n )\n self.assertEqual(\n conditional_get_response.status_code, 200\n ) # should never be a 412",
"def _is_not_modified_result(result):\n return result.get('status', None) == 304",
"def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def get304(self):\n bad = ('allow', 'content-encoding', 'content-language',\n 'content-length', 'content-md5', 'content-range',\n 'content-type', 'last-modified') # + c-location, expires?\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 304\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(request):\n request.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n request.headers[\"Pragma\"] = \"no-cache\"\n request.headers[\"Expires\"] = \"0\"\n request.headers['Cache-Control'] = 'public, max-age=0'\n return request",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers[\"Cache-Control\"] = \"public, max-age=0\"\n return r",
"def test_cache_control_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Cache-Control') == 'no-cache, no-store, must-revalidate, max-age=0'\n assert headers.get('Pragma') == 'no-cache'",
"def test_no_compress_incompressible_response(self):\n self.resp.content = self.incompressible_string\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.incompressible_string)\n self.assertIsNone(r.get(\"Content-Encoding\"))",
"def add_header(r):\n\tr.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n\tr.headers[\"Pragma\"] = \"no-cache\"\n\tr.headers[\"Expires\"] = \"0\"\n\tr.headers['Cache-Control'] = 'public, max-age=0'\n\treturn r",
"def _is_sanic_static(response) -> bool:\n if isinstance(response, list):\n return False\n return response.status == 304 or 'Last-Modified' in response.headers",
"def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 'no-store'\n return r\n\n self.after_request(after_request)",
"async def cache_control(request, handler):\n if request.path.startswith(\"/static/\"):\n\n def add_headers(obj):\n obj.headers[\"Cache-Control\"] = \"max-age=3600\"\n\n else:\n\n def add_headers(obj):\n obj.headers[\"Cache-Control\"] = \"no-store\"\n\n try:\n response = await handler(request)\n add_headers(response)\n return response\n except aiohttp.web.HTTPException as exc:\n add_headers(exc)\n raise",
"def test_weak_etag_not_modified(self):\n\n def get_response(req):\n response = HttpResponse(self.compressible_string)\n response.headers[\"ETag\"] = 'W/\"eggs\"'\n return response\n\n request = self.rf.get(\"/\", headers={\"accept-encoding\": \"gzip, deflate\"})\n gzip_response = GZipMiddleware(get_response)(request)\n self.assertEqual(gzip_response.headers[\"ETag\"], 'W/\"eggs\"')"
] | [
"0.6575073",
"0.62584645",
"0.6253825",
"0.61756957",
"0.6135134",
"0.61045665",
"0.6085116",
"0.6053426",
"0.6017882",
"0.58883804",
"0.5885764",
"0.5883365",
"0.58609915",
"0.58609915",
"0.577764",
"0.5760166",
"0.57544553",
"0.5737813",
"0.5737813",
"0.5734081",
"0.5727316",
"0.5718122",
"0.57067317",
"0.5693566",
"0.5692873",
"0.56691116",
"0.5663491",
"0.5659539",
"0.56335753",
"0.5630991"
] | 0.72058463 | 0 |
Check that the must_revalidate cache control header is set on the response. | def test_must_revalidate(self):
content = self.unique()
self.assertViewBehavior(
{"cache_control_must_revalidate": True, "get": content},
status_code=200,
content=content,
headers_exact={"Cache-Control": "must-revalidate"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag",
"def test_proxy_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_proxy_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"proxy-revalidate\"})",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)",
"def cache_is_valid():\n if ARGV.get(NOCACHE_OPT):\n return False\n if not CACHE['last-request'] \\\n or not CACHE['max-age'] \\\n or not CACHE['feed']:\n return False\n current_time = float(time.time())\n last_request = float(CACHE['last-request'])\n max_age = float(CACHE['max-age'])\n return bool(current_time - last_request < max_age)",
"def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def is_cache_valid(self):\n if os.path.isfile(self.cache_filename):\n mod_time = os.path.getmtime(self.cache_filename)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n return True\n return False",
"def _check_cache(self):\n return os.path.exists(self._cache_key)",
"def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response",
"def is_cache_valid(self):\n if os.path.isfile(self.cache_path_cache):\n mod_time = os.path.getmtime(self.cache_path_cache)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n if os.path.isfile(self.cache_path_index):\n return True\n return False",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def assertHttpNotModified(self, resp):\r\n return self.assertEqual(resp.status_code, 304)",
"def _ValidateCacheEntryHeader(self, cache_entry_header):\n return (cache_entry_header.request_size > 0 and\n cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and\n cache_entry_header.major_format_version == 1 and\n cache_entry_header.last_fetched_time > 0 and\n cache_entry_header.fetch_count > 0)",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers[\"Cache-Control\"] = \"public, max-age=0\"\n return r",
"def needs_update(self, cache_key):\r\n return self._read_sha(cache_key) != cache_key.hash",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())",
"def is_safe_cache(self):\n if self.get_last_update() > self.timestamp:\n return False\n return True",
"def add_header(req):\n\n req.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n req.headers[\"Pragma\"] = \"no-cache\"\n req.headers[\"Expires\"] = \"0\"\n req.headers['Cache-Control'] = 'public, max-age=0'\n return req",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r"
] | [
"0.70986366",
"0.67210567",
"0.6683338",
"0.6584366",
"0.6584366",
"0.6293861",
"0.62422216",
"0.62041104",
"0.6199004",
"0.6124973",
"0.6101025",
"0.608115",
"0.6053864",
"0.60428745",
"0.60428745",
"0.6038889",
"0.6029584",
"0.6014237",
"0.600515",
"0.5989785",
"0.59765875",
"0.59627175",
"0.595924",
"0.59521365",
"0.594048",
"0.594048",
"0.594048",
"0.594048",
"0.594048",
"0.594048"
] | 0.7850928 | 0 |
Check that the proxy_revalidate cache control header is set on the response. | def test_proxy_revalidate(self):
content = self.unique()
self.assertViewBehavior(
{"cache_control_proxy_revalidate": True, "get": content},
status_code=200,
content=content,
headers_exact={"Cache-Control": "proxy-revalidate"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache",
"def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)",
"def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def assertHttpNotModified(self, resp):\r\n return self.assertEqual(resp.status_code, 304)",
"def get304(self):\n bad = ('allow', 'content-encoding', 'content-language',\n 'content-length', 'content-md5', 'content-range',\n 'content-type', 'last-modified') # + c-location, expires?\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 304\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)",
"def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag",
"def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())",
"def assertHttpNotModified(self, response):\n self.assertEqual(response.status_code, 304)\n self.assertEqual(response.content, b'')",
"def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response",
"def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response",
"def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response",
"def cache_response(self, request, response, body=None):\r\n # From httplib2: Don't cache 206's since we aren't going to\r\n # handle byte range requests\r\n if response.status not in [200, 203]:\r\n return\r\n\r\n response_headers = CaseInsensitiveDict(response.headers)\r\n\r\n cc_req = self.parse_cache_control(request.headers)\r\n cc = self.parse_cache_control(response_headers)\r\n\r\n cache_url = self.cache_url(request.url)\r\n\r\n # Delete it from the cache if we happen to have it stored there\r\n no_store = cc.get('no-store') or cc_req.get('no-store')\r\n if no_store and self.cache.get(cache_url):\r\n self.cache.delete(cache_url)\r\n\r\n # If we've been given an etag, then keep the response\r\n if self.cache_etags and 'etag' in response_headers:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # Add to the cache if the response headers demand it. If there\r\n # is no date header then we can't do anything about expiring\r\n # the cache.\r\n elif 'date' in response_headers:\r\n # cache when there is a max-age > 0\r\n if cc and cc.get('max-age'):\r\n if int(cc['max-age']) > 0:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # If the request can expire, it means we should cache it\r\n # in the meantime.\r\n elif 'expires' in response_headers:\r\n if response_headers['expires']:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )",
"def should_refresh_client_fnc(response):\n return not response",
"def add_header(response):\n response.cache_control.public = True\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"0\"\n return response",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers[\"Cache-Control\"] = \"public, max-age=0\"\n return r",
"def test_precedence(self):\n self.assertViewBehavior(\n {\"cache_control_public\": True},\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def disable_cache(response):\n\n response.headers['Cache-Control'] = 'max-age=0, no-cache, no-store, must-revalidate, private'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n return response"
] | [
"0.72086537",
"0.7039436",
"0.6617241",
"0.65517527",
"0.6426039",
"0.6425563",
"0.6400477",
"0.63885456",
"0.63592887",
"0.6339059",
"0.6339059",
"0.6319922",
"0.6256228",
"0.6244743",
"0.62436944",
"0.6205305",
"0.6193491",
"0.6190335",
"0.6190335",
"0.612926",
"0.6054342",
"0.6035132",
"0.600892",
"0.6008405",
"0.6008405",
"0.59950536",
"0.59846574",
"0.59783447",
"0.5971222",
"0.589854"
] | 0.7780337 | 0 |
Check that the max_age cache control header is set on the response. | def test_max_age(self):
content = self.unique()
self.assertViewBehavior(
{"cache_control_max_age": 1, "get": content},
status_code=200,
content=content,
headers_exact={"Cache-Control": "max-age=1"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache",
"def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache",
"def test_s_maxage(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_s_maxage\": 1, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"s-maxage=1\"})",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag",
"def cache_is_valid():\n if ARGV.get(NOCACHE_OPT):\n return False\n if not CACHE['last-request'] \\\n or not CACHE['max-age'] \\\n or not CACHE['feed']:\n return False\n current_time = float(time.time())\n last_request = float(CACHE['last-request'])\n max_age = float(CACHE['max-age'])\n return bool(current_time - last_request < max_age)",
"def _ValidateCacheEntryHeader(self, cache_entry_header):\n return (cache_entry_header.request_size > 0 and\n cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and\n cache_entry_header.major_format_version == 1 and\n cache_entry_header.last_fetched_time > 0 and\n cache_entry_header.fetch_count > 0)",
"def cache_control(value):\n response = view_get()\n response.headers[\"Cache-Control\"] = \"public, max-age={0}\".format(value)\n return response",
"def is_cache_valid(self):\n if os.path.isfile(self.cache_filename):\n mod_time = os.path.getmtime(self.cache_filename)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n return True\n return False",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)",
"def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])\n assert (exp - lm).seconds == 3600",
"def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n return (cache_file_metadata_header.key_size > 0 and\n cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and\n cache_file_metadata_header.format_version in (1, 2, 3) and\n cache_file_metadata_header.last_fetched_time > 0 and\n cache_file_metadata_header.fetch_count > 0)",
"def get_access_control_max_age(self):\n return self.access_control_max_age",
"def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def is_cache_valid(self):\n if os.path.isfile(self.cache_path_cache):\n mod_time = os.path.getmtime(self.cache_path_cache)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n if os.path.isfile(self.cache_path_index):\n return True\n return False",
"def cached_data_fresh(name, max_age):\n age = get_cached_data_age(name)\n if not age:\n return False\n return age < max_age",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def is_safe_cache(self):\n if self.get_last_update() > self.timestamp:\n return False\n return True",
"def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())",
"def get304(self):\n bad = ('allow', 'content-encoding', 'content-language',\n 'content-length', 'content-md5', 'content-range',\n 'content-type', 'last-modified') # + c-location, expires?\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 304\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)",
"def _check_expiration(self, url, data):\n if data.expires_after < time.time():\n del self.data[url]\n data = None\n return data",
"def extended(self):\n if self.expires_at:\n return self.expires_at - self.issued_at > timedelta(days=30)\n return False",
"def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True",
"def cache_max_age(hours):\n seconds = hours * 60 * 60\n return 'max-age=' + str(seconds)",
"def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()",
"def test_expired_etags_if_none_match_response(self, sess):\r\n # get our response\r\n r = sess.get(self.etag_url)\r\n\r\n # expire our request by changing the date. Our test endpoint\r\n # doesn't provide time base caching headers, so we add them\r\n # here in order to expire the request.\r\n r.headers['Date'] = 'Tue, 26 Nov 2012 00:50:49 GMT'\r\n self.cache.set(self.etag_url, r)\r\n\r\n r = sess.get(self.etag_url)\r\n assert r.from_cache\r\n assert 'if-none-match' in r.request.headers\r\n assert r.status_code == 200"
] | [
"0.72107726",
"0.6873612",
"0.68624157",
"0.66889685",
"0.6536094",
"0.6532378",
"0.6311946",
"0.62280434",
"0.6197227",
"0.61068356",
"0.61068356",
"0.6103469",
"0.60647094",
"0.6050893",
"0.5995915",
"0.5928432",
"0.58629507",
"0.58624345",
"0.5841793",
"0.5827531",
"0.5821012",
"0.58051866",
"0.5738413",
"0.57224315",
"0.56891614",
"0.568293",
"0.5623097",
"0.5613429",
"0.5612569",
"0.5603009"
] | 0.7228714 | 0 |
Check that the s_maxage cache control header is set on the response. | def test_s_maxage(self):
content = self.unique()
self.assertViewBehavior(
{"cache_control_s_maxage": 1, "get": content},
status_code=200,
content=content,
headers_exact={"Cache-Control": "s-maxage=1"}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_max_age(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_max_age\": 1, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"max-age=1\"})",
"def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache",
"def cache_control(value):\n response = view_get()\n response.headers[\"Cache-Control\"] = \"public, max-age={0}\".format(value)\n return response",
"def has_cached(self,ourmod,etag=None):\n if \"If-Modified-Since\" in self.request.headers:\n hdr = self.request.headers[\"If-Modified-Since\"]\n theirmod =time.mktime(parsedate(hdr))\n return theirmod < ourmod\n elif \"If-None-Match\" in self.request.headers and etag is not None:\n return self.request.headers[\"ETag\"] == etag",
"def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)",
"def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def get304(self):\n bad = ('allow', 'content-encoding', 'content-language',\n 'content-length', 'content-md5', 'content-range',\n 'content-type', 'last-modified') # + c-location, expires?\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 304\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)",
"def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response",
"def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response",
"def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response",
"def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response",
"def add_header(response):\n response.cache_control.public = True\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"0\"\n return response",
"def _ValidateCacheEntryHeader(self, cache_entry_header):\n return (cache_entry_header.request_size > 0 and\n cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and\n cache_entry_header.major_format_version == 1 and\n cache_entry_header.last_fetched_time > 0 and\n cache_entry_header.fetch_count > 0)",
"def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response",
"def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])\n assert (exp - lm).seconds == 3600",
"def test_cache_control_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Cache-Control') == 'no-cache, no-store, must-revalidate, max-age=0'\n assert headers.get('Pragma') == 'no-cache'",
"def cache_is_valid():\n if ARGV.get(NOCACHE_OPT):\n return False\n if not CACHE['last-request'] \\\n or not CACHE['max-age'] \\\n or not CACHE['feed']:\n return False\n current_time = float(time.time())\n last_request = float(CACHE['last-request'])\n max_age = float(CACHE['max-age'])\n return bool(current_time - last_request < max_age)",
"def surrogate_control(self):\n def on_update(surrogate_control):\n if not surrogate_control and \"surrogate-control\" in self.headers:\n del self.headers[\"surrogate-control\"]\n elif surrogate_control: # pragma: no cover\n self.headers[\"Surrogate-Control\"] = \\\n surrogate_control.to_header()\n return parse_cache_control_header(\n self.headers.get(\"surrogate-control\"),\n on_update,\n ResponseCacheControl,\n )",
"def maybe_raise_304(request, response):\n if request.method not in (consts.METHOD_HEAD, consts.METHOD_GET):\n LOG.warning(\n 'check If-None-Match in non-standard request method: %s %s',\n request.method,\n request.path_str,\n )\n if_none_match = request.get_header(consts.HEADER_IF_NONE_MATCH)\n if if_none_match is None:\n return\n etag = response.headers.get(consts.HEADER_ETAG)\n if etag is None:\n return\n # TODO: Handle W/\"...\" weak validator.\n if etag in _parse_etags(if_none_match):\n raise wsgi_apps.HttpError(\n consts.Statuses.NOT_MODIFIED,\n 'etag matches: %s vs %s' % (etag, if_none_match),\n response.headers,\n )",
"def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def cache_response(self, request, response, body=None):\r\n # From httplib2: Don't cache 206's since we aren't going to\r\n # handle byte range requests\r\n if response.status not in [200, 203]:\r\n return\r\n\r\n response_headers = CaseInsensitiveDict(response.headers)\r\n\r\n cc_req = self.parse_cache_control(request.headers)\r\n cc = self.parse_cache_control(response_headers)\r\n\r\n cache_url = self.cache_url(request.url)\r\n\r\n # Delete it from the cache if we happen to have it stored there\r\n no_store = cc.get('no-store') or cc_req.get('no-store')\r\n if no_store and self.cache.get(cache_url):\r\n self.cache.delete(cache_url)\r\n\r\n # If we've been given an etag, then keep the response\r\n if self.cache_etags and 'etag' in response_headers:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # Add to the cache if the response headers demand it. If there\r\n # is no date header then we can't do anything about expiring\r\n # the cache.\r\n elif 'date' in response_headers:\r\n # cache when there is a max-age > 0\r\n if cc and cc.get('max-age'):\r\n if int(cc['max-age']) > 0:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # If the request can expire, it means we should cache it\r\n # in the meantime.\r\n elif 'expires' in response_headers:\r\n if response_headers['expires']:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r",
"async def cache_control(request, handler):\n if request.path.startswith(\"/static/\"):\n\n def add_headers(obj):\n obj.headers[\"Cache-Control\"] = \"max-age=3600\"\n\n else:\n\n def add_headers(obj):\n obj.headers[\"Cache-Control\"] = \"no-store\"\n\n try:\n response = await handler(request)\n add_headers(response)\n return response\n except aiohttp.web.HTTPException as exc:\n add_headers(exc)\n raise"
] | [
"0.7056533",
"0.69589156",
"0.6764175",
"0.6662597",
"0.6601882",
"0.6553718",
"0.6453501",
"0.640816",
"0.6405672",
"0.63611406",
"0.63209236",
"0.63209236",
"0.63096035",
"0.6251666",
"0.6251313",
"0.6150831",
"0.6150831",
"0.6129807",
"0.6030514",
"0.6019444",
"0.60143363",
"0.6011679",
"0.590145",
"0.58910954",
"0.58829904",
"0.58660203",
"0.5863713",
"0.5858028",
"0.5858028",
"0.58472824"
] | 0.71437556 | 0 |
Check that the behavior is disabled when cache_control is falsy. | def test_disabled(self):
content = self.unique()
self.assertViewBehavior({
"cache_control": False,
"cache_control_public": True,
"get": content},
status_code=200,
content=content,
headers_exclude="Cache-Control") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"never_cache\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def test_no_cache(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_cache\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-cache\"})",
"def test_cache_page_disabled(self):\n content = self.unique()\n calls = []\n def get(self, request, *args, **kwargs):\n calls.append(None)\n return http.HttpResponse(content)\n self.assertViewBehavior(\n {\"cache_page\": False, \"get\": get},\n repeat=2,\n status_code=200,\n content=content)\n self.assertEqual(len(calls), 2)",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def test_precedence(self):\n self.assertViewBehavior(\n {\"cache_control_public\": True},\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def can_be_disabled(self) -> bool:\n return True",
"def test_vary_on_cookie_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"vary_on_cookie\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Vary\")",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())",
"def getCacheable(self):\n return False",
"def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def toggle_caching(on=None):\n global DISABLE_CACHING\n if on is None:\n DISABLE_CACHING = not DISABLE_CACHING\n else:\n DISABLE_CACHING = bool(on)",
"def is_exempt(self) -> bool:\n\n if self.exempt_when:\n return self.exempt_when()\n return False",
"def is_no_cache_key_option(number):\n return (0x1c == (number & 0x1e))",
"def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 'no-store'\n return r\n\n self.after_request(after_request)",
"def disable_cache(response):\n\n response.headers['Cache-Control'] = 'max-age=0, no-cache, no-store, must-revalidate, private'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n return response",
"def cache_is_valid():\n if ARGV.get(NOCACHE_OPT):\n return False\n if not CACHE['last-request'] \\\n or not CACHE['max-age'] \\\n or not CACHE['feed']:\n return False\n current_time = float(time.time())\n last_request = float(CACHE['last-request'])\n max_age = float(CACHE['max-age'])\n return bool(current_time - last_request < max_age)",
"def test_cache_page_precedence(self):\n self.assertViewBehavior(\n status_code=405)",
"def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response",
"def test_precedence(self):\n self.assertViewBehavior(\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def never_ever_cache(decorated_function):\n @wraps(decorated_function)\n def wrapper(*args, **kwargs):\n response = decorated_function(*args, **kwargs)\n patch_cache_control(\n response, no_cache=True, no_store=True, must_revalidate=True,\n max_age=0)\n return response\n return wrapper",
"def test_no_transform(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_transform\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-transform\"})",
"def check_disabled(self):\n return None",
"def never_ever_cache(decorated_function):\n\n @wraps(decorated_function)\n def wrapper(*args, **kwargs):\n response = decorated_function(*args, **kwargs)\n patch_cache_control(\n response, no_cache=True, no_store=True, must_revalidate=True,\n max_age=0)\n return response\n\n return wrapper",
"def is_vuln_mode_disabled(self):\n # Set this value if you want the vuln data to be collected in the S3 file.\n return os.environ.get('DISABLE_VULN_MODE', 'false').lower() in ('1', 'yes', 'true')",
"def test_disabled_middleware(self):\n self.assertFalse(self.maintenance.is_being_performed)\n response = self.client.get('/')\n self.assertNormalMode(response)",
"def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})",
"def test_vary_on_headers_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Vary\")",
"def check_cachable(self, codelib):\n return not codelib.has_dynamic_globals",
"def noyable(self):\n return False",
"def no_cache(func):\n def disable_caching(request_handler, *args, **kwargs):\n request_handler.set_header('Cache-Control', 'no-cache, no-store, must-revalidate')\n request_handler.set_header('Pragma', 'no-cache')\n request_handler.set_header('Expires', 0)\n return func(request_handler, *args, **kwargs)\n\n return disable_caching"
] | [
"0.6935497",
"0.6439297",
"0.6283687",
"0.62281615",
"0.62020856",
"0.6143714",
"0.61050105",
"0.6087825",
"0.59735376",
"0.58653027",
"0.57895696",
"0.5783289",
"0.5764132",
"0.57436347",
"0.57417667",
"0.5730427",
"0.5700605",
"0.5676634",
"0.5612492",
"0.5606958",
"0.5591434",
"0.55599",
"0.5547825",
"0.5521385",
"0.54935515",
"0.5491359",
"0.547178",
"0.5458008",
"0.54572994",
"0.5452939"
] | 0.70288587 | 0 |
Check that the default HTTP method name protection takes precedence and that no cache control headers are set on the response. | def test_precedence(self):
self.assertViewBehavior(
{"cache_control_public": True},
status_code=405,
headers_exclude="Cache-Control") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_precedence(self):\n self.assertViewBehavior(\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def test_cache_page_precedence(self):\n self.assertViewBehavior(\n status_code=405)",
"def filter_request():\n if request.method not in ALLOWED_METHODS:\n return \"Method Not Allowed\", 405\n ua = str(request.user_agent)\n if \"Mozilla\" not in ua or \"Gecko\" not in ua:\n return \"No Scrappers!\", 403",
"def method_exempt(self) -> bool:\n\n return self.methods is not None and request.method.lower() not in self.methods",
"def test_default_span_name_missing_request_method(self):\n self.environ.pop(\"REQUEST_METHOD\")\n app = otel_wsgi.OpenTelemetryMiddleware(simple_wsgi)\n response = app(self.environ, self.start_response)\n self.validate_response(response, span_name=\"HTTP\", http_method=None)",
"def handle_request(self, request, environ, start_response,\n response_headers):\n method = environ[\"REQUEST_METHOD\"].upper()\n if method in (\"GET\", \"HEAD\"):\n return super(ReadOnlyServer, self).handle_request(\n request, environ, start_response, response_headers)\n else:\n return self.odata_error(request, environ, start_response,\n \"Unauthorised\", \"Method not allowed\", 403)",
"def _method_check(self, request, allowed=None):\n if allowed is None:\n allowed = []\n\n request_method = request.method.lower()\n allows = ','.join(map(str.upper, allowed))\n\n if request_method == \"options\":\n response = HttpResponse(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n if not request_method in allowed:\n response = http.HttpMethodNotAllowed(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n return request_method",
"def method_check(self, request, allowed=None):\r\n if allowed is None:\r\n allowed = []\r\n \r\n request_method = request.method.lower()\r\n allows = ','.join(map(lambda s: s.upper(), allowed))\r\n \r\n if request_method == 'options':\r\n response = HttpResponse(allows)\r\n response['Access-Control-Allow-Origin'] = '*'\r\n response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'\r\n response['Access-Control-Allow-Methods'] = \"GET, PUT, POST, PATCH\"\r\n response['Allow'] = allows\r\n raise ImmediateHttpResponse(response=response)\r\n \r\n if not request_method in allowed:\r\n response = http.HttpMethodNotAllowed(allows)\r\n response['Allow'] = allows\r\n raise ImmediateHttpResponse(response=response)\r\n \r\n return request_method",
"def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior({\n \"cache_control\": False,\n \"cache_control_public\": True,\n \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def response_method_not_allowed():\n\n return b\"HTTP/1.1 405 Method Not Allowed\\r\\n\"",
"def method_override(self):\n\t\treturn self.headers.get(\"X-HTTP-Method-Override\")",
"def method_check(self, request, allowed=None):\n if allowed is None:\n allowed = []\n \n request_method = request.method.lower()\n allows = ','.join(map(lambda s: s.upper(), allowed))\n \n if request_method == 'options':\n response = HttpResponse(allows)\n response['Access-Control-Allow-Origin'] = '*'\n response['Access-Control-Allow-Headers'] = 'Content-Type', 'Authorization'\n response['Access-Control-Allow-Methods'] = \"GET, PUT, POST, PATCH\"\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n \n if not request_method in allowed:\n response = http.HttpMethodNotAllowed(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n \n return request_method",
"def wrapped(request):\n if request.method in methods:\n return True\n else:\n return False",
"def test_no_transform(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_transform\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-transform\"})",
"def method_check(self, request, allowed=None):\n if allowed is None:\n allowed = []\n\n # Normally we'll just use request.method to determine the request\n # method. However, since some bad clients can't support all HTTP\n # methods, we allow overloading POST requests with a\n # X-HTTP-Method-Override header. This allows POST requests to\n # masquerade as different methods.\n request_method = request.method.lower()\n if request_method == 'post' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:\n request_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'].lower()\n\n allows = ','.join(map(str.upper, allowed))\n\n if request_method == \"options\":\n response = HttpResponse(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n if not request_method in allowed:\n response = http.HttpMethodNotAllowed(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n return request_method",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def should_skip_auth(flask_request):\n return flask_request.method in ['HEAD', 'OPTIONS']",
"def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"never_cache\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def check_method_allowed(cls, request):\r\n if not request.method in cls._meta.allowed_methods:\r\n raise HttpError(\r\n 'Method \\'%s\\' not allowed on this resource.' % request.method,\r\n status=status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_wants_to_handle3(self):\n\n self.bogus_environ['REQUEST_METHOD'] = 'GET'\n\n wants_to_handle = self.uh.wants_to_handle(self.bogus_environ)\n\n self.assertFalse(wants_to_handle)",
"def http_method_not_allowed(self, *args, **kwargs):\n\t\treturn self.abort(status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_not_get(self):\n self._environ['REQUEST_METHOD'] = 'POST'\n self.assertResponse('405 %s' % httplib.responses[405], [], '', self.app,\n self._environ)",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def test_vary_on_headers_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Vary\")",
"def request_is_valid(request):\n return 'method' in request",
"def testNoMatch_UnknownHTTPMethod(self):\n self.mox.ReplayAll()\n\n self.handler.handle('UNKNOWN', '/my_service', 'does_not_matter')\n\n self.VerifyResponse('405',\n 'Unsupported HTTP method: UNKNOWN',\n 'Method Not Allowed',\n 'text/plain; charset=utf-8')\n\n self.mox.VerifyAll()",
"def test_none_metadata(self):\n class ExampleView(views.APIView):\n metadata_class = None\n\n view = ExampleView.as_view()\n response = view(request=request)\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n assert response.data == {'detail': 'Method \"OPTIONS\" not allowed.'}",
"def dispatch_request(self, **kwargs):\n meth = getattr(self, request.method.lower(), None)\n if meth is None and request.method == 'HEAD':\n meth = getattr(self, 'get', None)\n return meth(**kwargs)",
"def assertHttpMethodNotAllowed(self, response):\r\n self.assertEqual(response.status_code, 405)",
"def clean_method(self, method):\r\n method = method.upper()\r\n if method not in ['GET', 'POST']:\r\n method = 'POST'\r\n return method"
] | [
"0.7021219",
"0.6748958",
"0.65596247",
"0.64970404",
"0.6401734",
"0.6361947",
"0.6359896",
"0.6333692",
"0.6294945",
"0.62907964",
"0.626692",
"0.6196885",
"0.6187532",
"0.6166396",
"0.61391157",
"0.6137009",
"0.6135213",
"0.6134873",
"0.6127822",
"0.6123319",
"0.6056474",
"0.60560167",
"0.6031739",
"0.6027771",
"0.6025559",
"0.5995976",
"0.596719",
"0.59273523",
"0.5904018",
"0.5902138"
] | 0.7186979 | 0 |
Check that the behavior is disabled when never_cache is falsy. | def test_disabled(self):
content = self.unique()
self.assertViewBehavior(
{"never_cache": False, "get": content},
status_code=200,
content=content,
headers_exclude="Cache-Control") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_be_disabled(self) -> bool:\n return True",
"def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior({\n \"cache_control\": False,\n \"cache_control_public\": True,\n \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def check_disabled(self):\n return None",
"def test_cache_page_disabled(self):\n content = self.unique()\n calls = []\n def get(self, request, *args, **kwargs):\n calls.append(None)\n return http.HttpResponse(content)\n self.assertViewBehavior(\n {\"cache_page\": False, \"get\": get},\n repeat=2,\n status_code=200,\n content=content)\n self.assertEqual(len(calls), 2)",
"def is_exempt(self) -> bool:\n\n if self.exempt_when:\n return self.exempt_when()\n return False",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def noyable(self):\n return False",
"def test_require_enabled_do_not_call_method(self):\n method = MagicMock(return_value=True)\n decorated = require_enabled(method)\n self = MagicMock()\n self.enabled = False\n self.assertIsNone(decorated(self))\n self.assertFalse(method.called)",
"def is_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)",
"def reason_to_be_disabled(cls):\n # Assume by default the given decoder is always enabled.\n return None",
"def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)",
"def getCacheable(self):\n return False",
"def test_no_cache(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_cache\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-cache\"})",
"def is_disabled(self, feature):\n return not self.is_enabled(feature)",
"def _canDisable(func):\n def wrapper(*args, **kwargs):\n if _DISABLE_ASSERTIONS == 0:\n return func(*args, **kwargs)\n return wrapper",
"def is_disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"is_disabled\")",
"def toggle_caching(on=None):\n global DISABLE_CACHING\n if on is None:\n DISABLE_CACHING = not DISABLE_CACHING\n else:\n DISABLE_CACHING = bool(on)",
"def is_emptiable(self) -> bool:\n raise NotImplementedError()",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def test_skipif_false():\n pass",
"def is_disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")",
"def is_disabled(self):\n\n return self.__contains__('disabled')",
"def _check_no_advisory(img_pkg_vuln, advisory_cache):\n phash = hashlib.sha256(\n json.dumps(\n [\n img_pkg_vuln.pkg_name,\n img_pkg_vuln.pkg_version,\n img_pkg_vuln.vulnerability_namespace_name,\n ]\n ).encode(\"utf-8\")\n ).hexdigest()\n if phash not in advisory_cache:\n advisory_cache[phash] = img_pkg_vuln.fix_has_no_advisory()\n\n return advisory_cache.get(phash)",
"def _do_force_cache_miss(self):\n for note in self.inspire_record.get(\"_private_notes\", []):\n if note.get(\"value\") == \"orcid-push-force-cache-miss\":\n LOGGER.debug(\n \"OrcidPusher force cache miss\", recid=self.recid, orcid=self.orcid\n )\n return True\n return False"
] | [
"0.6629997",
"0.6369304",
"0.6163698",
"0.6107162",
"0.60885584",
"0.60602385",
"0.6041304",
"0.60086805",
"0.59680194",
"0.5952674",
"0.5931698",
"0.5891131",
"0.5890093",
"0.5888327",
"0.58407855",
"0.58371407",
"0.5833242",
"0.58196545",
"0.57784563",
"0.57356924",
"0.5717433",
"0.5717433",
"0.5712037",
"0.5712037",
"0.5698231",
"0.56902206",
"0.5689881",
"0.567168",
"0.5646385",
"0.56341475"
] | 0.66546726 | 0 |
Check that the default HTTP method name protection takes precedence and that no cache control headers are set on the response. | def test_precedence(self):
self.assertViewBehavior(
status_code=405,
headers_exclude="Cache-Control") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_precedence(self):\n self.assertViewBehavior(\n {\"cache_control_public\": True},\n status_code=405,\n headers_exclude=\"Cache-Control\")",
"def test_cache_page_precedence(self):\n self.assertViewBehavior(\n status_code=405)",
"def filter_request():\n if request.method not in ALLOWED_METHODS:\n return \"Method Not Allowed\", 405\n ua = str(request.user_agent)\n if \"Mozilla\" not in ua or \"Gecko\" not in ua:\n return \"No Scrappers!\", 403",
"def method_exempt(self) -> bool:\n\n return self.methods is not None and request.method.lower() not in self.methods",
"def response_method_not_allowed():\n\n return b\"HTTP/1.1 405 Method Not Allowed\\r\\n\"",
"def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior({\n \"cache_control\": False,\n \"cache_control_public\": True,\n \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def handle_request(self, request, environ, start_response,\n response_headers):\n method = environ[\"REQUEST_METHOD\"].upper()\n if method in (\"GET\", \"HEAD\"):\n return super(ReadOnlyServer, self).handle_request(\n request, environ, start_response, response_headers)\n else:\n return self.odata_error(request, environ, start_response,\n \"Unauthorised\", \"Method not allowed\", 403)",
"def test_default_span_name_missing_request_method(self):\n self.environ.pop(\"REQUEST_METHOD\")\n app = otel_wsgi.OpenTelemetryMiddleware(simple_wsgi)\n response = app(self.environ, self.start_response)\n self.validate_response(response, span_name=\"HTTP\", http_method=None)",
"def _method_check(self, request, allowed=None):\n if allowed is None:\n allowed = []\n\n request_method = request.method.lower()\n allows = ','.join(map(str.upper, allowed))\n\n if request_method == \"options\":\n response = HttpResponse(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n if not request_method in allowed:\n response = http.HttpMethodNotAllowed(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n return request_method",
"def method_check(self, request, allowed=None):\r\n if allowed is None:\r\n allowed = []\r\n \r\n request_method = request.method.lower()\r\n allows = ','.join(map(lambda s: s.upper(), allowed))\r\n \r\n if request_method == 'options':\r\n response = HttpResponse(allows)\r\n response['Access-Control-Allow-Origin'] = '*'\r\n response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'\r\n response['Access-Control-Allow-Methods'] = \"GET, PUT, POST, PATCH\"\r\n response['Allow'] = allows\r\n raise ImmediateHttpResponse(response=response)\r\n \r\n if not request_method in allowed:\r\n response = http.HttpMethodNotAllowed(allows)\r\n response['Allow'] = allows\r\n raise ImmediateHttpResponse(response=response)\r\n \r\n return request_method",
"def test_no_transform(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_transform\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-transform\"})",
"def test_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"never_cache\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)",
"def wrapped(request):\n if request.method in methods:\n return True\n else:\n return False",
"def method_override(self):\n\t\treturn self.headers.get(\"X-HTTP-Method-Override\")",
"def should_skip_auth(flask_request):\n return flask_request.method in ['HEAD', 'OPTIONS']",
"def method_check(self, request, allowed=None):\n if allowed is None:\n allowed = []\n \n request_method = request.method.lower()\n allows = ','.join(map(lambda s: s.upper(), allowed))\n \n if request_method == 'options':\n response = HttpResponse(allows)\n response['Access-Control-Allow-Origin'] = '*'\n response['Access-Control-Allow-Headers'] = 'Content-Type', 'Authorization'\n response['Access-Control-Allow-Methods'] = \"GET, PUT, POST, PATCH\"\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n \n if not request_method in allowed:\n response = http.HttpMethodNotAllowed(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n \n return request_method",
"def check_method_allowed(cls, request):\r\n if not request.method in cls._meta.allowed_methods:\r\n raise HttpError(\r\n 'Method \\'%s\\' not allowed on this resource.' % request.method,\r\n status=status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_wants_to_handle3(self):\n\n self.bogus_environ['REQUEST_METHOD'] = 'GET'\n\n wants_to_handle = self.uh.wants_to_handle(self.bogus_environ)\n\n self.assertFalse(wants_to_handle)",
"def test_vary_on_headers_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Vary\")",
"def test_not_get(self):\n self._environ['REQUEST_METHOD'] = 'POST'\n self.assertResponse('405 %s' % httplib.responses[405], [], '', self.app,\n self._environ)",
"def http_method_not_allowed(self, *args, **kwargs):\n\t\treturn self.abort(status.HTTP_405_METHOD_NOT_ALLOWED)",
"def method_check(self, request, allowed=None):\n if allowed is None:\n allowed = []\n\n # Normally we'll just use request.method to determine the request\n # method. However, since some bad clients can't support all HTTP\n # methods, we allow overloading POST requests with a\n # X-HTTP-Method-Override header. This allows POST requests to\n # masquerade as different methods.\n request_method = request.method.lower()\n if request_method == 'post' and 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:\n request_method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'].lower()\n\n allows = ','.join(map(str.upper, allowed))\n\n if request_method == \"options\":\n response = HttpResponse(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n if not request_method in allowed:\n response = http.HttpMethodNotAllowed(allows)\n response['Allow'] = allows\n raise ImmediateHttpResponse(response=response)\n\n return request_method",
"def request_is_valid(request):\n return 'method' in request",
"def test_none_metadata(self):\n class ExampleView(views.APIView):\n metadata_class = None\n\n view = ExampleView.as_view()\n response = view(request=request)\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n assert response.data == {'detail': 'Method \"OPTIONS\" not allowed.'}",
"def testNoMatch_UnknownHTTPMethod(self):\n self.mox.ReplayAll()\n\n self.handler.handle('UNKNOWN', '/my_service', 'does_not_matter')\n\n self.VerifyResponse('405',\n 'Unsupported HTTP method: UNKNOWN',\n 'Method Not Allowed',\n 'text/plain; charset=utf-8')\n\n self.mox.VerifyAll()",
"def assertHttpMethodNotAllowed(self, response):\r\n self.assertEqual(response.status_code, 405)",
"def test_no_cache(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_no_cache\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"no-cache\"})",
"def unknown_method(self, response):\n raise NoData"
] | [
"0.7131305",
"0.6643577",
"0.6529459",
"0.6490842",
"0.63628715",
"0.6359141",
"0.6331613",
"0.62303495",
"0.6216774",
"0.62062776",
"0.6205969",
"0.619509",
"0.6160482",
"0.6113934",
"0.6082911",
"0.6071884",
"0.6065874",
"0.6062073",
"0.6059281",
"0.6055744",
"0.6038644",
"0.6018276",
"0.60037714",
"0.5970682",
"0.5959097",
"0.5953622",
"0.59253466",
"0.59113485",
"0.5903903",
"0.5888469"
] | 0.6926853 | 1 |
Get all command parsers | def get_all_command_parsers(self) -> None:
for command in self.commands:
self.get_command_parser(command) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_parsers():\n return [OptimizerFactory.get_parser(optimizer) for optimizer in OptimizerFactory.optimizers]",
"def get_parsers(self):\n return tuple([getattr(self, '_{}'.format(i)) for i in self.parsers_available])",
"def list_parsers(self, *args):\n print('==== Available parsing modules: ====\\n')\n for parser in sorted(self.parse_modules):\n print(self.parse_modules[parser].name.ljust(16) + \\\n ': ' + self.parse_modules[parser].desc)\n sys.exit(0)",
"def d_parsers(self):\n\n return self._d_parsers",
"def getCommands():\n return getPlugins(ICommand, plugins)",
"def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 
'Core.Backup.Filename'. If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)",
"def GetParsersInformation(cls):\n parsers_information = []\n for _, parser_class in cls.GetParsers():\n description = getattr(parser_class, u'DESCRIPTION', u'')\n parsers_information.append((parser_class.NAME, description))\n\n return parsers_information",
"def get_command_line_parser():\n command_line_parser = argparse.ArgumentParser(\n description=\"Execute data workflows defined in flo.yaml files\",\n )\n subcommand_creator = command_line_parser.add_subparsers(\n title='SUBCOMMANDS',\n )\n for command_module in COMMAND_MODULES:\n command = command_module.Command(subcommand_creator)\n\n # this sets a default value for the command \"option\" so\n # that, when this Command is selected by argparse from the\n # command line, we know which comman instance it\n # corresponds with. See run_subcommand function below.\n command.option_parser.set_defaults(command=command)\n return command_line_parser",
"def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]",
"def commands(self) -> List[Command]:\n return []",
"def _resolve_commands(parser):\n from .plugin import list as list_plugins\n\n # create a subparser\n subparsers = parser.add_subparsers(dest='cmd')\n\n default_command = None\n\n for command in list_plugins('command'):\n _log.info('add command ' + command.id)\n if hasattr(command, 'isDefault') and command.isDefault:\n default_command = command.id\n\n # create a argument parser for the command\n cmdparser = subparsers.add_parser(command.id)\n _log.info('loading and initializing the command: ' + command.id)\n\n # use the phovea extension point loading mechanism.\n # pass the parser as argument to the factory method so that the extension point (i.e., command)\n # can add further arguments to the parser (e.g., the address or port of the server).\n # the factory must return a launcher function, which gets the previously defined parser arguments as parameter.\n instance = command.load().factory(cmdparser)\n\n # register the instance as argument `launcher` and the command as `launcherid` to the command parser\n _log.info('add command instance to parser')\n cmdparser.set_defaults(launcher=instance, launcherid=command.id)\n\n return default_command",
"def get_commands(self):\n\t\treturn list(self.command_handlers.keys())",
"def GetNamesOfParsersWithPlugins(cls):\n parser_names = []\n\n for parser_name, parser_class in cls.GetParsers():\n if parser_class.SupportsPlugins():\n parser_names.append(parser_name)\n\n return sorted(parser_names)",
"def get_command_handlers(self):\n\t\treturn self.command_handlers",
"def get_commands(self):\n return self.__commands",
"def parse_commands(self) -> list:\n\n command = self.path.split(\"?\")[1]\n commands = command.split(\"&\")\n\n return commands",
"def get_args(cls) -> List[argparse.ArgumentParser]:\n commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)\n\n commander_options = commander_parser.add_argument_group(\"commander options\")\n commander_options.add_argument(\"-H\", \"--halt\", action=\"store_true\", default=None,\n help=\"Halt core upon connect. (Deprecated, see --connect.)\")\n commander_options.add_argument(\"-N\", \"--no-init\", action=\"store_true\",\n help=\"Do not init debug system.\")\n commander_options.add_argument(\"--elf\", metavar=\"PATH\",\n help=\"Optionally specify ELF file being debugged.\")\n commander_options.add_argument(\"-c\", \"--command\", dest=\"commands\", metavar=\"CMD\", action='append', nargs='+',\n help=\"Run commands.\")\n \n return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser]",
"def get_commands(self):\n return self._commands",
"def get_commands(self):\r\n return self._commands",
"def getCommands(self):\n\n return self.__commands",
"def getCmdList():\n return [obj for name, obj in inspect.getmembers(sys.modules[__name__]) \n if inspect.isclass(obj) and issubclass(obj, Cmd)][1:]",
"def getCommands(self):",
"def build_parsers(self, parser: argparse.ArgumentParser):\n for opt in reversed(self._options):\n parser.add_argument(*opt.args, **opt.kwargs)\n parser.set_defaults(_cmd=self, _parser=parser)\n\n if self._children:\n subparsers = parser.add_subparsers(title=\"commands\")\n for child in self._children:\n if child._name is None:\n raise CLIError(\n f\"Children {child._func} should be wrapped with\"\n \" @command\")\n subparser = subparsers.add_parser(child._name, **child._kwargs)\n child.build_parsers(subparser)",
"def get_commands(self):\n return list(self.commands.values())",
"def get_mdf_parsers() -> Set[str]:\n return set([name for name, info in get_available_adapters().items()\n if info['class'].startswith('mdf_matio')])",
"def parse(self, commands):\n raise NotImplementedError()",
"def get_commands(cls, prefix=\"\", delim=\"_\"):\n\t\tparent_members = [k for k, v in getmembers(Command)]\n\t\tfor method, func in getmembers(cls):\n\t\t\tif callable(func) and method not in parent_members and method.startswith(prefix):\n\t\t\t\t\tcommand = findall(\"{pf}{delim}?(.*)\".format(pf=prefix, delim=delim), method)[0]\n\t\t\t\t\tyield (command, method)",
"def add_command_parsers(parser, logparser):\n subparsers = parser.add_subparsers(metavar='Command')\n help_text = 'ONE OF THE FOLLOWING:\\n'\n available_commands = find_commands(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'functions'))\n max_length = max([len(a) for a in available_commands])\n for command in available_commands:\n child_parser = subparsers.add_parser(command, parents=[logparser])\n call = importlib.import_module('functions.%s'% command)\n if hasattr(call, 'set_argparser'):\n call.set_argparser(child_parser)\n else:\n child_parser.description = 'Description is missing'\n help_text += command + \": \" + \" \"*(max_length-len(command)) + ('\\n'+' '*(max_length+2)\n ).join(textwrap.wrap(child_parser.description,60)) + '\\n'\n child_parser.set_defaults(func=call.main)\n subparsers.help = help_text + '\\nType \"Command --help\" for more information about given command'",
"def pairs(self) -> Iterator[tuple[str, list[CommandParser]]]:\n for module, cmds in self._registry[\"by_module\"].items():\n yield (module, cmds)",
"def _iter_commands(self):\n return {entry_point.name: entry_point for entry_point in\n pkg_resources.iter_entry_points('chanjo.subcommands')}"
] | [
"0.7812656",
"0.76759416",
"0.7401005",
"0.715044",
"0.68813735",
"0.6622853",
"0.65283746",
"0.6448501",
"0.63693243",
"0.6357003",
"0.63433",
"0.629923",
"0.62847006",
"0.6254544",
"0.6221769",
"0.62131053",
"0.61972797",
"0.617932",
"0.6152421",
"0.6145008",
"0.61301106",
"0.6095694",
"0.6092635",
"0.6090033",
"0.6060542",
"0.6045108",
"0.6009808",
"0.6009486",
"0.60066605",
"0.59512055"
] | 0.8496855 | 0 |
Checks if controller has commands | def has_commands(self) -> bool:
return len(self.commands) > 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_commands(self):\n pass",
"def check_commands(self):\n self.check_subsystem_commands()\n self._select_mode()",
"def check_subsystem_commands(self):\n self.communications.check_controls()\n self.__check_video()\n self.__check_picture()\n self.__check_ping()\n self.__check_motion()",
"def hasMACCommands(self):\n return hasattr(self, 'commands') and len(self.commands) > 0",
"def is_enabled(command):\n if command not in Controller.commands:\n return False\n return Controller.commands[command][2]",
"def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False",
"def __commandExists(self, command, cmdtype):\n try:\n # method exists\n if hasattr(self, self.__getFullCommandName(command, cmdtype)):\n # command handler type exists\n if self.__commandHandlerTypeExists(cmdtype):\n return True\n else:\n return False\n else:\n return False\n # any key does not exist\n except KeyError:\n return False",
"def test_command_method_exists(self):\n motor_shield = MotorShield(self.options, self.connection)\n\n for command in motor_shield.commands:\n self.assertIn(command, dir(motor_shield))",
"def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes",
"def is_empty(self):\n return len(self.commands) == 0",
"def test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())",
"def is_configured(command):\n return command in COMMANDS",
"def check_command(self):\n return self.process is not None and self.process.poll() is None",
"def command_registered(self, command: str) -> bool:\n return command in self._commands",
"def __is_active(self, command):\n return True",
"def is_valid_command(args):\n if args.command is not None:\n return True\n return False",
"def is_valid_command(command):\n return is_get(command) or is_insert(command) or is_update(command) or is_delete(command) or is_showall(command) or is_search(command)",
"def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False",
"def validate_command(command):\n return command in list(VALID_COMMANDS.keys())",
"def has_command_with_name(self, command_name):\n return command_name in self.commands",
"def is_cmd(self, name):\n \n return name in self.cmds",
"def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None",
"def run_command_check(self):\n pass",
"def _check_for_commands(keep_path):\n if not os.path.exists(keep_path):\n click.echo(\"You have not registered any command yet.\")\n quit()",
"def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0",
"def is_command(self, text):\n return text.split(' ', 1)[0].startswith(\"!\")",
"def has_more_commands(self):\n return not self.eof",
"def has_more_commands(self):\n return self._current_inst < len(self._lines) - 1",
"def checkIfEnabled(self):\n\n # Reload the command file to check for new commands\n importlib.reload(BotSettings)\n matches = BotSettings.config['commands']\n\n # Check for the match and if it is there return the value that goes with the command\n for key in matches:\n key.strip(\"!\")\n if key == self.command:\n return matches.get(key)\n\n # If reached the command does not exist\n return False",
"def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True"
] | [
"0.78209513",
"0.69910705",
"0.6867466",
"0.6849808",
"0.6793685",
"0.67325574",
"0.6730906",
"0.66486543",
"0.65833414",
"0.6539244",
"0.65337753",
"0.65301454",
"0.65070695",
"0.64574474",
"0.6441197",
"0.6439012",
"0.643634",
"0.6421861",
"0.6406352",
"0.6401896",
"0.6394844",
"0.6356183",
"0.63245356",
"0.6303469",
"0.63028044",
"0.63010067",
"0.62686455",
"0.6259732",
"0.62585044",
"0.6254925"
] | 0.78347164 | 0 |
Gets all controllers modules | def _get_modules(self) -> Dict[str, ModuleType]:
modules = {}
terminal_path = Path(openbb_terminal.__file__).parent
for file in terminal_path.glob("**/*controller.py"):
spec = spec_from_file_location(file.stem, file)
if spec is not None and spec.loader is not None:
module = module_from_spec(spec)
spec.loader.exec_module(module)
ctrl_path = (
str(file)
.replace(str(terminal_path), "")
.replace("\\", "/")
.split("/")[1:]
)
for sub_name, abbr in sub_folders_abbr.items():
ctrl_path = [
path.lower().replace(sub_name, abbr) for path in ctrl_path
]
trailmap = ".".join(ctrl_path[:-1])
if trailmap not in modules:
modules[trailmap] = module
return modules | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_modules(self):\n return self._modules.values()",
"def get_controllers(self):\n s = self._NDL_API('getcontrollers', {})\n return s.split(\",\")",
"def modules(self):\n return self._modules.keys()",
"def modules(self):\n return self._modules",
"def init_controllers(self):\n if self.controllers == None:\n return\n controllers_namespace = self.__namespace + \".controllers\" # TODO: allow customize this\n try:\n controllers_package = import_module(controllers_namespace)\n except:\n return None\n\n from ron import Application\n controllers_modules = self._get_package_modules(controllers_package)\n for controller_name in controllers_modules:\n imported_controller = import_module('.' + controller_name, package=controllers_namespace)\n for i in dir(imported_controller):\n attribute = getattr(imported_controller, i)\n if inspect.isclass(attribute) and issubclass(attribute, Controller):\n controller_class = attribute(self)\n self.controllers[controllers_namespace+'.'+controller_name] = controller_class\n Application().controllers[controllers_namespace+'.'+controller_name] = controller_class",
"def controllers(self):\n output = self.run_json('/call show all')\n\n try:\n controllers = output['Controllers']\n except KeyError:\n raise StorcliException('Output is missing Controllers segment')\n\n return [c['Response Data'] for c in controllers]",
"def get_app_modules():\n apps = get_apps()\n app_modules = []\n for app in apps:\n app_modules.append(get_app_module(app))\n return app_modules",
"def _load_modules(self):\n modules = []\n agent_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardAgentModule\n )\n for cls in agent_cls_list:\n logger.info(\n \"Loading %s: %s\", dashboard_utils.DashboardAgentModule.__name__, cls\n )\n c = cls(self)\n modules.append(c)\n logger.info(\"Loaded %d modules.\", len(modules))\n return modules",
"def modules_enabled(self, c):\n\n modules = []\n for name, module in self.modules.iteritems():\n modules.append( (name, module.__class__.__name__) )\n\n return modules",
"def get_app_modules():\n for app in apps.get_app_configs():\n yield app.name, app.module",
"def modules(self):\r\n if not self._modules:\r\n self._modules = DajaxiceModule()\r\n for name, function in self._registry.items():\r\n self._modules.add(name, function)\r\n return self._modules",
"def get_modules(self):\n return self._module_loader.filelist",
"def get_controllers(self):\n controllers = list()\n try:\n rc, controllers = self.request(\"storage-systems/%s/graph/xpath-filter?query=/controller/id\" % self.ssid)\n except Exception as err:\n self.module.fail_json(msg=\"Failed to retrieve controller list! Array Id [%s]. Error [%s].\" % (self.ssid, to_native(err)))\n\n controllers.sort()\n\n controllers_dict = {}\n i = ord(\"A\")\n for controller in controllers:\n label = chr(i)\n controllers_dict[label] = controller\n i += 1\n\n return controllers_dict",
"def modules_registered(self) -> list[Module]:\n return [cmds[0].module for cmds in self._registry[\"by_module\"].values()]",
"def all_registered_modules():\n yield from iterchain(modules.values() for modules in Registry.monomers.values())",
"def get_loaded_modules(self):\n return self._get_modules(self.loaded_modules)",
"def modules(self):\n return ModuleManager(self)",
"def _list_modules():\r\n return [\r\n desc.module_class\r\n for desc\r\n in _list_descriptors()\r\n ]",
"def modules(cls):\n members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))\n modules = [module for name, module in members if not name.startswith('_')]\n return modules",
"def getAllModules(self):\n\n modules = cmds.ls(type=\"network\")\n returnMods = []\n for module in modules:\n attrs = cmds.listAttr(module)\n if \"parent\" in attrs:\n returnMods.append(module)\n\n return returnMods",
"def init_controllers(app):\n for controller in os.listdir(os.getcwd() + \"/controllers\"):\n module_name, ext = os.path.splitext(controller)\n if module_name.endswith('_controller') and ext == '.py':\n module = __import__(\"controllers.%s\" % module_name)\n PYSTHClient.controllers.append(\n module.__getattribute__(module_name))\n for controller in PYSTHClient.controllers:\n app.register_blueprint(controller.PAGE)",
"def get_canvas_modules(request):\n try:\n canvas_course_id = request.GET['course_id']\n canvas_user_id = request.session['LTI_LAUNCH']['user_id']\n except KeyError:\n return http.HttpResponseBadRequest()\n try:\n canvas_auth = CanvasApiAuthorization.objects.get(lti_user_id=canvas_user_id)\n except CanvasApiAuthorization.DoesNotExist:\n return http.HttpResponseForbidden()\n module_list = canvas_api.get_module_list(canvas_auth, canvas_course_id)\n return http.JsonResponse(\n {'id': request.GET['course_id'], 'modules': module_list}, safe=False\n )",
"def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name",
"def modules(self):\n return self.rpc.call(MsfRpcMethod.SessionCompatibleModules, [self.sid])['modules']",
"def modules():",
"def plugin_list(self):\r\n return get_module_list()",
"def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield desc.module\n else:\n continue",
"def models(self):\n return self.config.models()",
"def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers",
"def get_enabled_modules(self):\n return self._gconf.get_enabled_modules()"
] | [
"0.6834421",
"0.6675395",
"0.6673012",
"0.6617",
"0.65380013",
"0.65167725",
"0.6488117",
"0.635962",
"0.62996364",
"0.62916434",
"0.6280447",
"0.6221304",
"0.62206507",
"0.6197537",
"0.61878586",
"0.6163962",
"0.61522275",
"0.61329126",
"0.6120377",
"0.6109719",
"0.610647",
"0.6047105",
"0.60184824",
"0.60136336",
"0.5978936",
"0.5953184",
"0.5918956",
"0.5899924",
"0.58891034",
"0.58861446"
] | 0.6716505 | 1 |
Get the ControllerDoc instance for a controller | def get_controller_doc(self, controller_name: str) -> ControllerDoc:
if controller_name not in self.controller_docs:
raise KeyError(f"Controller {controller_name} not found")
return self.controller_docs[controller_name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_controller(self):\n return self.__controller",
"def getController(self):\n return self.__controller",
"def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )",
"def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller",
"def controller(self):\n return self._controller",
"def create_controller() -> Controller:\n _controller = Controller()\n return _controller",
"def instance():\n\n if Controller._instance == None:\n Controller._instance = Controller()\n return Controller._instance",
"def get_controller(self) -> PIDController:\n return deepcopy(self._controller)",
"def create_controller(self, typ):\n return self.controller_objects[typ]()",
"def getController(self,deviceID):\n if deviceID in self.controllers:\n return self.controllers[deviceID]\n else:\n newCtrl = MotorController(self,deviceID)\n self.controllers[deviceID] = newCtrl\n return newCtrl",
"def controller(self) -> Optional['outputs.CSIVXFlexOSSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. \"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. This is a string\n self.controller_method = controller_method_str",
"def get_controller1(self):\n return self.__controller1",
"def getViewerController(self, v_id=-1) -> ViewerController:\n if v_id == -1:\n v_id = self.getCurrentId()\n if v_id == -1: # No tab open\n return None\n viewer_ctrl = self._model.getViewerCtrl(v_id)\n return viewer_ctrl",
"def controller(self) -> Optional['outputs.CSIUnitySpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None",
"def controller(self) -> Optional['outputs.CSIPowerStoreSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def get_controller(equipment, accessmethod, logfile=None):\n path = _CONTROLLERMAP[accessmethod]\n constructor = module.get_object(path)\n return constructor(equipment, logfile)",
"def get_controller2(self):\n return self.__controller2",
"def controller(self) -> Optional['outputs.CSIIsilonSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def __init__(self, controller):\n self._controller = controller",
"def controller(self): # type: () -> ControllerHostConfig\n return self.host_settings.controller",
"def get_current_controller():\n controllers = parse_yaml_file(JUJU_CONTROLLERS_YAML)\n return controllers.get(\"current-controller\", \"\")",
"def name(self) -> str:\n return \"Controller\"",
"def getDefault(self):\n return DefaultController.getInstance()",
"def get_controller(self):\n node_id, _host, _port, _rack = self.client.cluster.controller\n return node_id",
"def controller(self) -> Optional['outputs.CSIPowerMaxSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def get_controller(request: pytest.FixtureRequest) -> Callable[..., Controller]:\n default_class = Controller\n marker = request.node.get_closest_marker(\"controller_data\")\n if marker and marker.kwargs:\n # Must copy so marker data do not change between test cases if marker is\n # applied to test class\n markerdata = marker.kwargs.copy()\n else:\n markerdata = {}\n\n def getter(\n handler: Any,\n class_: Optional[Type[Controller]] = None,\n **server_kwargs,\n ) -> Controller:\n \"\"\"\n :param handler: The handler object\n :param class_: If set to None, check controller_data(class_).\n If both are none, defaults to Controller.\n \"\"\"\n assert not inspect.isclass(handler)\n marker_class: Optional[Type[Controller]]\n marker_class = markerdata.pop(\"class_\", default_class)\n class_ = class_ or marker_class\n if class_ is None:\n raise RuntimeError(\n f\"Fixture '{request.fixturename}' needs controller_data to specify \"\n f\"what class to use\"\n )\n ip_port: HostPort = markerdata.pop(\"host_port\", HostPort())\n # server_kwargs takes precedence, so it's rightmost (PEP448)\n server_kwargs = {**markerdata, **server_kwargs}\n server_kwargs.setdefault(\"hostname\", ip_port.host)\n server_kwargs.setdefault(\"port\", ip_port.port)\n return class_(\n handler,\n **server_kwargs,\n )\n\n return getter",
"def get_model(self):\n return Doc()",
"def get_topic_controller(self, topic, project=None):\n target = self.lookup(topic, project)\n return target and target.topic_controller"
] | [
"0.741653",
"0.7332534",
"0.7241322",
"0.7156778",
"0.69970584",
"0.67327803",
"0.66837436",
"0.6661715",
"0.6410457",
"0.6287942",
"0.6268927",
"0.62614125",
"0.6155133",
"0.59957176",
"0.5957962",
"0.59505653",
"0.5928965",
"0.58955836",
"0.5859399",
"0.58194286",
"0.5709869",
"0.5693956",
"0.56743306",
"0.5672699",
"0.5641757",
"0.56398195",
"0.56339175",
"0.5629737",
"0.5627669",
"0.5542403"
] | 0.8244815 | 0 |
Return the mongodb session document or None | def _get_mongo_session(self, sid):
return self.coll.find_one({'sid': sid}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def find_session_by_id(id: int, mongo: MongoDB = mongodb) -> SessionOutModel:\n if session := await mongo.session_coll.find_one({\"id\": id}):\n return SessionOutModel(**session)\n else:\n return SessionOutModel()",
"def document(self):\n query = {\"_id\": ObjectId(self.document_id)}\n return Document(get_collection(\"documents\").find_one(query))",
"def session(self):\n\t\treturn self._session",
"def session(self):\n return self.session_store.get_session()",
"def session(self):\n return self.ssession()",
"def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session",
"def get_session(self):\n return self.session",
"def get_session(self):\n return self._session()",
"def session(self):\n return self.session_store.get_session(backend=\"datastore\")",
"def session(self):\n return session",
"def get_current_session(self):\n if self.session is not None:\n return self.session\n else:\n return None",
"def session(self):\n return self._session",
"def session(self):\n return self._session",
"def session(self):\n if self._session is None:\n self.init_session()\n\n return self._session",
"def session(self):\n return self.__session",
"def session(self):\n return self.session_store.get_session()",
"def session(self):\n return self.session_store.get_session()",
"def get(database, session_id: SessionId):\n return database.sessions.find_one({\"session_id\": session_id})",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session",
"def get_document(self, doc_id: int) -> Optional[Users]:\n try:\n doc = self.session.query(CandidatesDocuments).get(doc_id)\n\n return doc\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get doc: {excpt}')\n\n return None",
"def get_session(self, renew: Optional[bool] = False) -> neo4j.work.simple.Session:\n if self.session is None or renew:\n sess = self.driver.session()\n self.session = sess\n return self.session",
"def session(self):\n\n return self._session",
"def get_session(cls):\r\n if cls._session is not None:\r\n return cls._session\r\n else:\r\n raise RuntimeError('Session not set.')",
"async def get_document(self):\n try:\n document = await self.request.json()\n except json.JSONDecodeError:\n document = {}\n\n return document",
"def session(self) -> \"Session\":\n return self._instance",
"def get_document(self):\n return self.document",
"def get_session(self):\r\n if self._config.has_key('database'):\r\n return self._builder.session(self._config['database'], self.get_threads())\r\n if not self._config.has_key('host'):\r\n raise Exception(\"Database engine host configuration is not found\")\r\n elif not self._config.has_key('dbpath'):\r\n raise Exception(\"Database path configuration is not found\")\r\n else:\r\n return self._builder.session(None, self.get_threads(), self._config['host'], self._config['dbpath'])",
"def get_record(oid):\n\n username = _authenticate_admin_from_session(request)\n\n if username:\n\n # execute raw MongoDB query and return the record with the specified oid.\n recs = Metadata.objects.get_or_404(pk=oid)\n return jsonify(dict(results=recs))\n\n else:\n return Response('Bad or missing session id.', status=401)",
"def obj_get(self, request=None, **kwargs):\n return Document(self.get_collection(request).find_one({\n \"_id\": ObjectId(kwargs.get(\"pk\"))\n }))",
"def session(self) -> Session:\n if self._session is None:\n self._session = Session()\n\n return self._session"
] | [
"0.68857235",
"0.6614606",
"0.6558535",
"0.6487476",
"0.64846057",
"0.6472054",
"0.6447504",
"0.6445352",
"0.6444354",
"0.64425755",
"0.64312917",
"0.6427674",
"0.6427674",
"0.6384613",
"0.63736564",
"0.636652",
"0.636652",
"0.63567775",
"0.62485236",
"0.61746705",
"0.61694515",
"0.616703",
"0.6098926",
"0.60961616",
"0.60837907",
"0.60826147",
"0.6061687",
"0.6027266",
"0.6027139",
"0.60180116"
] | 0.7997712 | 0 |
Returns IEX Corporate Actions from the refdata endpoints | def get_iex_corporate_actions(start=None, **kwargs):
return CorporateActions(start=start, **kwargs).fetch() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actions(self) -> List[str]:\n return list(self.__endpoints.keys())",
"def _get_cloud_functions_actions(self, namespace_id):\n\n res = requests.get(\n f\"{self.cf_namespaces_url}/{namespace_id}/actions?limit=200\",\n headers=self.get_headers(),\n )\n return json.loads(res.text)",
"def get_actions(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetActionsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetActionsV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )",
"def get_actions(self):\n\n if self.description == exceptions.NotAvailableError:\n raise exceptions.NotAvailableError('Can\\'t get actions because a description for this service is'\n ' not available.')\n return list(self.actions.values())",
"def get_actions(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetActions.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def getActions(self, state): \n util.raiseNotDefined()",
"def ExtractOperations(toolF):\n return [o[\"uri\"] for o in toolF[\"operation\"]]",
"def _get_legal_actions(self):\n raise NotImplementedError",
"def GetCustomActions(debug, verbose, explicit_configurations):\r\n\r\n return []",
"def _get_actions(self):\n return self.__actions",
"def _get_actions(self):\n return self.__actions",
"def _get_actions(self):\n return self.__actions",
"def get_integrations_actions(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'next_page', 'previous_page', 'sort_by', 'sort_order', 'category', 'name', 'secure', 'include_auth_actions']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_actions\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/actions'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'sort_order' in params:\n query_params['sortOrder'] = params['sort_order']\n if 'category' in params:\n query_params['category'] = params['category']\n if 'name' in params:\n query_params['name'] = params['name']\n if 'secure' in params:\n query_params['secure'] = params['secure']\n if 'include_auth_actions' in params:\n query_params['includeAuthActions'] = params['include_auth_actions']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ActionEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def list(self):\n\n return list(\n filter(\n lambda x: x.get('type') != 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )",
"def get_actions(self):\n return self.agent.get_actions()",
"def getExpAccessions(cf):\n\tplatform = cf.get_parameter('platform')\n\tsrafetchxml = cf.get_input('srafetchxml')\n\tsraexplist = cf.get_output('sraexplist')\n\tsraxmlparser = SRAXMLParser()\n\truns = sraxmlparser.parse(srafetchxml)\n\twriter = csv.writer(open(sraexplist, 'wb'), quoting=csv.QUOTE_NONE)\n\twriter.writerow(['NCBISRAExpID'])\n\taccessions = []\n\tfor run in runs:\n\t\tif platform and \\\n\t\t\tnot run.platform == platform:\n\t\t\tcontinue\n\t\telif not run.exp_accession in accessions:\n\t\t\twriter.writerow([run.exp_accession])\n\t\t\taccessions.append(run.exp_accession)\n\tcf.write_log(\"GetExpAccessions: wrote %s experiment accessions\" % len(accessions))\n\treturn constants.OK",
"def list(self):\n return list(\n filter(\n lambda x: x.get('type') == 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )",
"def getActions(self):\n actions = self.actions[:]\n return actions",
"def get_actions(self):\n return []",
"def fusion_api_get_authorization_category_actions(self, api=None, headers=None, resource_uri='', sessionID=None,):\n param = '/category-actions%s' % resource_uri\n return self.auth.get(api=api, param=param, headers=headers, sessionID=sessionID)",
"def actions(self):\n self._actions = {}\n self._actions['getItems'] = ('FileCrawler', None)\n #self._actions['getContents'] = ('ParseContents', ('path'))\n return self._actions",
"def get_actions(self, oid, action_id=None):\n if action_id is None:\n path = '/servers/%s/os-instance-actions' % oid\n key = 'instanceActions'\n else:\n path = '/servers/%s/os-instance-actions/%s' % (oid, action_id)\n key = 'instanceAction'\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('get openstack server %s actions: %s' % \n (oid, truncate(res)))\n return res[0][key]",
"def get_legal_actions(self):\n pass",
"def actions(self):\n\n return self._actions.getSlice(0)",
"def actions(self):\n\n return self._actions.getSlice(0)",
"def get_actions(\n self, observations: Observations, action_space: gym.Space\n ) -> Actions:\n return super().get_actions(observations, action_space)",
"def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")",
"def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")",
"def get_interactions(self):\n return self._interactions",
"def actions(self):\n return self._actions"
] | [
"0.578277",
"0.5591671",
"0.55305415",
"0.546881",
"0.5408633",
"0.5356158",
"0.5350876",
"0.53415024",
"0.53084",
"0.5253717",
"0.5253717",
"0.5253717",
"0.5253487",
"0.524838",
"0.52430826",
"0.52192104",
"0.51942307",
"0.51654327",
"0.51577294",
"0.51444995",
"0.5118404",
"0.50962615",
"0.5070397",
"0.5053905",
"0.5053905",
"0.5034384",
"0.5034156",
"0.5034156",
"0.5011528",
"0.4993416"
] | 0.6782529 | 0 |
Returns IEX Dividends from the refdata endpoints | def get_iex_dividends(start=None, **kwargs):
return Dividends(start=start, **kwargs).fetch() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_path_endpoints(self):\n endpoints = []\n\n # Get the far end of the last path segment\n path, split_ends, position_stack = self.trace()\n endpoint = path[-1][2]\n if split_ends is not None:\n for termination in split_ends:\n endpoints.extend(termination.get_path_endpoints())\n elif endpoint is not None:\n endpoints.append(endpoint)\n\n return endpoints",
"def endpoints(self) -> pulumi.Input[Sequence[pulumi.Input['EndpointDependencyArgs']]]:\n return pulumi.get(self, \"endpoints\")",
"def endpoints(self):\n return self[\"endpoints\"]",
"def get_endpoints(self):\r\n return ENDPOINTS",
"def getEnds(self) -> List[int]:\n ...",
"def getLinkEnds(self):\n dataDict = self.__dict__\n result = set(ca.boundLinkEnd for ca in self.chemAtoms if isinstance(ca,LinkAtom))\n if None in result:\n result.remove(None)\n result = frozenset(result)\n return result",
"def get_drefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_to( ea, xrf )\r\n\treturn ret",
"def get_drefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_from( ea, xrf )\r\n\treturn ret",
"def endpoint_list(self):\n _, body = self.request('/v1.1/endpoints', 'GET')\n return body",
"def getIntrons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)-1):\n rtrn.append(Interval(self.chr,self.exonEnds[i]+1,self.exonStarts[i+1]-1))\n return rtrn",
"def get_endpoints(self):\n return self.endpoints.values()",
"def getExons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)):\n rtrn.append(Interval(self.chr,self.exonStarts[i],self.exonEnds[i],self.strand,name = self.name+\"_exon_\"+str(i+1)))\n return rtrn",
"def lookup_dividends(ticker):\n dividend_df = ticker.dividends\n return(convert_df_to_list(dividend_df))",
"def list_endpoints(self):\n resp, body = self.get(\"endpoints\")\n body = self._parse_array(etree.fromstring(body))\n return resp, body",
"def get_endpoints(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id) + \"/endpoints\"\n resp = self._cb.get_object(url)\n return resp.get(\"results\", [])",
"def endpoints(self) -> Optional[Sequence['outputs.EndpointResponse']]:\n return pulumi.get(self, \"endpoints\")",
"def _get_urls_drs(file_type, id, reference_name = None, start = None, end = None):\n\n file_exists = file_exists_drs(id)\n if file_exists:\n file_format = \"VCF\" \n urls = _create_slices(CHUNK_SIZE, id, reference_name, start, end)\n response = {\n 'htsget': {\n 'format': file_format,\n 'urls': urls \n }\n }\n return {\"response\": response, \"http_status_code\": 200}\n else:\n err = f\"No {file_type} found for id: {id}\" \n return {\"response\": err, \"http_status_code\": 404}",
"def uri(self) -> list:\n raise NotImplementedError(\"ErddapArgoDataFetcher.uri not implemented\")",
"def _findExonEnd(self, exonRecs, iBlkStart):\n iBlkEnd = iBlkStart + 1\n while (iBlkEnd < len(exonRecs)) and (self._tGapSize(exonRecs, iBlkEnd) < minIntronSize):\n iBlkEnd += 1\n return iBlkEnd, exonRecs[iBlkEnd - 1].end - exonRecs[iBlkStart].start",
"def endpoint(self):\n return (self._start, self._end)",
"def find_isolated_endpoints(lines):\n \n isolated_endpoints = []\n count = len(lines)\n print(\"Finding isolated end points 2/3\")\n pb = pbar.ProgressBar(count)\n for i, line in enumerate(lines):\n pb += 1\n other_lines = lines[:i] + lines[i+1:]\n for q in [0,-1]:\n endpoint = Point(line.coords[q])\n if any(endpoint.touches(another_line) \n for another_line in other_lines):\n continue\n else:\n isolated_endpoints.append(endpoint)\n del pb\n return isolated_endpoints",
"async def Available_Endpoints() -> List[Dict[str, str]]:\n return [{\"path\": endpoint} for endpoint in busylightapi.endpoints]",
"def get_view_endpoints(self):\n return []",
"def get_endpoints(self, **kwargs):\n return self._database.lookup('endpoint', kwargs)",
"def get_endpoints(self, epg_dn):\n result = []\n for item in filter(lambda x: type(x).__name__ == 'CEp', self.query_child_objects(epg_dn)):\n # Creates a dynamic object type.\n endpoint = type('endpoint', (object,), {})\n\n # Filter the endpoint in memory looking for the object that contains the interface where the endpoint is\n # attached\n endpoint_connection_mo = filter(lambda x: type(x).__name__ == 'RsCEpToPathEp',\n self.query_child_objects(item.dn))[0]\n\n # Format the string to be human readable\n endpoint_connection_interface = str(endpoint_connection_mo.tDn).replace('topology/pod-1/paths','node').\\\n replace('pathep-[', '').replace(']','')\n\n # Add attributes to the object\n endpoint.ip = item.ip\n endpoint.mac = item.mac\n endpoint.name = item.name\n endpoint.interface = endpoint_connection_interface\n\n # Append it to the list\n result.append(endpoint)\n return result",
"def get_imported_endpoints(self):\n with self.__import_lock:\n return [reg.get_import_reference() for reg in self.__imported_regs]",
"def edges(self):\n return self.dovetails + self.containments + self.internals",
"def get_cc_endpoints(self):\n # ~~controllers\n self.cc_ep_list = []\n self.cc_list_all_key = []\n for cur_gld_ep_name in self.gld_list_all_key:\n cur_cc_ep_dict = {}\n\n cur_cc_ep_dict[\"global\"] = self.param_cc_ep_global\n cur_cc_ep_dict[\"name\"] = self.param_cc_ep_pref + cur_gld_ep_name\n cur_cc_ep_dict[\n \"destination\"\n ] = f\"{self.gld_json_config_name}/{cur_gld_ep_name}\"\n cur_cc_ep_dict[\"type\"] = self.param_cc_ep_type\n\n self.cc_ep_list.append(cur_cc_ep_dict)\n self.cc_list_all_key.append(cur_cc_ep_dict[\"name\"])",
"def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret",
"def get_flask_endpoints(self):\n urls = self.endpoints.keys()\n return urls"
] | [
"0.57260317",
"0.570597",
"0.5704981",
"0.5656375",
"0.5609971",
"0.542474",
"0.5416715",
"0.54091465",
"0.5392832",
"0.53809077",
"0.5363819",
"0.53049743",
"0.52954215",
"0.5283466",
"0.5228727",
"0.52135223",
"0.51494527",
"0.5141439",
"0.51192796",
"0.5101566",
"0.50996774",
"0.5090531",
"0.5029777",
"0.49923828",
"0.49915022",
"0.49811763",
"0.49759945",
"0.4967549",
"0.49533197",
"0.49516848"
] | 0.60407233 | 0 |
Returns IEX Next Day Ex Date from the refdata endpoints | def get_iex_next_day_ex_date(start=None, **kwargs):
return NextDay(start=start, **kwargs).fetch() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_next_day(self):\n pass",
"def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()",
"def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()",
"def _get_date(self, relative_idx):\r\n return self.dl.dates[self._identified_date_id + relative_idx]",
"def findNextDate(cnx, firstdate):\n\n cur = cnx.cursor()\n cur.execute(\"SELECT gross_date FROM boxoffice ORDER BY gross_date DESC LIMIT 1\")\n\n try:\n lastdate = cur.fetchone()[0]\n nextdate = datetime.strptime(lastdate, '%Y-%m-%d') + timedelta(days=1)\n nextdate = nextdate.strftime('%Y-%m-%d')\n except TypeError:\n nextdate = firstdate\n finally:\n cur.close()\n return nextdate",
"def next_determination_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Determination Date']\n next_ddate = min(dd.loc[dd>pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return next_ddate",
"def next_day(date):\n return date + datetime.timedelta(days=1)",
"def next_day(date):\n return date + datetime.timedelta(days=1)",
"def get_day(x):\n return x[\"SALE DATE\"].day",
"def next_payment_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Payment Date'].dropna()\n next_date = min(dd.loc[dd>pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return next_date",
"def exp_day_to_date(ins):\n a_day = 24 * 60 * 60\n if ins.exp_time > a_day and ins.exp_time < 2 * a_day:\n return ins.exp_day.add_days(-1)\n else:\n return ins.exp_day",
"def _next_trading_day(self, day):\n next_day = self._trading_days.shift(-1)[day]\n return next_day if not pd.isnull(next_day) else None",
"def test_output_day(self):\n input_ = [\n self.indicator_record(date=datetime.date(2011, 1, 1), value=0.83),\n self.indicator_record(date=datetime.date(2011, 2, 1), value=0.80),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n\n self.assertEqual(output[-1].date.day, 1)",
"def getnextrunningdate(jsondata):\n\n returneddata = json.loads(jsondata)\n days = {}\n\n if returneddata[\"response_code\"]==200:\n trainData = returneddata[\"train\"]\n daysData = trainData[\"days\"]\n if daysData:\n for day in trainData[\"days\"]:\n days[day[\"day-code\"]]=day[\"runs\"]\n\n today = datetime.date.today()\n nextweekday = (today + datetime.timedelta(days=7))\n\n for i in range(len(days)):\n runningdate = (nextweekday + datetime.timedelta(days=i))\n if models.istrainrunningonjourneydate(days, runningdate):\n return runningdate\n\n return nextweekday",
"def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]",
"def next(self):\n return self.from_date(self.date_b)",
"def test_time_series_intraday_date_indexing_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='date')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == str",
"def next_date(date):\n #For this function, I just created as many if else statements as I could to cover every situation I could think of.\n #Most of these if else statements are distinct edge cases where I add 1 in a different spot each time.\n if date[0] == 1 or date[0] == 3 or date[0] == 5 or date[0] == 7 or date[0] == 8 or date[0] == 10:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 12:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (1, 1, date[2] + 1)\n return nextday\n elif date[0] == 4 or date[0] == 6 or date[0] == 9 or date[0] == 11:\n if date[1] < 30:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 30:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 2:\n if date[2] % 4 == 0 or date[2] % 1000 == 0:\n if date[1] < 29:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 29:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[1] < 28:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 28:\n nextday = (date[0] + 1, 1, date[2])\n return nextday",
"def _get_first_end_date(self):\n ins = acm.FInstrument['SACPI']\n market = \"internal\"\n start_year = acm.Time.DateAddDelta(acm.Time.FirstDayOfYear(self.start_date), 0, 0, -1)\n this_year_prices = acm.FPrice.Select(\"instrument='%s' and market='%s' and day>'%s' and day<'%s'\" \n % (ins.Name(), market, start_year, self.start_date))\n\n prices = sorted(this_year_prices, key=lambda price: price.Day(), reverse=True)\n last_sacpi_day = prices[0].Day()\n sacpi_plus_five_m = acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(last_sacpi_day, 0, 5, 0))\n return sacpi_plus_five_m",
"def call_fut(self, node):\r\n fut = duedate.get_extended_due_date\r\n return fut(node)",
"def test_get_index_of_day_one_day_list(self):\n days = [\"15.07.2013\"]\n self._test_find_day(days)\n self._test_giod(days, \"16.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"16.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"16.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")\n self._test_giod(days, \"10.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"10.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"10.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")",
"def test_time_series_intraday_date_indexing_python3(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='date')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == str",
"def __getdaysinbase(self):\n\t\treturn self._daysinbase",
"def test_simple_case(self):\n input_ = (datetime.date(1991, 2, 26), datetime.date(1991, 3, 26))\n\n expected = (datetime.date(1991, 2, 27), datetime.date(1991, 3, 27))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)",
"def test_first_date_static_2(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 4, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)",
"def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()",
"def get_expiration_date():\n dt = datetime.now()\n bdays_indx = pd.bdate_range(\n dt.strftime(\"%Y-%m-%d\"),\n (dt + timedelta(days=20)).strftime(\"%Y-%m-%d\"),\n freq=pd.offsets.CustomBusinessDay(calendar=USFederalHolidayCalendar()),\n ).tolist()\n expiration = [x.strftime(\"%Y-%m-%d\") for x in bdays_indx if x.weekday() == 4][0]\n return expiration",
"def get_expiry_date(name: str, date_str: str):\n if 'IO' == name:\n # 沪深300, 到期月份的第三个星期五,遇国家法定假日顺延\n dates = THIRD_FRIDAYS[THIRD_FRIDAYS > date_str]\n day_str = get_next_trading_day_str(dates[0])\n elif name in ['cu', 'al', 'zn', 'au', 'ru']:\n # 上期所,标的期货合约到期日前一个月的倒数第 5 个交易日\n dates = LAST_BUSINESS_DAY[LAST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], -5)\n elif name in ['m', 'c', 'i', 'pg', 'l', 'v', 'pp']:\n # 大商所,标的期货合约到期日前一个月的第 5 个交易日\n dates = FIRST_BUSINESS_DAY[FIRST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], 5)\n elif 'SR' == name and date_str < '2019-09-01':\n # 郑商所,2019-09-01 之前为标的期货合约到期日前两个月的倒数第 5 个交易日\n dates = LAST_BUSINESS_DAY[LAST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-2], -5)\n elif name in ['CF', 'SR', 'RM', 'MA', 'TA', 'ZC']:\n # 郑商所,标的期货合约到期日前一个月的第 3 个交易日\n dates = FIRST_BUSINESS_DAY[FIRST_BUSINESS_DAY < date_str]\n day_str = get_next_trading_day_str(dates[-1], 3)\n else:\n raise ValueError(f\"options contract not supported: {name}\")\n return day_str",
"def test_time_series_intraday_date_integer_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='integer')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == int",
"def get_expiry(\n expiries: List[pendulum.DateTime], n: int = 1, sort: bool = True\n) -> pendulum.DateTime:\n n = n if n < 1 else n - 1\n print(\"int0\")\n if sort:\n return sorted(expiries)[n]\n else:\n return expiries[n]"
] | [
"0.6659892",
"0.6455671",
"0.6455671",
"0.5981699",
"0.57411253",
"0.56931394",
"0.5636958",
"0.5636958",
"0.562946",
"0.55402404",
"0.5531359",
"0.5511678",
"0.54665154",
"0.5407168",
"0.538642",
"0.5366092",
"0.5354358",
"0.5353504",
"0.5346452",
"0.53019017",
"0.5287904",
"0.5264765",
"0.52567816",
"0.5249552",
"0.5248669",
"0.5238809",
"0.5234135",
"0.5234131",
"0.5232598",
"0.52182263"
] | 0.71448374 | 0 |
Returns IEX Listed Symbol Directory from the refdata endpoints | def get_iex_listed_symbol_dir(start=None, **kwargs):
return ListedSymbolDir(start=start, **kwargs).fetch() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def path_entries(self):",
"async def Available_Endpoints() -> List[Dict[str, str]]:\n return [{\"path\": endpoint} for endpoint in busylightapi.endpoints]",
"def uri(self) -> list:\n raise NotImplementedError(\"ErddapArgoDataFetcher.uri not implemented\")",
"def build_filelist(basepath):\n log.info(\"Building list of files containing EDM symbols in %s\", basepath)\n symbol_files = []\n for dir_path, _, filenames in os.walk(basepath):\n for filename in filenames:\n filepath = os.path.join(dir_path, filename)\n if filename.endswith(\".opi\") and utils.grep(filepath, \"EDM Symbol\"):\n symbol_files.append(filepath)\n\n return symbol_files",
"def _get_data_reference_list(\n self, data_asset_name: Optional[str] = None\n ) -> List[str]:\n raise NotImplementedError",
"def get_listfile(self, datadir):\n return []",
"def list(self, dataPath, ext=None, start=None, stop=None, recursive=False):\n scheme, bucket_name, keylist = self.getfiles(\n dataPath, ext=ext, start=start, stop=stop, recursive=recursive)\n\n return [\"%s:///%s/%s\" % (scheme, bucket_name, key) for key in keylist]",
"def get_imported_endpoints(self):\n with self.__import_lock:\n return [reg.get_import_reference() for reg in self.__imported_regs]",
"def document_symbols(self) -> UriDict[List[types.DocumentSymbol]]:\n return self._document_symbols",
"def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path",
"def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path",
"def get_drefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_from( ea, xrf )\r\n\treturn ret",
"def get_kb_location(self):\n return ['dav',]",
"def get_symbol(self):\n return []",
"def getURLs():",
"def _get_refpaths(data_dict, reference_file_types, observatory):\n if not reference_file_types: # [] interpreted as *all types*.\n return {}\n with crds_cache_locking.get_cache_lock():\n bestrefs = crds.getreferences(\n data_dict, reftypes=reference_file_types, observatory=observatory)\n refpaths = {filetype: filepath if \"N/A\" not in filepath.upper() else \"N/A\"\n for (filetype, filepath) in bestrefs.items()}\n return refpaths",
"def get_reference_housenumber_paths() -> List[str]:\n Config.__get()\n assert Config.__config is not None\n relpaths = Config.__config.get(\"wsgi\", \"reference_housenumbers\").strip().split(' ')\n return [get_abspath(relpath) for relpath in relpaths]",
"def get_drefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_to( ea, xrf )\r\n\treturn ret",
"def get_urls_and_paths():\n # Array to store tuples in (url, path) format.\n urls_and_paths = []\n\n for file_name in os.listdir('tickers'):\n # Sanity check. Only use text files.\n if file_name.endswith('.txt'):\n # Create a folder for each group (each txt file is a group)\n group = os.path.splitext(file_name)[0]\n\n # Create the folder for storing stock price data.\n os.makedirs('data/' + group)\n\n # Open the file.\n input_file = open('tickers/' + file_name)\n\n # For each line (stock), create the GET URL and store the save location.\n for line in input_file.read().splitlines():\n urls_and_paths.append((\n 'https:/www.wsj.com/market-data/quotes/' + line + '/historical-prices/download?num_rows=100000000000000&range_days=100000000000000&startDate=01/01/1970&endDate=01/01/2040',\n 'data/' + group + '/' + line.split('/')[-1] + '.csv'\n ))\n\n return urls_and_paths",
"def DirDS():\n\n global Asm\n\n target.BoundarySync()\n\n dec.Asm.Memory = 1\n dec.Asm.BOL_Address = dec.Asm.RM_Address\n dec.Asm.List_Address = dec.Asm.RM_Address",
"def _get_paths():\n paths = [\n '/'\n ]\n return paths",
"def DirItems():\n return diritems",
"def getFileListDAS(dataset,blacklist=[ ]):\n dataset = dataset.replace('__','/')\n if dataset[0]!='/':\n dataset = '/'+dataset\n instance = 'prod/global'\n if 'USER' in dataset:\n instance = 'prod/phys03'\n #cmd='das_client --limit=0 --query=\"file dataset=%s instance=%s\"'%(dataset,instance)\n cmd = 'das_client --limit=0 --query=\"file dataset=%s instance=%s status=*\"'%(dataset,instance)\n if args.verbose:\n print \"Executing \",cmd\n cmd_out = getoutput( cmd )\n tmpList = cmd_out.split(os.linesep)\n filelist = [ ]\n for line in tmpList:\n if '.root' in line and line not in blacklist:\n #files.append(\"root://cms-xrd-global.cern.ch/\"+line) # global\n filelist.append(\"root://xrootd-cms.infn.it/\"+line) # Eurasia\n filelist.sort()\n return filelist",
"def get_data_files():\n return [\n ('share/jupyter/nbextensions/{}'.format(PY_PACKAGE), TARGETS),\n ('share/jupyter/lab/extensions', [\n os.path.relpath(f, '.') for f in glob.glob(TAR_PATH)\n ])\n ]",
"def listdir(self):\n if self._isurl(self._baseurl):\n raise NotImplementedError(\n \"Directory listing of URLs, not supported yet.\")\n else:\n return os.listdir(self._baseurl)",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def getSymbolMap():\n name = os.path.join(os.path.dirname(__file__), 'nasdaq_nasdaqcom.csv')\n symbols = TickerSymbols(name)\n return symbols.getNameToTicker()",
"def _extract_kiss_path(self, start):\n for i in range(2, start):\n path_call = aprs.Callsign(self.frame[i * 7:])\n\n if path_call:\n if ord(self.frame[i * 7 + 6]) & 0x80:\n path_call.digi = True\n\n self.path.append(path_call)",
"def get_dev_examples(self, data_dir):\n raise NotImplementedError()",
"def get_dev_examples(self, data_dir):\n raise NotImplementedError()"
] | [
"0.52443194",
"0.52027243",
"0.51738554",
"0.50811166",
"0.507001",
"0.5039871",
"0.5038013",
"0.5036818",
"0.5025535",
"0.5022163",
"0.5022163",
"0.4984427",
"0.4979942",
"0.49156174",
"0.48973256",
"0.48810825",
"0.48625612",
"0.48537242",
"0.48472953",
"0.48335177",
"0.4831219",
"0.48120618",
"0.4807862",
"0.47926226",
"0.47776833",
"0.47735837",
"0.47645718",
"0.47631875",
"0.4756264",
"0.4756264"
] | 0.67579544 | 0 |
Configures CUDA environment variable and returns tensorflow GPU config. | def set_gpu(gpu):
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
return tf_config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_gpu_tf():\n\n try:\n # locate available devices & set required environment variables\n available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)\n available_device_id = available_device_ids[0]\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)\n print(f\"\\n GPU Found! running on GPU:{available_device_id}\\n\")\n\n # set GPU configuration (use all GPU memory if device 0, else use <50% of memory)\n tf.debugging.set_log_device_placement(False)\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]\n\n if available_device_id == 0:\n tf.config.experimental.set_memory_growth(physical_gpu, True)\n else:\n tf.config.experimental.set_virtual_device_configuration(\n physical_gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n assert len(logical_gpus) == 1, \"error creating virtual GPU to fractionally use memory\"\n\n # if we can't find a GPU, or they are all busy, default to using CPU\n except RuntimeError:\n print(\"\\n No GPUs available... running on CPU\\n\")\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'",
"def _setup_gpu_environment() -> None:\n gpu_memory_config = os.getenv(ENV_GPU_CONFIG)\n\n if not gpu_memory_config:\n return\n\n # Import from tensorflow only if necessary (environment variable was set)\n from tensorflow import config as tf_config\n\n parsed_gpu_config = _parse_gpu_config(gpu_memory_config)\n physical_gpus = tf_config.list_physical_devices(\"GPU\")\n\n # Logic taken from https://www.tensorflow.org/guide/gpu\n if physical_gpus:\n for gpu_id, gpu_id_memory in parsed_gpu_config.items():\n _allocate_gpu_memory(physical_gpus[gpu_id], gpu_id_memory)\n\n else:\n rasa.shared.utils.io.raise_warning(\n f\"You have an environment variable '{ENV_GPU_CONFIG}' set but no GPUs were \"\n f\"detected to configure.\"\n )",
"def get_config():\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth=True\n return config",
"def get_config():\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n return config",
"def _config_session(self):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = str(self.device_num)\n return tf.Session(config=config)",
"def cuda_if_gpu(T):\n\n return T.cuda() if use_cuda else T",
"def set_gpu_from_theano():\r\n # Transfer the theano gpu binding to pycuda, for consistency\r\n if config.device.startswith(\"gpu\") and len(config.device) > 3:\r\n os.environ[\"CUDA_DEVICE\"] = theano.config.device[3:]\r\n elif (config.init_gpu_device.startswith(\"gpu\") and\r\n len(config.init_gpu_device) > 3):\r\n os.environ[\"CUDA_DEVICE\"] = theano.config.init_gpu_device[3:]",
"def prepare_config(device='npu'):\n if device == 'npu':\n # config for Ascend processor\n config = tf.ConfigProto()\n custom_op = config.graph_options.rewrite_options.custom_optimizers.add()\n custom_op.name = \"NpuOptimizer\"\n custom_op.parameter_map[\"use_off_line\"].b = True\n custom_op.parameter_map[\"precision_mode\"].s = tf.compat.as_bytes(\"force_fp16\")\n custom_op.parameter_map[\"graph_run_mode\"].i = 0\n config.graph_options.rewrite_options.remapping = RewriterConfig.OFF\n custom_op.parameter_map[\"debug_dir\"].s = tf.compat.as_bytes(str(TMP))\n else:\n config = tf.ConfigProto()\n return config",
"def locate_cuda():\n nvcc_bin = 'nvcc'\n if sys.platform.startswith(\"win\"):\n nvcc_bin = 'nvcc.exe'\n\n # check env variables CUDA_HOME, CUDAHOME, CUDA_PATH.\n found = False\n for env_name in ['CUDA_PATH', 'CUDAHOME', 'CUDA_HOME']:\n if env_name not in os.environ:\n continue\n found = True\n home = os.environ[env_name]\n nvcc = os.path.join(home, 'bin', nvcc_bin)\n break\n if not found:\n # otherwise, search the PATH for NVCC\n nvcc = find_in_path(nvcc_bin, os.environ['PATH'])\n if nvcc is None:\n logging.warning('The nvcc binary could not be located in your '\n '$PATH. Either add it to '\n 'your path, or set $CUDA_HOME to enable CUDA extensions')\n return None\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {'home': home,\n 'nvcc': nvcc,\n 'include': os.path.join(home, 'include'),\n 'lib64': os.path.join(home, 'lib64')}\n cuda_ver = os.path.basename(os.path.realpath(home)).split(\"-\")[1].split(\".\")\n major, minor = int(cuda_ver[0]), int(cuda_ver[1])\n cuda_ver = 10 * major + minor\n assert cuda_ver >= 70, f\"too low cuda ver {major}.{minor}\"\n print(f\"cuda_ver: {major}.{minor}\")\n arch = get_cuda_arch(cuda_ver)\n sm_list = get_cuda_sm_list(cuda_ver)\n compute = get_cuda_compute(cuda_ver)\n post_args = [f\"-arch=sm_{arch}\"] + \\\n [f\"-gencode=arch=compute_{sm},code=sm_{sm}\" for sm in sm_list] + \\\n [f\"-gencode=arch=compute_{compute},code=compute_{compute}\",\n \"--ptxas-options=-v\", \"-O2\"]\n print(f\"nvcc post args: {post_args}\")\n if HALF_PRECISION:\n post_args = [flag for flag in post_args if \"52\" not in flag]\n\n if sys.platform == \"win32\":\n cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')\n post_args += ['-Xcompiler', '/MD', '-std=c++14', \"-Xcompiler\", \"/openmp\"]\n if HALF_PRECISION:\n post_args += [\"-Xcompiler\", \"/D HALF_PRECISION\"]\n else:\n post_args += ['-c', '--compiler-options', \"'-fPIC'\",\n \"--compiler-options\", \"'-std=c++14'\"]\n if HALF_PRECISION:\n post_args += [\"--compiler-options\", \"'-D HALF_PRECISION'\"]\n for k, val in cudaconfig.items():\n if not os.path.exists(val):\n logging.warning('The CUDA %s path could not be located in %s', k, val)\n return None\n\n cudaconfig['post_args'] = post_args\n return cudaconfig",
"def setup_device(gpuid=None):\n\n if gpuid is not None and not isinstance(gpuid, str):\n gpuid = str(gpuid)\n\n if gpuid is not None:\n nb_devices = len(gpuid.split(','))\n else:\n nb_devices = 1\n\n if gpuid is not None and (gpuid != '-1'):\n device = '/gpu:' + gpuid\n os.environ['CUDA_VISIBLE_DEVICES'] = gpuid\n\n # GPU memory configuration differs between TF 1 and 2\n if hasattr(tf, 'ConfigProto'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n tf.keras.backend.set_session(tf.Session(config=config))\n else:\n tf.config.set_soft_device_placement(True)\n for pd in tf.config.list_physical_devices('GPU'):\n tf.config.experimental.set_memory_growth(pd, True)\n else:\n device = '/cpu:0'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n return device, nb_devices",
"def setup_gpu_and_random(config):\n random.seed(config.general.manualSeed)\n np.random.seed(config.general.manualSeed)\n torch.manual_seed(config.general.manualSeed)\n torch.cuda.manual_seed(config.general.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n config.num_gpu = torch.cuda.device_count()\n\n if config.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n config.workers = config.workers * config.num_gpu\n config.batch_size = config.batch_size * config.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"",
"def config_keras() -> None:\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n kbackend.tensorflow_backend.set_session(tf.Session(config=config))",
"def locate_cuda():\n # adapted from\n # https://stackoverflow.com/questions/10034325/can-python-distutils-compile-cuda-code\n nvcc = None\n envs = ['CUDA_HOME', 'CUDA_ROOT', 'CUDAHOME', 'CUDAROOT']\n for env in envs:\n if env in os.environ:\n nvcc = os.path.join(os.environ[env], 'bin', 'nvcc')\n break\n else:\n # otherwise, search PATH for NVCC\n nvcc = find_in_path(['nvcc'])\n if nvcc is None:\n raise EnvironmentError(\n 'The nvcc executable could not be found. ' +\n 'Add it to $PATH or set one of the environment variables ' +\n ', '.join(envs))\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {}\n cudaconfig['home'] = home\n cudaconfig['nvcc'] = nvcc\n cudaconfig['include'] = os.path.join(home, 'include')\n # on Linux, CUDA has the libraries in lib64\n lib_dir = os.path.join(home, 'lib64')\n if not os.path.isdir(lib_dir):\n # on the MAC they are in lib\n lib_dir = os.path.join(home, 'lib')\n cudaconfig['lib'] = lib_dir\n\n for k, v in cudaconfig.items():\n if not os.path.exists(v):\n raise EnvironmentError(\n 'The CUDA %s path could not be located in %s' % (k, v))\n # print \"CUDA installation detected: \" + home\n return cudaconfig",
"def setup_device(n_gpus: int) -> object:\n if n_gpus >= 1 and torch.cuda.is_available():\n LOG.info('\\n CUDA is available! using GPU...')\n return torch.device('cuda')\n else:\n LOG.info('\\n Using CPU...')\n return torch.device('cpu')",
"def set_gpu(gpus):\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus",
"def cntk_gpu_mode_config(model, num_samples):\n model.model._make_train_function()\n trainer = model.model.train_function.trainer\n learner_no = len(trainer.parameter_learners)\n if learner_no < 1:\n raise ValueError(\"No learner in the trainer.\")\n if learner_no > 1:\n warnings.warn(\"Unexpected multiple learners in a trainer.\")\n learner = trainer.parameter_learners[0]\n dist_learner = cntk.train.distributed. \\\n data_parallel_distributed_learner(\n learner, num_quantization_bits=32, distributed_after=0)\n model.model.train_function.trainer = cntk.trainer.Trainer(\n trainer.model, [trainer.loss_function,\n trainer.evaluation_function], [dist_learner])\n\n rank = cntk.Communicator.rank()\n workers = cntk.Communicator.num_workers()\n if workers == 1:\n warnings.warn(\"Only one worker is found.\")\n total_items = num_samples\n start = rank * total_items // workers\n end = min((rank+1) * total_items // workers, total_items)\n return start, end",
"def set_device(in_arg): \n \n return torch.device(\"cuda\" if torch.cuda.is_available() and in_arg.gpu == 1 else \"cpu\")",
"def EnableCUDA(gpu_id=0, use_cudnn=True):\n global option\n option['device'] = 'CUDA'\n option['device_id'] = gpu_id\n option['use_cudnn'] = use_cudnn",
"def GetGPU():\n return option['device_id']",
"def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"def process_initializer():\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n import tensorflow as tf\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(physical_devices) > 0:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n tf.config.experimental.set_memory_growth(physical_devices[0], True)",
"def set_tf_config():\n tf_config = {\n \"cluster\": {\n \"worker\": [f\"localhost:{BASE_TF_SERVER_PORT + index}\" for index in range(num_workers)]\n },\n \"task\": {\"type\": \"worker\", \"index\": worker_index}\n }\n tf_config_text = json.dumps(tf_config)\n os.environ[\"TF_CONFIG\"] = tf_config_text\n print(f\"TF_CONFIG = {tf_config_text}\")\n return tf_config_text",
"def set_device(gpu_arg):\n\n dev = 'cpu'\n if gpu_arg and torch.cuda.is_available():\n dev = 'cuda'\n elif gpu_arg:\n print('Not gpu found. Using cpu instead.') \n\n return torch.device(dev)",
"def OnGPU(gpu_id):\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = workspace.GpuDeviceType\n device_option.device_id = gpu_id\n return device_option",
"def setup_device(self, conf: DictConfig) -> device:\n device = torch.device(conf.runner.device) if torch.cuda.is_available() else torch.device('cpu')\n\n return device",
"def variable_on_gpu(name, shape, initializer):\n # Use the /cpu:0 device for scoped operations\n with tf.device('/device:GPU:0'):\n # Create or get apropos variable\n var = tf.get_variable(name=name, shape=shape, initializer=initializer)\n return var",
"def get_default_config(self):\n config = super(NvidiaGPUCollector, self).get_default_config()\n config.update({\n 'path': 'nvidia',\n 'bin': '/usr/bin/nvidia-smi',\n 'stats': [\n 'index',\n 'memory.total',\n 'memory.used',\n 'memory.free',\n 'utilization.gpu',\n 'utilization.memory',\n 'temperature.gpu'\n ]\n })\n return config",
"def setup_gpu(use_gpu: int, silent=None) -> None:\n if silent is None:\n local_msg = Printer()\n else:\n local_msg = Printer(no_print=silent, pretty=not silent)\n if use_gpu >= 0:\n local_msg.info(f\"Using GPU: {use_gpu}\")\n require_gpu(use_gpu)\n else:\n local_msg.info(\"Using CPU\")\n if gpu_is_available():\n local_msg.info(\"To switch to GPU 0, use the option: --gpu-id 0\")",
"def setGPU(state):\n\n\timport tensorflow as tf\n\tfrom keras import backend as K\n\n\tcheckGPU()\n\n\tnum_cores = 1\n\tnum_CPU = 1\n\tnum_GPU = 0\n\tif state:\n\t\tnum_GPU = 1\n\n\tconfig = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\\\n\t inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\\\n\t device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})\n\tsession = tf.Session(config=config)\n\tK.set_session(session)",
"def _setup_cpu_environment() -> None:\n inter_op_parallel_threads = os.getenv(ENV_CPU_INTER_OP_CONFIG)\n intra_op_parallel_threads = os.getenv(ENV_CPU_INTRA_OP_CONFIG)\n\n if not inter_op_parallel_threads and not intra_op_parallel_threads:\n return\n\n from tensorflow import config as tf_config\n\n if inter_op_parallel_threads:\n try:\n inter_op_parallel_threads_number = int(inter_op_parallel_threads.strip())\n except ValueError:\n raise ValueError(\n f\"Error parsing the environment variable '{ENV_CPU_INTER_OP_CONFIG}'. \"\n f\"Please cross-check the value.\"\n )\n\n tf_config.threading.set_inter_op_parallelism_threads(\n inter_op_parallel_threads_number\n )\n\n if intra_op_parallel_threads:\n try:\n intra_op_parallel_threads_number = int(intra_op_parallel_threads.strip())\n except ValueError:\n raise ValueError(\n f\"Error parsing the environment variable '{ENV_CPU_INTRA_OP_CONFIG}'. \"\n f\"Please cross-check the value.\"\n )\n\n tf_config.threading.set_intra_op_parallelism_threads(\n intra_op_parallel_threads_number\n )"
] | [
"0.7806248",
"0.71696466",
"0.7148312",
"0.7142579",
"0.69135433",
"0.686044",
"0.6859421",
"0.6798698",
"0.6659943",
"0.66523975",
"0.6578853",
"0.64233935",
"0.6382924",
"0.63151455",
"0.62676334",
"0.6249967",
"0.6245296",
"0.620838",
"0.6173646",
"0.6164236",
"0.61608374",
"0.61437505",
"0.6142109",
"0.61203104",
"0.6118899",
"0.6115009",
"0.61040556",
"0.6089206",
"0.60879165",
"0.6017152"
] | 0.78912264 | 0 |
Create baseline convolutional recurrent model. Arguments | def create_baseline_model(filters, gru_units, dropout, bias, mels, nb_classes):
inp = Input(shape=(259, mels, 1))
x = Conv2D(filters, (3,3), padding='same', activation='relu', use_bias=bias)(inp)
x = MaxPooling2D(pool_size=(1,5))(x)
x = Conv2D(filters, (3,3), padding='same', activation='relu', use_bias=bias)(x)
x = MaxPooling2D(pool_size=(1,2))(x)
x = Conv2D(filters, (3,3), padding='same', activation='relu', use_bias=bias)(x)
x = MaxPooling2D(pool_size=(1,2))(x)
x = Reshape((x_train.shape[-3], -1))(x)
x = Bidirectional(GRU(units=gru_units, activation='tanh', dropout=dropout,
recurrent_dropout=dropout, return_sequences=bias), merge_mode='mul')(x)
x = TimeDistributed(Dense(512, activation='relu', use_bias=bias))(x)
x = Dropout(rate=dropout)(x)
x = TimeDistributed(Dense(256, activation='relu', use_bias=bias))(x)
x = Dropout(rate=dropout)(x)
x = TimeDistributed(Dense(128, activation='relu', use_bias=bias))(x)
x = Dropout(rate=dropout)(x)
output = Dense(nb_classes, activation='sigmoid')(x)
model = Model(inputs=[inp], outputs=output)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_baseline(cls: Type['ResNet'], *, d_in: int, n_blocks: int, d_main: int, d_hidden: int, dropout_first: float, dropout_second: float, d_out: int) ->'ResNet':\n return cls(d_in=d_in, n_blocks=n_blocks, d_main=d_main, d_hidden=d_hidden, dropout_first=dropout_first, dropout_second=dropout_second, normalization='BatchNorm1d', activation='ReLU', d_out=d_out)",
"def baseline_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = keras.models.Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(12, input_dim=12, kernel_initializer=init, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model",
"def model_CNN(x_train, y_train, x_test=None, y_test=None, kwargs={}):\n \"\"\"\n Notes on Input shape\n 4D tensor with shape (batch_size, timesteps, features, `colors`).\n 4D tensor with shape: (samples, rows, cols, channels)\n `channels_last` (default)\n Output 4D tensor with shape: (samples, new_rows, new_cols, filters)\n \"\"\"\n ######## CNN for stocks\n # create and fit CNN\n # input_shape = StockDate x Lookback x Features\n from keras.layers import Conv2D, MaxPooling2D\n from keras.optimizers import SGD\n\n\n layers = kwargs.get('layers', 10 ) #TODO\n nodes = kwargs.get('nodes', None) #TODO\n\n if nodes is None or nodes==0 or nodes==[0]:\n nodes = [np.shape(x_train)[1]*3]\n elif isinstance(nodes, (int, np.integer)): # turn int to list\n nodes = [nodes]\n\n if layers > 1 and len(nodes) < layers:\n nodes = list(np.pad(nodes,[0,layers-len(nodes)], mode='constant',constant_values=nodes[-1]))\n\n ndim = np.max([2,len(np.shape(x_train))]) # Min 2D\n if ndim==2:\n input_shape=(x_train.shape[1],)\n elif ndim==3:\n input_shape=(x_train.shape[1],x_train.shape[2])\n elif ndim==4:\n input_shape=(x_train.shape[1],x_train.shape[2],x_train.shape[3])\n else:\n input_shape=x_train.shape[1:]\n if kwargs.get('learning_rate', False):\n lr = kwargs.get('learning_rate')\n else:\n lr = False\n\n if False:\n conv = (3, 3)\n else:\n conv = (2, 2)\n n_conv = 5\n\n if np.ndim(y_train)==1:\n n_out = 1 #e.g. forecast y as float, just 1 step ahead.\n else:\n n_out = np.shape(y_train)[1] #e.g. onehot encoded, or n-steps ahead.\n\n dropout = kwargs.get('dropout',0) # dropout rate between 0 and 1.\n #stateful = kwargs.get('stateful',True)\n actvn = 'relu' #kwargs.get('actvn','relu')\n actvl = kwargs.get('actvl','sigmoid')\n model=[]\n model = Sequential() # https://keras.io/models/sequential/\n model.reset_states()\n # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.\n # this applies 32 convolution filters of size 3x3 each.\n model.add(Conv2D(n_conv, conv, activation=actvn, input_shape=input_shape))\n #model.add(Conv2D(n_conv, conv, activation=actvn))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout ))\n\n model.add(Conv2D(n_conv*2, conv, activation=actvn))\n #model.add(Conv2D(n_conv*2, conv, activation=actvn))\n #model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout ))\n\n model.add(Flatten())\n model.add(Dense(np.min(input_shape), activation=actvn))\n model.add(Dropout(dropout*2))\n model.add(Dense(n_out, activation=actvl))\n\n if hasattr(kwargs,'optimizer'):\n optimizer = kwargs['optimizer']\n elif lr:\n optimizer = SGD(lr=lr, decay=1e-6, momentum=0.01, nesterov=True)\n else:\n optimizer = 'Nadam' #keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n\n if is_bool_dtype(y_train):\n model.compile(loss='binary_crossentropy', optimizer=optimizer)\n if is_categorical_dtype(y_train) or kwargs.get('onehot',False):\n #TODO Multiple Category\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n else:\n #model.compile(loss='mean_squared_error', optimizer=optimizer)\n model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[r2_keras])\n\n\n if kwargs.get('verbose',False) > 1:\n model.summary()\n print(\"Inputs: {}\".format(model.input_shape))\n print(\"Outputs: {}\".format(model.output_shape))\n print(\"Actual input: {}\".format(x_train.shape))\n print(\"Actual output: {}\".format(y_train.shape))\n print('Model Loss: ' + model.loss)\n\n # For compatability with other models;\n model.score = model.evaluate\n\n return 
model #self.model=model",
"def build_baseline_model(num_of_input):\n \n # create model\n model = Sequential()\n model.add(Dense(2, input_dim=num_of_input, activation='relu'))\n model.add(Dense(1, activation='linear'))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer='adam')\n return model",
"def PXRCmodel(isize, nc, conv_init, ndf=128, bn=True, se=False):\n \n def squeeze_excite_block(tensor, ratio=16):\n \n init = tensor\n filters = init._keras_shape[3]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n x = multiply([init, se])\n return x\n \n x = inputs = Input(shape=(isize, isize, nc))\n x = Conv2D(filters=ndf, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x) \n x = LeakyReLU(alpha=0.2)(x)\n \n \n x = Conv2D(filters=ndf*2, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*2, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n\n x = Conv2D(filters=ndf*4, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*4, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n \n x = Conv2D(filters=ndf*8, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*8, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n \n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(x)\n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(y)\n \n if (bn==True):\n y = BatchNormalization()(y)\n \n y = LeakyReLU()(y)\n y = MaxPool2D()(y)\n y = LeakyReLU()(y)\n \n ###########\n \n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n \n if (se==True):\n y = squeeze_excite_block(y)\n \n if (bn==True):\n y = BatchNormalization()(y)\n \n y = LeakyReLU()(y)\n y = MaxPool2D()(y)\n y = LeakyReLU()(y)\n \n \n y = GlobalAveragePooling2D()(y)\n predictions = Dense(2, activation='softmax')(y)\n \n return Model(inputs=inputs, outputs=predictions)",
"def TCN(input_dim): \r\n # Number of dilations in order to use for the temporal blocks.\r\n dilations = np.array([1, 2, 4, 8, 16, 32])\r\n\r\n input_dim.insert(0,1)\r\n print(f\"input_dim: {input_dim}\")\r\n input_layer = Input(shape=input_dim)\r\n cropping = 0\r\n assert (sum(dilations) * block_size + 1) == 127, \"Paper specifies receptive field size should be 127\"\r\n \r\n prev_layer, skip_layer, _ = add_temporal_block(input_layer, None, 1, 1, cropping)\r\n \r\n for dilation in dilations:\r\n prev_layer, skip_layer, cropping = add_temporal_block(prev_layer, skip_layer, 2, dilation, cropping)\r\n\r\n output_layer = PReLU(shared_axes=[2, 3])(skip_layer)\r\n output_layer = SpectralNormalization(Conv1D(fixed_filters, kernel_size=1))(output_layer)\r\n output_layer = PReLU(shared_axes=[2, 3])(output_layer)\r\n output_layer = SpectralNormalization(Conv1D(1, kernel_size=1))(output_layer)\r\n\r\n return Model(input_layer, output_layer)",
"def context(model: Sequential) -> Sequential:\n model.add(ZeroPadding2D(padding=(33, 33)))\n model.add(Conv2D(42, (3, 3), activation='relu', name='ct_conv1_1'))\n model.add(Conv2D(42, (3, 3), activation='relu', name='ct_conv1_2'))\n model.add(AtrousConvolution2D(84, 3, 3, atrous_rate=(2, 2), activation='relu', name='ct_conv2_1'))\n model.add(AtrousConvolution2D(168, 3, 3, atrous_rate=(4, 4), activation='relu', name='ct_conv3_1'))\n model.add(AtrousConvolution2D(336, 3, 3, atrous_rate=(8, 8), activation='relu', name='ct_conv4_1'))\n model.add(AtrousConvolution2D(672, 3, 3, atrous_rate=(16, 16), activation='relu', name='ct_conv5_1'))\n model.add(Conv2D(672, (3, 3), activation='relu', name='ct_fc1'))\n model.add(Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2D(256, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2DTranspose(128, (7, 7), strides=(7, 7), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n # last conv\n model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))\n \n return model",
"def __init__(self, growth_rate, *args, **kwargs):\n super().__init__()\n self.normconv1 = NormActConv2D(4 * growth_rate, kernel_size=1, use_bias=False *args, **kwargs)\n self.normconv2 = NormActConv2D(growth_rate, kernel_size=3, use_bias=False, padding='same', *args, **kwargs)\n self.concat = Concatenate()",
"def __call__(self, ens_x_input, vgg_x_input, inc_x_input, tcd_x_input):\n reuse = True if self.built else None\n logits = None\n aux_logits = None\n weights = [[0.7, 0.1], [0.2, 0.1]]\n all_inputs = [[ens_x_input, tcd_x_input], [inc_x_input, tcd_x_input]]\n scopes = [inception_resnet_v2.inception_resnet_v2_arg_scope(), inception.inception_v3_arg_scope()]\n reuse_flags = [reuse, True]\n for model_idx, model in enumerate([inception_resnet_v2.inception_resnet_v2, inception.inception_v3]):\n with slim.arg_scope(scopes[model_idx]):\n for idx, inputs in enumerate(all_inputs[model_idx]):\n result = model(inputs, num_classes=self.num_classes, is_training=False, reuse=reuse_flags[idx])\n weight = weights[model_idx][idx]\n # :1 is for slicing out the background class\n if logits == None:\n logits = result[0][:, 1:] * weight\n aux_logits = result[1]['AuxLogits'][:, 1:] * weight\n else:\n logits += result[0][:, 1:] * weight\n aux_logits += result[1]['AuxLogits'][:, 1:] * weight\n\n with slim.arg_scope(vgg.vgg_arg_scope()):\n weight = 0.1\n result = vgg.vgg_16(vgg_x_input, num_classes=1000, is_training=False)\n logits += result[0] * weight\n\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n weight = 0.05\n result = resnet_v2.resnet_v2_152(vgg_x_input, num_classes=self.num_classes, reuse=reuse)\n logits += tf.squeeze(result[0])[:, 1:] * weight\n\n self.built = True\n aux_weight = 0.8\n logits += aux_logits * aux_weight\n\n predictions = layers_lib.softmax(logits)\n return predictions",
"def build_resnet(self):\r\n\r\n # INPUTS\r\n inputs_data = Input((self.data_rows, self.data_cols, 1),name='inputs_data')\r\n\r\n\r\n def residual_block(input, output_channels=64, kernel_size=(3, 3), stride=(1, 1)):\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(input)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(x)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Add()([x, input])\r\n\r\n residual_block.counter += 1\r\n return x\r\n\r\n residual_block.counter = 0\r\n\r\n conv1=Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu')(inputs_data)\r\n res_block1=residual_block(conv1,output_channels=64)\r\n res_block2 =residual_block(res_block1, output_channels=64)\r\n res_block3 =residual_block(res_block2, output_channels=64)\r\n conv2=Conv2D(1,(3,3),strides=(1,1),padding='same')(res_block3)\r\n outputs=Add()([conv2,inputs_data])\r\n\r\n\r\n model = Model(inputs=inputs_data, outputs=outputs)\r\n\r\n\r\n return model",
"def fit_recurrent(self, x, y):\n # print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma(self.training_hit_probability)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, hm_y)\n\n # hm_subtypes = []\n # proba_subtypes = []\n\n # while np.mean(y_) > 0.01:\n # for label in np.unique(y):\n\n hm_1hot = []\n hm_1hot.append(self._one_hot(self.training_hit_probability, y)[0])\n y_ = y.copy()\n\n self.recurrent_base = []\n self.recurrent_hpc = []\n for ii in range(self.recurrent_modes):\n print('Stage 1 iter: ' + str(ii))\n #self.recurrent_base.append(BaseSvc())\n\n if np.sum(y_) > 2:\n self.basemodel = BaseSvc()\n hm_y, proba_tmp = self._fit_mode(x_, y_)\n hm_candidate = self._one_hot(proba_tmp, y_)[1]\n else:\n hm_candidate = np.zeros_like(y_)\n\n self.recurrent_base.append(self.basemodel)\n\n #if np.sum(hm_candidate) >= 2:\n hm_1hot.append(hm_candidate)\n\n # remove the selected subgroup from the target list\n y_[hm_1hot[-1] == 1] = 0\n\n # make the default base model the first\n self.basemodel = self.recurrent_base[0]\n\n print('Stage 2')\n # Stage 2\n # hm_1hot = hm_subtypes\n # train stage2\n self.confidencemodel.fit(x_, hm_1hot)",
"def __init__(\n self,\n in_shape: Tuple,\n kernel_size: int,\n out_channels: int = None,\n stride: int = 1,\n aux_shape: Optional[Tuple] = None,\n downsampling_mode: str = \"convolutional\",\n upsampling_mode: str = \"convolutional\",\n transposed: bool = False,\n residual: bool = True,\n weightnorm: bool = True,\n gated: bool = True,\n activation: nn.Module = nn.ReLU,\n dropout: Optional[float] = None,\n ):\n super().__init__(in_shape=in_shape, transposed=transposed, residual=residual, aux_shape=aux_shape)\n\n # some parameters\n self.channels_in = in_shape[0]\n self.channels_out = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.resample_mode = upsampling_mode if transposed else downsampling_mode\n self.transposed = transposed\n self.residual = residual\n self.gated = gated\n self.activation_pre = activation() if self.residual else None\n\n # first convolution is always non-transposed and stride 1\n self.conv1 = TransposeableNormedSameConv2d(\n in_shape=in_shape,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n transposed=False,\n resample_mode=\"convolutional\",\n weightnorm=weightnorm,\n )\n\n # aux op\n if aux_shape is not None:\n self.activation_aux = activation()\n\n if list(aux_shape[1:]) > list(self.conv1.out_shape[1:]):\n # Downsample height and width (and match channels)\n aux_stride = tuple(np.asarray(aux_shape[1:]) // np.asarray(self.conv1.out_shape[1:]))\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=kernel_size,\n stride=aux_stride,\n transposed=False,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n elif list(aux_shape[1:]) < list(self.conv1.out_shape[1:]):\n # Upsample height and width (and match channels)\n aux_stride = tuple(np.asarray(self.conv1.out_shape[1:]) // np.asarray(aux_shape[1:]))\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=kernel_size,\n stride=aux_stride,\n transposed=True,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n elif aux_shape[0] != self.conv1.out_shape[0]:\n # Change only channels using 1x1 convolution\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=1,\n stride=1,\n transposed=False,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n else:\n # aux_shape and out_shape are the same\n assert aux_shape == self.conv1.out_shape\n self.aux_op = None\n else:\n self.aux_op = None\n\n self.activation_mid = activation()\n\n # dropout\n self.dropout = nn.Dropout(dropout) if dropout else dropout\n\n # second convolution is potentially transposed and potentially resampling\n gated_channels = 2 * out_channels if self.gated else out_channels\n self.conv2 = TransposeableNormedSameConv2d(\n in_shape=self.conv1.out_shape,\n out_channels=gated_channels,\n kernel_size=kernel_size,\n stride=self.stride,\n weightnorm=weightnorm,\n transposed=transposed,\n resample_mode=self.resample_mode,\n ) # doubled out channels for gating\n\n # output shape\n self._out_shape = (out_channels, *self.conv2.out_shape[1:]) # always out_channels regardless of gating\n\n # residual connections\n self.residual_op = ResidualConnectionConv2d(self._in_shape, self._out_shape, residual)",
"def _build_model(self):\n \n #convolutional part\n conv_inputs = keras.Input(shape = self._state_shape[0])\n c1 = layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu')(conv_inputs)\n c2 = layers.Conv2D(filters = 8, kernel_size = 2, strides = (1,1), padding = \"same\", activation = 'relu')(c1)\n flat = layers.Flatten()(c2)\n\n\n #current green phase layer\n # phase_inputs = keras.Input(shape = (self._state_shape[1],))\n \n #elapsed green time layer\n elapsed_time_inputs = keras.Input(shape = (self._state_shape[2],))\n \n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.Dense(10, activation='relu')(elapsed_time_inputs)\n \n #combine green layer with conv layer\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n dense = layers.Dense(32, activation='relu')(all_combined)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='simple_CNN') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model",
"def __init__(\n self, \n dim_feat_raw, \n dim_feat_smooth, \n dim_label_raw, \n dim_label_smooth, \n arch_gnn, \n aug_feat,\n num_ensemble, \n train_params\n ):\n super().__init__()\n self.mulhead = 1\n self.num_layers = arch_gnn[\"num_layers\"]\n self.dropout, self.dropedge = train_params[\"dropout\"], train_params['dropedge']\n self.mulhead = int(arch_gnn[\"heads\"]) # only useful for GAT\n\n self.branch_sharing = arch_gnn['branch_sharing'] # only for ensemble\n\n self.type_feature_augment = aug_feat\n assert dim_feat_raw <= dim_feat_smooth, \"smoothened feature cannot have smaller shape than the original one\"\n # NOTE: dim_label_raw may be larger than dim_label_smooth ==> label is not used as input\n self.num_classes = dim_label_raw\n self.dim_label_in = dim_label_smooth\n self.dim_feat_in = dim_feat_smooth\n self.dim_hidden = arch_gnn['dim']\n # build the model below\n dim, act = arch_gnn['dim'], arch_gnn['act']\n self.aug_layers, self.conv_layers, self.res_pool_layers = [], [], []\n for i in range(num_ensemble):\n # feat aug\n if len(self.type_feature_augment) > 0:\n self.aug_layers.append(nn.ModuleList(\n nn.Linear(_dim, self.dim_feat_in) for _, _dim in self.type_feature_augment\n ))\n # graph convs\n convs = []\n if i == 0 or not self.branch_sharing:\n for j in range(arch_gnn['num_layers']):\n cls_gconv = DeepGNN.NAME2CLS[arch_gnn['aggr']]\n dim_in = (self.dim_feat_in + self.dim_label_in) if j == 0 else dim\n convs.append(cls_gconv(dim_in, dim, dropout=self.dropout, act=act, mulhead=self.mulhead))\n self.conv_layers.append(nn.Sequential(*convs))\n else: # i > 0 and branch_sharing\n self.conv_layers.append(self.conv_layers[-1])\n # skip-pooling layer\n type_res = arch_gnn['residue'].lower()\n type_pool = arch_gnn['pooling'].split('-')[0].lower()\n cls_res_pool = layers.ResPool\n args_pool = {}\n if type_pool == 'sort':\n args_pool['k'] = int(arch_gnn['pooling'].split('-')[1])\n self.res_pool_layers.append(\n cls_res_pool(dim, dim, arch_gnn['num_layers'], type_res, type_pool,\n dropout=self.dropout, act=act, args_pool=args_pool\n ))\n if len(self.aug_layers) > 0:\n self.aug_layers = nn.ModuleList(self.aug_layers)\n self.conv_layers = nn.ModuleList(self.conv_layers)\n self.res_pool_layers = nn.ModuleList(self.res_pool_layers)\n # ------- ensembler + classifier -------\n if num_ensemble == 1:\n self.ensembler = layers.EnsembleDummy()\n else:\n self.ensembler = layers.EnsembleAggregator(dim, dim, num_ensemble, dropout=self.dropout, \n type_dropout=train_params[\"ensemble_dropout\"], act=arch_gnn[\"ensemble_act\"])\n self.classifier = DeepGNN.NAME2CLS['mlp'](dim, self.num_classes, act='I', dropout=0.)\n # ---- optimizer, etc. ----\n self.lr = train_params[\"lr\"]\n self.sigmoid_loss = arch_gnn[\"loss\"] == \"sigmoid\"\n self.loss, self.opt_op = 0, None\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n self.num_ensemble = num_ensemble",
"def base_model(input_shape):\n\n # Layer 0: input layer\n input_layer = Input(shape=input_shape)\n # layer 1: Conv2D layer\n x = layers.Conv2D(filters=64, kernel_size=(10,10), activation='relu')(input_layer)\n # layer 2: MaxPool2D layer\n x = layers.MaxPool2D(pool_size=(2,2))(x)\n # layer 3: Conv2D layer\n x = layers.Conv2D(filters=128, kernel_size=(7,7), activation='relu')(x)\n # layer 4: MaxPool2D layer\n x = layers.MaxPool2D(pool_size=(2,2))(x)\n # layer 5: Conv2D layer\n x = layers.Conv2D(filters=128, kernel_size=(4,4), activation='relu')(x)\n # layer 6: MaxPool2D layer\n x = layers.MaxPool2D(pool_size=(2,2))(x)\n # layer 7: Conv2D layer\n x = layers.Conv2D(filters=256, kernel_size=(4,4), activation='relu')(x)\n # Layer 8: flatten layer\n x = layers.Flatten()(x)\n # layer 9: Fully connected layer\n x = layers.Dense(4096, activation='sigmoid')(x)\n\n base_model = keras.Model(inputs=input_layer, outputs=x)\n return base_model",
"def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)",
"def build_one(frames=64, bands=40, n_classes=10, dropout=0.0, tstride = 1, fstride = 4):\n\n from keras.layers import Conv2D, Dense, Dropout, Flatten\n\n\n # In the paper there are some differences\n # uses log-mel as input instead of MFCC\n # uses 4 in stride for frequency\n # has a linear bottleneck as second layer to reduce multiplications,\n # instead of doing a single full-frequency convolution\n # probably uses ReLu for the DNN layers?\n # probably does not use ReLu for the conv layer?\n\n # Note, in keyword spotting task tstride=2,4,8 performed well also\n \n conv_f = 8\n conv_t = 32\n kernels = 90\n bottleneck = 32\n\n input_shape = (frames, bands, 1)\n\n model = keras.Sequential([\n Conv2D(kernels, (conv_t, conv_f), strides=(tstride, fstride),\n padding='valid', activation='relu', use_bias=True,\n input_shape=input_shape),\n Dense(bottleneck, activation=None, use_bias=True),\n Dropout(dropout),\n Dense(128, activation='relu', use_bias=True),\n Dropout(dropout),\n Dense(128, activation='relu', use_bias=True),\n Dropout(dropout),\n Dense(n_classes, activation='softmax', use_bias=True),\n ])\n return model",
"def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)",
"def add_context(model: Sequential) -> Sequential:\n # model.add(ZeroPadding2D(padding=(33, 33)))\n # model.add(Conv2D(42, (3, 3), activation='relu', name='ct_conv1_1'))\n # model.add(Conv2D(42, (3, 3), activation='relu', name='ct_conv1_2'))\n # model.add(AtrousConvolution2D(84, 3, 3, atrous_rate=(2, 2), activation='relu', name='ct_conv2_1'))\n # model.add(AtrousConvolution2D(168, 3, 3, atrous_rate=(4, 4), activation='relu', name='ct_conv3_1'))\n # model.add(AtrousConvolution2D(336, 3, 3, atrous_rate=(8, 8), activation='relu', name='ct_conv4_1'))\n # model.add(AtrousConvolution2D(672, 3, 3, atrous_rate=(16, 16), activation='relu', name='ct_conv5_1'))\n # model.add(Conv2D(672, (3, 3), activation='relu', name='ct_fc1'))\n model.add(Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2D(256, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2DTranspose(128, (7, 7), strides=(7, 7), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n # last conv\n model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))\n \n return model",
"def make_baseline(cls: Type['MLP'], d_in: int, d_layers: List[int], dropout: float, d_out: int) ->'MLP':\n assert isinstance(dropout, float)\n if len(d_layers) > 2:\n assert len(set(d_layers[1:-1])) == 1, 'if d_layers contains more than two elements, then all elements except for the first and the last ones must be equal.'\n return MLP(d_in=d_in, d_layers=d_layers, dropouts=dropout, activation='ReLU', d_out=d_out)",
"def resnet_head(input_shape):\n input_layer = layers.Input(shape=input_shape)\n\n model = layers.Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(input_layer)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 16)\n model = residual_block(model, 16)\n\n model = layers.Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(model)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 32)\n model = residual_block(model, 32)\n\n model = layers.Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(model)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 32)\n model = residual_block(model, 32)\n\n model = layers.Flatten()(model)\n\n return input_layer, model",
"def rl_modelrl_l1_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams",
"def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d",
"def ResNet18(input_shape = (28, 28, 1), classes = 24):\n \n # Define the input as a tensor with shape input_shape\n X = X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n #X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, [64, 64], stage=2, block='a')\n X = identity_block(X, [64, 64], stage=2, block='b')\n\n # Stage 3\n X = convolutional_block(X, [128, 128], stage=3, block='a')\n X = identity_block(X, [128, 128], stage=3, block='b')\n\n # Stage 4\n X = convolutional_block(X, [256, 256], stage=4, block='a')\n X = identity_block(X, [256, 256], stage=4, block='b')\n\n # Stage 5\n X = convolutional_block(X, [512, 512], stage=5, block='a')\n X = identity_block(X, [512, 512], stage=5, block='b')\n\n # AVGPOOL\n # X = AveragePooling2D(pool_size=(2,2), name='avg_pool')(X)\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet18')\n\n return model",
"def add_model(self, input_data, target_data=None):\n # Consider signal matrix as an image with channels.\n height = self.config.num_steps\n width = self.config.time_stamps\n channels = self.config.channels\n batch_size = self.config.batch_size\n\n # input_data: (-1, height, width, channels)\n input_data = tf.reshape(input_data, [-1, channels, height, width])\n input_data = tf.transpose(input_data, perm=[0, 2, 3, 1])\n\n # module-1\n # conv/ReLU-1\n x = layer_conv_relu(input_data, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_1 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-2\n # conv/ReLU-1\n x = layer_conv_relu(x_1, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_2 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-3\n # conv/ReLU-1\n x = layer_conv_relu(x_2, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_3 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-4\n # conv/ReLU-1\n x = layer_conv_relu(x_3, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_4 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-5\n # conv/ReLU-1\n x = layer_conv_relu(x_4, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-6\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_4], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-7\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_3], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-8\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_2], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-9\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, 
self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_1], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/tanh-3\n x = Convolution2D(self.config.n1, (self.config.f1, self.config.f3),\n activation='relu', padding='same',\n kernel_regularizer=self.config.regularizer,\n bias_regularizer=self.config.regularizer,\n data_format='channels_last',\n )(x)\n x = Activation('tanh')(x)\n\n # output x: (-1, height, width, channels)\n output = Convolution2D(channels, (self.config.f2, self.config.f2),\n activation='linear',\n padding='same',\n name='output',\n kernel_regularizer=self.config.regularizer,\n bias_regularizer=self.config.regularizer,\n data_format='channels_last'\n )(x)\n\n prediction = tf.transpose(output, perm=[0, 3, 1, 2])\n prediction = tf.reshape(prediction, [batch_size, height, width])\n return prediction",
"def first_model():\n model=Sequential()\n # model.add(Flatten(input_shape=(160,320,3)))\n model.add(Lambda(lambda x: (x-128.0)/128.0,input_shape=(160, 320, 3)))\n model.add(Cropping2D(cropping=((70,25), (0,0))))\n model.add(Convolution2D(32, 3, 3))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(0.5))\n model.add(Activation('relu'))\n model.add(Flatten())\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(1))\n\n model.compile(loss=\"mse\",optimizer=\"adam\")\n return model",
"def __init__(self, channels, momentum):\n super(PointNetConv2Layer, self).__init__()\n self.channels = channels\n self.momentum = momentum",
"def SampleCNN(cfg):\n # Variable-length input for feature visualization.\n x_in = Input(shape=(None, 1), name='input')\n\n num_features = cfg.init_features\n x = Conv1D(num_features, kernel_size=3, strides=3, padding='same', use_bias=True,\n kernel_regularizer=l2(cfg.weight_decay), kernel_initializer=taejun_uniform(scale=1.), name='conv0')(x_in)\n x = BatchNormalization(name='norm0')(x)\n x = Activation('relu', name='relu0')(x)\n\n # Stack convolutional blocks.\n layer_outputs = []\n for i in range(cfg.num_blocks):\n num_features *= 2 if (i == 2 or i == (cfg.num_blocks - 1)) else 1\n x = cfg.block_fn(x, num_features, cfg, f'block{i}')\n layer_outputs.append(x)\n\n if cfg.multi: # Use multi-level feature aggregation or not.\n x = Concatenate(name='multi')([GlobalMaxPool1D(name=f'final_pool{i}')(output)\n for i, output in enumerate(layer_outputs[-3:])])\n else:\n x = GlobalMaxPool1D(name='final_pool')(x)\n\n # The final two FCs.\n x = Dense(x.shape[-1].value, kernel_initializer='glorot_uniform', name='final_fc')(x)\n x = BatchNormalization(name='final_norm')(x)\n x = Activation('relu', name='final_relu')(x)\n if cfg.dropout > 0.:\n x = Dropout(cfg.dropout, name='final_drop')(x)\n x = Dense(cfg.num_classes, kernel_initializer='glorot_uniform', name='logit')(x)\n x = Activation(cfg.activation, name='pred')(x)\n\n return Model(inputs=[x_in], outputs=[x], name='sample_cnn')",
"def build(self,\r\n conv_filters=196,\r\n conv_size=13,\r\n conv_strides=4,\r\n act='relu',\r\n rnn_layers=2,\r\n LSTM_units=128,\r\n drop_out=0.8):\r\n i = Input(shape=self.input_size, name='input')\r\n x = Conv1D(conv_filters,\r\n conv_size,\r\n strides=conv_strides,\r\n name='conv1d')(i)\r\n x = BatchNormalization()(x)\r\n x = Activation(act)(x)\r\n for _ in range(rnn_layers):\r\n x = Bidirectional(LSTM(LSTM_units,\r\n return_sequences=True))(x)\r\n x = Dropout(drop_out)(x)\r\n x = BatchNormalization()(x)\r\n y_pred = TimeDistributed(Dense(self.output_size,\r\n activation='softmax'))(x)\r\n # ctc inputs\r\n labels = Input(name='the_labels', shape=[None, ], dtype='int32')\r\n input_length = Input(name='input_length', shape=[1], dtype='int32')\r\n label_length = Input(name='label_length', shape=[1], dtype='int32')\r\n # Keras doesn't currently support loss funcs with extra parameters\r\n # so CTC loss is implemented in a lambda layer\r\n loss_out = Lambda(ctc_lambda_func,\r\n output_shape=(1,),\r\n name='ctc')([y_pred,\r\n labels,\r\n input_length,\r\n label_length])\r\n self.tm = Model(inputs=i,\r\n outputs=y_pred)\r\n self.m = Model(inputs=[i,\r\n labels,\r\n input_length,\r\n label_length],\r\n outputs=loss_out)\r\n return self.m, self.tm",
"def build(self, input_shape: tf.Tensor):\n self.conv = tf.keras.layers.Conv2D(\n self.channels, (1, 1), input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)"
] | [
"0.647522",
"0.6304403",
"0.6284953",
"0.6194349",
"0.60414153",
"0.5895362",
"0.587735",
"0.58602476",
"0.58557093",
"0.5830293",
"0.5820108",
"0.5811813",
"0.5765692",
"0.576528",
"0.5761624",
"0.57612926",
"0.5757742",
"0.5744773",
"0.573896",
"0.57358533",
"0.573499",
"0.5729594",
"0.5724417",
"0.57186455",
"0.5718335",
"0.57107675",
"0.57096064",
"0.5706419",
"0.5701735",
"0.56998324"
] | 0.64495075 | 1 |
Plot the accuracy during training for the train and val datasets. Arguments | def plot_accuracy(model_fit, save_folder):
train_acc = model_fit.history['binary_accuracy']
val_acc = model_fit.history['val_binary_accuracy']
epoch_axis = np.arange(1, len(train_acc) + 1)
plt.title('Train vs Validation Accuracy')
plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')
plt.plot(epoch_axis, val_acc,'r', label='Val Acc')
plt.xlim([1, len(train_acc)])
plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.savefig(save_folder + '/accuracy.png')
plt.show()
plt.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_accuracy(self):\n plot_title, img_title = self.prep_titles(\"\")\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [self.tr_accuracy, self.test_accuracy]\n\n # prints x and y-axis values\n print(f'x: {x}')\n print(f'training: {self.tr_accuracy}')\n print(f'test: {self.test_accuracy}')\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_ACCURACY[line], label=test_legend[line])\n\n if CFG.ANNOTATE:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS + 2),\n ylim=(0, 119))\n\n for line in range(2):\n for e in range(0, CFG.EPOCHS):\n if y[line][e] > CFG.ANNOTATE_LEVEL:\n value = \"{:.2f}\".format(y[line][e])\n label = \"epoch \" + str(e + 1) + \"\\n\" + value + \"%\"\n plt.annotate(label,\n xy=(x[e], y[line][e]),\n alpha=1,\n size=9,\n rotation=45,\n textcoords='offset pixels', xytext=(0, 7),\n ha='left', va='bottom')\n else:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, 102))\n\n ax.legend(loc='best')\n\n self.save_plot(img_title)\n plt.show()",
"def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)",
"def plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"swag_accuracy_plot.png\")",
"def main():\n args = parse_args()\n\n with open(args.train_details_json, mode='r', encoding='utf-8') as json_f:\n results_dict = json.load(json_f)[-1]\n\n losses_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_loss']) + 1),\n results_dict['train_loss'])\n plt.plot(range(1, len(results_dict['val_loss']) + 1),\n results_dict['val_loss'])\n plt.plot(range(1, len(results_dict['test_loss']) + 1),\n results_dict['test_loss'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'loss vs epoch for {args.model} model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.grid(True)\n losses_plot.set_size_inches((8, 8))\n losses_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_losses_plot.png'))\n\n accuracies_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_acc']) + 1),\n results_dict['train_acc'])\n plt.plot(range(1, len(results_dict['val_acc']) + 1),\n results_dict['val_acc'])\n plt.plot(range(1, len(results_dict['test_acc']) + 1),\n results_dict['test_acc'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'accuracy vs epoch for {args.model} '\n f'model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.grid(True)\n accuracies_plot.set_size_inches((8, 8))\n accuracies_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_accuracies_plot.png'))",
"def make_accuracy_plot(num_trials=10):\n data = load_digits()\n # print data.DESCR\n train_percentages = range(5, 95, 5)\n test_accuracies = numpy.zeros(len(train_percentages))\n\n for i in range(len(train_percentages)):\n individual_trial_accuracies = []\n for j in range(num_trials):\n X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=train_percentages[i]*.01)\n model = LogisticRegression(C=10**-10)\n model.fit(X_train, y_train)\n individual_trial_accuracies.append(model.score(X_test, y_test))\n test_accuracies[i] = numpy.mean(individual_trial_accuracies)\n\n fig = plt.figure()\n plt.plot(train_percentages, test_accuracies, 'b')\n plt.xlabel('Percentage of Data Used for Training')\n plt.ylabel('Accuracy on Test Set')\n plt.show()",
"def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()",
"def plot_on_ax(ax, trn_ls, val_ls, ylabel=\"Accuracy\"):\n ax.plot(trn_ls, 'o-', label='Training')\n ax.plot(val_ls, 'x-', label='Validation')\n ax.set_xlabel('Epochs')\n ax.set_ylabel(ylabel)\n ax.legend()",
"def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()",
"def plot_acc(model_dir):\n ## extract loss from csv\n file_dir = os.path.join(model_dir, 'acc.csv')\n data = pd.read_csv(file_dir)\n epochs = data['epoch'].ravel()\n acc_train = data['acc_train'].ravel()\n acc_test = data['acc_test'].ravel()\n # epoch,acc_train,acc_test\n\n ## Theoretical Loss\n fig, ax = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)\n ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)\n ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)\n ax.set_ylabel('Accuracy', fontsize=10)\n ax.set_xlabel('Epoch', fontsize=10)\n ax.legend(loc='lower right', prop={\"size\": 15}, ncol=3, framealpha=0.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.tight_layout()\n\n ## create saving directory\n acc_dir = os.path.join(model_dir, 'figures', 'acc')\n os.makedirs(acc_dir, exist_ok=True)\n file_name = os.path.join(acc_dir, 'accuracy.png')\n plt.savefig(file_name, dpi=400)\n print(\"Plot saved to: {}\".format(file_name))\n file_name = os.path.join(acc_dir, 'accuracy.pdf')\n plt.savefig(file_name, dpi=400)\n plt.close()\n print(\"Plot saved to: {}\".format(file_name))",
"def plot_acc (history, acc='acc', val_acc='val_acc'):\n \n history_dict = history.history\n acc = history_dict[acc]\n val_acc = history_dict[val_acc]\n loss_values = history_dict['loss']\n epochs = range(1, len(loss_values) + 1)\n\n plt.plot (epochs, acc, 'bo', label='Training accuracy')\n plt.plot (epochs, val_acc, 'b', label=\"validation accuracy\")\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()",
"def accuracy_plot(training, test, layers, data_size, n_neighbours, learning_rate, dropout_rate):\n\n plt.figure()\n plt.plot(training, label=\"Training\")\n plt.plot(test, label=\"Test\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Accuracy function (%)\", size='medium')\n plt.suptitle(\"Accuracy function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n if n_neighbours == 0:\n plt.figtext(0.83, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.83, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.83, 0.70, \"{}\\nsamples\".format(data_size), size='medium')\n plt.legend(loc='right', bbox_to_anchor=(1.3, 0.5))\n plt.subplots_adjust(right=0.8)\n\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/accuracy_plots/{}_accuracy_{}\".format(n_neighbours, data_size))",
"def plot_eval_3(trained_model, X_val, y_val, image_name):\n # FOR EACH CLASS\n # val_pred = trained_model.predict_proba(X_val, num_iteration=iteration)\n \n iterations = trained_model.booster_.current_iteration()\n# results = np.zeros((2, iterations))\n results = np.zeros((iterations,))\n for pos in range(iterations):\n \n # Calculate the current iteration (from 1 to iterations)\n iteration = pos + 1\n \n # Predict validation set for the current iteration\n# start_time = timeit.default_timer()\n val_pred = trained_model.predict(X_val, num_iteration=iteration)\n# end_time = timeit.default_timer()\n# time = end_time - start_time\n# speed = int(X_val.shape[0] / time)\n \n # Number of hits\n val_ok = (val_pred == y_val)\n \n # Percentage of hits\n val_acc = val_ok.sum() / val_ok.size\n \n # Actualize data for plotting results\n# results[0][pos] = time\n# results[1][pos] = val_acc\n results[pos] = val_acc\n \n # Generate accuracy plot\n plt.figure()\n# plt.plot(results[0], results[1], 'b')\n plt.plot(results, 'b')\n plt.title('Validation accuracy')\n plt.xlabel('iterations')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save validation plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_val_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')",
"def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)",
"def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \n green = '#72C29B'\n orange = '#FFA577'\n \n with plt.xkcd():\n # plot model loss\n fig, ax1 = plt.subplots()\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n # plot model accuracy\n fig, ax2 = plt.subplots()\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n plt.show()",
"def cross_validation_visualization_accuracy_multiple(epochs, accs, save=False, filename=\"cross_validation_acc_multiple\"):\n \n for i in range(accs.shape[0]):\n plt.plot(epochs, accs[i], marker=\".\", color='r', label=str(i+1)+'th accuracy')\n \n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)",
"def display_convergence_acc(train_accs, valid_accs):\n if len(valid_accs) > 0:\n plt.plot(len(train_accs), train_accs, color=\"red\")\n plt.plot(len(valid_accs), valid_accs, color=\"blue\")\n plt.legend([\"Train\", \"Valid\"])\n else:\n plt.plot(len(train_accs), train_accs, color=\"red\")\n plt.legend([\"Train\"])\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.show()",
"def plot_results(\n train_data: tuple[Tensor, Tensor],\n test_data: tuple[Tensor, Tensor],\n correct_class: Tensor\n):\n #fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(21,7), subplot_kw=dict(box_aspect=1))\n fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,7), subplot_kw=dict(box_aspect=1))\n fig2, ax3 = plt.subplots(figsize=(7,7), subplot_kw=dict(box_aspect=1))\n ax1.set_title('Training data')\n plot_dataset(train_data, ax1)\n\n ax2.set_title('Test data')\n plot_dataset(test_data, ax2)\n\n ax3.set_title('Test prediction correctness')\n plot_dataset((test_data[0], correct_class.int()), ax3, cmap={0: '#ff0000', 1: '#00ff00'})\n \n fig1.savefig('plots/datasets')\n fig2.savefig('plots/predictions')\n plt.show()",
"def plot(self, ylog=False, category=\"Accuracy\", figsize=(12, 5)):\n if self.CV == False: # no Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'r-', label='Training Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'r-', label='Training Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n if self.CV == True: # has Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].plot(range(1, len(self.cvError) + 1), self.cvError, 'r-', label='CV Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'g-', label='Training Accuracy')\n ax[1].plot(range(1, len(self.cvAcc) + 1), self.cvAcc, 'r-', label='CV Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'g-', label='Training Error Rate')\n ax[1].plot(range(1, len(self.cvAcc) + 1), 1 - np.array(self.cvAcc), 'r-', label='CV Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n\n return fig, ax",
"def create_val_plots(x_vals, vals_zeros,vals_ones):\n plt.plot(x_vals, vals_zeros,label=\"non-fraud\")\n plt.plot(x_vals, vals_ones,label=\"fraud\")\n plt.title('Accuracy per number of iterations')\n plt.xlabel('Number of Iterations')\n plt.ylabel('Accuracy')\n plt.xticks(np.arange(100, 210, 10))\n plt.legend() \n plt.show()\n # plt.savefig('./analysis_deliverable/visualizations/accuracy_plot.png')",
"def plot_train_results(metrics2record, loss_metric,\n train_metrics, test_metrics):\n pyplot.figure(figsize=(10, 5))\n min_, max_ = np.min(loss_metric), np.max(loss_metric)\n lg, = pyplot.plot(loss_metric)\n pyplot.yticks(min_ + np.arange(5) * (max_ - min_))\n # if learning_rate is not None:\n # lg, = pyplot.plot(learning_rate)\n pyplot.title('Loss')\n pyplot.xlabel('Epoch')\n pyplot.yscale('log')\n pyplot.show()\n\n for prm in basic_metrics:\n if prm in metrics2record:\n leg = []\n met_idx = metrics2record.index(prm)\n pyplot.figure(figsize=(10, 5))\n lg, = pyplot.plot(train_metrics[:, met_idx], label=('train'))\n leg.append(lg)\n lg, = pyplot.plot(test_metrics[:, met_idx], label=('test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title(prm)\n pyplot.xlabel('Epoch')\n pyplot.show()\n\n has_prf = any([(prm in PRF_metrics) for prm in metrics2record])\n if has_prf:\n pyplot.figure(figsize=(10, 5))\n leg = []\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(train_metrics[:, met_idx],\n label=(prm + ':train'))\n leg.append(lg)\n\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(test_metrics[:, met_idx],\n label=(prm + ':test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title('Precision / Recall')\n pyplot.xlabel('Epoch')\n pyplot.show()",
"def cross_validation_experiment(train_data, train_labels):\n accuracies = []\n for i in range(1, 200):\n avg = cross_validation(train_data, train_labels, i, 10)\n accuracies.append(avg)\n fig = plt.figure()\n dim = np.arange(1,len(accuracies)+1)\n plt.plot(dim,accuracies, label='Accuracy')\n plt.xlabel('k')\n plt.ylabel('accuracy')\n plt.grid()\n plt.legend()\n plt.tight_layout()\n fig.savefig('knn_cross_validation.png')\n best_k = np.argmax(accuracies)+1\n return best_k",
"def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy",
"def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):\n\n green = '#72C29B'\n orange = '#FFA577'\n\n with plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Model loss through #epochs', fontweight='bold')\n\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Model accuracy through #epochs', fontweight='bold')\n\n plt.tight_layout()\n plt.show()\n fig.savefig(save_figure_path)\n plt.close(fig)",
"def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)",
"def acc_loss_graph(self):\n acc = self.history['accuracy']\n val_acc = self.history['val_accuracy']\n loss = self.history['loss']\n val_loss = self.history['val_loss']\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(acc, label='Train')\n plt.plot(val_acc, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.ylim([min(plt.ylim()), 1])\n plt.title('Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(loss, label='Train')\n plt.plot(val_loss, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.ylim([0, max(plt.ylim())])\n plt.title('Loss')\n plt.show();",
"def train_nn(train_nn_results, label, title, yaxis):\n plt.figure(figsize=(12,5))\n for i in range(len(label)):\n plt.plot(train_nn_results[i], label=label[i], alpha=0.75)\n plt.title(title)\n plt.xlabel('epoch')\n plt.ylabel(yaxis)\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.tight_layout()\n plt.show()",
"def create(self, train: List[float], validation: List[float]) -> None:\n self.ax.plot(train)\n self.ax.plot(validation)\n self.ax.set_xlabel('epochs')\n if self.loss:\n self.ax.set_ylabel('loss')\n else:\n self.ax.set_ylabel('accuracy')\n self.ax.legend(['train', 'validation'])",
"def test(self, plot=False):\n accuracy_list = []\n fobj_avg = self.load_stats()\n\n for ii in range(settings.PARS['maxIters']):\n model = self.load_model(ii)\n D1 = model['D']\n W1 = model['W']\n\n # classification\n tic = time.time()\n accuracy_list.append(self.classification(D1, W1)[1])\n toc = time.time()\n print(\n 'Final recognition rate for OnlineDL is : {} , objective function value: {}, time: {}'\n .format(accuracy_list[ii], fobj_avg[ii], toc-tic)\n )\n\n accuracy_list = np.asarray(accuracy_list)\n\n print('Best recognition rate for OnlineDL is {} at iteration {}'.format(\n accuracy_list.max(), accuracy_list.argmax()))\n\n if plot:\n # plot the objective function values for all iterations\n plt.clf()\n plt.plot(list(fobj_avg.keys()), list(fobj_avg.values()), 'mo--', linewidth=2)\n plt.xlabel('Iterations')\n plt.ylabel('Average objective function value')\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.show()\n\n plt.clf()\n plt.plot(accuracy_list, 'rs--', linewidth=2)\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.xlabel('Iterations')\n plt.ylabel('Accuracy')\n plt.show()",
"def plot_metric_values(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.accuracies), 1)\n plt.plot(epochs_range, self.accuracies[threshold:], color='red', marker='o')\n plt.title('Accuracy on test data. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.show()",
"def predict(x_train, y_train, x_test, y_test, fn, params):\n y_train_predicted = fn(x_train, None, *params)\n y_train_predicted = (y_train_predicted >= 0.5) * 1\n y_test_predicted = fn(x_test, None, *params)\n y_test_predicted = (y_test_predicted >= 0.5) * 1\n\n train_acc = np.sum(y_train_predicted == y_train) / x_train.shape[0]\n test_acc = np.sum(y_test_predicted == y_test) / x_test.shape[0]\n print('train accuracy =', train_acc)\n print('test accuracy =', test_acc)\n scatter_plot(x_train, y_train_predicted, x_test, y_test_predicted, 'predicted 0', 'predicted 1')"
] | [
"0.82207394",
"0.7538035",
"0.7494659",
"0.74623656",
"0.7418454",
"0.7315145",
"0.72615117",
"0.72388804",
"0.7238736",
"0.71817374",
"0.7173168",
"0.71567535",
"0.7148802",
"0.71439624",
"0.7055238",
"0.70549214",
"0.7051441",
"0.7018947",
"0.70176554",
"0.70033777",
"0.7001087",
"0.6958237",
"0.6939887",
"0.6918762",
"0.69102395",
"0.68957645",
"0.68901664",
"0.6884202",
"0.68812746",
"0.6877989"
] | 0.7705123 | 1 |
Plot and save the ROC with AUC value. Arguments | def plot_ROC(model, x_test, y_test, save_folder):
predicted = model.predict(x_test).ravel()
actual = y_test.ravel()
fpr, tpr, thresholds = roc_curve(actual, predicted, pos_label=None)
roc_auc = auc(fpr, tpr)
plt.title('Test ROC AUC')
plt.plot(fpr, tpr, 'b', label='AUC = %0.3f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig(save_folder + '/ROC.png')
plt.show()
plt.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_roc(X,y,test_preds,fname=\"res/roc.png\"):\n\t#Retrieve multiple fpr and tpr values for different thresholds\n\tfpr, tpr, thresholds = roc_curve(y,test_preds)\n\tplt.plot(fpr, tpr)\n\tplt.title(auc(fpr, tpr))\n\tplt.savefig(fname, bbox_inches='tight')\n\tplt.close()",
"def plot_roc_acc(self,x_test,y_test):\n if self.validated:\n preds = self.model_cv.predict_proba(x_test[self.feats])\n fpr, tpr, thresholds = metrics.roc_curve(y_test, preds[:,1])\n auc = metrics.roc_auc_score(y_test, preds[:,1])\n\n # get accuracy of class0 and class1 predictions at different thresholds\n acc_0 = []\n acc_1 = []\n for ind,th in enumerate(thresholds):\n binary_preds = pred_from_prob(preds,th)\n acc_1.append(accuracy_score(y_test[np.where(y_test==1)],binary_preds[np.where(y_test==1)]))\n acc_0.append(accuracy_score(y_test[np.where(y_test==0)],binary_preds[np.where(y_test==0)]))\n\n # set up plotting parameters\n sns.set()\n mpl.rcParams['figure.figsize']=[15.0,5.0]\n mpl.rcParams['lines.linewidth']=2.0\n mpl.rcParams['xtick.labelsize']=13\n mpl.rcParams['ytick.labelsize']=13\n mpl.rcParams['axes.labelsize']=15\n mpl.rcParams['axes.labelweight']='heavy'\n mpl.rcParams['axes.titlesize']=18\n mpl.rcParams['axes.titleweight']='heavy'\n mpl.rcParams['legend.fontsize']=12\n\n # build the plots- ROC in subplot 1, accuracy in subplot 2\n plt.subplot(1,2,1)\n plt.plot(fpr,tpr,'r*', label='roc auc={:.4f}'.format(auc))\n plt.plot(np.linspace(0,1,20),np.linspace(0,1,20),'k--')\n plt.xlabel('FPR')\n plt.ylabel('TPR')\n plt.title('ROC')\n plt.legend(frameon=False)\n plt.xlim([0.0,1.0])\n plt.ylim([0.0,1.0])\n\n plt.subplot(1,2,2)\n plt.plot(thresholds,acc_0,'go--',label='cases: 0',alpha=0.3)\n plt.plot(thresholds,acc_1,'yo--',label='cases: 1')\n plt.plot(thresholds[::5],fpr[::5],'c*',alpha=0.2,label='fpr')\n\n plt.xlim([0,1])\n plt.xlim([0,1])\n\n plt.xlabel('threshold')\n plt.ylabel('accuracy')\n plt.title('accuracy')\n\n plt.legend(frameon=False)\n\n plt.show()\n return None\n else:\n raise err.NotYetFittedModelError('You should train the model with method train_CV first \\n')",
"def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()",
"def Classification_ROC_Report(X,Y,model): \n \n # Plot Classification report, Confustion Matrix, ROC\n labels = {0: 'CNV', 1: 'DME', 2: 'DRUSEN', 3: 'NORMAL'}\n\n # get predictions on the test set\n y_hat = model.predict(X)\n #\n Y_pred_classes = np.argmax(y_hat,axis = 1) \n Y_true = np.argmax(Y,axis = 1)\n\n # Classification report\n ax=plt.figure(figsize=(15,5))\n ax = plt.subplot(1,3,1)\n rpt = sklearn.metrics.classification_report(np.argmax(Y, axis=1), np.argmax(y_hat, axis=1), target_names=list(labels.values()))\n ax.axis('off')\n ax.annotate(rpt, \n xy = (1.0,0.5), \n xytext = (0, 0), \n xycoords='axes fraction', textcoords='offset points',\n fontsize=13, ha='right', va='center') \n\n # Plot confusion matrix\n cm_df = Confusion_Matrix(Y,y_hat,labels,normalization=True)\n ax = plt.subplot(1,3,2)\n sns.heatmap(cm_df, annot=True)\n score = model.evaluate(X, Y, verbose=1)\n ax.set_title('Confusion Matrix\\nresult: {0:.2f} - loss: {0:.2f}'.format(score[1], score[0]))\n ax.set_ylabel('True label')\n ax.set_xlabel('Predicted label')\n\n # Plot ROC\n lw=2\n n_classes = 4\n fpr, tpr, roc_auc = ROC(Y,y_hat,n_classes)\n\n # Plot all ROC curves\n ax = plt.subplot(1,3,3)\n ax.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\n ax.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\n colors = cycle(['aqua', 'darkorange', 'cornflowerblue', '#4DBD33'])\n for i, color in zip(range(n_classes), colors):\n ax.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(labels[i], roc_auc[i]))\n\n ax.plot([0, 1], [0, 1], 'k--', lw=lw)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_title('ROC')\n ax.legend(loc=\"lower right\")\n\n plt.tight_layout()\n plt.show()\n\n return",
"def plot_roc(self, out_tag):\n roc_fig = self.plotter.plot_roc(self.y_train, self.y_pred_train, self.train_weights, \n self.y_test, self.y_pred_test, self.test_weights, out_tag=out_tag\n )\n\n Utils.check_dir('{}/plotting/plots/{}'.format(os.getcwd(), out_tag))\n roc_fig.savefig('{0}/plotting/plots/{1}/{1}_ROC_curve.pdf'.format(os.getcwd(),out_tag))\n print('saving: {0}/plotting/plots/{1}/{1}_ROC_curve.pdf'.format(os.getcwd(),out_tag))\n plt.close()\n\n #for MVA ROC comparisons later on\n np.savez(\"{}/models/{}_ROC_comp_arrays\".format(os.getcwd(), out_tag), self.y_pred_test, self.y_pred_test, self.test_weights)",
"def roc_plot(label, fpr, tpr, roc_auc):\n plt.figure()\n for i in range(len(label)):\n plt.plot(fpr[i], tpr[i], label=label[i] + ' AUC = %0.2f' % roc_auc[i], alpha=0.75)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([-0.01, 1.01])\n plt.ylim([-0.01, 1.01])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc='lower right')\n plt.show()",
"def plot_ROC(y, prediction_prob, cnames, dataset_name, fname):\r\n plt.figure(figsize=(20, 10))\r\n for i in range(len(cnames)):\r\n fpr, tpr, _ = roc_curve(y, prediction_prob[i][:,1])\r\n auc = roc_auc_score(y, prediction_prob[i][:,1])\r\n plt.plot(fpr, tpr, label='%s (AUC = %.2f)'%(cnames[i], auc))\r\n plt.plot([0, 1], [0, 1], 'k--', lw=2)\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('ROC Curve - Binary Classification - %s'%(dataset_name))\r\n plt.legend(loc=\"lower right\")\r\n plt.savefig(fname, bbox_inches='tight')\r\n plt.close()",
"def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()",
"def plot_roc(model, X_test, Y_test, verbose=False):\n\n y_true, y_pred = Y_test, model.predict(X_test)\n if verbose:\n print(\"CLASSIFICATION REPORT\")\n print(classification_report(y_true, y_pred))\n\n y_pred_prob = model.predict_proba(X_test)[:,1]\n\n fpr, tpr, _ = roc_curve(Y_test, y_pred_prob)\n\n if verbose:\n print(\"TESTING PROBABILITIES:\")\n for a,b in zip(Y_test,y_pred_prob):\n print(a,b)\n \n if verbose:\n print(\"ROC RAW DATA:\")\n for a,b in zip(fpr, tpr):\n print(a,b)",
"def plot_ROC_curve(model, X_train, X_test, y_train, y_test):\n \n # Model Metrics\n print model\n print \"*************************** Model Metrics *********************************\"\n print 'Accuracy: %s' % cross_val_score(model, X_train, y_train, scoring = 'accuracy', cv = 5).mean()\n print 'Precision: %s' % cross_val_score(model, X_train, y_train, scoring = 'precision', cv = 5).mean()\n print 'Recall: %s' % cross_val_score(model, X_train, y_train, scoring = 'recall_weighted', cv = 5).mean()\n print 'F1: %s' % cross_val_score(model, X_train, y_train, scoring = 'f1', cv = 5).mean()\n\n fitted = model.fit(X_train, y_train)\n try:\n y_score = fitted.predict_proba(X_test)[:,1]\n except:\n y_score = fitted.decision_function(X_test)\n \n # Confusion matrix\n print \"********************* Normalized Confusion Matrix *************************\"\n cm = confusion_matrix(y_test, fitted.predict(X_test))\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n \n print('Normalized confusion matrix')\n print(cm_normalized)\n plt.matshow(cm, cmap=plt.cm.Blues)\n plt.colorbar()\n plt.xlabel('Predicted Values')\n plt.ylabel('Actual Values')\n \n # Classification Report\n print \"********************* Classification Report********************************\" \n print classification_report(y_test, fitted.predict(X_test))\n \n print \"********************* ROC Curve *******************************************\"\n \n # ROC Curve\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n \n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()",
"def AUC_ROC(true_vessel_arr, pred_vessel_arr):\n fpr, tpr, _ = roc_curve(true_vessel_arr.flatten(), pred_vessel_arr.flatten())\n \n AUC_ROC=roc_auc_score(true_vessel_arr.flatten(), pred_vessel_arr.flatten())\n print(\"AUC_ROC:\",AUC_ROC)\n\n plt.title('Receiver Operating Characteristic')\n plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' )\n plt.legend(loc = 'lower right')\n plt.plot([0, 1], [0, 1],'b--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()",
"def plotROC(attr, target, pred, save_as=None):\n fig, ax = plt.subplots()\n auc = metrics.roc_auc_score(target, pred)\n acc = metrics.accuracy_score(target, (pred >= 0.5).astype(int))\n fpr, tpr, _ = metrics.roc_curve(target, pred)\n plt.plot(fpr, tpr, lw = 2, label = attr.title())\n plt.legend(loc = 4, fontsize = 15)\n plt.title(('ROC Curve for {attr} (Accuracy = {acc:.3f}, AUC = {auc:.3f})'\n .format(attr = attr.title(), acc= acc, auc = auc)),\n fontsize = 15)\n plt.xlabel('False Positive Rate', fontsize = 15)\n plt.ylabel('True Positive Rate', fontsize = 15)\n if not save_as is None:\n plt.savefig(save_as)\n else:\n plt.show()",
"def do_roc(scores, true_labels, file_name='', directory='', plot=True):\n\n # fpr_0, tpr_0, _ = roc_curve(true_labels, scores[0])\n # roc_auc_0 = auc(fpr_0, tpr_0)\n # fpr_1, tpr_1, _ = roc_curve(true_labels, scores[1])\n # roc_auc_1 = auc(fpr_1, tpr_1)\n # fpr_2, tpr_2, _ = roc_curve(true_labels, scores[2])\n # roc_auc_2 = auc(fpr_2, tpr_2)\n # fpr_3, tpr_3, _ = roc_curve(true_labels, scores[3])\n # roc_auc_3 = auc(fpr_3, tpr_3)\n # fpr_4, tpr_4, _ = roc_curve(true_labels, scores[4])\n # roc_auc_4 = auc(fpr_4, tpr_4)\n # fpr_5, tpr_5, _ = roc_curve(true_labels, scores[5])\n # roc_auc_5 = auc(fpr_5, tpr_5)\n\n fpr, tpr, _ = roc_curve(true_labels, scores)\n roc_auc = auc(fpr, tpr) # compute area under the curve\n\n plt.figure(figsize=(10,10))\n plt.rcParams.update({'font.size': 50})\n # plt.plot(fpr_0, tpr_0, label='ROC curve (area = %0.3f)' % (roc_auc_0))\n # plt.plot(fpr_1, tpr_1, label='ROC curve (area = %0.3f)' % (roc_auc_1))\n # plt.plot(fpr_2, tpr_2, label='ROC curve (area = %0.3f)' % (roc_auc_2))\n # plt.plot(fpr_3, tpr_3, label='ROC curve (area = %0.3f)' % (roc_auc_3))\n # plt.plot(fpr_4, tpr_4, label='ROC curve (area = %0.3f)' % (roc_auc_4))\n plt.plot(fpr, tpr, color='lightblue',label='area = %0.3f' % (roc_auc))\n\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n# plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n\n plt.savefig(directory + file_name + 'roc.eps', bbox_inches='tight', format='eps')\n plt.close()\n\n # return roc_auc",
"def plotROC(yscore, true, predtrue, datasets, title, outfile):\n fig = plt.figure()\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n \n for i in range(len(datasets)):\n acc = accuracy_score(true[i], predtrue[i])\n fpr, tpr, _ = roc_curve(true[i], yscore[i][:,1])\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, label=datasets[i]+' (area = %0.2f, acc = %0.2f)' % (roc_auc,acc),linewidth=2)\n \n plt.legend(loc=\"lower right\")\n \n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()",
"def auroc_helper(labels, preds):\r\n fpr, tpr, threshold = metrics.roc_curve(labels, preds)\r\n df = pd.DataFrame({'fpr': fpr,\r\n 'tpr': tpr,\r\n 'threshold': threshold})\r\n # uncomment for the best model to save\r\n # df.to_csv('/home/delvinso/nephro/output/all_custom_thresholds.csv', index = False)\r\n epoch_auc = metrics.auc(fpr, tpr)\r\n print('AUC: {}'.format(epoch_auc))\r\n\r\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\r\n f = plt.figure()\r\n lw = 2\r\n plt.plot(fpr, tpr, color='darkred',\r\n lw=lw, label='ROC curve (area = %0.3f)' % epoch_auc)\r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('1 - Specificity (FPR)')\r\n plt.ylabel('Sensitivity (TPR)')\r\n plt.title('ROC - Bladder vs Other')\r\n plt.legend(loc=\"lower right\")\r\n return f",
"def roc_writer(clf, y_true, y_prob, eval_folder, i=''):\n # make sure that y_true, y_prob are iterable\n if type(y_true[0]) is not np.ndarray: y_true = [y_true]\n if type(y_prob[0]) is not np.ndarray: y_prob = [y_prob]\n\n # make an image folder if you have to\n img_dir = eval_folder+'/images/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n # get the classifier family name i.e., LogisticRegression\n clf_name = str(clf)[:str(clf).index('(')]+str(i)\n\n enum_list = range(0, len(y_true))\n roc_auc = []\n\n mean_tpr = 0.0\n mean_fpr = np.linspace(0,1,1000)\n\n # plot each fold's ROC curve, calculate mean ROC curve\n for i in enum_list:\n fpr, tpr, _ = metrics.roc_curve(y_true[i], y_prob[i])\n mean_tpr += interp(mean_fpr, fpr, tpr) \n mean_tpr[0] = 0.0\n roc_auc.append(metrics.roc_auc_score(y_true[i], y_prob[i]))\n plt.plot(fpr, tpr)\n\n mean_tpr /= len(y_true)\n mean_tpr[-1] = 1.0\n\n # make plot, plot mean ROC curve\n plt.title('Receiver Operating Characteristic')\n plt.plot(mean_fpr, mean_tpr, 'k--', label='Mean AUC = %0.2f\\nSE AUC = %0.3f'% (np.mean(roc_auc), np.std(roc_auc)))\n plt.legend(loc='lower right')\n plt.plot([0,1],[0,1],'r--')\n plt.xlim([0,1.0])\n plt.ylim([0,1.0])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n\n plt.savefig(img_dir+'ROC_Curve_'+clf_name+'.png')",
"def draw_roc(signal, background, output_dir=\".\", output_name=\"roc\", form=\".pdf\"):\n\n x, y = get_roc(signal, background)\n file_path = output_dir + \"/\"+ output_name + \"_X.cvs\"\n numpy.savetxt(file_path, x, delimiter=\",\")\n file_path = output_dir + \"/\"+ output_name + \"_Y.cvs\"\n numpy.savetxt(file_path, y, delimiter=\",\")\n output_name = output_name + form\n\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(7, 7), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])",
"def compute_and_plot_ic_AUCROC(y_true, y_score, ic_name,\n filename, title='AUC/ROC Curve'):\n print(' Computing {} AUC score / ROC curve...'.format(ic_name))\n start = time()\n\n # calculate AUC\n auc = roc_auc_score(y_true, y_score)\n print(' AUC: %.3f' % auc)\n # calculate roc curve\n fpr, tpr, _ = roc_curve(y_true, y_score)\n print(' ROC curve size: {}'.format(len(fpr)))\n if len(fpr) > 10000: # Sample when curve has *a lot* of data-points\n # Take every other 50th data-point. Reduce size by factor 50.0\n fpr = fpr[0:len(fpr):50]\n tpr = tpr[0:len(tpr):50]\n\n print(' → sampled roc curve to {} data-points'.format(len(fpr)))\n\n end = time()\n print(' Computed in {}'.format(timedelta(seconds=end - start)))\n\n # plot no skill\n plt.plot([0, 1], [0, 1], linestyle='--')\n # plot the roc curve for the model\n plt.plot(fpr, tpr, marker='.')\n plt.title(title)\n plt.xlabel('AUC score: {:.3f}'.format(auc))\n\n # Save\n plt.savefig(join(VISUALS_FOLDERPATH, '{}-aucroc.svg'\n .format(filename)))\n plt.clf()",
"def roc(test_set_y_org,test_set_y_pred_prob,classes_unique,plot_curve=False,filename=\"./fig_roc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import roc_curve\n #from sklearn.metrics import auc\n from sklearn.metrics import roc_auc_score\n from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n \n for c in range(n_classes):\n fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n \n # Compute macro-average ROC curve and AUROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for c in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[c], tpr[c])\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n \n # Compute micro-average PRC curve and PRC areas\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob.ravel())\n roc_auc[\"macro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"macro\") # micro macro, weighted, or samples\n roc_auc[\"micro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob,average=\"micro\") # micro macro, weighted, or samples\n roc_auc[\"weighted\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"weighted\") # micro macro, weighted, or samples\n roc_auc[\"samples\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.plot([0, 1], [0, 1], 'k--')\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(fpr[\"macro\"], tpr[\"macro\"], linewidth=1,color=colors[n_classes],label='macro-avg ROC (area={0:0.4f})'.format(roc_auc[\"macro\"]))\n \n for c in range(n_classes):\n if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(fpr[c], tpr[c],linewidth=1,color=colors[c],label='ROC of {0} (area={1:0.4f})'.format(classes_unique[c], roc_auc[c]))\n \n # add some text for labels, title and axes ticks\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"True Positive Rate\",fontsize=12)\n ax.set_xlabel(\"False Positive Rate\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower right\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n roc_auc_list=[roc_auc[c] for c in range(n_classes)]\n roc_auc_list.extend([roc_auc[\"macro\"],roc_auc[\"micro\"],roc_auc[\"weighted\"],roc_auc[\"samples\"]])\n roc_auc=np.array(roc_auc_list)\n names=[\"AUROC_\" + c for c in classes_unique]\n names.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n names=np.array(names)\n return roc_auc,names",
"def roc(y_true, y_prob, ARGS):\n roc_auc = roc_auc_score(y_true, y_prob)\n if ARGS.graphs:\n fpr, tpr, _ = roc_curve(y_true, y_prob)\n plt.plot(fpr, tpr, color='darkorange', lw=2,\n label='ROC curve (Area = %0.3f)'% roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate (1 - Specifity)')\n plt.ylabel('True Positive Rate (Sensitivity)')\n plt.title('Receiver Operating Characteristic')\n plt.legend(loc=\"lower right\")\n print(f'ROC Curve saved to {ARGS.out_directory}/roc.png')\n plt.savefig(f'{ARGS.out_directory}/roc.png')\n else:\n print('ROC-AUC %0.3f' % roc_auc)",
"def plot_ROC(self, canvas):\n\t\tfpr = self.fpr_\n\t\ttpr = self.tpr_\n\t\tauc = self.class_auroc_\n\t\tclasses = self.classes_\n\t\tnum_classes = self.num_classes_\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\n\t\tax.plot(fpr['avg'], tpr['avg'], label='avg (area={0})'.format(self.avg_auroc_), \\\n\t\t\tcolor = 'black', linewidth=2, linestyle='--')\n\n\t\tcolors = cycle(['blue', 'orange', 'green', 'red', 'yellow', 'purple', 'cyan'])\n\t\tfor i, color in zip(range(0, num_classes), colors):\n\t\t\tax.plot(fpr[i], tpr[i], label='{0} (area={1})'.format(classes[i], auc[classes[i]]), \\\n\t\t\t\tcolor=color, linewidth=1)\n\n\t\tax.plot([0 ,1], [0, 1], color='lightgray', linewidth=1, linestyle='--')\n\t\tax.set_xlim([0.0, 1.0])\n\t\tax.set_ylim([0.0, 1.05])\n\t\tax.set_xlabel('FPR')\n\t\tax.set_ylabel('TPR')\n\t\tax.legend(loc='lower right')\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()",
"def do_roc(scores, true_labels, file_name='', directory='', plot=True):\n fpr, tpr, _ = roc_curve(true_labels, scores)\n roc_auc = auc(fpr, tpr) # compute area under the curve\n if plot: \n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % (roc_auc))\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n\n plt.savefig(directory + file_name + 'roc.png')\n plt.close()\n\n return roc_auc",
"def _roc_plot_single(metrics, save_name):\n plt.figure()\n plt.plot([0, 1], [0, 1], \"k--\")\n plt.plot(metrics[\"fpr\"], metrics[\"tpr\"], \"r\", linewidth=2)\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.grid()\n plt.xlabel(\"True Positive Rate\")\n plt.ylabel(\"False Positive Rate\")\n plt.tight_layout()\n plt.savefig(save_name)",
"def _th_plot(self, y_true, y_pred_proba, pos_label, pos_label_ind,\n best_th_, q, tpr, fpr, th_, th_range, index, roc_auc_kwargs):\n fig, axs = plt.subplots(nrows=2, ncols=1)\n fig.set_size_inches(10, 10)\n # Roc score.\n y_type = sklearn.utils.multiclass.type_of_target(y_true)\n if y_type == \"binary\":\n roc_auc = sklearn.metrics.roc_auc_score(\n y_true, y_pred_proba[:, pos_label_ind], **roc_auc_kwargs)\n elif y_type == \"multiclass\":\n roc_auc = sklearn.metrics.roc_auc_score(\n y_true, y_pred_proba, **roc_auc_kwargs)\n else:\n assert False, f\"Unhandled y_type {y_type}\"\n # Roc curve.\n axs[0].plot(fpr, tpr, 'darkorange',\n label=f\"ROC curve (AUC = {roc_auc:.3f}).\")\n axs[0].scatter(fpr[index], tpr[index], c='b', marker=\"o\")\n axs[0].plot([0, 1], [0, 1], color='navy', linestyle='--')\n axs[0].set_xlabel('False Positive Rate')\n axs[0].set_ylabel('True Positive Rate')\n axs[0].set_title(f\"Receiver operating characteristic \"\n f\"(label '{pos_label}')\")\n axs[0].legend(loc=\"lower right\")\n # Metric q.\n axs[1].plot(th_, q, 'green')\n axs[1].vlines(best_th_, np.min(q), np.max(q))\n axs[1].vlines(th_range, np.min(q), np.max(q), colors='b',\n linestyles=':')\n axs[1].set_xlim([0.0, 1.0])\n axs[1].set_xlabel('Threshold')\n axs[1].set_ylabel('TPR/(TPR+FPR)')\n axs[1].set_title('Selected th values objective maximum')\n # plt.plot(th_, fpr, 'red')\n plt.show()",
"def plot_roc_auc(tree, reg, f_test, l_test):\n\n # Calculate probabilites for each model\n s_probs = [0 for _ in range(len(l_test))]\n tree_probs = tree.predict_proba(f_test)\n reg_probs = reg.predict_proba(f_test)\n\n # Only keep postive outcomes\n tree_probs = tree_probs[:, 1]\n reg_probs = reg_probs[:, 1]\n\n # Calculate AUC value for each model and 0.5\n s_auc = roc_auc_score(l_test, s_probs)\n tree_auc = roc_auc_score(l_test, tree_probs)\n reg_auc = roc_auc_score(l_test, reg_probs)\n\n print(f'Random prediction AUC: {s_auc:.3f}')\n print(f'Decision Tree AUC: {tree_auc:.3f}')\n print(f'Logistic Regression AUC: {reg_auc:3f}')\n\n # calculate FPR & TPRs\n s_fpr, s_tpr, s_thresh = roc_curve(l_test, s_probs)\n tree_fpr, tree_tpr, tree_thresh = roc_curve(l_test, tree_probs)\n reg_fpr, reg_tpr, reg_thresh = roc_curve(l_test, reg_probs)\n\n # plot the curves\n plt.plot(s_fpr, s_tpr, linestyle='--', label='Random Prediction')\n plt.plot(tree_fpr, tree_tpr, linestyle='solid', label='Decision Tree AUC')\n plt.plot(reg_fpr, reg_tpr, linestyle='dotted', label='Regression AUC')\n\n plt.title('ROC Plot')\n plt.xlabel('FPR')\n plt.ylabel('TPR')\n plt.legend()\n plt.savefig('Baseball_AUC.png')",
"def plot_curve(self, true_values, predictions, ax=None, title='ROC', label='ROC', lw=1, add_auc=True, **kwargs):\n fpr, tpr, _ = roc_curve(true_values, predictions)\n roc_auc = auc(fpr, tpr)\n label_auc = label + ': {:.3f} AUC'.format(roc_auc)\n logging.info('ROC result: %s', label_auc)\n ax.plot(fpr, tpr, lw=lw, label=label_auc if add_auc else label, **kwargs)\n ax.set_title(title)\n ax.set_xlabel('FPR')\n ax.set_ylabel('TPR')\n ax.legend(loc='lower right', frameon=False)\n return ax",
"def roc(proba, ts, classifier_name):\n fpr, tpr, _ = roc_curve(ts, proba[:,1])\n roc_auc = auc(fpr, tpr)\n print(\"Area under the ROC curve for %s : %f\") % (classifier_name, roc_auc)\n\n l = 'ROC curve for %s (area = %0.2f)' % (classifier_name, roc_auc)\n pl.clf()\n pl.plot(fpr, tpr, label=l)\n pl.plot([0, 1], [0, 1], 'k--')\n pl.xlim([0.0, 1.0])\n pl.ylim([0.0, 1.0])\n pl.xlabel('False Positive Rate')\n pl.ylabel('True Positive Rate')\n pl.title('Receiver operating characteristic for %s' % classifier_name)\n pl.legend(loc=\"lower right\")\n pl.show()",
"def plot_roc(preds, labels, title=\"Receiver operating characteristic\"):\n\n # Compute values for curve\n fpr, tpr, _ = roc_curve(labels, preds)\n\n # Compute FPR (95% TPR)\n tpr95 = fpr_at_95_tpr(preds, labels)\n\n # Compute AUROC\n roc_auc = auroc(preds, labels)\n\n # Draw the plot\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='AUROC = %0.2f' % roc_auc)\n plt.plot([0, 1], [0.95, 0.95], color='black', lw=lw, linestyle=':', label='FPR (95%% TPR) = %0.2f' % tpr95)\n plt.plot([tpr95, tpr95], [0, 1], color='black', lw=lw, linestyle=':')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--', label='Random detector ROC')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()",
"def roc2(fpr, tpr, roc_auc):\n plt.figure()\n plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()",
"def verif_valid(model, X_val, y_val, cut=0.5):\n if type(model) == Sequential:\n X_val = np.array(X_val)\n reality = y_val\n \n if ((type(model) == svm.classes.OneClassSVM) | (type(model) == lgb.basic.Booster) | (type(model) == svm.classes.LinearSVC) | (type(model) == Sequential)):\n pred_score = model.predict(X_val)\n if (type(model) == svm.classes.OneClassSVM):\n pred_score = np.where(pred_score == -1, 1, 0)\n else:\n pred_score = model.predict_proba(X_val)[:,1]\n \n plt.hist(pred_score)\n plt.title('Distribution of the prediction score')\n plt.show()\n #if (type(model) == Sequential):\n # predictions = np.where(pred_score > 0.5, 1, 0)\n #else:\n predictions = np.where(pred_score > cut, 1, 0)\n \n print('Matrice de confusion :')\n conf_mat = confusion_matrix(reality, predictions)\n print(pd.DataFrame(conf_mat))\n print('Associated metrics :')\n print(classification_report(reality, predictions))\n fpr, tpr, _ = roc_curve(y_val, pred_score)\n \n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, 'r-', lw=4)\n actual_fpr = conf_mat[1, 0] / (conf_mat[1, 0] + conf_mat[0, 0])\n actual_tpr = conf_mat[1, 1] / (conf_mat[1, 1] + conf_mat[0, 1])\n plt.plot(actual_fpr, actual_tpr, 'bo', lw=10)\n plt.xlabel('False-Positive Rate')\n plt.ylabel('True-Positive Rate')\n plt.title('ROC curve (with AUC = ' + str(round(roc_auc, 6)) + ')')\n plt.plot([0, 1], [0, 1], 'k-')\n plt.show()\n print('Score AUC : ' + str(roc_auc))\n print('Accuracy : ' + str(metrics.accuracy_score(y_val, predictions)))"
] | [
"0.7867306",
"0.7626776",
"0.732372",
"0.7319301",
"0.73173314",
"0.7298921",
"0.7294701",
"0.7286847",
"0.71924317",
"0.71809953",
"0.71744895",
"0.7162829",
"0.71388173",
"0.712525",
"0.70957816",
"0.7076601",
"0.70679814",
"0.70641404",
"0.706279",
"0.69863737",
"0.69803417",
"0.6926248",
"0.68958706",
"0.689518",
"0.68821436",
"0.6874888",
"0.68213457",
"0.68192786",
"0.6805078",
"0.6800101"
] | 0.78781956 | 0 |
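Illustrative note (not part of the dataset): the negative snippets in the row above all follow the same scikit-learn ROC-plotting pattern -- compute fpr/tpr with roc_curve, compute the area with auc or roc_auc_score, then draw the curve plus the chance diagonal with matplotlib. The minimal, self-contained sketch below shows that shared pattern; the function name plot_roc_sketch and the output path roc_sketch.png are assumptions for illustration, not names taken from any of the entries.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

def plot_roc_sketch(y_true, y_score, out_path="roc_sketch.png"):
    # Hypothetical helper; mirrors the pattern shared by the snippets above.
    fpr, tpr, _ = roc_curve(y_true, y_score)   # false/true positive rates per threshold
    roc_auc = auc(fpr, tpr)                    # area under the ROC curve
    plt.figure()
    plt.plot(fpr, tpr, label="ROC curve (area = %0.3f)" % roc_auc)
    plt.plot([0, 1], [0, 1], "k--")            # chance-level diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.legend(loc="lower right")
    plt.savefig(out_path)                      # assumed output file name
    plt.close()
    return roc_auc

# Usage (purely illustrative data):
# rng = np.random.default_rng(0)
# y_true = rng.integers(0, 2, size=200)
# y_score = rng.random(200)
# plot_roc_sketch(y_true, y_score)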
Saves the network architecture as a .txt file. Arguments | def save_arch(model, save_folder):
with open(save_folder + '/architecture.txt','w') as a_save:
model.summary(print_fn=lambda x: a_save.write(x + '\n')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path",
"def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))",
"def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['fc1_n_chan'] = self.fc1_n_chan\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))",
"def save_architecture(model, path_out):\n # Redirect the print output the a textfile\n orig_stdout = sys.stdout\n # and store the architecture\n f = file(os.path.join(path_out, \"architecture.txt\"), 'w')\n sys.stdout = f\n model.summary()\n # Reset the print output direction\n sys.stdout = orig_stdout\n f.close()\n\n open(os.path.join(path_out, \"config.json\"), 'w').write(model.to_json())",
"def save_network_architecture(self,network_path):\n net_architecture = {}\n net_architecture['y_res'] = self.y_res\n net_architecture['x_res'] = self.x_res\n net_architecture['n_input_channels'] = self.n_input_channels\n net_architecture['n_output_classes'] = self.n_output_classes\n net_architecture['conv1_size'] = self.conv1_size\n net_architecture['conv1_n_chan'] = self.conv1_n_chan\n net_architecture['conv1_n_pool'] = self.conv1_n_pool\n net_architecture['conv2_size'] = self.conv2_size\n net_architecture['conv2_n_chan'] = self.conv2_n_chan\n net_architecture['conv2_n_pool'] = self.conv2_n_pool\n net_architecture['fc1_n_chan'] = self.fc1_n_chan\n net_architecture['fc1_dropout'] = self.fc1_dropout\n net_architecture['alpha'] = self.alpha\n net_architecture['n_samples_trained'] = self.n_samples_trained\n net_architecture['n_class_samples_trained'] = self.n_class_samples_trained\n net_architecture['n_samples_list'] = self.n_samples_list\n net_architecture['n_class_samples_list'] = self.n_class_samples_list\n net_architecture['accuracy_list'] = self.accuracy_list\n net_architecture['precision_list'] = self.precision_list\n net_architecture['recall_list'] = self.recall_list\n net_architecture['F1_list'] = self.F1_list\n np.save(os.path.join( \\\n network_path,'net_architecture.npy'), net_architecture)\n self.log(\"Network architecture saved to file:\\n{}\".format(\n os.path.join(network_path,'net_architecture.npy')))",
"def write_network_file(self, model, **kwargs):\n model.set_names()\n # Output network file\n output_file = self.output_path + \"/network.txt\"\n\n self.network_have_substations = False\n\n # Lists for storing strings\n source_string_list = []\n overhead_string_list = []\n overhead_byphase_string_list = []\n underground_string_list = []\n switch_string_list = []\n fuse_string_list = []\n recloser_string_list = []\n breaker_string_list = []\n capacitor_string_list = []\n two_windings_transformer_string_list = []\n three_windings_transformer_string_list = []\n regulator_string_list = []\n converter_string_list = []\n converter_control_string_list = []\n pv_settings_string_list = []\n bess_settings_string_list = []\n dg_generation_string_list = []\n\n # The linecodes dictionary is used to group lines which have the same properties\n # (impedance matrix, ampacity...)\n # This dictionary will be outputed in write_equipment_file\n ID = 0\n self.linecodes_overhead = {}\n ID_cable = 0\n self.cablecodes = {}\n ID_cap = 0\n self.capcodes = {}\n ID_trans = 0\n self.two_windings_trans_codes = {}\n ID_reg = 0\n self.reg_codes = {}\n ID_trans_3w = 0\n self.three_windings_trans_codes = {}\n ID_cond = 0\n self.bess_codes = {}\n ID_bess = 0\n self.conductors = {}\n self.switchcodes = {}\n self.fusecodes = {}\n self.reclosercodes = {}\n self.breakercodes = {}\n self.irradiance_profiles = {}\n\n intermediate_nodes = []\n\n self.sources = {}\n\n self.substations = []\n\n # Open the output file...\n with open(output_file, \"w\") as f:\n\n # before doing anything, we need to get all transformers that have a regulator connected to them\n # In CYME, Regulators do not need to have a transformer object, so we need to ignore the transformers with regulators\n self.transformers_to_ignore = [\n i.connected_transformer\n for i in model.models\n if isinstance(i, Regulator)\n ]\n\n # Build connector_string_mapping\n # TODO integrate into rest of model so we don't have to loop twice over ditto elements\n for i in model.models:\n if (\n hasattr(i, \"from_element\")\n and i.from_element is not None\n and hasattr(i, \"from_element_connection_index\")\n and i.from_element_connection_index is not None\n ):\n self.node_connector_string_mapping[\n (i.from_element, i.from_element_connection_index)\n ] = \"{f}_{t}\".format(f=i.from_element, t=i.to_element)\n if (\n len(\n self.node_connector_string_mapping[\n (i.from_element, i.from_element_connection_index)\n ]\n )\n > 64\n ):\n hasher = hashlib.sha1()\n hasher.update(\n self.node_connector_string_mapping[\n (i.from_element, i.from_element_connection_index)\n ].encode(\"utf-8\")\n )\n self.node_connector_string_mapping[\n (i.from_element, i.from_element_connection_index)\n ] = hasher.hexdigest()\n\n if (\n hasattr(i, \"to_element\")\n and i.to_element is not None\n and hasattr(i, \"to_element_connection_index\")\n and i.to_element_connection_index is not None\n ):\n self.node_connector_string_mapping[\n (i.to_element, i.to_element_connection_index)\n ] = \"{f}_{t}\".format(f=i.from_element, t=i.to_element)\n if (\n len(\n self.node_connector_string_mapping[\n (i.to_element, i.to_element_connection_index)\n ]\n )\n > 64\n ):\n hasher = hashlib.sha1()\n hasher.update(\n self.node_connector_string_mapping[\n (i.to_element, i.to_element_connection_index)\n ].encode(\"utf-8\")\n )\n self.node_connector_string_mapping[\n (i.to_element, i.to_element_connection_index)\n ] = hasher.hexdigest()\n\n # Loop over the DiTTo objects\n for i in model.models:\n\n if hasattr(i, \"drop\") and 
i.drop == 1:\n continue\n\n # If we get a PowerSource object\n #\n if isinstance(i, PowerSource):\n # Check that the PowerSouce object is an external power source\n if hasattr(i, \"is_sourcebus\") and i.is_sourcebus == 1:\n # Empty new source string\n new_source_string = \"\"\n self.substations.append({})\n\n if (\n hasattr(i, \"connecting_element\")\n and i.connecting_element is not None\n ):\n self.sources[i.connecting_element] = None\n new_source_string += i.connecting_element\n self.substations[-1][\n \"connecting_element\"\n ] = i.connecting_element\n else:\n continue\n\n if (\n hasattr(i, \"nominal_voltage\")\n and i.nominal_voltage is not None\n ):\n new_source_string += \",\" + str(i.nominal_voltage * 10 ** -3)\n self.sources[i.connecting_element] = str(\n i.nominal_voltage * 10 ** -3\n )\n self.substations[-1][\"KVLL\"] = str(\n i.nominal_voltage * 10 ** -3\n )\n elif (\n hasattr(i, \"connecting_element\")\n and i.connecting_element is not None\n and i.connecting_element in model.model_names\n and hasattr(model[i.connecting_element], \"nominal_voltage\")\n and model[i.connecting_element].nominal_voltage is not None\n ):\n voltage = model[i.connecting_element].nominal_voltage\n new_source_string += \",\" + str(voltage * 10 ** -3)\n self.sources[i.connecting_element] = str(voltage * 10 ** -3)\n self.substations[-1][\"KVLL\"] = str(voltage * 10 ** -3)\n else:\n new_source_string += \",\"\n\n if hasattr(i, \"phase_angle\") and i.phase_angle is not None:\n new_source_string += \",\" + str(i.phase_angle)\n new_source_string += \",\" + str(i.phase_angle - 120)\n new_source_string += \",\" + str(i.phase_angle + 120)\n self.substations[-1][\"phase_angle\"] = str(i.phase_angle)\n else:\n new_source_string += \",,,\"\n\n if (\n hasattr(i, \"positive_sequence_impedance\")\n and i.positive_sequence_impedance is not None\n ):\n new_source_string += (\n \",\"\n + str(i.positive_sequence_impedance.real)\n + \",\"\n + str(i.positive_sequence_impedance.imag)\n )\n self.substations[-1][\"R1\"] = str(\n i.positive_sequence_impedance.real\n )\n self.substations[-1][\"X1\"] = str(\n i.positive_sequence_impedance.imag\n )\n else:\n new_source_string += \",,\"\n\n if (\n hasattr(i, \"zero_sequence_impedance\")\n and i.zero_sequence_impedance is not None\n ):\n new_source_string += (\n \",\"\n + str(i.zero_sequence_impedance.real)\n + \",\"\n + str(i.zero_sequence_impedance.imag)\n )\n self.substations[-1][\"R0\"] = str(\n i.zero_sequence_impedance.real\n )\n self.substations[-1][\"X0\"] = str(\n i.zero_sequence_impedance.imag\n )\n else:\n new_source_string += \",,\"\n\n if (\n hasattr(i, \"negative_sequence_impedance\")\n and i.negative_sequence_impedance is not None\n ):\n new_source_string += (\n \",\"\n + str(i.negative_sequence_impedance.real)\n + \",\"\n + str(i.negative_sequence_impedance.imag)\n )\n elif (\n hasattr(i, \"zero_sequence_impedance\")\n and i.zero_sequence_impedance is not None\n ):\n new_source_string += (\n \",\"\n + str(i.zero_sequence_impedance.real)\n + \",\"\n + str(i.zero_sequence_impedance.imag)\n )\n else:\n new_source_string += \",,\"\n\n # OperatingVoltages\n try:\n new_source_string += \",{v},{v},{v},0\".format(\n v=i.nominal_voltage * 10 ** -3\n )\n except:\n new_source_string += \",,,,0\"\n pass\n\n if hasattr(i, \"rated_power\") and i.rated_power is not None:\n self.substations[-1][\"MVA\"] = str(i.rated_power * 10 ** -6)\n\n if new_source_string != \"\":\n source_string_list.append(new_source_string)\n\n # If we get a Node object\n #\n if isinstance(i, Node):\n\n # 
Empty new node string\n new_node_string = \"\"\n\n # Empty new bus string (for bus representations of nodes with two coords)\n new_bus_string = \"\"\n\n # Name\n if hasattr(i, \"name\") and i.name is not None:\n self.nodeID_list.append(i.name)\n else:\n continue\n\n # CoordX and CoordY\n if (\n hasattr(i, \"positions\")\n and i.positions is not None\n and len(i.positions) == 1\n ):\n new_node_string += i.name\n try:\n new_node_string += \",\" + str(i.positions[0].long)\n except:\n new_node_string += \",0\"\n pass\n\n try:\n new_node_string += \",\" + str(i.positions[0].lat)\n except:\n new_node_string += \",0\"\n pass\n elif (\n hasattr(i, \"positions\")\n and i.positions is not None\n and len(i.positions) >= 2\n ):\n new_bus_string += i.name\n try:\n new_bus_string += \",\" + str(i.positions[0].long)\n except:\n new_bus_string += \",0\"\n pass\n\n try:\n new_bus_string += \",\" + str(i.positions[0].lat)\n except:\n new_bus_string += \",0\"\n pass\n\n try:\n new_bus_string += \",\" + str(i.positions[-1].long)\n except:\n new_bus_string += \",0\"\n pass\n\n try:\n new_bus_string += \",\" + str(i.positions[-1].lat)\n except:\n new_bus_string += \",0\"\n pass\n new_bus_string += \",2\" # Set width of 2\n for j in range(1, len(i.positions) - 1):\n sectionid = \"\"\n if (i.name, j - 1) in self.node_connector_string_mapping:\n sectionid = self.node_connector_string_mapping[\n (i.name, j - 1)\n ]\n new_node_connector_string = \"{n},{x},{y},{s}\".format(\n n=i.name,\n x=i.positions[j].long,\n y=i.positions[j].lat,\n s=sectionid,\n )\n self.node_connector_string_list.append(\n new_node_connector_string\n )\n\n else:\n new_node_string += i.name\n new_node_string += \",0,0\"\n\n # Add the node string to the list\n if new_node_string != \"\":\n self.node_string_list.append(new_node_string)\n\n if new_bus_string != \"\":\n self.bus_string_list.append(new_bus_string)\n\n # If we get a Line object\n #\n if isinstance(i, Line):\n\n matching_list = {\n \"overhead\": overhead_string_list,\n \"by_phase\": overhead_byphase_string_list,\n \"underground\": underground_string_list,\n \"switch\": switch_string_list,\n \"fuse\": fuse_string_list,\n \"recloser\": recloser_string_list,\n \"breaker\": breaker_string_list,\n }\n\n # Empty new strings for sections and overhead lines\n new_section_line = \"\"\n new_line_string = \"\"\n line_type = \"overhead\" # Line type is set to overhead by default\n\n # Name\n if hasattr(i, \"name\") and i.name is not None:\n\n # Get the type\n #\n # (In DiTTo, a line object can be used to represent overhead and underground lines,\n # as well as switches and fuses).\n #\n if hasattr(i, \"line_type\"):\n\n # if i.line_type is None:\n\n # Fuses and reclosers are modelled in OpenDSS as an object monitoring a line.\n # In RNM, this dummy line is actually a switch, meaning that we have in DiTTo\n # line objects where is_switch==1 AND is_fuse==1 (or is_recloser==1)\n # We want to output these as fuses or reclosers, not as switches\n # Hence the following:\n # if hasattr(i, 'is_fuse') and i.is_fuse==1:\n # line_type='fuse'\n\n # elif hasattr(i, 'is_recloser') and i.is_recloser==1:\n # line_type='recloser'\n # ONLY if line is not a fuse nor a recloser, but is a switch do we output a switch...\n # elif hasattr(i, 'is_switch') and i.is_switch==1:\n # line_type='switch'\n\n if i.line_type is not None:\n if i.line_type.lower() == \"underground\":\n line_type = \"underground\"\n\n if (\n hasattr(i, \"nominal_voltage\")\n and i.nominal_voltage is not None\n and i.nominal_voltage < 600\n ):\n 
line_type = \"underground\" # for triplex lines\n\n if hasattr(i, \"is_fuse\") and i.is_fuse == 1:\n line_type = \"fuse\"\n\n elif hasattr(i, \"is_recloser\") and i.is_recloser == 1:\n line_type = \"recloser\"\n\n elif hasattr(i, \"is_breaker\") and i.is_breaker == 1:\n line_type = \"breaker\"\n\n # ONLY if line is not a fuse nor a recloser, but is a switch do we output a switch...\n elif hasattr(i, \"is_switch\") and i.is_switch == 1:\n line_type = \"switch\"\n\n # From element for sections\n if (\n hasattr(i, \"from_element\")\n and i.from_element is not None\n and hasattr(i, \"to_element\")\n and i.to_element is not None\n ):\n new_section_ID = \"{f}_{t}\".format(\n f=i.from_element, t=i.to_element\n )\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n while (\n new_section_ID\n in self.section_feeder_mapping[i.feeder_name]\n ):\n new_section_ID = (\n new_section_ID + \"*\"\n ) # This is used to deal with duplicate lines from same from and to nodes\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(\n new_section_ID.encode(\"utf-8\")\n )\n new_section_ID = hasher.hexdigest()\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n new_line_string += new_section_ID\n from_index = 0\n to_index = 0\n if (\n hasattr(i, \"from_element_connection_index\")\n and i.from_element_connection_index is not None\n ):\n from_index = i.from_element_connection_index\n if (\n hasattr(i, \"to_element_connection_index\")\n and i.to_element_connection_index is not None\n ):\n to_index = i.to_element_connection_index\n new_section_line = \"{id},{f},{fi},{t},{ti}\".format(\n id=new_section_ID,\n f=i.from_element,\n fi=from_index,\n t=i.to_element,\n ti=to_index,\n )\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n self.section_feeder_mapping[i.feeder_name].append(\n new_section_ID\n )\n else:\n self.section_feeder_mapping[i.feeder_name] = [\n new_section_ID\n ]\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n ):\n self.section_headnode_mapping[\n i.feeder_name\n ] = i.substation_name\n else:\n raise ValueError(\n \"Line {name} does not have from and to.\".format(\n name=i.name\n )\n )\n\n if (\n hasattr(i, \"positions\")\n and i.positions is not None\n and len(i.positions) > 0\n ):\n for seg_number, position in enumerate(i.positions):\n intermediate_nodes.append(\n [\n new_section_ID,\n seg_number,\n position.long,\n position.lat,\n ]\n )\n\n # Phases of the section\n #\n new_section_line += \",\"\n phases = []\n cond_id = {}\n if hasattr(i, \"wires\") and i.wires is not None:\n i.wires = [w for w in i.wires if w.drop != 1]\n for wire in i.wires:\n\n if hasattr(wire, \"phase\") and wire.phase is not None:\n # Do not count the neutral(s)...\n if wire.phase in [\"A\", \"B\", \"C\"]:\n new_section_line += wire.phase\n phases.append(wire.phase)\n\n new_code = \"\"\n if (\n hasattr(wire, \"diameter\")\n and wire.diameter is not None\n ):\n new_code += \",{}\".format(wire.diameter)\n else:\n new_code += \",\"\n\n if hasattr(wire, \"gmr\") and wire.gmr is not None:\n new_code += \",{}\".format(wire.gmr)\n\n # These calculations require no neutral wire as output (since these equations assume no kron reduction)\n # They serve the purpose of getting the impedance matrix output in CYME to match the impedance matrix from DiTTo\n # NOTE: a 2x2 
impedance matrix is probably derived from R1, R0, X1, X0 and isn't actually a 2-wire or even a kron reduced matrix.\n # To get the cross-terms to match would require a kron reduction, often of imaginary wire resistances to get the cross-terms to match\n # For that reason, we let CYME apply the cross terms with their default spacing. This may cause some differences in the powerflow\n # i.e. WARNING - 2x2 matrix cross terms won't match\n\n elif wire.gmr is None and (\n len(i.impedance_matrix) == 1\n or len(i.impedance_matrix) == 2\n ):\n\n if isinstance(i.impedance_matrix, list):\n x_in_miles = i.impedance_matrix[0][0].imag\n else:\n x_in_miles = i.impedance_matrix[0].imag\n x_in_miles = (\n x_in_miles * 1609.34\n ) # internally impedance per meter\n coeff1 = 0.12134\n coeff2 = 7.93402\n gmr_in_feet = 1 / (\n math.exp((x_in_miles / coeff1) - coeff2)\n ) # Solving Kerstin 4.41 for GMR\n gmr_in_cm = 30.48 * gmr_in_feet\n new_code += \",{}\".format(gmr_in_cm)\n else:\n new_code += \",\"\n\n if (\n hasattr(wire, \"resistance\")\n and wire.resistance is not None\n ):\n new_code += \",{}\".format(wire.resistance)\n elif wire.resistance is None and (\n len(i.impedance_matrix) == 1\n or len(i.impedance_matrix) == 2\n ): # Calculate the resistance from the impedance matrix\n if isinstance(i.impedance_matrix, list):\n r_in_miles = i.impedance_matrix[0][0].real\n else:\n r_in_miles = i.impedance_matrix[0].real\n r_in_miles = (\n r_in_miles * 1609.34\n ) # internally impedance per meter\n resistance = r_in_miles - 0.09530 # From Kersting\n resistance = (\n resistance / 1.60934\n ) # output in ohms per km\n new_code += \",{}\".format(resistance)\n\n else:\n new_code += \",\"\n\n if (\n hasattr(wire, \"ampacity\")\n and wire.ampacity is not None\n ):\n new_code += \",{}\".format(wire.ampacity)\n else:\n new_code += \",\"\n\n if (\n hasattr(wire, \"emergency_ampacity\")\n and wire.emergency_ampacity is not None\n ):\n new_code += \",{}\".format(wire.emergency_ampacity)\n else:\n new_code += \",\".format(wire.emergency_ampacity)\n\n # if line_type=='underground':\n # If we have a name for the wire, we use it as the equipment id\n if (\n hasattr(wire, \"nameclass\")\n and wire.nameclass is not None\n and wire.nameclass != \"\"\n ):\n wire_name = wire.nameclass\n # If not already in the conductors dictionary, add it\n if wire_name not in self.conductors:\n self.conductors[wire_name] = new_code\n cond_id[wire.phase] = wire_name\n # If we do not have a name for the wire, we create one:\n # The IDs will be wire_1, wire_2,...\n else:\n found = False\n # Try to find if we already have the conductor stored\n for key, value in self.conductors.items():\n if value == new_code:\n cond_id[wire.phase] = key\n found = True\n # If not, create it\n if not found:\n ID_cond += 1\n self.conductors[\n \"conductor_{}\".format(ID_cond)\n ] = new_code\n cond_id[wire.phase] = ID_cond\n\n # Impedance matrix\n #\n # Here, we group lines that have the same characteristics:\n # R0,R1,X0,X1,ampacity\n # We create am ID for these lines (Here a simple integer)\n #\n # If we have a switch, we just use default because there is no way (to my knowledge)\n # to provide the impedance matrix for a switch in CYME\n frequency = 60 # Need to make this changable\n if line_type == \"switch\":\n if (\n i.nameclass is not None\n and i.nameclass != \"\"\n and i.wires[0].ampacity is not None\n and i.nominal_voltage is not None\n ):\n new_code2 = \"{amps},{amps},{amps},{amps},{amps},{kvll},0,,,,,,,,0,0,0,0,0,\".format(\n amps=i.wires[0].ampacity,\n 
kvll=i.nominal_voltage * 10 ** -3,\n )\n\n if (\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n not in self.switchcodes\n ):\n self.switchcodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n elif (\n self.switchcodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ]\n != new_code2\n ):\n found = False\n for k, v in self.switchcodes.items():\n if new_code2 == v:\n new_line_string += \",\" + str(k)\n found = True\n if not found:\n self.switchcodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n else:\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n else:\n new_line_string += \",DEFAULT\"\n\n elif line_type == \"fuse\":\n if (\n i.nameclass is not None\n and i.nameclass != \"\"\n and i.wires[0].ampacity is not None\n and i.nominal_voltage is not None\n ):\n new_code2 = \"{amps},{amps},{amps},{amps},{amps},{kvll},0,600.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,0,,,,\".format(\n amps=i.wires[0].ampacity,\n kvll=i.nominal_voltage * 10 ** -3,\n )\n\n if (\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n not in self.fusecodes\n ):\n self.fusecodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n elif (\n self.fusecodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ]\n != new_code2\n ):\n found = False\n for k, v in self.fusecodes.items():\n if new_code2 == v:\n new_line_string += \",\" + str(k)\n found = True\n if not found:\n self.fusecodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n else:\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n else:\n new_line_string += \",DEFAULT\"\n\n elif line_type == \"recloser\":\n if (\n i.nameclass is not None\n and i.nameclass != \"\"\n and i.wires[0].ampacity is not None\n and i.nominal_voltage is not None\n ):\n new_code2 = \"{amps},{amps},{amps},{amps},{amps},{kvll},0,600.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,0,0,0,0,,1,,\".format(\n amps=i.wires[0].ampacity,\n kvll=i.nominal_voltage * 10 ** -3,\n )\n\n if (\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n not in self.reclosercodes\n ):\n self.reclosercodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n 
+ str(int(i.wires[0].ampacity))\n )\n\n elif (\n self.reclosercodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ]\n != new_code2\n ):\n found = False\n for k, v in self.reclosercodes.items():\n if new_code2 == v:\n new_line_string += \",\" + str(k)\n found = True\n if not found:\n self.reclosercodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n else:\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n else:\n new_line_string += \",DEFAULT\"\n\n elif line_type == \"breaker\":\n if (\n i.nameclass is not None\n and i.nameclass != \"\"\n and i.wires[0].ampacity is not None\n and i.nominal_voltage is not None\n ):\n new_code2 = \"{amps},{amps},{amps},{amps},{amps},{kvll},0,600.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,0,0,0,0,\".format(\n amps=i.wires[0].ampacity,\n kvll=i.nominal_voltage * 10 ** -3,\n )\n\n if (\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n not in self.breakercodes\n ):\n self.breakercodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n elif (\n self.breakercodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ]\n != new_code2\n ):\n found = False\n for k, v in self.breakercodes.items():\n if new_code2 == v:\n new_line_string += \",\" + str(k)\n found = True\n if not found:\n self.breakercodes[\n i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n ] = new_code2\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n else:\n new_line_string += (\n \",\"\n + i.nameclass\n + \"_\"\n + str(int(i.nominal_voltage))\n + \"_\"\n + str(int(i.wires[0].ampacity))\n )\n\n else:\n new_line_string += \",DEFAULT\"\n\n elif line_type == \"underground\":\n tt = {}\n if (\n hasattr(i, \"nominal_voltage\")\n and i.nominal_voltage is not None\n ):\n if (\n i.nominal_voltage is not None\n and i.nominal_voltage < 600\n and len(i.wires) <= 2\n ): # LV lines are assigned as triplex unless they're three phase\n tt[\"cabletype\"] = 2\n else:\n tt[\"cabletype\"] = 0\n\n else:\n tt[\"cabletype\"] = 0\n\n if (\n hasattr(i, \"impedance_matrix\")\n and i.impedance_matrix is not None\n ):\n z_diag = 0\n z_offdiag = 0\n try:\n for kk in range(len(i.impedance_matrix)):\n if i.impedance_matrix[kk][kk] != 0:\n z_diag = i.impedance_matrix[kk][kk]\n for jj in range(len(i.impedance_matrix)):\n if jj == kk:\n continue\n if i.impedance_matrix[kk][jj] != 0:\n z_offdiag = i.impedance_matrix[kk][\n jj\n ]\n\n except:\n try:\n z_diag = i.impedance_matrix[0]\n z_offdiag = i.impedance_matrix[0]\n except:\n raise ValueError(\n \"Cannot get a value from impedance matrix for line {}\".format(\n i.name\n )\n )\n coeff = 10 ** 3\n z0 = z_diag + 2 * z_offdiag\n z1 = z_diag - z_offdiag\n\n tt[\"R0\"] = z0.real * coeff\n tt[\"X0\"] = z0.imag * coeff\n try:\n pos_seq_imp = i.impedance_matrix[1][1]\n 
tt[\"R1\"] = z1.real * coeff\n tt[\"X1\"] = z1.imag * coeff\n except:\n tt[\"R1\"] = tt[\"R0\"]\n tt[\"X1\"] = tt[\"X0\"]\n pass\n try:\n neg_seq_imp = i.impedance_matrix[2][2]\n tt[\"R2\"] = z1.real * coeff\n tt[\"X2\"] = z1.imag * coeff\n except:\n tt[\"R2\"] = tt[\"R1\"]\n tt[\"X2\"] = tt[\"X1\"]\n pass\n\n if (\n hasattr(i, \"capacitance_matrix\")\n and i.capacitance_matrix is not None\n ):\n c_diag = 0\n c_offdiag = 0\n try:\n for kk in range(len(i.impedance_matrix)):\n if i.capacitance_matrix[kk][kk] != 0:\n c_diag = i.capacitance_matrix[kk][kk]\n for jj in range(\n len(i.capacitance_matrix)\n ):\n if jj == kk:\n continue\n if (\n i.capacitance_matrix[kk][jj]\n != 0\n ):\n c_offdiag = i.capacitance_matrix[\n kk\n ][\n jj\n ]\n\n except:\n try:\n c_diag = i.capacitance_matrix[0]\n c_offdiag = i.capacitance_matrix[0]\n except:\n import pdb\n\n pdb.set_trace()\n raise ValueError(\n \"Cannot get a value from impedance matrix for line {}\".format(\n i.name\n )\n )\n coeff = 10 ** 3\n c0 = c_diag + 2 * c_offdiag\n c1 = c_diag - c_offdiag\n\n tt[\"B0\"] = (\n c0.real * 2 * math.pi * frequency\n ) # Don't multiply by km conversion since cyme output in micro siemens\n tt[\"B1\"] = c1.real * 2 * math.pi * frequency #\n\n else:\n tt[\"B1\"] = 0\n tt[\"B0\"] = 0\n try:\n tt[\"amps\"] = i.wires[0].ampacity\n except:\n tt[\"amps\"] = 0\n pass\n\n if (\n hasattr(i.wires[0], \"nameclass\")\n and i.wires[0].nameclass is not None\n and i.wires[0].nameclass != \"\"\n ):\n cable_name = i.wires[0].nameclass\n self.cablecodes[cable_name] = tt\n new_line_string += \",\" + cable_name\n else:\n if len(self.cablecodes) == 0:\n ID_cable += 1\n self.cablecodes[\"cable\" + str(ID_cable)] = tt\n new_line_string += \",cable_\" + str(ID_cable)\n else:\n found = False\n for k, v in self.cablecodes.items():\n if v == tt:\n new_line_string += \",cable_\" + str(k)\n found = True\n if not found:\n ID_cable += 1\n self.cablecodes[\n \"cable\" + str(ID_cable)\n ] = tt\n new_line_string += \",cable_\" + str(ID_cable)\n\n else: # We use impedance_matrix if it exists and we have 3 phases. otherwise we use by_phase. 
TODO: change to by_phase whenever we have the wire information for it.\n # try:\n tt = {}\n if \"A\" in cond_id:\n tt[\"CondID_A\"] = cond_id[\"A\"]\n else:\n tt[\"CondID_A\"] = \"NONE\"\n if \"B\" in cond_id:\n tt[\"CondID_B\"] = cond_id[\"B\"]\n else:\n tt[\"CondID_B\"] = \"NONE\"\n if \"C\" in cond_id:\n tt[\"CondID_C\"] = cond_id[\"C\"]\n else:\n tt[\"CondID_C\"] = \"NONE\"\n if \"N\" in cond_id:\n tt[\"CondID_N\"] = cond_id[\"N\"]\n else:\n tt[\"CondID_N\"] = \"NONE\"\n if \"N1\" in cond_id:\n tt[\"CondID_N1\"] = cond_id[\"N1\"]\n else:\n tt[\"CondID_N1\"] = \"NONE\"\n if \"N2\" in cond_id:\n tt[\"CondID_N2\"] = cond_id[\"N2\"]\n else:\n tt[\"CondID_N2\"] = \"NONE\"\n\n if hasattr(i, \"wires\") and i.wires is not None:\n for wire in i.wires:\n if hasattr(wire, \"phase\") and str(wire.phase) in [\n \"A\",\n \"B\",\n \"C\",\n ]:\n p = str(wire.phase)\n if (\n hasattr(wire, \"ampacity\")\n and wire.ampacity is not None\n ):\n try:\n tt[\"Amps{}\".format(p)] = wire.ampacity\n except:\n tt[\"Amps{}\".format(p)] = \"DEFAULT\"\n pass\n\n # If we have 3 phases, use OVERHEADLINE SETTING\n if len(phases) == 3 and i.impedance_matrix is not None:\n\n tt.update(\n {\"SpacingID\": \"DEFAULT\", \"UserDefinedImpedances\": 1}\n )\n\n for k, p1 in enumerate(phases):\n for j, p2 in enumerate(phases):\n if j == k:\n tt[\"R{p}\".format(p=p1)] = (\n i.impedance_matrix[k][j].real * 10 ** 3\n )\n tt[\"X{p}\".format(p=p1)] = (\n i.impedance_matrix[k][j].imag * 10 ** 3\n )\n if i.capacitance_matrix is not None and len(\n i.capacitance_matrix\n ) == len(i.impedance_matrix):\n tt[\"B{p}\".format(p=p1)] = (\n i.capacitance_matrix[k][j].real\n * 2\n * math.pi\n * frequency\n * 10 ** 3\n )\n else:\n tt[\"Ba\"] = 0\n tt[\"Bb\"] = 0\n tt[\"Bc\"] = 0\n elif j > k:\n if p1 == \"A\" and p2 == \"C\":\n tt[\"MutualResistanceCA\"] = (\n i.impedance_matrix[k][j].real\n * 10 ** 3\n )\n tt[\"MutualReactanceCA\"] = (\n i.impedance_matrix[k][j].imag\n * 10 ** 3\n )\n if i.capacitance_matrix is not None and len(\n i.capacitance_matrix\n ) == len(\n i.impedance_matrix\n ):\n tt[\"MutualShuntSusceptanceCA\"] = (\n i.capacitance_matrix[k][j].real\n * 2\n * math.pi\n * frequency\n * 10 ** 3\n )\n else:\n tt[\"MutualShuntSusceptanceCA\"] = 0\n else:\n tt[\n \"MutualResistance{p1}{p2}\".format(\n p1=p1, p2=p2\n )\n ] = (\n i.impedance_matrix[k][j].real\n * 10 ** 3\n )\n tt[\n \"MutualReactance{p1}{p2}\".format(\n p1=p1, p2=p2\n )\n ] = (\n i.impedance_matrix[k][j].imag\n * 10 ** 3\n )\n if i.capacitance_matrix is not None and len(\n i.capacitance_matrix\n ) == len(\n i.impedance_matrix\n ):\n tt[\n \"MutualShuntSusceptance{p1}{p2}\".format(\n p1=p1, p2=p2\n )\n ] = (\n i.capacitance_matrix[k][j].real\n * 2\n * math.pi\n * frequency\n * 10 ** 3\n )\n else:\n tt[\"MutualShuntSusceptanceAB\"] = 0\n tt[\"MutualShuntSusceptanceBC\"] = 0\n\n if (\n hasattr(i, \"nameclass\")\n and i.nameclass is not None\n and i.nameclass != \"\"\n ):\n line_nameclass = i.nameclass\n self.linecodes_overhead[line_nameclass] = tt\n new_line_string += \",\" + line_nameclass\n else:\n # If the linecode dictionary is empty, just add the new element\n if len(self.linecodes_overhead) == 0:\n ID += 1\n self.linecodes_overhead[ID] = tt\n new_line_string += \",line_\" + str(ID)\n\n # Otherwise, loop over the dict to find a matching linecode\n else:\n found = False\n for k, v in self.linecodes_overhead.items():\n if v == tt:\n new_line_string += \",\" + str(k)\n found = True\n if not found:\n ID += 1\n self.linecodes_overhead[\n \"line_\" + str(ID)\n ] = tt\n 
new_line_string += \",line_\" + str(ID)\n\n # If we have less than 3 phases, then use a BY_PHASE configuration\n else:\n line_type = \"by_phase\" # Change the line_type to write the line under the proper header\n\n # Add device number and phase conductor IDs\n new_line_string += \",{device},{condIDA},{condIDB},{condIDC}\".format(\n device=new_section_ID,\n condIDA=tt[\"CondID_A\"],\n condIDB=tt[\"CondID_B\"],\n condIDC=tt[\"CondID_C\"],\n )\n\n # Add neutral conductor IDs\n #\n # If we have valid IDs for BOTH N1 and N2, then use that\n if (\n tt[\"CondID_N1\"] != \"NONE\"\n and tt[\"CondID_N2\"] != \"NONE\"\n ):\n new_line_string += \",{condIDN1},{condIDN2}\".format(\n condIDN1=tt[\"CondID_N1\"],\n condIDN2=tt[\"CondID_N2\"],\n )\n # Otherwise, if we have a valid ID for N, then use that as condIDN1 and use whatever we have for N2\n elif tt[\"CondID_N\"] != \"NONE\":\n new_line_string += \",{condIDN1},{condIDN2}\".format(\n condIDN1=tt[\"CondID_N\"],\n condIDN2=tt[\"CondID_N2\"],\n )\n # Otherwise, do as for case 1\n else:\n new_line_string += \",{condIDN1},{condIDN2}\".format(\n condIDN1=tt[\"CondID_N1\"],\n condIDN2=tt[\"CondID_N2\"],\n )\n\n # Use Default spacing\n #\n # TODO: User-defined spacing support\n #\n if len(phases) == 1:\n new_line_string += \",N_ABOVE_1PH\"\n if len(phases) == 2:\n new_line_string += \",N_ABOVE_2PH\"\n if len(phases) == 3:\n new_line_string += \",N_ABOVE_3PH\"\n\n # Length\n if hasattr(i, \"length\") and i.length is not None:\n if (\n line_type != \"switch\"\n and line_type != \"fuse\"\n and line_type != \"recloser\"\n and line_type != \"breaker\"\n ):\n try:\n new_line_string += \",\" + str(i.length)\n except:\n new_line_string += \",\"\n pass\n else:\n if (\n line_type != \"switch\"\n and line_type != \"fuse\"\n and line_type != \"recloser\"\n and line_type != \"breaker\"\n ):\n new_line_string += \",\"\n\n if line_type == \"switch\" or line_type == \"breaker\":\n closed_phase = np.sort(\n [\n wire.phase\n for wire in i.wires\n if wire.is_open == 0\n and wire.phase not in [\"N\", \"N1\", \"N2\"]\n ]\n )\n if len(closed_phase) == 0:\n new_line_string += \",M,None,0\"\n else:\n new_line_string += \",M,{},0\".format(\n reduce(lambda x, y: x + y, closed_phase)\n )\n\n if line_type == \"fuse\" or line_type == \"recloser\":\n closed_phase = np.sort(\n [\n wire.phase\n for wire in i.wires\n if wire.phase not in [\"N\", \"N1\", \"N2\"]\n ]\n )\n new_line_string += \",M,{},0\".format(\n reduce(lambda x, y: x + y, closed_phase)\n )\n\n # ConnectionStatus\n new_line_string += \",0\" # Assumes the line is connected because there is no connected field in DiTTo\n\n # DeviceNumber\n if (\n line_type == \"switch\"\n or line_type == \"fuse\"\n or line_type == \"recloser\"\n or line_type == \"breaker\"\n ):\n new_line_string += \",\" + new_section_ID\n\n if line_type == \"underground\":\n new_line_string += (\n \",10,2\" # DistanceBetweenConductors, CableConfiguration\n )\n\n # Add the strings to the lists\n #\n if new_section_line != \"\":\n self.section_line_list.append(new_section_line)\n # If the object is inside of a substation...\n if hasattr(i, \"is_substation\") and i.is_substation == 1:\n # ...it should have the name of the substation specified in the 'substation_name' attribute\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n and i.substation_name != \"\"\n ):\n # Add 'substation_' prefix to easily distinguish substation from feeders or transmission lines\n ff_name = \"substation_{}\".format(i.substation_name)\n 
self.network_have_substations = True\n # If the object is not inside of a substation, then use the feeder_name attribute if it exists\n elif (\n hasattr(i, \"feeder_name\")\n and i.feeder_name is not None\n and i.feeder_name != \"\"\n ):\n ff_name = i.feeder_name\n\n if ff_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[ff_name].append(\n new_section_line\n )\n else:\n self.section_line_feeder_mapping[ff_name] = [\n new_section_line\n ]\n\n if new_line_string != \"\":\n try:\n matching_list[line_type].append(new_line_string)\n except:\n pass\n\n if isinstance(i, Storage):\n bess_string = \"\"\n new_bess_setting_string = \"\"\n new_converter_string = \"\"\n new_converter_control_setting_string = \"\"\n\n if (\n hasattr(i, \"name\")\n and i.name is not None\n and hasattr(i, \"connecting_element\")\n and i.connecting_element is not None\n and (\n i.connecting_element in model.model_names\n or \"load_\" + i.connecting_element in model.model_names\n )\n ):\n new_section_ID = \"{f}_{t}\".format(\n f=i.connecting_element, t=i.name\n )\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n\n new_section = (\n new_section_ID\n + \",{f},0,{t},0,\".format( # Assume only one index for the load connection point\n f=i.connecting_element, t=i.name\n )\n )\n\n new_node_string = \"{n}\".format(n=i.name)\n if hasattr(i, \"positions\") and i.positions is not None:\n try:\n new_node_string += \",\" + str(i.positions[0].long)\n except:\n new_node_string += \",0\"\n pass\n\n try:\n new_node_string += \",\" + str(i.positions[0].lat)\n except:\n new_node_string += \",0\"\n pass\n else:\n new_node_string += \",0,0\"\n self.node_string_list.append(new_node_string)\n\n phases = \"\"\n if i.phase_storages is not None:\n for ps in i.phase_storages:\n if ps.phase in [\"A\", \"B\", \"C\"]:\n new_section += ps.phase\n phases += ps.phase\n\n # If the object is inside of a substation...\n if hasattr(i, \"is_substation\") and i.is_substation == 1:\n # ...it should have the name of the substation specified in the 'substation_name' attribute\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n and i.substation_name != \"\"\n ):\n # Add 'substation_' prefix to easily distinguish substation from feeders or transmission lines\n ff_name = \"substation_{}\".format(i.substation_name)\n self.network_have_substations = True\n\n # If the object is not inside of a substation, then use the feeder_name attribute if it exists\n elif (\n hasattr(i, \"feeder_name\")\n and i.feeder_name is not None\n and i.feeder_name != \"\"\n ):\n ff_name = i.feeder_name\n\n self.section_line_list.append(new_section)\n if ff_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[ff_name].append(\n new_section\n )\n else:\n self.section_line_feeder_mapping[ff_name] = [new_section]\n\n new_converter_string += (\n new_section_ID + \",80,\"\n ) # 45 is the CYME code for PV devices\n new_converter_control_setting_string += (\n new_section_ID + \",80,0,0,\"\n ) # The controlindex and timetrigger indices are both zero\n\n if hasattr(i, \"rated_kWh\") and i.rated_kWh is not None:\n bess_string += str(i.rated_kWh * 10 ** -3)\n bess_string += \",\"\n\n if hasattr(i, \"rated_power\") and i.rated_power is not None:\n bess_string += str(i.rated_power * 10 ** -3)\n bess_string += \",\"\n\n # Use for both charging and discharging power\n if hasattr(i, \"rated_power\") and i.rated_power is not None:\n 
bess_string += str(i.rated_power * 10 ** -3)\n bess_string += \",\"\n\n if (\n hasattr(i, \"charging_efficiency\")\n and i.charging_efficiency is not None\n ):\n bess_string += str(i.charging_efficiency)\n bess_string += \",\"\n\n if (\n hasattr(i, \"discharging_efficiency\")\n and i.discharging_efficiency is not None\n ):\n bess_string += str(i.discharging_efficiency)\n\n bess_type = \"\"\n if bess_string in self.bess_codes:\n bess_type = self.bess_codes[bess_string]\n else:\n ID_bess += 1\n bess_type = \"BESS_\" + str(ID_bess)\n self.bess_codes[bess_string] = bess_type\n\n bess_string = bess_type + \",\" + bess_string\n\n new_bess_setting_string += (\n new_section_ID\n + \",M,\"\n + new_section_ID\n + \",\"\n + bess_type\n + \",\"\n + phases\n + \",\"\n )\n\n if (\n hasattr(i, \"stored_kWh\")\n and i.stored_kWh is not None\n and hasattr(i, \"rated_kWh\")\n and i.rated_kWh is not None\n and i.rated_kWh != 0\n ):\n new_bess_setting_string += str(\n int(i.stored_kWh / i.rated_kWh * 100)\n )\n\n if hasattr(i, \"active_rating\") and i.active_rating is not None:\n if (\n hasattr(i, \"reactive_rating\")\n and i.reactive_rating is not None\n ):\n new_converter_string += (\n str(\n math.sqrt(\n i.reactive_rating ** 2\n + i.active_rating ** 2\n )\n / 1000.0\n )\n + \",\"\n )\n else:\n new_converter_string += (\n str(i.active_rating / 1000.0) + \",\"\n )\n new_converter_string += str(i.active_rating / 1000.0) + \",\"\n elif hasattr(i, \"rated_power\") and i.rated_power is not None:\n if (\n hasattr(i, \"reactive_rating\")\n and i.reactive_rating is not None\n ):\n new_converter_string += (\n str(\n math.sqrt(\n i.reactive_rating ** 2\n + (i.rated_power * 1.1) ** 2\n )\n / 1000.0\n )\n + \",\"\n )\n else:\n new_converter_string += (\n str(i.rated_power * 1.1 / 1000.0) + \",\"\n )\n new_converter_string += (\n str(i.rated_power * 1.1 / 1000.0) + \",\"\n ) # Default value sets inverter to be oversized by 10%\n else:\n new_converter_string += \",,\"\n\n if (\n hasattr(i, \"reactive_rating\")\n and i.reactive_rating is not None\n ):\n new_converter_string += (\n str(i.reactive_rating / 1000.0) + \",\"\n )\n elif hasattr(i, \"rated_power\") and i.rated_power is not None:\n new_converter_string += (\n str(i.rated_power * 1.1 / 1000.0) + \",\"\n ) # Default value sets inverter to be oversized by 10% and active=reactive\n else:\n new_converter_string += \",\"\n\n if hasattr(i, \"rated_power\") and i.rated_power is not None:\n new_dg_generation_string += str(i.rated_power / 1000.0)\n elif hasattr(i, \"rated_power\") and i.rated_power is not None:\n new_dg_generation_string += str(i.rated_power / 1000.0)\n new_dg_generation_string += \",\"\n if (\n hasattr(i, \"min_powerfactor\")\n and i.min_powerfactor is not None\n ):\n new_converter_string += str(i.powerfactor * 100)\n new_dg_generation_string += str(i.powerfactor * 100)\n new_dg_generation_string += \",\"\n new_converter_string += \",\"\n if hasattr(i, \"fall_limit\") and i.fall_limit is not None:\n new_converter_string += str(i.fall_limit)\n new_converter_string += \",\"\n if hasattr(i, \"rise_limit\") and i.rise_limit is not None:\n new_converter_string += str(i.rise_limit)\n new_converter_string += \",\"\n if (hasattr(i, \"fall_limit\") and i.fall_limit is not None) or (\n hasattr(i, \"rise_limit\") and i.rise_limit is not None\n ):\n new_converter_string += \"0\" # Using units of % per minute\n\n if hasattr(i, \"control_type\") and i.control_type is not None:\n if (\n i.control_type.lower() == \"voltvar_vars_over_watts\"\n or i.control_type.lower() == 
\"voltvar\"\n ): # use default voltvar curve in cyme\n new_converter_control_setting_string += \"1\"\n if i.control_type.lower() == \"voltvar_watts_over_vars\":\n new_converter_control_setting_string += \"0\"\n if i.control_type.lower() == \"voltvar_fixed_vars\":\n new_converter_control_setting_string += \"2\"\n if i.control_type.lower() == \"voltvar_novars\":\n new_converter_control_setting_string += \"3\"\n if i.control_type.lower() == \"voltwatt\":\n new_converter_control_setting_string += \"5\"\n if i.control_type.lower() == \"watt_powerfactor\":\n new_converter_control_setting_string += \"6\"\n if i.control_type.lower() == \"powerfactor\":\n new_converter_control_setting_string += \"10\"\n\n new_converter_control_setting_string += \",\"\n if (\n i.control_type.lower() == \"voltvar_fixed_vars\"\n and i.var_injection is not None\n ):\n new_converter_control_setting_string += (\n str(i.var_injection) + \",2,\"\n ) # 2 is the code for the pecentage reactive power available\n else:\n new_converter_control_setting_string += \",,\"\n if (\n i.control_type.lower() == \"voltvar_watts_over_vars\"\n or i.control_type.lower() == \"voltvar_vars_over_watts\"\n ) and i.voltvar_curve is not None:\n new_converter_control_setting_string += (\n i.voltvar_curve + \",,\"\n )\n elif (\n i.control_type.lower() == \"voltwatt\"\n and i.voltwatt_curve is not None\n ):\n new_converter_control_setting_string += (\n i.voltwatt_curve + \",0,\"\n ) # 0 is the code for using the active power rating\n elif (\n i.control_type.lower() == \"watt_powerfactor\"\n and i.watt_powerfactor_curve is not None\n ):\n new_converter_control_setting_string += (\n i.watt_powerfactor_curve + \",0,\"\n ) # 0 is the code for using the active power rating\n else:\n new_converter_control_setting_string += \",,\"\n else:\n new_converter_control_setting_string += \"10\" + \",\" * 5\n\n if new_converter_string != \"\":\n converter_string_list.append(new_converter_string)\n if new_converter_control_setting_string != \"\":\n converter_control_string_list.append(\n new_converter_control_setting_string\n )\n\n if new_bess_setting_string != \"\":\n bess_settings_string_list.append(new_bess_setting_string)\n\n # If we get a Photovoltaic object\n\n if isinstance(i, Photovoltaic):\n new_converter_string = \"\"\n new_converter_control_setting_string = \"\"\n new_pv_setting_string = \"\"\n new_dg_generation_string = \"\"\n if (\n hasattr(i, \"name\")\n and i.name is not None\n and hasattr(i, \"connecting_element\")\n and i.connecting_element is not None\n and (\n i.connecting_element in model.model_names\n or \"load_\" + i.connecting_element in model.model_names\n )\n ):\n new_section_ID = \"{f}_{t}\".format(\n f=i.connecting_element, t=i.name\n )\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n\n new_section = (\n new_section_ID\n + \",{f},0,{t},0,\".format( # Assume only one index for the load connection point\n f=i.connecting_element, t=i.name\n )\n )\n\n new_node_string = \"{n}\".format(n=i.name)\n if hasattr(i, \"positions\") and i.positions is not None:\n try:\n new_node_string += \",\" + str(i.positions[0].long)\n except:\n new_node_string += \",0\"\n pass\n\n try:\n new_node_string += \",\" + str(i.positions[0].lat)\n except:\n new_node_string += \",0\"\n pass\n else:\n new_node_string += \",0,0\"\n self.node_string_list.append(new_node_string)\n\n phases = \"\"\n for phase in i.phases:\n if phase.default_value in [\"A\", \"B\", \"C\"]:\n 
new_section += phase.default_value\n phases += phase.default_value\n self.section_line_list.append(new_section)\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[i.feeder_name].append(\n new_section\n )\n else:\n self.section_line_feeder_mapping[i.feeder_name] = [\n new_section\n ]\n\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n self.section_feeder_mapping[i.feeder_name].append(\n new_section_ID\n )\n else:\n self.section_feeder_mapping[i.feeder_name] = [\n new_section_ID\n ]\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n ):\n self.section_headnode_mapping[\n i.feeder_name\n ] = i.substation_name\n\n new_converter_string += (\n new_section_ID + \",45,\"\n ) # 45 is the CYME code for PV devices\n new_converter_control_setting_string += (\n new_section_ID + \",45,0,0,\"\n ) # The controlindex and timetrigger indices are both zero\n new_pv_setting_string += (\n new_section_ID + \",M,\" + new_section_ID + \",DEFAULT,\"\n ) # Use the default CYME PV configuration for the moment.\n new_dg_generation_string += new_section_ID + \"45,DEFAULT,\"\n # DGGENERATIONMODEL is not included as this just sets the LoadModelName which is DEFAULT\n\n if hasattr(i, \"rated_power\") and i.rated_power is not None:\n panel_area = math.ceil(\n i.rated_power / 1000 / 0.08\n ) # Each panel produces 0.08 kw\n num_x, num_y = self.smallest_perimeter(panel_area)\n if min(num_x, num_y) == 1:\n num_x, num_y = self.smallest_perimeter(\n panel_area + 1\n ) # if area is prime\n new_pv_setting_string += str(num_x) + \",\" + str(num_y) + \",\"\n elif (\n hasattr(i, \"active_rating\") and i.active_rating is not None\n ):\n panel_area = math.ceil(\n i.active_rating / 1.1 / 1000 / 0.08\n ) # Each panel produces 0.08 kw. 
Assume 10% inverter oversize\n num_x, num_y = self.smallest_perimeter(panel_area)\n if min(num_x, num_y) == 1:\n num_x, num_y = self.smallest_perimeter(\n panel_area + 1\n ) # if area is prime\n new_pv_setting_string += str(num_x) + \",\" + str(num_y) + \",\"\n else:\n new_pv_setting_string += (\n \",,\" # This will produce garbage output power\n )\n\n if hasattr(i, \"temperature\") and i.temperature is not None:\n new_pv_setting_string += str(i.temperature)\n\n new_pv_setting_string += \",\" + phases\n\n if hasattr(i, \"active_rating\") and i.active_rating is not None:\n if (\n hasattr(i, \"reactive_rating\")\n and i.reactive_rating is not None\n ):\n new_converter_string += (\n str(\n math.sqrt(\n i.reactive_rating ** 2\n + i.active_rating ** 2\n )\n / 1000.0\n )\n + \",\"\n )\n else:\n new_converter_string += (\n str(i.active_rating / 1000.0) + \",\"\n )\n new_converter_string += str(i.active_rating / 1000.0) + \",\"\n elif hasattr(i, \"rated_power\") and i.rated_power is not None:\n if (\n hasattr(i, \"reactive_rating\")\n and i.reactive_rating is not None\n ):\n new_converter_string += (\n str(\n math.sqrt(\n i.reactive_rating ** 2\n + (i.rated_power * 1.1) ** 2\n )\n / 1000.0\n )\n + \",\"\n )\n else:\n new_converter_string += (\n str(i.rated_power * 1.1 / 1000.0) + \",\"\n )\n new_converter_string += (\n str(i.rated_power * 1.1 / 1000.0) + \",\"\n ) # Default value sets inverter to be oversized by 10%\n else:\n new_converter_string += \",,\"\n\n if (\n hasattr(i, \"reactive_rating\")\n and i.reactive_rating is not None\n ):\n new_converter_string += (\n str(i.reactive_rating / 1000.0) + \",\"\n )\n elif hasattr(i, \"rated_power\") and i.rated_power is not None:\n new_converter_string += (\n str(i.rated_power * 1.1 / 1000.0) + \",\"\n ) # Default value sets inverter to be oversized by 10% and active=reactive\n else:\n new_converter_string += \",\"\n\n if hasattr(i, \"rated_power\") and i.rated_power is not None:\n new_dg_generation_string += str(i.rated_power / 1000.0)\n elif hasattr(i, \"rated_power\") and i.rated_power is not None:\n new_dg_generation_string += str(i.rated_power / 1000.0)\n new_dg_generation_string += \",\"\n if (\n hasattr(i, \"min_powerfactor\")\n and i.min_powerfactor is not None\n ):\n new_converter_string += str(i.powerfactor * 100)\n new_dg_generation_string += str(i.powerfactor * 100)\n new_dg_generation_string += \",\"\n new_converter_string += \",\"\n if hasattr(i, \"fall_limit\") and i.fall_limit is not None:\n new_converter_string += str(i.fall_limit)\n new_converter_string += \",\"\n if hasattr(i, \"rise_limit\") and i.rise_limit is not None:\n new_converter_string += str(i.rise_limit)\n new_converter_string += \",\"\n if (hasattr(i, \"fall_limit\") and i.fall_limit is not None) or (\n hasattr(i, \"rise_limit\") and i.rise_limit is not None\n ):\n new_converter_string += \"0\" # Using units of % per minute\n\n if hasattr(i, \"control_type\") and i.control_type is not None:\n if (\n i.control_type.lower() == \"voltvar_vars_over_watts\"\n or i.control_type.lower() == \"voltvar\"\n ): # use default voltvar curve in cyme\n new_converter_control_setting_string += \"1\"\n if i.control_type.lower() == \"voltvar_watts_over_vars\":\n new_converter_control_setting_string += \"0\"\n if i.control_type.lower() == \"voltvar_fixed_vars\":\n new_converter_control_setting_string += \"2\"\n if i.control_type.lower() == \"voltvar_novars\":\n new_converter_control_setting_string += \"3\"\n if i.control_type.lower() == \"voltwatt\":\n new_converter_control_setting_string += 
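# --- Illustrative sketch, not part of the writer above ----------------------
# PV settings need NS x NP panel counts. Above, the panel count is
# ceil(rated_power_kW / 0.08) (0.08 kW per panel) and smallest_perimeter()
# turns that count into a factor pair; if the count is prime (one dimension
# would be 1), count + 1 is used instead. smallest_perimeter() itself is not
# shown in this section, so the near-square factorization below is only an
# assumed stand-in for it.
import math

def smallest_perimeter_guess(n):
    """Assumed behaviour: the factor pair of n with the smallest perimeter."""
    best = (1, n)
    for a in range(1, int(math.isqrt(n)) + 1):
        if n % a == 0:
            best = (a, n // a)   # divisors closer to sqrt(n) shrink the perimeter
    return best

def pv_panel_layout(rated_power_w):
    panels = math.ceil(rated_power_w / 1000 / 0.08)
    num_x, num_y = smallest_perimeter_guess(panels)
    if min(num_x, num_y) == 1:           # prime panel count -> pad by one panel
        num_x, num_y = smallest_perimeter_guess(panels + 1)
    return num_x, num_y

# Example: an 8 kW array -> 100 panels -> a 10 x 10 layout.
# print(pv_panel_layout(8000))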
\"5\"\n if i.control_type.lower() == \"watt_powerfactor\":\n new_converter_control_setting_string += \"6\"\n if i.control_type.lower() == \"powerfactor\":\n new_converter_control_setting_string += \"10\"\n\n new_converter_control_setting_string += \",\"\n if (\n i.control_type.lower() == \"voltvar_fixed_vars\"\n and i.var_injection is not None\n ):\n new_converter_control_setting_string += (\n str(i.var_injection) + \",2,\"\n ) # 2 is the code for the pecentage reactive power available\n else:\n new_converter_control_setting_string += \",,\"\n if (\n i.control_type.lower() == \"voltvar_watts_over_vars\"\n or i.control_type.lower() == \"voltvar_vars_over_watts\"\n ) and i.voltvar_curve is not None:\n new_converter_control_setting_string += (\n i.voltvar_curve + \",,\"\n )\n elif (\n i.control_type.lower() == \"voltwatt\"\n and i.voltwatt_curve is not None\n ):\n new_converter_control_setting_string += (\n i.voltwatt_curve + \",0,\"\n ) # 0 is the code for using the active power rating\n elif (\n i.control_type.lower() == \"watt_powerfactor\"\n and i.watt_powerfactor_curve is not None\n ):\n new_converter_control_setting_string += (\n i.watt_powerfactor_curve + \",0,\"\n ) # 0 is the code for using the active power rating\n else:\n new_converter_control_setting_string += \",,\"\n\n if (\n i.control_type.lower() == \"powerfactor\"\n and i.powerfactor is not None\n ):\n new_converter_control_setting_string += str(\n i.powerfactor\n )\n else:\n new_converter_control_setting_string += (\n \"10,,,,,100\" # Use Powerfactor as default\n )\n\n if (\n hasattr(i, \"timeseries\")\n and i.timeseries is not None\n and len(i.timeseries) > 0\n and i.timeseries[0].data_label is not None\n and i.timeseries[0].data_location is not None\n ):\n new_pv_setting_string += \",0,{loc}\".format(\n loc=i.timeseries[0].data_label\n )\n self.irradiance_profiles[\n i.timeseries[0].data_label\n ] = i.timeseries[0].data_location\n else:\n new_pv_setting_string += \",1,\"\n\n else:\n if hasattr(i, \"name\"):\n logger.warning(\n \"PV \"\n + i.name\n + \" was not connected and has not been written to CYME\"\n )\n else:\n logger.warning(\"PV element is unnamed\")\n\n if new_converter_string != \"\":\n converter_string_list.append(new_converter_string)\n if new_converter_control_setting_string != \"\":\n converter_control_string_list.append(\n new_converter_control_setting_string\n )\n if new_pv_setting_string != \"\":\n pv_settings_string_list.append(new_pv_setting_string)\n if new_dg_generation_string != \"\":\n dg_generation_string_list.append(new_dg_generation_string)\n\n # If we get a Capacitor object\n #\n if isinstance(i, Capacitor):\n\n # Empty new capacitor string\n new_capacitor_line = \"\"\n new_capacitor_object_line = \"\"\n\n # Connecting element\n # We need to create a new section since there is no physical line connecting\n # capacitors to the rest of the feeder in DiTTo, but CYME needs a section for this\n new_section = None\n if (\n hasattr(i, \"name\")\n and i.name is not None\n and hasattr(i, \"connecting_element\")\n and i.connecting_element is not None\n ):\n try:\n new_section_ID = \"{f}_{t}\".format(\n f=i.connecting_element, t=i.name\n )\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n new_section = (\n new_section_ID\n + \",{f},0,{t},0,\".format( # assume only one connection point for capacitors\n f=i.connecting_element, t=i.name\n )\n )\n new_capacitor_line += new_section_ID\n if i.connecting_element not 
in self.nodeID_list:\n self.nodeID_list.append(i.connecting_element)\n self.node_string_list.append(\n \"{},0,0\".format(i.connecting_element)\n )\n if i.name not in self.nodeID_list:\n if hasattr(i, \"positions\") and i.positions is not None:\n try:\n X = i.positions[0].long\n Y = i.positions[0].lat\n except:\n X = 0\n Y = 0\n pass\n else:\n X = 0\n Y = 0\n self.nodeID_list.append(i.name)\n self.node_string_list.append(\n \"{name},{X},{Y}\".format(name=i.name, X=X, Y=Y)\n )\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n self.section_feeder_mapping[i.feeder_name].append(\n new_section_ID\n )\n else:\n self.section_feeder_mapping[i.feeder_name] = [\n new_section_ID\n ]\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n ):\n self.section_headnode_mapping[\n i.feeder_name\n ] = i.substation_name\n except:\n continue\n\n # Connection type\n if hasattr(i, \"connection_type\") and i.connection_type is not None:\n try:\n new_capacitor_line += \",\" + i.connection_type\n except:\n new_capacitor_line += \",\"\n pass\n else:\n new_capacitor_line += \",\"\n\n # KVAR and Phase\n phases = []\n if (\n hasattr(i, \"phase_capacitors\")\n and i.phase_capacitors is not None\n ):\n total_var = 0\n one_var = 0\n switched_vars = {}\n # new_capacitor_line+=','\n for phase_capacitor in i.phase_capacitors:\n if (\n hasattr(phase_capacitor, \"phase\")\n and phase_capacitor.phase is not None\n ):\n phases.append(phase_capacitor.phase)\n if new_section is not None:\n new_section += str(phase_capacitor.phase)\n\n if (\n hasattr(phase_capacitor, \"var\")\n and phase_capacitor.var is not None\n ):\n total_var += phase_capacitor.var\n if (\n phase_capacitor.var is not None\n and phase_capacitor.phase is not None\n ):\n switched_vars[\n phase_capacitor.phase.upper()\n ] = phase_capacitor.var\n if \"A\" in switched_vars:\n new_capacitor_line += \",\" + str(\n switched_vars[\"A\"] * 10 ** -3\n )\n one_var = switched_vars[\"A\"]\n else:\n new_capacitor_line += \",\"\n if \"B\" in switched_vars:\n new_capacitor_line += \",\" + str(\n switched_vars[\"B\"] * 10 ** -3\n )\n one_var = switched_vars[\"B\"]\n else:\n new_capacitor_line += \",\"\n if \"C\" in switched_vars:\n new_capacitor_line += \",\" + str(\n switched_vars[\"C\"] * 10 ** -3\n )\n one_var = switched_vars[\"C\"]\n else:\n new_capacitor_line += \",\"\n if total_var > 0:\n new_capacitor_object_line += str(one_var * 10 ** -3) + \",\"\n else:\n new_capacitor_object_line += \",\"\n pass\n\n # KV\n if hasattr(i, \"nominal_voltage\") and i.nominal_voltage is not None:\n try:\n if len(phases) == 1:\n new_capacitor_line += \",\" + str(\n i.nominal_voltage * 10 ** -3\n )\n new_capacitor_object_line += (\n str(i.nominal_voltage * 10 ** -3) + \",\"\n )\n else:\n new_capacitor_line += \",\" + str(\n i.nominal_voltage * 10 ** -3 / math.sqrt(3)\n )\n new_capacitor_object_line += (\n str(i.nominal_voltage * 10 ** -3 / math.sqrt(3))\n + \",\"\n )\n except:\n new_capacitor_line += \",\"\n new_capacitor_object_line += \",\"\n pass\n\n if hasattr(i, \"mode\") and i.mode is not None:\n if i.mode.lower() == \"currentFlow\":\n new_capacitor_line += \",2\"\n elif i.mode.lower() == \"voltage\":\n new_capacitor_line += \",1\"\n elif i.mode.lower() == \"activepower\":\n new_capacitor_line += \",4\"\n elif i.mode.lower() == \"reactivepower\":\n new_capacitor_line += \",7\"\n elif i.mode.lower() == \"timescheduled\":\n new_capacitor_line += \",6\"\n else:\n new_capacitor_line += \",0\"\n else:\n 
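# --- Illustrative sketch, not part of the writer above ----------------------
# The capacitor control-mode branch above maps DiTTo modes to CYME Control
# codes. Note that i.mode.lower() can only ever equal an all-lowercase key,
# so the table here uses "currentflow" (the mixed-case "currentFlow" literal
# above would never match).
CYME_CAPACITOR_CONTROL_CODES = {
    "currentflow": "2",
    "voltage": "1",
    "activepower": "4",
    "reactivepower": "7",
    "timescheduled": "6",
}

def cyme_capacitor_control(mode):
    if mode is None:
        return ""
    return CYME_CAPACITOR_CONTROL_CODES.get(mode.lower(), "0")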
new_capacitor_line += \",\"\n\n if hasattr(i, \"low\") and i.low is not None:\n new_capacitor_line += (\n \",\" + str(i.low) + \",\" + str(i.low) + \",\" + str(i.low)\n )\n else:\n new_capacitor_line += \",,,\"\n\n if hasattr(i, \"high\") and i.high is not None:\n new_capacitor_line += (\n \",\" + str(i.high) + \",\" + str(i.high) + \",\" + str(i.high)\n )\n else:\n new_capacitor_line += \",,,\"\n\n found = False\n for k, d in self.capcodes.items():\n if d == new_capacitor_object_line:\n new_capacitor_line += (\n \",\" + new_section_ID + \",capacitor_\" + str(k)\n )\n found = True\n if not found:\n ID_cap += 1\n self.capcodes[ID_cap] = new_capacitor_object_line\n new_capacitor_line += (\n \",\" + new_section_ID + \",capacitor_\" + str(ID_cap)\n )\n\n new_capacitor_line += \",S,0\" # Location and ConnectionStatus\n\n if new_capacitor_line != \"\":\n capacitor_string_list.append(new_capacitor_line)\n\n if new_section is not None:\n self.section_line_list.append(new_section)\n # If the object is inside of a substation...\n if hasattr(i, \"is_substation\") and i.is_substation == 1:\n # ...it should have the name of the substation specified in the 'substation_name' attribute\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n and i.substation_name != \"\"\n ):\n # Add 'substation_' prefix to easily distinguish substation from feeders or transmission lines\n ff_name = \"substation_{}\".format(i.substation_name)\n self.network_have_substations = True\n # If the object is not inside of a substation, then use the feeder_name attribute if it exists\n elif (\n hasattr(i, \"feeder_name\")\n and i.feeder_name is not None\n and i.feeder_name != \"\"\n ):\n ff_name = i.feeder_name\n\n if ff_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[ff_name].append(\n new_section\n )\n else:\n self.section_line_feeder_mapping[ff_name] = [new_section]\n\n # If we get a Regulator\n #\n if isinstance(i, Regulator) and not (\n hasattr(i, \"ltc\") and i.ltc is not None and i.ltc\n ):\n\n new_regulator_string = \"\"\n new_regulator_object_line = \"\"\n\n # We need to get bus1 and bus2 to create the section bus1_bus2\n new_section = None\n new_section_ID = None\n if (\n hasattr(i, \"from_element\")\n and i.from_element is not None\n and hasattr(i, \"to_element\")\n and i.to_element is not None\n ):\n # try:\n from_index = 0\n to_index = 0\n if (\n hasattr(i, \"from_element_connection_index\")\n and i.from_element_connection_index is not None\n ):\n from_index = i.from_element_connection_index\n if (\n hasattr(i, \"to_element_connection_index\")\n and i.to_element_connection_index is not None\n ):\n to_index = i.to_element_connection_index\n new_section_ID = \"{f}_{t}\".format(\n f=i.from_element, t=i.to_element\n )\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n new_section = new_section_ID + \",{f},{fi},{t},{ti},\".format(\n f=i.from_element, fi=from_index, t=i.to_element, ti=to_index\n )\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n self.section_feeder_mapping[i.feeder_name].append(\n new_section_ID\n )\n else:\n self.section_feeder_mapping[i.feeder_name] = [\n new_section_ID\n ]\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n ):\n self.section_headnode_mapping[\n i.feeder_name\n ] = i.substation_name\n # except:\n # pass\n\n # If we have a regulator with two windings 
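# --- Illustrative sketch, not part of the writer above ----------------------
# Capacitors, transformers and regulators all share the same de-duplication
# pattern in this writer: serialize the equipment definition into a CSV
# fragment, reuse the code of an identical definition already registered, or
# mint a new "<prefix>_<n>" code. A generic version of that pattern:
def equipment_code(definition, registry, prefix):
    """registry maps int ID -> definition string (as self.capcodes does above)."""
    for eq_id, known in registry.items():
        if known == definition:
            return "{p}_{i}".format(p=prefix, i=eq_id)
    new_id = max(registry) + 1 if registry else 1
    registry[new_id] = definition
    return "{p}_{i}".format(p=prefix, i=new_id)

# Example:
# codes = {}
# equipment_code("600,12.47,", codes, "capacitor")   # -> 'capacitor_1'
# equipment_code("600,12.47,", codes, "capacitor")   # -> 'capacitor_1' (reused)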
that have different\n # voltages, we create a new section and a new transformer connected to it\n # in order to have the voltage change\n winding1 = None\n winding2 = None\n from_element = None\n to_element = None\n from_index = 0\n to_index = 0\n windings_local = []\n if hasattr(i, \"windings\") and i.windings is not None:\n if (\n len(i.windings) >= 2\n and hasattr(i.windings[0], \"nominal_voltage\")\n and hasattr(i.windings[1], \"nominal_voltage\")\n and i.windings[0].nominal_voltage is not None\n and i.windings[1].nominal_voltage is not None\n ):\n winding1 = i.windings[0]\n winding2 = i.windings[1]\n if (\n hasattr(i, \"from_element\")\n and i.from_element is not None\n and hasattr(i, \"to_element\")\n and i.to_element is not None\n ):\n from_element = i.from_element\n to_element = i.to_element\n\n if (\n hasattr(i, \"from_element_connection_index\")\n and i.from_element_connection_index is not None\n ):\n from_index = i.from_element_connection_index\n if (\n hasattr(i, \"to_element_connection_index\")\n and i.to_element_connection_index is not None\n ):\n to_index = i.to_element_connection_index\n if winding1 is not None and winding2 is not None:\n windings_local = [winding1, winding2]\n if winding1.nominal_voltage != winding2.nominal_voltage:\n new_trans_sectionID = \"{f}_{t}\".format(\n f=from_element, t=to_element + \"_reg\"\n )\n if len(new_trans_sectionID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_trans_sectionID.encode(\"utf-8\"))\n new_trans_sectionID = hasher.hexdigest()\n new_trans_section = (\n new_trans_sectionID\n + \",{f},{fi},{t},{ti},\".format(\n f=from_element,\n fi=from_index,\n t=to_element + \"_reg\",\n ti=to_index,\n )\n )\n new_section_ID = \"{f}_{t}\".format(\n f=to_element + \"_reg\", t=to_element\n )\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n new_section = new_section_ID + \",{f},{fi},{t},{ti},\".format(\n f=to_element + \"_reg\",\n fi=from_index,\n t=to_element,\n ti=to_index,\n )\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n self.section_feeder_mapping[i.feeder_name].append(\n new_section_ID\n )\n else:\n self.section_feeder_mapping[i.feeder_name] = [\n new_section_ID\n ]\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n ):\n self.section_headnode_mapping[\n i.feeder_name\n ] = i.substation_name\n self.nodeID_list.append(to_element + \"_reg\")\n self.node_string_list.append(\n \"{},0,0\".format(to_element + \"_reg\")\n )\n new_transformer_line = \"\"\n new_transformer_object_line = \"\"\n phase_on = \"\"\n if (\n hasattr(winding1, \"phase_windings\")\n and winding1.phase_windings is not None\n ):\n for phase_winding in winding1.phase_windings:\n if new_trans_section is not None:\n if (\n hasattr(phase_winding, \"phase\")\n and phase_winding.phase is not None\n ):\n new_trans_section += str(\n phase_winding.phase\n )\n phase_on += str(phase_winding.phase)\n\n if (\n new_trans_section is not None\n and new_trans_section not in self.section_line_list\n ):\n self.section_line_list.append(new_trans_section)\n # If the object is inside of a substation...\n if hasattr(i, \"is_substation\") and i.is_substation == 1:\n # ...it should have the name of the substation specified in the 'substation_name' attribute\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n and i.substation_name != \"\"\n ):\n # Add 'substation_' prefix to 
easily distinguish substation from feeders or transmission lines\n ff_name = \"substation_{}\".format(\n i.substation_name\n )\n self.network_have_substations = True\n # If the object is not inside of a substation, then use the feeder_name attribute if it exists\n elif (\n hasattr(i, \"feeder_name\")\n and i.feeder_name is not None\n and i.feeder_name != \"\"\n ):\n ff_name = i.feeder_name\n\n if ff_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[ff_name].append(\n new_section\n )\n else:\n self.section_line_feeder_mapping[ff_name] = [\n new_section\n ]\n\n if (\n hasattr(winding1, \"phase_windings\")\n and winding1.phase_windings is not None\n ):\n try:\n if len(winding1.phase_windings) == 1:\n TYPE = 1\n elif len(winding1.phase_windings) == 3:\n TYPE = 2\n else:\n TYPE = 3\n except:\n TYPE = 3\n pass\n else:\n TYPE = 3\n\n try:\n new_transformer_line += new_trans_sectionID\n except:\n pass\n\n # CoordX and CoordY\n if hasattr(i, \"positions\") and i.positions is not None:\n try:\n new_transformer_line += \",\" + str(\n i.positions[0].long\n )\n new_transformer_line += \",\" + str(\n i.positions[0].lat\n )\n except:\n new_transformer_line += \",,\"\n pass\n\n CONN = \"\"\n try:\n new_transformer_line += (\n \",\"\n + self.transformer_connection_configuration_mapping(\n winding1.connection_type,\n winding2.connection_type,\n \"transformer_settings\",\n )\n )\n CONN = self.transformer_connection_configuration_mapping(\n winding1.connection_type, winding2.connection_type\n )\n except:\n new_transformer_line += \",\"\n pass\n\n phase_shift = 0\n if CONN == \"0\" or CONN == \"4\":\n phase_shift = 0\n if CONN == \"1\" or CONN == \"2\":\n phase_shift = 1\n\n try:\n new_transformer_line += \",\" + phase_on\n except:\n new_transformer_line += \",\"\n pass\n\n if (\n hasattr(winding1, \"resistance\")\n and hasattr(winding2, \"resistance\")\n and winding1.resistance is not None\n and winding2.resistance is not None\n ):\n # Resistance is given as a percentage of the KVA of the corresponding winding\n try:\n RH = (\n winding1.resistance\n * 10 ** -2\n * winding1.rated_power\n * 10 ** -3\n )\n RL = (\n winding2.resistance\n * 10 ** -2\n * winding2.rated_power\n * 10 ** -3\n )\n except:\n RH = 0\n RL = 0\n pass\n\n # We have ZHL=(RH+RL)+XHLj\n #\n # Compute the X over R ratio\n try:\n XR = (XHL) / (RH + RL)\n XR0 = XR\n except:\n XR = 0\n XR0 = 0\n pass\n #\n # |ZHL|=sqrt((RH+RL)^2 + XHL^2)\n try:\n _ZHL_ = math.sqrt((RH + RL) ** 2 + XHL ** 2)\n except:\n _ZHL_ = 0\n pass\n\n #\n # Expressed in percentage of the KVA base\n try:\n Z1 = _ZHL_ * 100.0 / (winding1.rated_power * 10 ** -3)\n except:\n Z1 = 0\n pass\n Z0 = Z1\n\n # Total kva\n try:\n KVA = windings_local[0].rated_power\n except:\n pass\n\n for w, winding in enumerate(windings_local):\n\n if hasattr(winding, \"nominal_voltage\"):\n try:\n if w == 0:\n KVLLprim = (\n winding.nominal_voltage * 10 ** -3\n )\n if transformer_object.is_center_tap == True:\n KVLLprim = round(\n KVLLprim / (3 ** 0.5), 2\n ) # produces output in L-N format if center-tap rather than L-L\n VoltageUnit = (\n 1 # Voltage declared in KV, not in KVLL\n )\n elif w == 1:\n KVLLsec = winding.nominal_voltage * 10 ** -3\n VoltageUnit = (\n 1 # Voltage declared in KV, not in KVLL\n )\n except:\n pass\n # NoLoadLosses\n if (\n hasattr(i, \"noload_loss\")\n and i.noload_loss is not None\n ):\n # TODO: Make sure noloadlosses is in % in DiTTo, or change what is next.\n NoLoadLosses = i.noload_loss / 100.0 * KVA\n else:\n NoLoadLosses = \"\"\n\n 
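# --- Illustrative sketch, not part of the writer above ----------------------
# The impedance bookkeeping in this block converts winding data into CYME
# equipment fields: winding resistances and the H-L reactance are percentages
# of the winding kVA, ZHL = (RH + RL) + j*XHL, the X/R ratio is XHL/(RH + RL),
# and Z1 is |ZHL| expressed again as a percent of the first winding's kVA.
import math

def two_winding_impedance(r1_pct, r2_pct, xhl_pct, kva1, kva2):
    rh = r1_pct / 100.0 * kva1
    rl = r2_pct / 100.0 * kva2
    xhl = xhl_pct / 100.0 * kva1
    xr = xhl / (rh + rl) if (rh + rl) != 0 else 0
    z1 = math.sqrt((rh + rl) ** 2 + xhl ** 2) * 100.0 / kva1 if kva1 else 0
    return xr, z1   # the writer reuses these values for XR0 and Z0

# Example: 1 % + 1 % resistance and 4 % XHL on 1000 kVA -> XR = 2.0, Z1 ~ 4.47 %.
# print(two_winding_impedance(1.0, 1.0, 4.0, 1000, 1000))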
new_transformer_object_line += \"{type},{kva},{voltageunit},{kvllprim},{kvllsec},{Z1},{Z0},{XR},{XR0},{Conn},{WindingType},{noloadloss},{phaseshift},{isltc}\".format(\n type=TYPE,\n kva=KVA,\n voltageunit=VoltageUnit,\n kvllprim=KVLLprim,\n kvllsec=KVLLsec,\n Conn=CONN,\n Z1=Z1,\n Z0=Z0,\n XR=XR,\n XR0=XR0,\n WindingType=1,\n noloadloss=NoLoadLosses,\n phaseshift=phase_shift,\n isltc=0,\n )\n\n found = False\n for k, d in self.two_windings_trans_codes.items():\n if d == new_transformer_object_line:\n new_transformer_line += (\n \",transformer_\"\n + str(k)\n + \",transformer_\"\n + str(k)\n )\n found = True\n if not found:\n ID_trans += 1\n self.two_windings_trans_codes[\n ID_trans\n ] = new_transformer_object_line\n new_transformer_line += (\n \",transformer_\"\n + str(ID_trans)\n + \",transformer_\"\n + str(ID_trans)\n )\n\n new_transformer_line += \",{PhaseShiftType},M,100,100,None,0\".format(\n PhaseShiftType=phase_shift\n ) # Phase shift, Location, PrimTap,SecondaryTap, ODPrimPh, and ConnectionStatus\n\n if new_transformer_line != \"\":\n two_windings_transformer_string_list.append(\n new_transformer_line\n )\n\n if hasattr(winding1, \"phase_windings\"):\n for phase_winding in winding1.phase_windings:\n try:\n new_section += str(phase_winding.phase)\n except:\n pass\n\n if (\n new_section is not None\n and new_section not in self.section_line_list\n ):\n self.section_line_list.append(new_section)\n # If the object is inside of a substation...\n if hasattr(i, \"is_substation\") and i.is_substation == 1:\n # ...it should have the name of the substation specified in the 'substation_name' attribute\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n and i.substation_name != \"\"\n ):\n # Add 'substation_' prefix to easily distinguish substation from feeders or transmission lines\n ff_name = \"substation_{}\".format(i.substation_name)\n self.network_have_substations = True\n # If the object is not inside of a substation, then use the feeder_name attribute if it exists\n elif (\n hasattr(i, \"feeder_name\")\n and i.feeder_name is not None\n and i.feeder_name != \"\"\n ):\n ff_name = i.feeder_name\n\n if ff_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[ff_name].append(\n new_section\n )\n else:\n self.section_line_feeder_mapping[ff_name] = [new_section]\n\n try:\n new_regulator_string += new_section_ID\n except:\n pass\n\n # CoordX and CoordY\n if hasattr(i, \"positions\") and i.positions is not None:\n try:\n new_regulator_string += \",\" + str(i.positions[0].long)\n new_regulator_string += \",\" + str(i.positions[0].lat)\n except:\n new_regulator_string += \",,\"\n pass\n else:\n new_regulator_string += \",,\"\n\n # if hasattr(i, \"pt_phase\") and i.pt_phase is not None:\n # try:\n # new_regulator_string += \",\" + str(i.pt_phase)\n # except:\n # new_regulator_string += \",\"\n # pass\n # else:\n # new_regulator_string += \",\"\n\n _KVA = 0\n _KVLN = 0\n _Rset = {\"A\": 0, \"B\": 0, \"C\": 0}\n _Xset = {\"A\": 0, \"B\": 0, \"C\": 0}\n _regphases = []\n if len(windings_local) >= 2:\n try:\n _KVA = windings_local[0].rated_power * 10 ** -3\n except:\n pass\n _KVLN = (\n windings_local[-1].nominal_voltage / math.sqrt(3) * 10 ** -3\n )\n\n if (\n hasattr(winding1, \"phase_windings\")\n and winding1.phase_windings is not None\n ):\n for phase_winding in winding1.phase_windings:\n try:\n _regphases.append(phase_winding.phase)\n _Rset[\n phase_winding.phase\n ] = phase_winding.compensator_r\n _Xset[\n phase_winding.phase\n ] = 
phase_winding.compensator_x\n except:\n pass\n\n new_regulator_string += \",\"\n for phase in _regphases:\n new_regulator_string += phase\n\n _band = None\n if hasattr(i, \"bandwidth\") and i.bandwidth is not None:\n try:\n new_regulator_string += \",\" + str(i.bandwidth)\n _band = str(i.bandwidth)\n except:\n new_regulator_string += \",\"\n pass\n else:\n new_regulator_string += \",\"\n\n _CT = None\n if hasattr(i, \"ct_ratio\") and i.ct_ratio is not None:\n try:\n new_regulator_string += \",\" + str(i.ct_ratio)\n _CT = str(i.ct_ratio)\n except:\n new_regulator_string += \",\"\n else:\n new_regulator_string += \",\"\n\n _PT = None\n if hasattr(i, \"pt_ratio\") and i.pt_ratio is not None:\n try:\n new_regulator_string += \",\" + str(i.pt_ratio)\n _PT = str(i.pt_ratio)\n except:\n new_regulator_string += \",\"\n else:\n new_regulator_string += \",\"\n\n if hasattr(i, \"setpoint\") and i.setpoint is not None:\n scaled_setpoint = i.setpoint * 120 / 100.0\n try:\n new_regulator_string += (\n \",\"\n + str(scaled_setpoint)\n + \",\"\n + str(scaled_setpoint)\n + \",\"\n + str(scaled_setpoint)\n ) # Assume same setpoint on all phases\n except:\n new_regulator_string += \",,,\"\n else:\n new_regulator_string += \",,,\"\n\n new_regulator_object_line = \"{kva},{band},{ct},{pt},{Type},{KVLN},{MaxBuck},{MaxBoost},{Taps},{Reversible}\".format(\n kva=_KVA,\n band=_band,\n ct=_CT,\n pt=_PT,\n Type=0,\n KVLN=_KVLN,\n MaxBuck=10,\n MaxBoost=10,\n Taps=32,\n Reversible=0,\n )\n\n found = False\n for k, d in self.reg_codes.items():\n if d == new_regulator_object_line:\n new_regulator_string += \",regulator_{id},{secid}\".format(\n id=k, secid=new_section_ID\n )\n found = True\n if not found:\n ID_reg += 1\n self.reg_codes[ID_reg] = new_regulator_object_line\n new_regulator_string += \",regulator_{id},{secid}\".format(\n id=ID_reg, secid=new_section_ID\n )\n\n # Location, MaxBuck, MaxBoost, SettingOption, RsetA, RsetB, RsetC, XsetA,\n # XsetB, XsetC, TapA, TapB, TapC, and ConnectionStatus\n new_regulator_string += \",M,10,10,T,{RsetA},{RsetB},{RsetC},{XsetA},{XsetB},{XsetC},0,0,0,0\".format(\n RsetA=_Rset[\"A\"],\n RsetB=_Rset[\"B\"],\n RsetC=_Rset[\"C\"],\n XsetA=_Xset[\"A\"],\n XsetB=_Xset[\"B\"],\n XsetC=_Xset[\"C\"],\n )\n\n if new_regulator_string != \"\":\n regulator_string_list.append(new_regulator_string)\n\n # If we get a Transformer object\n #\n if (\n isinstance(i, PowerTransformer)\n and (i.name not in self.transformers_to_ignore)\n ) or (\n isinstance(i, Regulator)\n and (hasattr(i, \"ltc\") and i.ltc is not None and i.ltc == 1)\n ):\n\n transformer_object = i\n\n # These are only set if it's an LTC\n Setpoint = \"\"\n ControlType = \"\"\n LowerBandwidth = \"\"\n UpperBandwidth = \"\"\n MaxBoost = \"\"\n MaxBuck = \"\"\n is_ltc = 0\n if isinstance(i, Regulator):\n is_ltc = 1\n Setpoint = i.setpoint\n if hasattr(i, \"connected_transformer\"):\n transformer_object = model[i.connected_transformer]\n ControlType = \"0\"\n else:\n raise ValueError(\n \"An LTC regulator needs a connecting transformer\"\n )\n\n # We need to get bus1 and bus2 to create the section bus1_bus2\n new_section = None\n new_section_ID = None\n if (\n hasattr(transformer_object, \"from_element\")\n and transformer_object.from_element is not None\n and hasattr(transformer_object, \"to_element\")\n and transformer_object.to_element is not None\n ):\n from_index = 0\n to_index = 0\n if (\n hasattr(transformer_object, \"from_element_connection_index\")\n and transformer_object.from_element_connection_index\n is not None\n ):\n 
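# --- Illustrative sketch, not part of the writer above ----------------------
# Regulator settings above: the DiTTo setpoint (a percentage of nominal) is
# written on a 120 V base (setpoint * 120 / 100) and repeated for each phase,
# while per-phase compensator R/X values fill RsetA..C and XsetA..C.
def regulator_vset(setpoint_pct, phases=("A", "B", "C")):
    vset = setpoint_pct * 120.0 / 100.0
    return {ph: vset for ph in phases}

# Example: a 102 % setpoint becomes 122.4 V on every regulated phase.
# print(regulator_vset(102))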
from_index = (\n transformer_object.from_element_connection_index\n )\n if (\n hasattr(transformer_object, \"to_element_connection_index\")\n and transformer_object.to_element_connection_index\n is not None\n ):\n to_index = transformer_object.to_element_connection_index\n\n new_section_ID = \"{f}_{t}\".format(\n f=transformer_object.from_element,\n t=transformer_object.to_element,\n )\n\n if len(new_section_ID) > 64:\n hasher = hashlib.sha1()\n hasher.update(new_section_ID.encode(\"utf-8\"))\n new_section_ID = hasher.hexdigest()\n new_section = new_section_ID + \",{f},{fi},{t},{ti},\".format(\n f=transformer_object.from_element,\n fi=from_index,\n t=transformer_object.to_element,\n ti=to_index,\n )\n # If it's a regulator, use the regulator object to find the feeder and substation if they're set\n if hasattr(i, \"feeder_name\") and i.feeder_name is not None:\n if i.feeder_name in self.section_feeder_mapping:\n self.section_feeder_mapping[i.feeder_name].append(\n new_section_ID\n )\n else:\n self.section_feeder_mapping[i.feeder_name] = [\n new_section_ID\n ]\n if (\n hasattr(i, \"substation_name\")\n and i.substation_name is not None\n ):\n self.section_headnode_mapping[\n i.feeder_name\n ] = i.substation_name\n\n # Set Regulator attributes if its an LTC\n\n if hasattr(i, \"bandwidth\") and i.bandwidth is not None:\n bandcenter = 0\n if hasattr(i, \"bandcenter\") and i.bandcenter is not None:\n bandcenter = i.bandcenter\n LowerBandwidth = str(abs(bandcenter - i.bandwidth))\n UpperBandwidth = str(abs(bandcenter + i.bandwidth))\n\n if hasattr(i, \"highstep\") and i.highstep is not None:\n MaxBoost = str(i.highstep)\n\n if hasattr(i, \"highstep\") and i.highstep is not None:\n MaxBuck = str(i.lowstep)\n\n # Find out if we have a two or three windings transformer\n if (\n hasattr(transformer_object, \"windings\")\n and transformer_object.windings is not None\n ):\n\n phase_on = \"\"\n if (\n hasattr(transformer_object.windings[0], \"phase_windings\")\n and transformer_object.windings[0].phase_windings\n is not None\n ):\n for phase_winding in transformer_object.windings[\n 0\n ].phase_windings:\n if new_section is not None:\n if (\n hasattr(phase_winding, \"phase\")\n and phase_winding.phase is not None\n ):\n new_section += str(phase_winding.phase)\n phase_on += str(phase_winding.phase)\n\n if (\n new_section is not None\n and new_section not in self.section_line_list\n ):\n self.section_line_list.append(new_section)\n # If the object is inside of a substation...\n if (\n hasattr(transformer_object, \"is_substation\")\n and transformer_object.is_substation == 1\n ):\n # ...it should have the name of the substation specified in the 'substation_name' attribute\n if (\n hasattr(transformer_object, \"substation_name\")\n and transformer_object.substation_name is not None\n and transformer_object.substation_name != \"\"\n ):\n # Add 'substation_' prefix to easily distinguish substation from feeders or transmission lines\n ff_name = \"substation_{}\".format(\n transformer_object.substation_name\n )\n self.network_have_substations = True\n # If the object is not inside of a substation, then use the feeder_name attribute if it exists\n elif (\n hasattr(transformer_object, \"feeder_name\")\n and transformer_object.feeder_name is not None\n and transformer_object.feeder_name != \"\"\n ):\n ff_name = transformer_object.feeder_name\n\n if ff_name in self.section_line_feeder_mapping:\n self.section_line_feeder_mapping[ff_name].append(\n new_section\n )\n else:\n self.section_line_feeder_mapping[ff_name] = 
[\n new_section\n ]\n\n # Case 1: Two Windings\n #\n if (\n len(transformer_object.windings) == 2\n or transformer_object.is_center_tap == True\n ):\n # Empty new transformer string\n new_transformer_line = \"\"\n new_transformer_object_line = \"\"\n\n if (\n hasattr(\n transformer_object.windings[0], \"phase_windings\"\n )\n and transformer_object.windings[0].phase_windings\n is not None\n ):\n try:\n if (\n transformer_object.is_center_tap == True\n and len(\n transformer_object.windings[\n 0\n ].phase_windings\n )\n == 1\n ):\n TYPE = 4\n elif (\n len(\n transformer_object.windings[\n 0\n ].phase_windings\n )\n == 1\n ):\n TYPE = 1\n elif (\n len(\n transformer_object.windings[\n 0\n ].phase_windings\n )\n == 3\n ):\n TYPE = 2\n else:\n TYPE = 3\n except:\n TYPE = 3\n pass\n else:\n TYPE = 3\n\n try:\n new_transformer_line += new_section_ID\n except:\n pass\n\n # CoordX and CoordY\n if (\n hasattr(transformer_object, \"positions\")\n and transformer_object.positions is not None\n ):\n try:\n new_transformer_line += \",\" + str(\n transformer_object.positions[0].long\n )\n new_transformer_line += \",\" + str(\n transformer_object.positions[0].lat\n )\n except:\n new_transformer_line += \",,\"\n pass\n\n CONN = \"\"\n try:\n if TYPE == 4:\n CONN = \"0\" # Center Tap not a configuration for transformer object. Leave as Y-Y\n new_transformer_line += \",15\"\n else:\n new_transformer_line += (\n \",\"\n + self.transformer_connection_configuration_mapping(\n transformer_object.windings[\n 0\n ].connection_type,\n transformer_object.windings[\n 1\n ].connection_type,\n \"transformer_settings\",\n )\n )\n CONN = self.transformer_connection_configuration_mapping(\n transformer_object.windings[0].connection_type,\n transformer_object.windings[1].connection_type,\n )\n except:\n new_transformer_line += \",\"\n pass\n\n phase_shift = 0\n if CONN == \"0\" or CONN == \"4\":\n phase_shift = 0\n if CONN == \"1\" or CONN == \"2\":\n phase_shift = 1\n\n try:\n new_transformer_line += \",\" + phase_on\n except:\n new_transformer_line += \",\"\n pass\n\n # Compute the impedances of center tap transformers. 
These should be three windings, one phase transformers in DiTTo\n # with the is_center_tap flag set to 1\n if TYPE == 4:\n if (\n hasattr(transformer_object, \"reactances\")\n and transformer_object.reactances is not None\n and len(i.reactances) == 3\n ):\n XHL, XHT, XLT = transformer_object.reactances\n if (\n hasattr(\n transformer_object.windings[0], \"resistance\"\n )\n and hasattr(\n transformer_object.windings[1], \"resistance\"\n )\n and hasattr(\n transformer_object.windings[2], \"resistance\"\n )\n and transformer_object.windings[0].resistance\n is not None\n and transformer_object.windings[1].resistance\n is not None\n and transformer_object.windings[2].resistance\n is not None\n ):\n R0, R1, R2 = [\n w.resistance\n for w in transformer_object.windings\n ]\n KVA_BASE = (\n transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n XR, Z1 = self.get_center_tap_impedances(\n R0, R1, R2, XHL, XHT, XLT, KVA_BASE\n )\n XR0 = XR\n Z0 = Z1\n\n else:\n if (\n hasattr(transformer_object, \"reactances\")\n and transformer_object.reactances is not None\n and len(transformer_object.reactances) == 1\n ):\n XHL_perct = transformer_object.reactances[0]\n # XHL is in percentage of the KVA of the FIRST winding\n try:\n XHL = (\n XHL_perct\n * 10 ** -2\n * transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n except:\n XHL = 0\n pass\n\n if (\n hasattr(\n transformer_object.windings[0], \"resistance\"\n )\n and hasattr(\n transformer_object.windings[1], \"resistance\"\n )\n and transformer_object.windings[0].resistance\n is not None\n and transformer_object.windings[1].resistance\n is not None\n ):\n # Resistance is given as a percentage of the KVA of the corresponding winding\n try:\n RH = (\n transformer_object.windings[0].resistance\n * 10 ** -2\n * transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n RL = (\n transformer_object.windings[1].resistance\n * 10 ** -2\n * transformer_object.windings[1].rated_power\n * 10 ** -3\n )\n except:\n RH = 0\n RL = 0\n pass\n\n # We have ZHL=(RH+RL)+XHLj\n #\n # Compute the X over R ratio\n try:\n XR = (XHL) / (RH + RL)\n XR0 = XR\n except:\n XR = 0\n XR0 = 0\n pass\n #\n # |ZHL|=sqrt((RH+RL)^2 + XHL^2)\n try:\n _ZHL_ = math.sqrt((RH + RL) ** 2 + XHL ** 2)\n except:\n _ZHL_ = 0\n pass\n\n #\n # Expressed in percentage of the KVA base\n try:\n Z1 = (\n _ZHL_\n * 100.0\n / (\n transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n )\n except:\n Z1 = 0\n pass\n Z0 = Z1\n\n # Total kva\n try:\n KVA = (\n transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n except:\n KVA = \"DEFAULT\"\n pass\n\n for w, winding in enumerate(transformer_object.windings):\n # try:\n # KVA+=winding.rated_power*10**-3\n # except:\n # pass\n\n if hasattr(winding, \"nominal_voltage\"):\n # If we have a one phase transformer or a delta transformer, we specify voltage in KV, not in KVLL\n # This is done by setting the voltageUnit keyword to 1\n if (\n len(\n transformer_object.windings[\n 0\n ].phase_windings\n )\n <= 1\n or len(\n transformer_object.windings[\n 0\n ].phase_windings\n )\n == 2\n ):\n if w == 0:\n KVLLprim = (\n winding.nominal_voltage * 10 ** -3\n )\n # if transformer_object.is_center_tap == True:\n # KVLLprim = round(\n # KVLLprim / (3 ** 0.5), 2\n # ) # produces output in L-N format if center-tap rather than L-L\n voltageUnit = (\n 1 # Voltage declared in KV, not in KVLL\n )\n elif w == 1:\n # In addition, if we have a center tap, we need to add the secondary and tertiary voltages here\n if TYPE == 4:\n try:\n KVLLsec = (\n 
winding.nominal_voltage\n * 10 ** -3\n + transformer_object.windings[\n 2\n ].nominal_voltage\n * 10 ** -3\n )\n voltageUnit = 1 # Voltage declared in KV, not in KVLL\n except:\n KVLLsec = \"DEFAULT\"\n pass\n else:\n KVLLsec = (\n winding.nominal_voltage * 10 ** -3\n )\n voltageUnit = 1 # Voltage declared in KV, not in KVLL\n # If we have a three phase transformer, we need to specify the voltage in KVLL.\n # This is done by setting the voltageUnit to 0, and multiplying the voltage by sqrt(3)\n # Note: If we have three phases, the transformer shouln't be a center tap\n elif (\n len(\n transformer_object.windings[\n 0\n ].phase_windings\n )\n == 3\n ):\n if w == 0:\n KVLLprim = (\n winding.nominal_voltage * 10 ** -3\n ) # *math.sqrt(3)\n if transformer_object.is_center_tap == True:\n KVLLprim = round(\n KVLLprim / (3 ** 0.5), 2\n ) # produces output in L-N format if center-tap rather than L-L\n voltageUnit = 0\n if w == 1:\n KVLLsec = (\n winding.nominal_voltage * 10 ** -3\n ) # *math.sqrt(3)\n voltageUnit = 0\n\n # NoLoadLosses\n if (\n hasattr(transformer_object, \"noload_loss\")\n and transformer_object.noload_loss is not None\n ):\n # TODO: Make sure noloadlosses is in % in DiTTo, or change what is next.\n NOLOADLOSS = (\n transformer_object.noload_loss / 100.0 * KVA\n )\n else:\n NOLOADLOSS = \"\"\n\n new_transformer_object_line += \"{type},{kva},{voltageUnit},{kvllprim},{kvllsec},{Z1},{Z0},{XR},{XR0},{Conn},{WindingType},{noloadloss},{phaseshift},{isltc}\".format(\n phaseshift=phase_shift,\n type=TYPE,\n kva=KVA,\n voltageUnit=voltageUnit,\n kvllprim=KVLLprim,\n kvllsec=KVLLsec,\n Conn=CONN,\n Z1=Z1,\n Z0=Z0,\n XR=XR,\n XR0=XR0,\n WindingType=1,\n noloadloss=NOLOADLOSS,\n isltc=is_ltc,\n )\n\n found = False\n for k, d in self.two_windings_trans_codes.items():\n if d == new_transformer_object_line:\n new_transformer_line += (\n \",transformer_\" + str(k) + \",\" + new_section_ID\n )\n found = True\n if not found:\n ID_trans += 1\n self.two_windings_trans_codes[\n ID_trans\n ] = new_transformer_object_line\n new_transformer_line += (\n \",transformer_\"\n + str(ID_trans)\n + \",\"\n + new_section_ID\n )\n\n new_transformer_line += \",{PhaseShiftType},M,100,100,None,0\".format(\n PhaseShiftType=phase_shift\n ) # Phase shift, Location, PrimTap,SecondaryTap, ODPrimPh, and ConnectionStatus\n\n try:\n TAP = 1.0 / float(\n transformer_object.windings[1]\n .phase_windings[0]\n .tap_position\n )\n new_transformer_line += \",{}\".format(TAP)\n except:\n new_transformer_line += \",\"\n pass\n\n # Apply the LTC settings. 
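# --- Illustrative sketch, not part of the writer above ----------------------
# Two small conversions used in this transformer/LTC block: the CYME Tap field
# is the reciprocal of the DiTTo tap_position, and an LTC bandwidth is turned
# into lower/upper limits around the band center (the band center defaults to
# 0 above when it is not set).
def cyme_tap(tap_position):
    return 1.0 / float(tap_position)        # e.g. tap_position 1.05 -> ~0.952

def ltc_band(bandcenter, bandwidth):
    return abs(bandcenter - bandwidth), abs(bandcenter + bandwidth)

# Example:
# print(cyme_tap(1.05))      # 0.9523...
# print(ltc_band(120, 2))    # (118, 122)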
These are empty if it's just a transformer\n new_transformer_line += \",{setpoint},{controltype},{lowerbandwidth},{upperbandwidth},{maxbuck},{maxboost}\".format(\n setpoint=Setpoint,\n controltype=ControlType,\n lowerbandwidth=LowerBandwidth,\n upperbandwidth=UpperBandwidth,\n maxbuck=MaxBuck,\n maxboost=MaxBoost,\n )\n\n if new_transformer_line != \"\":\n two_windings_transformer_string_list.append(\n new_transformer_line\n )\n\n # Case 2: Three Windings\n #\n elif len(transformer_object.windings) == 3:\n # Empty new transformer string\n new_transformer_line = \"\"\n new_transformer_object_line = \"\"\n\n # Name\n if (\n hasattr(transformer_object, \"name\")\n and transformer_object.name is not None\n ):\n try:\n new_transformer_line += new_section_ID\n except:\n pass\n\n # CoordX and CoordY\n if (\n hasattr(transformer_object, \"positions\")\n and transformer_object.positions is not None\n ):\n try:\n new_transformer_line += \",\" + str(\n transformer_object.positions[0].long\n )\n new_transformer_line += \",\" + str(\n transformer_object.positions[0].lat\n )\n except:\n new_transformer_line += \",,\"\n pass\n\n _primary_rated_capacity = None\n _secondary_rated_capacity = None\n _tertiary_rated_capacity = None\n _primary_voltage = None\n _secondary_voltage = None\n _tertiary_voltage = None\n _primary_connection = None\n _secondary_connection = None\n _tertiary_connection = None\n R = {}\n XHL_perct, XLT_perct, XHT_perct = None, None, None\n for w, winding in enumerate(transformer_object.windings):\n if (\n hasattr(winding, \"rated_power\")\n and winding.rated_power is not None\n ):\n if w == 0:\n _primary_rated_capacity = str(\n winding.rated_power * 10 ** -3\n )\n if w == 1:\n _secondary_rated_capacity = str(\n winding.rated_power * 10 ** -3\n )\n if w == 2:\n _tertiary_rated_capacity = str(\n winding.rated_power * 10 ** -3\n )\n\n if (\n hasattr(winding, \"connection_type\")\n and winding.connection_type is not None\n ):\n if w == 0:\n _primary_connection = winding.connection_type\n if w == 1:\n _secondary_connection = winding.connection_type\n if w == 2:\n _tertiary_connection = winding.connection_type\n\n if (\n hasattr(winding, \"nominal_voltage\")\n and winding.nominal_voltage is not None\n ):\n try:\n new_transformer_line += \",\" + str(\n winding.nominal_voltage * 10 ** -3\n )\n except:\n new_transformer_line += \",\"\n pass\n if w == 0:\n _primary_voltage = str(\n winding.nominal_voltage * 10 ** -3\n )\n if w == 1:\n _secondary_voltage = str(\n winding.nominal_voltage * 10 ** -3\n )\n if w == 2:\n _tertiary_voltage = str(\n winding.nominal_voltage * 10 ** -3\n )\n\n else:\n new_transformer_line += \",\"\n\n if (\n hasattr(winding, \"resistance\")\n and winding.resistance is not None\n ):\n try:\n R[w] = (\n winding.resistance\n * winding.rated_power\n * 10 ** -3\n )\n except:\n R[w] = None\n pass\n\n if (\n hasattr(transformer_object, \"reactances\")\n and i.reactances is not None\n ):\n try:\n XHL_perct, XLT_perct, XHT_perct = i.reactances\n except:\n pass\n\n if XHL_perct is not None:\n try:\n XHL = (\n XHL_perct\n * 10 ** -2\n * transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n except:\n XHL = None\n pass\n if XLT_perct is not None:\n try:\n XLT = (\n XLT_perct\n * 10 ** -2\n * transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n except:\n XLT = None\n pass\n if XHT_perct is not None:\n try:\n XHT = (\n XHT_perct\n * 10 ** -2\n * transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n except:\n XHT = None\n pass\n\n if (\n sum([x is None for x in 
R]) == 0\n and XHL is not None\n and XLT is not None\n and XHT is not None\n ):\n ZHL = complex(R[0] + R[1], XHL)\n ZLT = complex(R[1] + R[2], XLT)\n ZHT = complex(R[0] + R[2], XHT)\n\n _PrimaryToSecondaryXR1 = ZHL.imag / ZHL.real\n _PrimaryToSecondaryXR0 = _PrimaryToSecondaryXR1\n\n _PrimaryToTertiaryXR1 = ZHT.imag / ZHT.real\n _PrimaryToTertiaryXR0 = _PrimaryToTertiaryXR1\n\n _SecondaryToTertiaryXR1 = ZLT.imag / ZLT.real\n _SecondaryToTertiaryXR0 = _SecondaryToTertiaryXR1\n\n _PrimaryToSecondaryZ1 = (\n math.sqrt(ZHL.real ** 2 + ZHL.imag ** 2)\n * 100.0\n / (\n transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n )\n _PrimaryToSecondaryZ0 = _PrimaryToSecondaryZ1\n\n _PrimaryToTertiaryZ1 = (\n math.sqrt(ZHT.real ** 2 + ZHT.imag ** 2)\n * 100.0\n / (\n transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n )\n _PrimaryToTertiaryZ0 = _PrimaryToTertiaryZ1\n\n _SecondaryToTertiaryZ1 = (\n math.sqrt(ZLT.real ** 2 + ZLT.imag ** 2)\n * 100.0\n / (\n transformer_object.windings[0].rated_power\n * 10 ** -3\n )\n )\n _SecondaryToTertiaryZ0 = _SecondaryToTertiaryZ1\n\n # NoLoadLosses\n if (\n hasattr(transformer_object, \"noload_loss\")\n and transformer_object.noload_loss is not None\n ):\n # TODO: Make sure noloadlosses is in % in DiTTo, or change what is next.\n NOLOADLOSS = (\n transformer_object.noload_loss / 100.0 * KVA\n )\n else:\n NOLOADLOSS = \"\"\n\n new_transformer_object_line = \"{kva1},{kv1},{conn1},,,,,,,{kva2},{kv2},{conn2},{kva3},{kv3},{conn3},\".format(\n kva1=_primary_rated_capacity,\n kv1=_primary_voltage,\n conn1=_primary_connection,\n kva2=_secondary_rated_capacity,\n kv2=_secondary_voltage,\n conn2=_secondary_connection,\n kva3=_tertiary_rated_capacity,\n kv3=_tertiary_voltage,\n conn3=_tertiary_connection,\n )\n new_transformer_object_line += \"{PrimaryToSecondaryZ1},{PrimaryToSecondaryZ0},{PrimaryToSecondaryXR1},{PrimaryToSecondaryXR0},{PrimaryToTertiaryZ1},{PrimaryToTertiaryZ0},{PrimaryToTertiaryXR1},{PrimaryToTertiaryXR0},{SecondaryToTertiaryZ1},{SecondaryToTertiaryZ0}\".format(\n PrimaryToSecondaryZ1=_PrimaryToSecondaryZ1,\n PrimaryToSecondaryZ0=_PrimaryToSecondaryZ0,\n PrimaryToSecondaryXR1=_PrimaryToSecondaryXR1,\n PrimaryToSecondaryXR0=_PrimaryToSecondaryXR0,\n PrimaryToTertiaryZ1=_PrimaryToTertiaryZ1,\n PrimaryToTertiaryZ0=_PrimaryToTertiaryZ0,\n PrimaryToTertiaryXR1=_PrimaryToTertiaryXR1,\n PrimaryToTertiaryXR0=_PrimaryToTertiaryXR0,\n SecondaryToTertiaryZ1=_SecondaryToTertiaryZ1,\n SecondaryToTertiaryZ0=_SecondaryToTertiaryZ0,\n )\n new_transformer_object_line += \",{SecondaryToTertiaryXR1},{SecondaryToTertiaryXR0},{SecondaryCapacityLimit1},{SecondaryCapacityLimit2},{TertiaryCapacityLimit1},{TertiaryCapacityLimit2},{TertiaryConnection},{noloadloss}\".format(\n SecondaryToTertiaryXR1=_SecondaryToTertiaryXR1,\n SecondaryToTertiaryXR0=_SecondaryToTertiaryXR0,\n SecondaryCapacityLimit1=0,\n SecondaryCapacityLimit2=0,\n TertiaryCapacityLimit1=0,\n TertiaryCapacityLimit2=0,\n TertiaryConnection=0,\n noloadloss=NOLOADLOSS,\n )\n\n found = False\n for k, d in self.three_windings_trans_codes.items():\n if d == new_transformer_object_line:\n new_transformer_line += (\n \",3_wdg_transformer_\"\n + str(k)\n + \",\"\n + new_section_ID\n )\n found = True\n if not found:\n ID_trans_3w += 1\n self.three_windings_trans_codes[\n ID_trans_3w\n ] = new_transformer_object_line\n new_transformer_line += (\n \",3_wdg_transformer_\"\n + str(ID_trans_3w)\n + \",\"\n + new_section_ID\n )\n\n new_transformer_line += 
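# --- Illustrative sketch, not part of the writer above ----------------------
# For three-winding transformers the writer forms the impedance triangle from
# the pairwise reactances (rescaled onto the first winding's kVA) and the
# winding resistances, then derives an X/R ratio and a Z% value per leg.
import math

def three_winding_leg(r_a, r_b, x_ab, kva1):
    """One leg of the triangle, e.g. ZHL from (R_H, R_L, XHL)."""
    z = complex(r_a + r_b, x_ab)
    xr = z.imag / z.real if z.real else 0
    z_pct = math.sqrt(z.real ** 2 + z.imag ** 2) * 100.0 / kva1 if kva1 else 0
    return xr, z_pct   # the zero-sequence values above are copied from these

# Example (on a 1000 kVA base): R_H + R_L = 10, XHL = 40 -> XR = 4.0, Z ~ 4.12 %.
# print(three_winding_leg(5.0, 5.0, 40.0, 1000))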
\",{Location},{tertiarynodeID},{PrimaryFixedTapSetting},{SecondaryFixedTapSetting},{ConnectionStatus}\".format(\n Location=\"M\",\n tertiarynodeID=0,\n PrimaryFixedTapSetting=0,\n SecondaryFixedTapSetting=0,\n ConnectionStatus=0,\n )\n\n try:\n TAP = 1.0 / float(\n transformer_object.windings[1]\n .phase_windings[0]\n .tap_position\n )\n new_transformer_line += \",{}\".format(TAP)\n except:\n new_transformer_line += \",\"\n pass\n\n if new_transformer_line != \"\":\n three_windings_transformer_string_list.append(\n new_transformer_line\n )\n\n # Write everything to the network file\n #\n # HEADER\n #\n f.write(\"[GENERAL]\\n\")\n\n # DATE\n #\n current_date = datetime.now().strftime(\"%B %d, %Y at %H:%M:%S\")\n f.write(\"DATE={}\\n\".format(current_date))\n\n # CYME version\n #\n f.write(\"CYME_VERSION=8.02\\n\")\n\n # Unit system\n #\n f.write(\"\\n[SI]\\n\")\n\n # Nodes\n #\n f.write(\"\\n[NODE]\\n\")\n f.write(\"FORMAT_NODE=NodeID,CoordX,CoordY\\n\")\n\n for node_string in self.node_string_list:\n f.write(node_string + \"\\n\")\n\n if len(self.bus_string_list) > 0:\n f.write(\"FORMAT_NODE=NodeID,CoordX1,CoordY1,CoordX2,CoordY2,Width\\n\")\n for bus_string in self.bus_string_list:\n f.write(bus_string + \"\\n\")\n\n # Intermediate nodes\n #\n f.write(\"\\n[INTERMEDIATE NODES]\\n\")\n f.write(\"FORMAT_INTERMEDIATENODE=SectionID,SeqNumber,CoordX,CoordY\\n\")\n\n for inter in intermediate_nodes:\n f.write(\n \"{SectionID},{SegNumber},{CoordX},{CoordY}\\n\".format(\n SectionID=inter[0],\n SegNumber=inter[1],\n CoordX=inter[2],\n CoordY=inter[3],\n )\n )\n\n # Sources\n #\n f.write(\"\\n[SOURCE]\\n\")\n f.write(\"FORMAT_SOURCE=SourceID,DeviceNumber,NodeID,NetworkID\\n\")\n k = 0\n self.substation_IDs = {}\n\n for _source, _voltage in self.sources.items():\n # _source should be the name of the headnode for one feeder_metadata object\n # TODO: Find a better way to find it\n for obj in model.models:\n if isinstance(obj, Feeder_metadata) and obj.headnode == _source:\n sourceID = obj.headnode + \"_src\"\n nodeID = obj.headnode\n NetworkID = obj.name\n k += 1\n for j, sub in enumerate(self.substations):\n if sub[\"connecting_element\"] == _source:\n self.substations[j][\"sub_ID\"] = \"sub_\" + str(k)\n self.substation_IDs[_source] = \"sub{}\".format(k)\n f.write(\n \"sub_{k},sub_{k},{nodeID},{NetID}\\n\".format(\n sourceID=sourceID, k=k, nodeID=nodeID, NetID=NetworkID\n )\n )\n\n f.write(\"\\n[HEADNODES]\\n\")\n f.write(\"FORMAT_HEADNODES=NodeID,NetworkID\\n\")\n # k=0\n # for source_string in source_string_list:\n # k+=1\n # nodeID=source_string.split(',')[0]\n # f.write('{nodeID},{NetID}\\n'.format(nodeID=nodeID, NetID=k))\n for f_name, section_l in self.section_feeder_mapping.items():\n # for kk in model.models:\n # if isinstance(kk,Feeder_metadata):\n # print(kk.name, kk.headnode)\n # import pdb;pdb.set_trace()\n if f_name != \"\":\n head = model[\n f_name\n ].headnode # self.section_headnode_mapping[f_name]\n f.write(\"{nodeID},{NetID}\\n\".format(nodeID=head, NetID=f_name))\n\n # Source equivalent\n #\n f.write(\"\\n[SOURCE EQUIVALENT]\\n\")\n f.write(\n \"FORMAT_SOURCEEQUIVALENT=NodeID,Voltage,OperatingAngle1,OperatingAngle2,OperatingAngle3,PositiveSequenceResistance,PositiveSequenceReactance,ZeroSequenceResistance,ZeroSequenceReactance,NegativeSequenceResistance,NegativeSequenceReactance,OperatingVoltage1,OperatingVoltage2,OperatingVoltage3,ImpedanceUnit\\n\"\n )\n\n id_from_source = []\n for source_string in source_string_list:\n id_from_source.append(source_string.split(\",\")[0])\n 
f.write(source_string + \"\\n\")\n\n for f_name, section_l in self.section_line_feeder_mapping.items():\n if f_name != \"subtransmission\" and \"substation\" not in f_name:\n temp = model[f_name]\n if (\n hasattr(temp, \"nominal_voltage\")\n and temp.nominal_voltage is not None\n ):\n volt = temp.nominal_voltage * 10 ** -3\n else:\n volt = model[temp.headnode].nominal_voltage * 10 ** -3\n if temp.headnode not in id_from_source:\n f.write(\n \"{node_id},{voltage},{angle1},{angle2},{angle3},{R1},{X1},{R0},{X0},{R2},{X2},{voltage},{voltage},{voltage},0\\n\".format(\n node_id=temp.headnode,\n voltage=volt,\n angle1=temp.operating_angle1,\n angle2=temp.operating_angle2,\n angle3=temp.operating_angle3,\n R1=temp.positive_sequence_resistance,\n X1=temp.positive_sequence_reactance,\n R0=temp.zero_sequence_resistance,\n X0=temp.zero_sequence_reactance,\n R2=temp.negative_sequence_resistance,\n X2=temp.negative_sequence_reactance,\n )\n )\n\n # Sections\n #\n f.write(\"\\n[SECTION]\\n\")\n\n # Always write the SECTION format\n f.write(\n \"FORMAT_SECTION=SectionID,FromNodeID,FromNodeIndex,ToNodeID,ToNodeIndex,Phase,SubNetworkId\\n\"\n )\n\n # Always write the FEEDER format\n f.write(\"FORMAT_FEEDER=NetworkID,HeadNodeID,CoordSet\\n\")\n\n # If we have subtransmission, then write the TRANSMISSIONLINE format\n if \"subtransmission\" in self.section_line_feeder_mapping:\n f.write(\"FORMAT_TRANSMISSIONLINE=NetworkID,HeadNodeID,CoordSet\\n\")\n\n # If we have a substation (have to have \"substation in the name...),\n # then write the SUBSTATION format\n if self.network_have_substations:\n f.write(\"FORMAT_SUBSTATION=NetworkID,HeadNodeID,CoordSet\\n\")\n\n #####################################\n # TO REMOVE ????????\n ####################################\n #\n # k=0\n # for source_string in source_string_list:\n # k+=1\n # f.write('FEEDER={NetID},{HeadNodeID},{coordset}\\n'.format(NetID=k,HeadNodeID=source_string.split(',')[0],coordset=0))\n\n # section_list=self.merge_regulators(self.section_line_list)\n # for section_line in section_list:\n # f.write(section_line+'\\n')\n #######################################\n\n for f_name, section_l in self.section_line_feeder_mapping.items():\n if \"substation\" in f_name:\n head = \"\"\n else:\n head = model[\n f_name\n ].headnode # self.section_headnode_mapping[f_name]\n # If we are considering the subtransmission network, use TRANSMISSIONLINE\n if f_name == \"subtransmission\":\n f.write(\n \"TRANSMISSIONLINE={NetID},{HeadNodeID},{coordset}\\n\".format(\n NetID=f_name, HeadNodeID=head, coordset=1\n )\n )\n subnetID = \"\"\n # If substation is in the name of the \"feeder\", then use SUBSTATION\n elif \"substation\" in f_name:\n f.write(\n \"SUBSTATION={NetID},{HeadNodeID},{coordset}\\n\".format(\n NetID=f_name.split(\"ation_\")[1], HeadNodeID=head, coordset=1\n )\n )\n subnetID = f_name.split(\"ation_\")[1]\n # Otherwise, it should be an actual feeder, so use FEEDER\n else:\n f.write(\n \"FEEDER={NetID},{HeadNodeID},{coordset}\\n\".format(\n NetID=f_name, HeadNodeID=head, coordset=1\n )\n )\n subnetID = \"\"\n # Then, write all the sections belonging to this subnetwork\n for sec in section_l:\n f.write(sec + \",{}\".format(subnetID) + \"\\n\")\n\n # Subnetworks\n #\n # Use subnetworks only for substations\n # TODO: Let the user specify what should be subnetworked...\n #\n if self.network_have_substations:\n f.write(\"\\n[SUBNETWORKS]\\n\")\n f.write(\n 
\"FORMAT_SUBNETWORKS=SubNetID,Angle,X,Y,Height,Length,SymbolID,SubNetTypeID,Version,SymbolReferenceSize,TextReferenceSize,CoordSet\\n\"\n )\n for f_name, section_l in self.section_line_feeder_mapping.items():\n if \"substation\" in f_name:\n # We need to find the X,Y coordinates for the subnetwork\n # (CASE 1) - First, we try setting these coordinates as the average of the LV elements.\n # (CASE 2) - If this does not work, we try using the average of all the substation elements.\n # (CASE 3) - If this does not work either, we try using the global average (all Nodes in the system), such that\n # the subnetwork is more or less in the middle of the system.\n # (CASE 4) - Finally, if nothing works, set the coordinates as (0,0)...\n defaultX = []\n defaultY = []\n by_nominal_voltage_X = {}\n by_nominal_voltage_Y = {}\n all_coordsX = []\n all_coordsY = []\n #\n # TODO: Better way to do this???\n #\n for obj in model.models:\n # (CASE 3) - Just append all Node's valid coordinates to all_coordsX and all_coordsY\n if (\n isinstance(obj, Node)\n and len(obj.positions) > 0\n and obj.positions[0] is not None\n and obj.positions[0].lat is not None\n and obj.positions[0].long is not None\n ):\n all_coordsX.append(obj.positions[0].long)\n all_coordsY.append(obj.positions[0].lat)\n # (CASE 1) - Since we don't know what the LV value is beforehand, we store all coordinates by nominal voltage\n # in the dictionaries by_nominal_voltage_X and by_nominal_voltage_Y.\n if (\n isinstance(obj, Node)\n and obj.substation_name == f_name.split(\"ation_\")[1]\n and obj.is_substation_connection == 1\n ):\n if obj.nominal_voltage is not None:\n if (\n len(obj.positions) > 0\n and obj.positions[0] is not None\n ):\n if (\n obj.positions[0].lat is not None\n and obj.positions[0].long is not None\n ):\n if (\n obj.nominal_voltage\n in by_nominal_voltage_X\n and obj.nominal_voltage\n in by_nominal_voltage_Y\n ):\n by_nominal_voltage_X[\n obj.nominal_voltage\n ].append(obj.positions[0].long)\n by_nominal_voltage_Y[\n obj.nominal_voltage\n ].append(obj.positions[0].lat)\n else:\n by_nominal_voltage_X[\n obj.nominal_voltage\n ] = [obj.positions[0].long]\n by_nominal_voltage_Y[\n obj.nominal_voltage\n ] = [obj.positions[0].lat]\n # (CASE 2) - If the nominal voltage was None, then add the coordinates to the default list\n else:\n if (\n len(obj.positions) > 0\n and obj.positions[0] is not None\n ):\n if (\n obj.positions[0].lat is not None\n and obj.positions[0].long is not None\n ):\n defaultX.append(obj.positions[0].long)\n defaultY.append(obj.positions[0].lat)\n # (CASE 1)\n if len(list(by_nominal_voltage_X.keys())) > 0:\n low_voltage = min(list(by_nominal_voltage_X.keys()))\n Xs = by_nominal_voltage_X[low_voltage]\n Ys = by_nominal_voltage_Y[low_voltage]\n # (CASE 2)\n else:\n Xs = defaultX\n Ys = defaultY\n # If we were able to sample some coordinates, take the average\n if len(Xs) > 0 and len(Ys) > 0:\n X = np.mean(Xs)\n Y = np.mean(Ys) + 50 / 2.0\n # (CASE 3)\n elif len(all_coordsX) > 0 and len(all_coordsY) > 0:\n X = np.mean(all_coordsX)\n Y = np.mean(all_coordsY)\n # (CASE 4) - Otherwise, set to 0,0 (best effort...)\n else:\n logger.warning(\n \"Could not find any coordinate for substation {s}. 
Setting the subnetwork coordinates to (0,0)...\".format(\n s=f_name\n )\n )\n X = 0\n Y = 0\n f.write(\n \"{NetID},0,{X},{Y},{Height},{Length},-1,Geographically Referenced,-1,5,0.251957,1\\n\".format(\n NetID=f_name.split(\"ation_\")[1],\n X=X,\n Y=Y,\n Height=50.00,\n Length=50.00,\n )\n )\n\n # Subnetwork Connections\n #\n # Use subnetwork connections only for substations\n # TODO: Let the user specify what should be subnetworked\n if self.network_have_substations:\n f.write(\"\\n[SUBNETWORK CONNECTIONS]\\n\")\n f.write(\n \"FORMAT_SUBNETWORKCONNECTIONS=SubNetID,NodeID,ConnectorCoordX,ConnectorCoordY\\n\"\n )\n for f_name, section_l in self.section_line_feeder_mapping.items():\n if \"substation\" in f_name:\n # We need to find all the connections between the subnetwork and the rest of the system\n # Use the \"is_substation_connection\" attribute of Node objects\n #\n # TODO: Better way to do this???\n for obj in model.models:\n if (\n isinstance(obj, Node)\n and obj.is_substation_connection == 1\n and obj.substation_name == f_name.split(\"ation_\")[1]\n ):\n # We also need the coordinates of this connection.\n # Use the coordinates of the Node\n if obj.positions is not None and len(obj.positions) > 0:\n X = obj.positions[0].long\n Y = obj.positions[0].lat\n # If we don't have coordinates, then set to (0,0)....\n else:\n X = 0\n Y = 0\n f.write(\n \"{NetID},{NodeID},{X},{Y}\\n\".format(\n NetID=f_name.split(\"ation_\")[1],\n NodeID=obj.name,\n X=X,\n Y=Y,\n )\n )\n\n # Overhead lines\n #\n if len(overhead_string_list) > 0:\n f.write(\"\\n[OVERHEADLINE SETTING]\\n\")\n f.write(\n \"FORMAT_OVERHEADLINESETTING=SectionID,LineCableID,Length,ConnectionStatus\\n\"\n )\n for overhead_string in overhead_string_list:\n f.write(overhead_string + \"\\n\")\n\n # Overhead by phase lines\n #\n if len(overhead_byphase_string_list) > 0:\n f.write(\"\\n[OVERHEAD BYPHASE SETTING]\\n\")\n f.write(\n \"FORMAT_OVERHEADBYPHASESETTING=SectionID,DeviceNumber,CondID_A,CondID_B,CondID_C,CondID_N1,CondID_N2,SpacingID,Length,ConnectionStatus\\n\"\n )\n for overhead_byphase_string in overhead_byphase_string_list:\n f.write(overhead_byphase_string + \"\\n\")\n\n # Underground lines\n #\n if len(underground_string_list) > 0:\n f.write(\"\\n[UNDERGROUNDLINE SETTING]\\n\")\n f.write(\n \"FORMAT_UNDERGROUNDLINESETTING=SectionID,LineCableID,Length,ConnectionStatus,DistanceBetweenConductors,CableConfiguration\\n\"\n )\n for underground_string in underground_string_list:\n f.write(underground_string + \"\\n\")\n\n # Switches\n #\n if len(switch_string_list) > 0:\n f.write(\"\\n[SWITCH SETTING]\\n\")\n f.write(\n \"FORMAT_SWITCHSETTING=SectionID,EqID,Location,ClosedPhase,Locked,ConnectionStatus,DeviceNumber\\n\"\n )\n for switch_string in switch_string_list:\n f.write(switch_string + \"\\n\")\n\n # Fuses\n #\n if len(fuse_string_list) > 0:\n f.write(\"\\n[FUSE SETTING]\\n\")\n f.write(\n \"FORMAT_FUSESETTING=SectionID,EqID,Location,ClosedPhase,Locked,ConnectionStatus,DeviceNumber\\n\"\n )\n for fuse_string in fuse_string_list:\n f.write(fuse_string + \"\\n\")\n\n # Reclosers\n #\n if len(recloser_string_list) > 0:\n f.write(\"\\n[RECLOSER SETTING]\\n\")\n f.write(\n \"FORMAT_RECLOSERSETTING=SectionID,EqID,Location,ClosedPhase,Locked,ConnectionStatus,DeviceNumber\\n\"\n )\n for recloser_string in recloser_string_list:\n f.write(recloser_string + \"\\n\")\n\n # Breakers\n #\n if len(breaker_string_list) > 0:\n f.write(\"\\n[BREAKER SETTING]\\n\")\n f.write(\n 
\"FORMAT_BREAKERSETTING=SectionID,EqID,Location,ClosedPhase,Locked,ConnectionStatus,DeviceNumber\\n\"\n )\n for breaker_string in breaker_string_list:\n f.write(breaker_string + \"\\n\")\n\n # Capacitors\n #\n if len(capacitor_string_list) > 0:\n f.write(\"\\n[SHUNT CAPACITOR SETTING]\\n\")\n f.write(\n \"FORMAT_SHUNTCAPACITORSETTING=SectionID,Connection,SwitchedKVARA,SwitchedKVARB,SwitchedKVARC,KV,Control,OnValueA,OnValueB,OnValueC,OffValueA,OffValueB,OffValueC,DeviceNumber,ShuntCapacitorID,Location,ConnectionStatus\\n\"\n )\n for capacitor_string in capacitor_string_list:\n f.write(capacitor_string + \"\\n\")\n\n # Transformers\n #\n # 2 WINDINGS\n #\n if len(two_windings_transformer_string_list) > 0:\n f.write(\"\\n[TRANSFORMER SETTING]\\n\")\n f.write(\n \"FORMAT_TRANSFORMERSETTING=SectionID,CoordX,CoordY,Conn,PhaseON,EqID,DeviceNumber,PhaseShiftType,Location,PrimTap,SecondaryTap,ODPrimPh,ConnectionStatus,Tap,SetPoint,ControlType,LowerBandwidth,UpperBandwidth,Maxbuck,Maxboost\\n\"\n )\n for transformer_string in two_windings_transformer_string_list:\n f.write(transformer_string + \"\\n\")\n\n # 3 WINDINGS\n #\n if len(three_windings_transformer_string_list) > 0:\n f.write(\"\\n[THREE WINDING TRANSFORMER SETTING]\\n\")\n f.write(\n \"FORMAT_THREEWINDINGTRANSFORMERSETTING=SectionID,CoordX,CoordY,PrimaryBaseVoltage,SecondaryBaseVoltage,TertiaryBaseVoltage,EqID,DeviceNumber,Location,TertiaryNodeID,PrimaryFixedTapSetting,SecondaryFixedTapSetting,ConnectionStatus,Tap\\n\"\n )\n for transformer_string in three_windings_transformer_string_list:\n f.write(transformer_string + \"\\n\")\n\n # Regulators\n if len(regulator_string_list) > 0:\n # Merge the Regulators\n regulator_string_list_merged = self.merge_regulators(\n regulator_string_list\n )\n f.write(\"\\n[REGULATOR SETTING]\\n\")\n f.write(\n \"FORMAT_REGULATORSETTING=SectionID,CoordX,CoordY,PhaseON,BandWidth,CT,PT,VsetA,VsetB,VsetC,EqID,DeviceNumber,Location,MaxBuck,MaxBoost,SettingOption,RsetA,RsetB,RsetC,XsetA,XsetB,XsetC,TapA,TapB,TapC,ConnectionStatus\\n\"\n )\n for regulator_string in regulator_string_list_merged:\n f.write(regulator_string + \"\\n\")\n\n if len(converter_string_list) > 0:\n f.write(\"\\n[CONVERTER]\\n\")\n f.write(\n \"FORMAT_CONVERTER=DeviceNumber,DeviceType,ConverterRating,ActivePowerRating,ReactivePowerRating,MinimumPowerFactor,PowerFallLimit,PowerRiseLimit,RiseFallUnit\\n\"\n )\n for i in converter_string_list:\n f.write(i)\n f.write(\"\\n\")\n\n if len(converter_control_string_list) > 0:\n f.write(\"\\n[CONVERTER CONTROL SETTING]\\n\")\n f.write(\n \"FORMAT_CONVERTERCONTROLSETTING=DeviceNumber,DeviceType,ControlIndex,TimeTriggerIndex,ControlType,FixedVarInjection,InjectionReference,ConverterControlID,PowerReference,PowerFactor\\n\"\n )\n for i in converter_control_string_list:\n f.write(i)\n f.write(\"\\n\")\n\n if len(pv_settings_string_list) > 0:\n f.write(\"\\n[PHOTOVOLTAIC SETTINGS]\\n\")\n f.write(\n \"FORMAT_PHOTOVOLTAICSETTING=SectionID,Location,DeviceNumber,EquipmentID,NS,NP,AmbientTemperature,Phase,ConstantInsolation,InsolationModelID\\n\"\n )\n for i in pv_settings_string_list:\n f.write(i)\n f.write(\"\\n\")\n\n if len(dg_generation_string_list) > 0:\n f.write(\"\\n[DGGENERATIONMODEL]\\n\")\n f.write(\n \"FORMAT_DGGENERATIONMODEL=DeviceNumber,DeviceType,LoadModelName,ActiveGeneration,PowerFactor\\n\"\n )\n for i in dg_generation_string_list:\n f.write(i)\n f.write(\"\\n\")\n\n if len(self.node_connector_string_list) > 0:\n f.write(\"\\n[NODE CONNECTOR]\\n\")\n 
f.write(\"FORMAT_NODECONNECTOR=NodeID,CoordX,CoordY,SectionID\\n\")\n for i in self.node_connector_string_list:\n f.write(i)\n f.write(\"\\n\")\n\n if len(bess_settings_string_list) > 0:\n f.write(\"\\n[BESS SETTINGS]\\n\")\n f.write(\n \"FORMAT_BESSSETTING=SectionID,Location,DeviceNumber,EquipmentID,Phase,InitialSOC\\n\"\n )\n for i in bess_settings_string_list:\n f.write(i)\n f.write(\"\\n\")",
"def save_network(network, fpath):\n\twith open(fpath, \"wb\") as f:\n\t\tpickle.dump(network, f)",
"def save_utility_network(self,path_save):\n print(\"Save the neural network to : \"+path_save)\n self.nn.save_on_file(path_save)",
"def _write_network_file(graph, out_name, out_format=None, data=False,weight=False):\n\n if out_format==None:\n out_format=\"edges\"\n os.makedirs(os.path.dirname(out_name), exist_ok=True)\n #print(\"writing graph of format \" + out_format + \" at \" + out_name)\n if out_format == 'edges':\n nx.write_edgelist(graph, \"%s.edges\" % (out_name), data=data)\n elif out_format == 'gefx':\n nx.write_gexf(graph, \"%s.gefx\" % (out_name))\n elif out_format == 'gml':\n nx.write_gml(graph, \"%s.gml\" % (out_name))\n elif out_format == 'pajek':\n nx.write_pajek(graph, \"%s.pajek\" % (out_name))\n elif out_format == 'ncol':\n nx.write_edgelist(graph, \"%s.ncol\" % (out_name), delimiter='\\t',data=weight)\n elif out_format == 'graphML' :\n g = nx.write_graphml(graph, \"%s.graphml\" % (out_name))\n else:\n raise Exception(\"UNKNOWN FORMAT \" + out_format)",
"def save_graph(graph, file_name):\r\n print \"Saving network into \"+file_name\r\n f = open(file_name, 'w')\r\n f.write(str(len(graph))+'\\n')\r\n for citizen in graph:\r\n f.write(str(citizen.id) + ';' + str(citizen.location) + ';' + str(citizen.influence_level) + ';' + \\\r\n str(citizen.proactivity_level) + '\\n')\r\n for op in citizen.opinions.keys():\r\n value = citizen.opinions[op].weight\r\n f.write(str(op)+':'+str(value)+';')\r\n f.write('\\n')\r\n for friend in citizen.friends:\r\n f.write(str(friend.id) + ';')\r\n f.write('\\n')\r\n f.close()",
"def save_graph(self, filename, fileType):\n if fileType == \"GML Format\":\n nx.write_gml(self.graph, filename+\".gml\")\n if fileType == \"Adjacency list\":\n nx.write_adjlist(self.graph, filename+\".adjlist\")\n if fileType == \"YAML\":\n nx.write_yaml(self.graph, filename + \".yaml\")",
"def save(self):\n self.save_network_architecture( network_path=self.network_path )\n self.save_network_parameters(\n file_name='net_parameters', file_path=self.network_path )",
"def dump_net_to_file(net_def, filename):\n\n print(\"DUMP TO FILE:\\n{}\\n\".format(net_def))\n\n with open(filename, \"w\") as fd:\n line = json.dumps(net_def)\n fd.write(\"{}\\n\".format(line))\n\n print(\"Wrote neural net to {}\".format(filename))",
"def export_network(file_name, net) -> None:\r\n file = open(file_name, 'wb')\r\n file.write(pickle.dumps(net.__dict__))\r\n file.close()",
"def to_net(self, filename):\n if len(self.nodes1)>0:\n h = open(filename, \"w\")\n for n1,n2,s in zip(self.nodes1, self.nodes2, self.signs):\n h.write(\"%s -> %s %s\\n\" % (n1, n2, s))\n h.close()",
"def save_nn(self, networkname= 'nn'):\n np.save(f\"{networkname}_data.npy\", self.weights_and_biases)\n print(f\"Data saved to {networkname}_data.npy\")",
"def save_net(net, filepath):\n\twith open(filepath, 'wb+') as fh:\n\t\tdump(obj = net, file = fh, protocol = -1)",
"def but_save_net(self):\n if isinstance(self.nn_obj, dict):\n if platform == \"linux\" or platform == \"linux2\":\n path=tk.filedialog.asksaveasfilename(filetypes = [('LM NN file','.csv')])\n elif platform == \"win32\":\n path=tk.filedialog.asksaveasfilename(filetypes = [('LM NN file','.csv')], defaultextension=\"*.*\")\n else:\n path=tk.filedialog.asksaveasfilename(filetypes = [('LM NN file','.csv')])\n elif isinstance(self.nn_obj, Net_tr):\n if platform == \"linux\" or platform == \"linux2\":\n path=tk.filedialog.asksaveasfilename(filetypes = [(\"Torch NN file\",\".pt\")])\n elif platform == \"win32\":\n path=tk.filedialog.asksaveasfilename(filetypes = [(\"Torch NN file\",\".pt\")], defaultextension=\"*.*\")\n else:\n path=tk.filedialog.asksaveasfilename(filetypes = [(\"Torch NN file\",\".pt\")])\n else:\n tk.messagebox.showerror(\"Error\", \"Crete NN\")\n return\n save_nn(self.nn_obj, path)",
"def save_network(nodes, edges, name, pid):\n d = {'nodes': nodes, 'edges': edges, 'name': name, 'pid': pid}\n\n out_filename = \"{0}-{1}.pickle\".format(\n datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"),\n pid\n )\n with open(out_filename, 'wb') as out_file:\n pickle.dump(d, out_file)",
"def save_state(self, path='/home/lukas/weights/'):\r\n stuff_in_path = os.listdir(path)\r\n counter = 0\r\n for i in stuff_in_path:\r\n if 'parameters' in i:\r\n counter += 1\r\n with open(path + 'info.txt', mode='a') as f:\r\n f.write('counter: %i \\taccuracy: %.8f%% \\tloss: %.8f\\n' % (counter, returnList(self.accuracy)[-1] * 100, returnList(self.loss)[-1]))\r\n\r\n parameters = [ self.batchsize_train,\r\n self.iterator,\r\n self.n_hidden_layers,\r\n self.n_hidden_neurons,\r\n self.n_input_neurons,\r\n self.n_output_neurons,\r\n self.hid_transfer.__name__,\r\n self.out_transfer.__name__]\r\n try:\r\n print '[Network] Saving network status ...'\r\n np.save(path + 'parameters' + str(counter), parameters)\r\n np.save(path + 'weights' + str(counter), self.weights)\r\n np.save(path + 'bias' + str(counter), self.bias)\r\n np.save(path + 'weights_gradient' + str(counter), self.weights_gradient)\r\n np.save(path + 'bias_gradient' + str(counter), self.bias_gradient)\r\n np.save(path + 'loss' + str(counter), self.loss)\r\n np.save(path + 'accuracy' + str(counter), self.accuracy)\r\n np.save(path + 'r_weights' + str(counter), self.r_weights)\r\n np.save(path + 'r_bias' + str(counter), self.r_bias)\r\n print '\\033[92m' + '[Network] Network status succesfully saved' + '\\033[0m'\r\n\r\n except Exception as e:\r\n print '\\033[1m' + '\\033[91m' + '[Network] Could not correctly save network status:' + '\\033[0m'\r\n print e.message",
"def save_network(self, file_name):\n data = {\n \"layers_num\": self.layers_num,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases],\n \"act_func\": str(self.act_func.__name__),\n \"cost_func\": str(self.cost_func.__name__),\n \"metric\": str(self.metric.__name__)\n }\n\n with open(file_name, 'w') as file:\n json.dump(data, file)",
"def save_network(self, **kwargs):\n raise NotImplementedError",
"def save(net, txt_dict, path):\n dict_m = net.state_dict()\n _ = txt_dict\n torch.save(dict_m,path)",
"def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)",
"def save_network(save_file, training_round, digit):\n with open(save_file, 'wb') as f:\n np.save(f, S.w)\n np.save(f, S.w_min)\n np.save(f, S.w_max)\n np.save(f, S.a_plus)\n np.save(f, S.a_minus)\n np.save(f, S.b_plus)\n np.save(f, S.b_minus)\n np.save(f, Output.v_th)\n np.save(f, training_set.flatten())\n np.save(f, training_round)\n np.save(f, digit)",
"def save_to_file(self, representation, filename) -> bool:\n filename = \"data/\" + filename\n output_matrix = self.get_graph(representation)\n if isinstance(output_matrix, list):\n with open(filename, \"w+\") as f:\n for row in output_matrix:\n f.write(\" \".join(str(item) for item in row))\n f.write(\"\\n\")\n return True\n elif isinstance(output_matrix, np.ndarray):\n with open(filename, \"w+\") as f:\n np.savetxt(f, output_matrix, fmt=\"%i\")\n return True\n else:\n return False",
"def save(self, filename, path=\".\"):\n if not self.trained:\n if self.verbose:\n print(\"Neural Network Model Class - Save Function: No trained model\")\n return -1\n\n if filename is None:\n if self.verbose:\n print(\"Neural Network Model Class - Save Function: No file name\")\n return -1\n\n #trn_params\n self.trn_params.save('%s_trn_params.pickle'%(filename),path=path)\n #model\n # serialize model to JSON\n model_json = self.model.to_json()\n with open(\"%s/%s_model.json\"%(path,filename), \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n self.model.save_weights(\"%s/%s_model.h5\"%(path,filename))\n\n #trn_desc\n pickle.dump([self.trn_desc], open(\"%s/%s_trn_desc.pickle\"%(path,filename), \"wb\"))",
"def save_depfile(depdata,outname,is31=True): \n\n if outname==None:\n print('save_depfile requires a filename to save.')\n return\n try:\n fp=open(outname,'w')\n except IOError:\n print('save_depfile: invalid filename.')\n return data\n if is31:\n fp.write('Node Number = %d\\n' % len(depdata['node_num']) )\n for i in range(0,len(depdata['node_num'])):\n fp.write('%f %f %f\\n'% (depdata['x'][i],depdata['y'][i],depdata['h'][i]))\n fp.close()\n \n return",
"def save_net(self, file_path):\n \tserialized_weights = [w.tolist() for w \\\n in self.stacked_net.weights]\n \tserialized_biases = [b.tolist() for b \\\n in self.stacked_net.biases]\n \tparams = {'weights':serialized_weights,\\\n 'biases':serialized_biases}\n \twith open(file_path,'w') as f:\n \t\tf.write(json.dumps(params))",
"def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())"
] | [
"0.7442755",
"0.6993565",
"0.69844145",
"0.69380265",
"0.69357723",
"0.6913174",
"0.6849668",
"0.6798643",
"0.66806936",
"0.65847754",
"0.6535876",
"0.6521386",
"0.64706856",
"0.6424189",
"0.6404124",
"0.6398808",
"0.63774717",
"0.6374491",
"0.6335782",
"0.6327243",
"0.62925816",
"0.6291275",
"0.62752867",
"0.6273573",
"0.6271367",
"0.6259008",
"0.62510663",
"0.6229186",
"0.62275475",
"0.62126136"
] | 0.7138623 | 1 |
Compute and store the QC metrics. Runs the QC on the session and stores a map of the metrics for each datapoint for each test, and a map of which datapoints passed for each test | def compute(self, **kwargs):
if self.extractor is None:
kwargs['download_data'] = kwargs.pop('download_data', self.download_data)
self.load_data(**kwargs)
self.log.info(f"Session {self.session_path}: Running QC on behavior data...")
self.metrics, self.passed = get_bpodqc_metrics_frame(
self.extractor.data,
wheel_gain=self.extractor.settings['STIM_GAIN'], # The wheel gain
photodiode=self.extractor.frame_ttls,
audio=self.extractor.audio_ttls,
re_encoding=self.extractor.wheel_encoding or 'X1',
min_qt=self.extractor.settings.get('QUIESCENT_PERIOD') or 0.2
)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute(self, download_data=None):\n if self.extractor is None:\n # If download_data is None, decide based on whether eid or session path was provided\n ensure_data = self.download_data if download_data is None else download_data\n self.load_data(download_data=ensure_data)\n self.log.info(f\"Session {self.session_path}: Running QC on habituation data...\")\n\n # Initialize checks\n prefix = '_task_'\n data = self.extractor.data\n metrics = {}\n passed = {}\n\n # Check all reward volumes == 3.0ul\n check = prefix + 'reward_volumes'\n metrics[check] = data['rewardVolume']\n passed[check] = metrics[check] == 3.0\n\n # Check session durations are increasing in steps >= 12 minutes\n check = prefix + 'habituation_time'\n if not self.one or not self.session_path:\n self.log.warning('unable to determine session trials without ONE')\n metrics[check] = passed[check] = None\n else:\n subject, session_date = self.session_path.parts[-3:-1]\n # compute from the date specified\n date_minus_week = (\n datetime.strptime(session_date, '%Y-%m-%d') - timedelta(days=7)\n ).strftime('%Y-%m-%d')\n sessions = self.one.alyx.rest('sessions', 'list', subject=subject,\n date_range=[date_minus_week, session_date],\n task_protocol='habituation')\n # Remove the current session if already registered\n if sessions and sessions[0]['start_time'].startswith(session_date):\n sessions = sessions[1:]\n metric = ([0, data['intervals'][-1, 1] - data['intervals'][0, 0]] +\n [(datetime.fromisoformat(x['end_time']) -\n datetime.fromisoformat(x['start_time'])).total_seconds() / 60\n for x in [self.one.alyx.get(s['url']) for s in sessions]])\n\n # The duration from raw trial data\n # duration = map(float, self.extractor.raw_data[-1]['elapsed_time'].split(':'))\n # duration = timedelta(**dict(zip(('hours', 'minutes', 'seconds'),\n # duration))).total_seconds() / 60\n metrics[check] = np.array(metric)\n passed[check] = np.diff(metric) >= 12\n\n # Check event orders: trial_start < stim on < stim center < feedback < stim off\n check = prefix + 'trial_event_sequence'\n nans = (\n np.isnan(data[\"intervals\"][:, 0]) | # noqa\n np.isnan(data[\"stimOn_times\"]) | # noqa\n np.isnan(data[\"stimCenter_times\"]) |\n np.isnan(data[\"valveOpen_times\"]) | # noqa\n np.isnan(data[\"stimOff_times\"])\n )\n a = np.less(data[\"intervals\"][:, 0], data[\"stimOn_times\"], where=~nans)\n b = np.less(data[\"stimOn_times\"], data[\"stimCenter_times\"], where=~nans)\n c = np.less(data[\"stimCenter_times\"], data[\"valveOpen_times\"], where=~nans)\n d = np.less(data[\"valveOpen_times\"], data[\"stimOff_times\"], where=~nans)\n\n metrics[check] = a & b & c & d & ~nans\n passed[check] = metrics[check].astype(float)\n\n # Check that the time difference between the visual stimulus center-command being\n # triggered and the stimulus effectively appearing in the center is smaller than 150 ms.\n check = prefix + 'stimCenter_delays'\n metric = np.nan_to_num(data[\"stimCenter_times\"] - data[\"stimCenterTrigger_times\"],\n nan=np.inf)\n passed[check] = (metric <= 0.15) & (metric > 0)\n metrics[check] = metric\n\n # Phase check\n check = prefix + 'phase'\n metric = data['phase']\n passed[check] = (metric <= 2 * np.pi) & (metric >= 0)\n metrics[check] = metric\n\n check = prefix + 'phase_distribution'\n metric, _ = np.histogram(data['phase'])\n _, p = chisquare(metric)\n passed[check] = p < 0.05\n metrics[check] = metric\n\n # Checks common to training QC\n checks = [check_goCue_delays, check_stimOn_goCue_delays,\n check_stimOn_delays, check_stimOff_delays]\n for 
fcn in checks:\n check = prefix + fcn.__name__[6:]\n metrics[check], passed[check] = fcn(data)\n\n self.metrics, self.passed = (metrics, passed)",
"def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])",
"def compute_session_status(self):\n if self.passed is None:\n raise AttributeError('passed is None; compute QC first')\n # Get mean passed of each check, or None if passed is None or all NaN\n results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v)\n for k, v in self.passed.items()}\n session_outcome, outcomes = self.compute_session_status_from_dict(results)\n return session_outcome, results, outcomes",
"def get_bpodqc_metrics_frame(data, **kwargs):\n def is_metric(x):\n return isfunction(x) and x.__name__.startswith('check_')\n # Find all methods that begin with 'check_'\n checks = getmembers(sys.modules[__name__], is_metric)\n prefix = '_task_' # Extended QC fields will start with this\n # Method 'check_foobar' stored with key '_task_foobar' in metrics map\n qc_metrics_map = {prefix + k[6:]: fn(data, **kwargs) for k, fn in checks}\n\n # Split metrics and passed frames\n metrics = {}\n passed = {}\n for k in qc_metrics_map:\n metrics[k], passed[k] = qc_metrics_map[k]\n\n # Add a check for trial level pass: did a given trial pass all checks?\n n_trials = data['intervals'].shape[0]\n # Trial-level checks return an array the length that equals the number of trials\n trial_level_passed = [m for m in passed.values()\n if isinstance(m, Sized) and len(m) == n_trials]\n name = prefix + 'passed_trial_checks'\n metrics[name] = reduce(np.logical_and, trial_level_passed or (None, None))\n passed[name] = metrics[name].astype(float) if trial_level_passed else None\n\n return metrics, passed",
"def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests",
"def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()",
"def test_evaluate(self):\n # test normalized by 'bbox_size'\n jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.5, norm_item='bbox')\n jhmdb_pck_metric.process(self.data_batch, self.data_samples)\n pck_results = jhmdb_pck_metric.evaluate(self.batch_size)\n target = {\n 'Head PCK': 1.0,\n 'Sho PCK': 1.0,\n 'Elb PCK': 1.0,\n 'Wri PCK': 1.0,\n 'Hip PCK': 1.0,\n 'Knee PCK': 1.0,\n 'Ank PCK': 1.0,\n 'PCK': 1.0,\n }\n self.assertDictEqual(pck_results, target)\n\n # test normalized by 'torso_size'\n jhmdb_tpck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item='torso')\n jhmdb_tpck_metric.process(self.data_batch, self.data_samples)\n tpck_results = jhmdb_tpck_metric.evaluate(self.batch_size)\n target = {\n 'Head tPCK': 1.0,\n 'Sho tPCK': 1.0,\n 'Elb tPCK': 1.0,\n 'Wri tPCK': 1.0,\n 'Hip tPCK': 1.0,\n 'Knee tPCK': 1.0,\n 'Ank tPCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck_results, target)",
"def qc_metrics(\n data: AnnData,\n mito_prefix: str = \"MT-\",\n min_genes: int = 500,\n max_genes: int = 6000,\n min_umis: int = 100,\n max_umis: int = 600000,\n percent_mito: float = 10.0,\n percent_cells: float = 0.05,\n) -> None:\n\n data.obs[\"passed_qc\"] = False\n\n data.obs[\"n_genes\"] = data.X.getnnz(axis=1)\n data.obs[\"n_counts\"] = data.X.sum(axis=1).A1\n\n mito_prefixes = mito_prefix.split(\",\")\n\n def startswith(name):\n for prefix in mito_prefixes:\n if name.startswith(prefix):\n return True\n return False\n\n mito_genes = data.var_names.map(startswith).values.nonzero()[0]\n data.obs[\"percent_mito\"] = (data.X[:, mito_genes].sum(axis=1).A1 / np.maximum(\n data.obs[\"n_counts\"].values, 1.0\n )) * 100\n\n # Assign passed_qc\n filters = [\n data.obs[\"n_genes\"] >= min_genes,\n data.obs[\"n_genes\"] < max_genes,\n data.obs[\"n_counts\"] >= min_umis,\n data.obs[\"n_counts\"] < max_umis,\n data.obs[\"percent_mito\"] < percent_mito,\n ]\n\n data.obs.loc[np.logical_and.reduce(filters), \"passed_qc\"] = True\n\n var = data.var\n data = data[\n data.obs[\"passed_qc\"]\n ] # compute gene stats in space of filtered cells only\n\n var[\"n_cells\"] = data.X.getnnz(axis=0)\n var[\"percent_cells\"] = (var[\"n_cells\"] / data.shape[0]) * 100\n var[\"robust\"] = var[\"percent_cells\"] >= percent_cells\n var[\"highly_variable_features\"] = var[\n \"robust\"\n ] # default all robust genes are \"highly\" variable",
"def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n count_list = []\n for result in self._result_list:\n try:\n count_list.append(result.get_counts(circ_name))\n except (QiskitError, KeyError):\n pass\n\n circ_counts[circ_name] = \\\n build_counts_dict_from_list(count_list)\n\n self._circ_shots[circ_name] = \\\n sum(circ_counts[circ_name].values())\n\n # calculate the heavy output probability\n self._heavy_output_counts[circ_name] = \\\n self._subset_probability(\n self._heavy_outputs[circ_name],\n circ_counts[circ_name])",
"def compute_metrics(self):\n pass",
"def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes",
"def get_test_cases_coverage(session_id):\n tc_stats={}\n tc_stats_list=[]\n total_executed=0\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\"null\"'\n params={\"sid\":session_id}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n tests=c.fetchall()\n conn.close()\n if len(tests)>0:\n for t in tests:\n total_executed=0\n sql=\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n files=c.fetchall()\n conn.close()\n for f in files:\n line_count=get_executable_lines_count_for_file(f[0])\n # get executions\n sql=\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0],\"fid\":f[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n executed=c.fetchone()\n conn.close()\n total_executed+=executed[0]\n # save test case and it's executions\n tc_stats={}\n tc_stats[\"test_id\"]=t[0]\n tc_stats[\"total_executed\"]=total_executed\n tc_stats[\"total_executed\"]\n \n tc_stats_list.append(tc_stats)\n return tc_stats_list",
"def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self",
"def calculate_dataset_metrics(self):\n pass",
"def run_tests():\n with open(FILENAME) as file:\n # Loads testing parameters from the yaml file.\n tests = yaml.safe_load(file)\n\n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results['Last Average Score'] = \"\"\n results['No of Q-Learning episodes'] = \"\"\n\n # run experiments:\n for i, test in enumerate(test_dict):\n grid = Rooms(test[\"env_size\"], testing=True)\n learning = QLearning(grid, test[\"gamma\"], test[\"alpha\"], test[\"agent_start_pos\"])\n e_greedy = Policy(\"e-greedy\", test[\"epsilon\"], test[\"decay\"])\n greedy = Policy(policy_type=\"greedy\")\n experiment = Experiments(grid, learning, greedy, test[\"iters\"],\n test[\"agent_start_pos\"], test[\"test_no\"])\n\n for session in range(test[\"iters\"]):\n learning.run_multiple_episodes(test[\"batch_episodes\"], e_greedy)\n mean_reward = experiment.run_experiments(test[\"exp_per_batch\"])\n\n results.loc[i,'Last Average Score'] = mean_reward\n results.loc[i,'No of Q-Learning episodes'] = (session + 1) * test[\"batch_episodes\"]\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n # plot & save graphs\n experiment.generate_results(test[\"test_no\"], test)\n\n return results",
"def test_evaluate(self):\n # test normalized by 'bbox'\n pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox')\n pck_metric.process(self.data_batch, self.data_samples)\n pck = pck_metric.evaluate(self.batch_size)\n target = {'PCK': 1.0}\n self.assertDictEqual(pck, target)\n\n # test normalized by 'head_size'\n pckh_metric = PCKAccuracy(thr=0.3, norm_item='head')\n pckh_metric.process(self.data_batch, self.data_samples)\n pckh = pckh_metric.evaluate(self.batch_size)\n target = {'PCKh': 1.0}\n self.assertDictEqual(pckh, target)\n\n # test normalized by 'torso_size'\n tpck_metric = PCKAccuracy(thr=0.05, norm_item=['bbox', 'torso'])\n tpck_metric.process(self.data_batch, self.data_samples)\n tpck = tpck_metric.evaluate(self.batch_size)\n self.assertIsInstance(tpck, dict)\n target = {\n 'PCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck, target)",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def run_tests(self, validate=False):\n fitness = 0\n\n for test_set in [self.debugger.PASS, self.debugger.FAIL]:\n passed = self.run_test_set(test_set, validate=validate)\n ratio = passed / len(self.debugger.collectors[test_set])\n fitness += self.weight(test_set) * ratio\n\n return fitness",
"def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)",
"def compute_metrics(self, results: list) -> dict:",
"def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results",
"def _calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics",
"def run_tests():\n with open(FILENAME) as file:\n\n # Loads the test hyper-parameters as dictionaries.\n tests = yaml.safe_load(file)\n \n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results[\"Episode\"] = \"\"\n results['Max average score'] = \"\"\n\n for i, test in enumerate(tests['Tests']):\n\n env = gym.make(test['env'])\n env.reset()\n\n actor_critic = ActorCritic(env, test['episodes'], test['max_score'], \n test['hidden_size'], test['gamma'], test['save'])\n\n ## run training \n best_score, episode, rew_hist = actor_critic.train()\n\n results.loc[i,'Episode'] = episode\n results.loc[i,'Max average score'] = best_score\n\n plot_graphs(test, rew_hist)\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n return results",
"def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results",
"def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics",
"def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. Metrics were only calculated for the provided dose.')",
"def run_faqc(self, **kwargs):\n if self.qc is True:\n build([FaQC.SummarizeQC(fastq_dic=self.fastq_dic,\n num_cpus=self.num_cpus,\n workdir=self.workdir)],\n local_scheduler=self.local_scheduler,\n workers=1)\n qc_dic = {}\n for samp, path in self.fastq_dic.items():\n trim_dir = os.path.join(self.workdir, \"processes\", \"qc\", samp)\n qc_dic[samp] = trim_dir + \"/\" + samp + \".1.trimmed.fastq\" + \":\" + \\\n trim_dir + \"/\" + samp + \".2.trimmed.fastq\" \n return qc_dic\n\n else:\n return self.fastq_dic",
"def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))",
"def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)",
"def _cv_results(self, metrics):\n\n param_keys = self.param_combs[0].keys()\n param_dict = {k: [comb[k] for comb in self.param_combs]\n for k in param_keys}\n df_metric_result = {k: pd.DataFrame({'Metric mean': self.mean[k],\n 'Std. dev.': self.std[k],\n **param_dict})\n for k in metrics.keys()}\n self.cv_results_ = df_metric_result"
] | [
"0.6188606",
"0.6146431",
"0.6113068",
"0.60673046",
"0.59287566",
"0.5926206",
"0.5902293",
"0.5877014",
"0.5784998",
"0.573546",
"0.5707369",
"0.5694776",
"0.56773263",
"0.5656921",
"0.5625058",
"0.5584204",
"0.55599093",
"0.5556537",
"0.55550545",
"0.55543596",
"0.5553796",
"0.5546866",
"0.5515304",
"0.5514842",
"0.54901385",
"0.547845",
"0.5477112",
"0.54697996",
"0.54434395",
"0.5441672"
] | 0.61870587 | 1 |
Given a dictionary of results, computes the overall session QC for each key and aggregates them into a single value | def compute_session_status_from_dict(results):
indices = np.zeros(len(results), dtype=int)
for i, k in enumerate(results):
if k in TaskQC.criteria.keys():
indices[i] = TaskQC._thresholding(results[k], thresholds=TaskQC.criteria[k])
else:
indices[i] = TaskQC._thresholding(results[k], thresholds=TaskQC.criteria['default'])
def key_map(x):
return 'NOT_SET' if x < 0 else list(TaskQC.criteria['default'].keys())[x]
# Criteria map is in order of severity so the max index is our overall QC outcome
session_outcome = key_map(max(indices))
outcomes = dict(zip(results.keys(), map(key_map, indices)))
return session_outcome, outcomes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models",
"def compute_metrics(self, results: list) -> dict:",
"def score_aggregate(results):\n scores = []\n truth_count = detected_count = segment_count = 0\n\n for res in results:\n scores.append(res[\"scores\"])\n truth_count += len(res[\"labels\"])\n detected_count += len(res[\"detected\"])\n segment_count += len(res[\"scores\"][\"segments\"])\n\n ret = dict()\n ret[\"scores\"] = sum_scores(scores)\n ret[\"stats\"] = dict(truth_count=truth_count, detected_count=detected_count, segment_count=segment_count)\n return ret",
"def get_results_stats(self, results, key_by_date=True):\n stats = {}\n for r in results:\n if key_by_date:\n key = r.get(1)\n value = r.get(2)\n else:\n key = r.get(2)\n value = r.get(1)\n\n if key not in stats:\n stats[key] = {}\n\n if value not in stats[key]:\n stats[key][value] = 1\n else:\n stats[key][value] += 1\n\n # sort each keyset\n if not key_by_date:\n stats[key] = self._stats_sort(stats[key])\n\n if stats:\n # only if sorted per key\n if key_by_date:\n stats = self._stats_sort(stats)\n\n return stats",
"def aggregate_results(self, results):\n result = dict()\n result['MAE'] = self.average_dict_items(results, 'MAE')\n result['MdAE'] = self.average_dict_items(results, 'MdAE')\n result['RMSE'] = self.average_dict_items(results, 'RMSE')\n result['SMAPE'] = self.average_dict_items(results, 'SMAPE')\n result['num_values'] = self.average_dict_items(results, 'num_values')\n return result",
"def reduce_result(results):\n agg = OrderedDict()\n for result in results:\n # Keys by model and explore, adds additional values for `passed` to a set\n agg.setdefault((result[\"model\"], result[\"explore\"]), set()).add(\n result[\"passed\"]\n )\n reduced = [\n {\"model\": k[0], \"explore\": k[1], \"passed\": min(v)}\n for k, v in agg.items()\n ]\n return reduced",
"def aggregate_batch_evals(result_dict: dict, all_runs: list) -> dict:\n # Loop over all evals (e.g. b_1_eval_0) and merge + aggregate data\n new_results_dict = {}\n for eval in all_runs:\n all_seeds_for_run = list(result_dict[eval].keys())\n eval_dict = aggregate_single_eval(result_dict[eval], all_seeds_for_run, eval)\n new_results_dict[eval] = eval_dict[eval]\n return new_results_dict",
"def aggregate_over_seeds(result_dict: DotMap, batch_case: bool = False) -> DotMap:\n all_runs = list(result_dict.keys())\n if batch_case:\n # Perform seed aggregation for all evaluations\n new_results_dict = aggregate_batch_evals(result_dict, all_runs)\n else:\n new_results_dict = aggregate_single_eval(result_dict, all_runs, \"eval\")\n return DotMap(new_results_dict, _dynamic=False)",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def average_metrics_results(results):\n res = {}\n\n if len(results) == 0:\n return {}\n\n for key in results[0]:\n try:\n res[key] = sum([r[key] for r in results]) / len(results)\n except TypeError:\n res[key] = [sum(b)/len(results) for b in zip(*[s[key] for s in results])]\n return res",
"def accumulate_study_results(ids, prob):\n sum_result = {}\n cnt_result = {}\n size = prob.shape[0]\n for i in range(size):\n study_id = ids[i]\n idx = int(study_id)\n if idx not in cnt_result:\n cnt_result[idx] = 0.\n sum_result[idx] = np.zeros((1, prob.shape[1]), dtype=np.float32)\n cnt_result[idx] += 1\n sum_result[idx] += prob[i, :]\n for i in cnt_result.keys():\n sum_result[i][:] /= cnt_result[i]\n return sum_result",
"def aggregate_single_eval( # noqa: C901\n result_dict: dict, all_seeds_for_run: list, eval_name: str\n) -> dict:\n new_results_dict = {}\n data_temp = result_dict[all_seeds_for_run[0]]\n # Get all main data source keys (\"meta\", \"stats\", \"time\")\n data_sources = list(data_temp.keys())\n # Get all variables within the data sources\n data_items = {\n data_sources[i]: list(data_temp[data_sources[i]].keys())\n for i in range(len(data_sources))\n }\n # Collect all runs together - data at this point is not modified\n source_to_store = {key: {} for key in data_sources}\n for ds in data_sources:\n data_to_store = {key: [] for key in data_items[ds]}\n for i, o_name in enumerate(data_items[ds]):\n for i, seed_id in enumerate(all_seeds_for_run):\n seed_run = result_dict[seed_id]\n data_to_store[o_name].append(seed_run[ds][o_name][:])\n source_to_store[ds] = data_to_store\n new_results_dict[eval_name] = source_to_store\n\n # Aggregate over the collected runs\n aggregate_sources = {key: {} for key in data_sources}\n for ds in data_sources:\n if ds in [\"time\"]:\n aggregate_dict = {key: {} for key in data_items[ds]}\n for i, o_name in enumerate(data_items[ds]):\n aggregate_dict[o_name] = new_results_dict[eval_name][ds][o_name][0]\n # Mean over stats data\n elif ds in [\"stats\"]:\n aggregate_dict = {key: {} for key in data_items[ds]}\n for i, o_name in enumerate(data_items[ds]):\n if type(new_results_dict[eval_name][ds][o_name][0][0]) not in [\n str,\n bytes,\n np.bytes_,\n np.str_,\n ]:\n # Compute mean and standard deviation over seeds\n mean_tol, std_tol = tolerant_mean(\n new_results_dict[eval_name][ds][o_name]\n )\n aggregate_dict[o_name][\"mean\"] = mean_tol\n aggregate_dict[o_name][\"std\"] = std_tol\n\n # Compute 10, 25, 50, 75, 90 percentiles over seeds\n p50, p10, p25, p75, p90 = tolerant_median(\n new_results_dict[eval_name][ds][o_name]\n )\n aggregate_dict[o_name][\"p50\"] = p50\n aggregate_dict[o_name][\"p10\"] = p10\n aggregate_dict[o_name][\"p25\"] = p25\n aggregate_dict[o_name][\"p75\"] = p75\n aggregate_dict[o_name][\"p90\"] = p90\n else:\n aggregate_dict[o_name] = new_results_dict[eval_name][ds][o_name]\n # Append over all meta data (strings, seeds nothing to mean)\n elif ds == \"meta\":\n aggregate_dict = {}\n for i, o_name in enumerate(data_items[ds]):\n temp = (\n np.array(new_results_dict[eval_name][ds][o_name])\n .squeeze()\n .astype(\"U200\")\n )\n # Get rid of duplicate experiment dir strings\n if o_name in [\n \"experiment_dir\",\n \"eval_id\",\n \"config_fname\",\n \"model_type\",\n ]:\n aggregate_dict[o_name] = str(np.unique(temp)[0])\n else:\n aggregate_dict[o_name] = temp\n\n # Add seeds as clean array of integers to dict\n aggregate_dict[\"seeds\"] = [int(s.split(\"_\")[1]) for s in all_seeds_for_run]\n else:\n raise ValueError\n aggregate_sources[ds] = aggregate_dict\n new_results_dict[eval_name] = aggregate_sources\n return new_results_dict",
"def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n\n # to be read and inserted into db\n totalads = 0 # total number of ads seen during this session\n totaluniqads = len(ads) # does not support multicategories at this point\n\n # for each category, for each test site, count total number of ads seen\n totalad_category = {} \n # for each category, for each test site, count total number of uniq ads seen\n uniqad_category = {}\n \n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:\n bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\\\n Site-Context, BugCount, BugSrc\\n')\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0 # for each site\n uniq_ads = [] # for each site\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(uuid, test_site,\n refresh_num, train_category, 'N/A', bugcount, bug.get_src()))\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads # global count for total ads\n\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:\n # write some metadata information about this session\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n\n for train_category, cat_dict in results.items(): \n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(train_category,\n test_site, num_of_visit, totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))",
"def summary(self):\n summary = defaultdict(int)\n\n for r in self.results:\n summary[r.result] += 1\n\n return summary",
"def process_metrics_overall(\n self, the_dict, names=[\"metric\", \"phase\", \"epoch\", \"performance\"]\n ):\n result = (\n pd.DataFrame(the_dict)\n .reset_index()\n .melt(id_vars=\"index\")\n .set_index([\"index\", \"variable\"])\n .value.apply(pd.Series)\n .stack()\n .reset_index()\n )\n result.columns = names\n return result",
"def _cv_results(self, metrics):\n\n param_keys = self.param_combs[0].keys()\n param_dict = {k: [comb[k] for comb in self.param_combs]\n for k in param_keys}\n df_metric_result = {k: pd.DataFrame({'Metric mean': self.mean[k],\n 'Std. dev.': self.std[k],\n **param_dict})\n for k in metrics.keys()}\n self.cv_results_ = df_metric_result",
"def summarizeResults(results, what, discard=(), minName=None, maxName=None):\n # threadCounts = sorted (transpose(results).keys())\n threadCounts = [t for t in list(transpose(results).keys()) if t not in discard]\n if not isinstance(threadCounts[0], str):\n threadCounts.sort()\n\n sds = {}\n meanValues = {}\n what_SD = \"SD\" if what == \"Mean\" else what + \"_SD\"\n\n for k in list(results.keys()):\n res = results[k]\n meanValues[k] = [\n None if res.get(tc, None) == None else res[tc].__dict__.get(what, None)\n for tc in threadCounts\n ]\n if minName != None and maxName != None:\n sds[k] = [\n None\n if res.get(tc, None) == None\n else convertMinMaxIntoError(res[tc], what, minName, maxName)\n for tc in threadCounts\n ]\n else:\n sds[k] = [\n None\n if res.get(tc, None) == None\n else res[tc].__dict__.get(what_SD, None)\n for tc in threadCounts\n ]\n return (threadCounts, meanValues, sds)",
"def sum(app, args):\n if not args.key:\n db = get_db(app)\n notrans = db.transient.count()\n print(\"No Transient records: \", notrans)\n if notrans > 0:\n print(\"Total data Transient: \", nicesize(\n list(db.transient.aggregate([\n {\"$group\": {\"_id\": None,\n \"total\": {\"$sum\": \"$size\"}}}]))[0]['total']))\n print(\" No Core records: \", db.transient.count())\n return\n\n kname, kinfo = key_info(app.conf, args.key)\n res = _single_sum(app, group_by=kname, force=args.force)\n total_size = int(0)\n total_count = 0\n mgn = len(\"Total\")\n for reshost in res:\n gid = reshost['_id']\n if gid is None:\n mgn = max(4, mgn)\n else:\n mgn = max(len(str(reshost['_id'])), mgn)\n\n fms = \"{:\" + str(mgn) + \"}\\t{:>10}\\t{:>9}\"\n if args.human:\n print(\"# {}:\".format(kname))\n for reshost in res:\n total = reshost['total']\n count = reshost['count']\n total_size += int(total)\n total_count += count\n if args.human:\n total_human = nicesize(total)\n count_human = nicenumber(count)\n categ = reshost['_id']\n if categ is None:\n categ = \"<undefined>\"\n\n print(fms.format(\n categ, total_human, count_human))\n else:\n print(\"{}\\t{}\\t{}\".format(\n reshost['_id'], total, count))\n\n if args.human:\n total_size_human = nicesize(total_size)\n total_count_human = nicenumber(total_count)\n print(fms.format('', '-'*10, '-'*9))\n print(fms.format(\n \"Total\", total_size_human, total_count_human))\n else:\n print(\"Total\\t{}\\t{}\".format(total_size, total_count))",
"def process_result_dict(\n the_dict, names=[\"metric\", \"phase\", \"epoch\", \"performance\"]\n ):\n result = (\n pd.DataFrame(the_dict)\n .reset_index()\n .melt(id_vars=\"index\")\n .set_index([\"index\", \"variable\"])\n .value.apply(pd.Series)\n .stack()\n .reset_index()\n )\n result.columns = names\n return result",
"def get_job_metrics_summary_for_task(query):\n metric_list = ['hs06sec', 'gco2_global']\n metrics = {}\n for m in metric_list:\n metrics[m] = {'finished': 0, 'failed': 0, 'total': 0}\n\n hquery = copy.deepcopy(query)\n hquery['jobstatus__in'] = ('finished', 'failed')\n\n if 'jeditaskid' in hquery:\n\n hs06sec_sum = []\n # getting jobs. Can not use the .annotate() as there can be duplicates\n jobs = []\n jvalues = ['pandaid', 'jobstatus', ] + metric_list\n jobs.extend(Jobsarchived4.objects.filter(**hquery).values(*jvalues))\n jobs.extend(Jobsarchived.objects.filter(**hquery).values(*jvalues))\n jobs = drop_duplicates(jobs)\n\n for job in jobs:\n for m in metric_list:\n metrics[m]['total'] += job[m] if m in job and job[m] is not None else 0\n if job['jobstatus'] == 'finished':\n metrics[m]['finished'] += job[m] if m in job and job[m] is not None else 0\n elif job['jobstatus'] == 'failed':\n metrics[m]['failed'] += job[m] if m in job and job[m] is not None else 0\n\n # getting data from ATLARC DB, only hs06s\n pj_models = get_pandajob_arch_models_by_year(query['modificationtime__castdate__range'])\n if len(pj_models) > 0:\n for pjm in pj_models:\n try:\n hs06sec_sum.extend(pjm.objects.filter(**hquery).values('jobstatus').annotate(hs06secsum=Sum('hs06sec')))\n except Exception as ex:\n _logger.exception('Failed to get hs06sec from {} at ATLARC DB:\\n{}'.format(pjm, ex))\n\n if len(hs06sec_sum) > 0:\n for hs in hs06sec_sum:\n metrics['hs06sec']['total'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n if hs['jobstatus'] == 'finished':\n metrics['hs06sec']['finished'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n elif hs['jobstatus'] == 'failed':\n metrics['hs06sec']['failed'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n\n\n return metrics",
"def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n count_list = []\n for result in self._result_list:\n try:\n count_list.append(result.get_counts(circ_name))\n except (QiskitError, KeyError):\n pass\n\n circ_counts[circ_name] = \\\n build_counts_dict_from_list(count_list)\n\n self._circ_shots[circ_name] = \\\n sum(circ_counts[circ_name].values())\n\n # calculate the heavy output probability\n self._heavy_output_counts[circ_name] = \\\n self._subset_probability(\n self._heavy_outputs[circ_name],\n circ_counts[circ_name])",
"def aggregator(index_keynames, value_keynames, ts_keyname, func, interval = 60 * 5):\n data = {}\n ts = None\n #print ts_keyname\n for parsts, parsdata in func():\n #print parsdata\n #print parsdata[\"log_timestamp\"]\n if ts is None:\n ts = parsts\n key = tuple((parsdata[key] for key in index_keynames))\n values = tuple((int(parsdata[key]) for key in value_keynames))\n if key not in data:\n data[key] = values\n else:\n data[key] = tuple((data[key][index] + int(values[index]) for index in range(len(values))))\n if parsts > (ts + interval):\n for keys, values in data.items():\n yield \"%s\\t%s\\t%s\" % (ts, \"\\t\".join((str(index_key) for index_key in keys)), \"\\t\".join((str(value_key) for value_key in values)))\n ts = None\n data = {}",
"def _print_aggregate_results(\n task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]\n) -> None:\n aggregate_task_result = aggregate_nvs_results(task_results[task])\n print(\"\")\n print(f\"Aggregate results for task={task}:\")\n pretty_print_nvs_metrics(aggregate_task_result)\n print(\"\")",
"def get_test_cases_coverage(session_id):\n tc_stats={}\n tc_stats_list=[]\n total_executed=0\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\"null\"'\n params={\"sid\":session_id}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n tests=c.fetchall()\n conn.close()\n if len(tests)>0:\n for t in tests:\n total_executed=0\n sql=\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n files=c.fetchall()\n conn.close()\n for f in files:\n line_count=get_executable_lines_count_for_file(f[0])\n # get executions\n sql=\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0],\"fid\":f[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n executed=c.fetchone()\n conn.close()\n total_executed+=executed[0]\n # save test case and it's executions\n tc_stats={}\n tc_stats[\"test_id\"]=t[0]\n tc_stats[\"total_executed\"]=total_executed\n tc_stats[\"total_executed\"]\n \n tc_stats_list.append(tc_stats)\n return tc_stats_list",
"def gather_results(dataset: Dataset,\n conf: Namespace\n ) -> \"dict[str, dict[str, any]]\":\n\n data = {}\n\n for k in np.copy(dataset.folds):\n print(f\"\\nFold {k}\", '~'*60)\n\n dataset.set_fold(k)\n calibrated_scores = APPROACHES[conf.approach](dataset=dataset, conf=conf)\n\n data[f'fold{k}'] = {\n 'scores': calibrated_scores,\n 'metrics': get_metrics(calibrated_scores, dataset, conf)\n }\n\n return data",
"def compute_session_status(self):\n if self.passed is None:\n raise AttributeError('passed is None; compute QC first')\n # Get mean passed of each check, or None if passed is None or all NaN\n results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v)\n for k, v in self.passed.items()}\n session_outcome, outcomes = self.compute_session_status_from_dict(results)\n return session_outcome, results, outcomes",
"def calculates_results_stats(results_dic): \n # Creates empty dictionary for results_stats_dic\n results_stats_dic = dict()\n \n # Sets all counters to initial values of zero so that they can \n # be incremented while processing through the images in results_dic \n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n \n # process through the results dictionary\n for key in results_dic:\n \n # Labels Match Exactly\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n\n # TODO: 5a. REPLACE pass with CODE that counts how many pet images of\n # dogs had their breed correctly classified. This happens \n # when the pet image label indicates the image is-a-dog AND \n # the pet image label and the classifier label match. You \n # will need to write a conditional statement that determines\n # when the dog breed is correctly classified and then \n # increments 'n_correct_breed' by 1. Recall 'n_correct_breed' \n # is a key in the results_stats_dic dictionary with it's value \n # representing the number of correctly classified dog breeds.\n # \n # Pet Image Label is a Dog AND Labels match- counts Correct Breed\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n \n # Pet Image Label is a Dog - counts number of dog images\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n \n # Classifier classifies image as Dog (& pet image is a dog)\n # counts number of correct dog classifications\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n\n # TODO: 5b. REPLACE pass with CODE that counts how many pet images \n # that are NOT dogs were correctly classified. This happens \n # when the pet image label indicates the image is-NOT-a-dog \n # AND the classifier label indicates the images is-NOT-a-dog.\n # You will need to write a conditional statement that \n # determines when the classifier label indicates the image \n # is-NOT-a-dog and then increments 'n_correct_notdogs' by 1. \n # Recall the 'else:' above 'pass' already indicates that the \n # pet image label indicates the image is-NOT-a-dog and \n # 'n_correct_notdogs' is a key in the results_stats_dic dictionary \n # with it's value representing the number of correctly \n # classified NOT-a-dog images.\n # \n # Pet Image Label is NOT a Dog\n else:\n # Classifier classifies image as NOT a Dog(& pet image isn't a dog)\n # counts number of correct NOT dog clasifications.\n if results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n\n\n # Calculates run statistics (counts & percentages) below that are calculated\n # using the counters from above.\n\n # calculates number of total images\n results_stats_dic['n_images'] = len(results_dic)\n\n # calculates number of not-a-dog images using - images & dog images counts\n results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] - \n results_stats_dic['n_dogs_img']) \n\n # TODO: 5c. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # matched images. Recall that this can be calculated by the\n # number of correctly matched images ('n_match') divided by the \n # number of images('n_images'). 
This result will need to be \n # multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct for matches\n results_stats_dic['pct_match'] = (results_stats_dic['n_match'] / results_stats_dic['n_images']) * 100\n\n # TODO: 5d. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified dog images. Recall that this can be calculated by \n # the number of correctly classified dog images('n_correct_dogs')\n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct dogs\n results_stats_dic['pct_correct_dogs'] = (results_stats_dic['n_correct_dogs'] / results_stats_dic['n_dogs_img']) * 100\n\n # TODO: 5e. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified breeds of dogs. Recall that this can be calculated \n # by the number of correctly classified breeds of dog('n_correct_breed') \n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct breed of dog\n results_stats_dic['pct_correct_breed'] = (results_stats_dic['n_correct_breed'] / results_stats_dic['n_dogs_img']) * 100\n\n # Calculates % correct not-a-dog images\n # Uses conditional statement for when no 'not a dog' images were submitted \n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = (results_stats_dic['n_correct_notdogs'] /\n results_stats_dic['n_notdogs_img']) * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n\n \n # TODO 5f. REPLACE None with the results_stats_dic dictionary that you \n # created with this function \n return results_stats_dic",
"def extract_score(results):\n total_score = 0;\n total_possible_score = 0;\n for k in results.keys():\n total_score = total_score + results[k][0]\n total_possible_score = total_possible_score + results[k][1]\n return (total_score, total_possible_score)",
"def get_DFGs_result_summary(DFGs):\n DFGs_summary = dict()\n for DFG_name in DFGs:\n DFGs_summary[DFG] = {}\n for res in jenkins_const.RESULTS:\n DFGs_summary[DFG_name][res] = Job.count(\n name='DFG-{}'.format(DFG_name),\n last_build_res=res)\n return DFGs_summary",
"def totalize_measurement(conts_dict, si_model):\n print('######## BEGIN CHECKING TOTAL ########')\n for cont_measure, doc_measure in [\n ['packages', 'total_packages'],\n ['container_weight', 'total_weight'],\n ['container_measurement', 'total_measurement']\n ]:\n doc_qua, doc_unit = split_quantity_unit(si_model[doc_measure])\n if not doc_qua:\n doc_qua = '0'\n if floatable(doc_qua.replace(',', '')):\n doc_qua = float(doc_qua.replace(',', ''))\n else:\n doc_qua = 0\n conts_detail = [\n split_quantity_unit(cont.get(cont_measure, '0'))\n for cont in conts_dict\n ]\n\n print(cont_measure, ':', conts_detail, '|', doc_qua, '-', doc_unit)\n\n conts_unit = list(set([\n cont_unit\n for cont_qua, cont_unit in conts_detail\n ]))\n if len(conts_unit) == 1:\n conts_sum = sum([\n float(cont_qua.replace(',', ''))\n for cont_qua, cont_unit in conts_detail\n if floatable(cont_qua.replace(',', ''))\n ])\n conts_unit = conts_unit[0]\n else:\n conts_sum = 0\n conts_unit = ''\n\n if conts_sum != doc_qua and conts_unit and conts_sum > 0:\n print('### Containers', cont_measure, conts_sum,\n '=/= SI', doc_measure, doc_qua, '###')\n if doc_measure == 'total_packages':\n si_model[doc_measure] = f\"{conts_sum:,.0f}\" + conts_unit\n else:\n si_model[doc_measure] = f\"{conts_sum:,.3f}\" + conts_unit\n\n return si_model"
] | [
"0.6222572",
"0.60406405",
"0.5990933",
"0.5945015",
"0.58555114",
"0.5849532",
"0.5788227",
"0.57747865",
"0.5715895",
"0.5701647",
"0.56428385",
"0.5619109",
"0.5589009",
"0.55545366",
"0.5523717",
"0.5481006",
"0.5478383",
"0.545642",
"0.54334474",
"0.53829503",
"0.5372924",
"0.5372085",
"0.53708786",
"0.5342637",
"0.53384066",
"0.53200006",
"0.5300919",
"0.5294238",
"0.52901095",
"0.5288867"
] | 0.6752876 | 0 |
Computes the overall session QC for each key and aggregates in a single value | def compute_session_status(self):
if self.passed is None:
raise AttributeError('passed is None; compute QC first')
# Get mean passed of each check, or None if passed is None or all NaN
results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v)
for k, v in self.passed.items()}
session_outcome, outcomes = self.compute_session_status_from_dict(results)
return session_outcome, results, outcomes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aggregate(self):\n data_to_track = {}\n for possession in self.possessions_to_track_aggregate:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_aggregate:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"aggregate\",\n data_to_track,\n self.group,\n self.round])",
"def compute_session_status_from_dict(results):\n indices = np.zeros(len(results), dtype=int)\n for i, k in enumerate(results):\n if k in TaskQC.criteria.keys():\n indices[i] = TaskQC._thresholding(results[k], thresholds=TaskQC.criteria[k])\n else:\n indices[i] = TaskQC._thresholding(results[k], thresholds=TaskQC.criteria['default'])\n\n def key_map(x):\n return 'NOT_SET' if x < 0 else list(TaskQC.criteria['default'].keys())[x]\n # Criteria map is in order of severity so the max index is our overall QC outcome\n session_outcome = key_map(max(indices))\n outcomes = dict(zip(results.keys(), map(key_map, indices)))\n return session_outcome, outcomes",
"def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n count_list = []\n for result in self._result_list:\n try:\n count_list.append(result.get_counts(circ_name))\n except (QiskitError, KeyError):\n pass\n\n circ_counts[circ_name] = \\\n build_counts_dict_from_list(count_list)\n\n self._circ_shots[circ_name] = \\\n sum(circ_counts[circ_name].values())\n\n # calculate the heavy output probability\n self._heavy_output_counts[circ_name] = \\\n self._subset_probability(\n self._heavy_outputs[circ_name],\n circ_counts[circ_name])",
"def run_faqc(self, **kwargs):\n if self.qc is True:\n build([FaQC.SummarizeQC(fastq_dic=self.fastq_dic,\n num_cpus=self.num_cpus,\n workdir=self.workdir)],\n local_scheduler=self.local_scheduler,\n workers=1)\n qc_dic = {}\n for samp, path in self.fastq_dic.items():\n trim_dir = os.path.join(self.workdir, \"processes\", \"qc\", samp)\n qc_dic[samp] = trim_dir + \"/\" + samp + \".1.trimmed.fastq\" + \":\" + \\\n trim_dir + \"/\" + samp + \".2.trimmed.fastq\" \n return qc_dic\n\n else:\n return self.fastq_dic",
"def get_test_cases_coverage(session_id):\n tc_stats={}\n tc_stats_list=[]\n total_executed=0\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\"null\"'\n params={\"sid\":session_id}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n tests=c.fetchall()\n conn.close()\n if len(tests)>0:\n for t in tests:\n total_executed=0\n sql=\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n files=c.fetchall()\n conn.close()\n for f in files:\n line_count=get_executable_lines_count_for_file(f[0])\n # get executions\n sql=\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0],\"fid\":f[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n executed=c.fetchone()\n conn.close()\n total_executed+=executed[0]\n # save test case and it's executions\n tc_stats={}\n tc_stats[\"test_id\"]=t[0]\n tc_stats[\"total_executed\"]=total_executed\n tc_stats[\"total_executed\"]\n \n tc_stats_list.append(tc_stats)\n return tc_stats_list",
"def sum(app, args):\n if not args.key:\n db = get_db(app)\n notrans = db.transient.count()\n print(\"No Transient records: \", notrans)\n if notrans > 0:\n print(\"Total data Transient: \", nicesize(\n list(db.transient.aggregate([\n {\"$group\": {\"_id\": None,\n \"total\": {\"$sum\": \"$size\"}}}]))[0]['total']))\n print(\" No Core records: \", db.transient.count())\n return\n\n kname, kinfo = key_info(app.conf, args.key)\n res = _single_sum(app, group_by=kname, force=args.force)\n total_size = int(0)\n total_count = 0\n mgn = len(\"Total\")\n for reshost in res:\n gid = reshost['_id']\n if gid is None:\n mgn = max(4, mgn)\n else:\n mgn = max(len(str(reshost['_id'])), mgn)\n\n fms = \"{:\" + str(mgn) + \"}\\t{:>10}\\t{:>9}\"\n if args.human:\n print(\"# {}:\".format(kname))\n for reshost in res:\n total = reshost['total']\n count = reshost['count']\n total_size += int(total)\n total_count += count\n if args.human:\n total_human = nicesize(total)\n count_human = nicenumber(count)\n categ = reshost['_id']\n if categ is None:\n categ = \"<undefined>\"\n\n print(fms.format(\n categ, total_human, count_human))\n else:\n print(\"{}\\t{}\\t{}\".format(\n reshost['_id'], total, count))\n\n if args.human:\n total_size_human = nicesize(total_size)\n total_count_human = nicenumber(total_count)\n print(fms.format('', '-'*10, '-'*9))\n print(fms.format(\n \"Total\", total_size_human, total_count_human))\n else:\n print(\"Total\\t{}\\t{}\".format(total_size, total_count))",
"def get_aggregated_values(self):\n if not self._initialized:\n raise Exception(\"To readout you must first initialize, then\"\n \"process batches!\")\n else:\n ret_vals = [q.readout() for q in self.quantities]\n return dict(zip(self.quantity_names, ret_vals))",
"def session_counting (self, target_dict, IEI_timeRange = (0, 900, 1), cal_IEI = True, cal_duration = True, cal_latency = True, cal_inputDistribution = True, num_bins = 10, **kwards):\r\n if len(self.df) == 0:\r\n #if empty then pass without analysis\r\n pass\r\n else:\r\n get_kwards = lambda x: (\"\",\"\") if x == None else x\r\n input1OnOff = get_kwards(kwards.get('Input1OnOff'))\r\n input2OnOff = get_kwards(kwards.get('Input2OnOff'))\r\n input3OnOff = get_kwards(kwards.get('Input3OnOff'))\r\n input4OnOff = get_kwards(kwards.get('Input4OnOff'))\r\n \r\n #get a start and end time point\r\n if 'Start' in self.df['States'].values:\r\n start = self.df[(self.df['Events'] == '') & (self.df['States'] == 'Start')]['Time']\r\n else:\r\n start = pd.Series({'Start': self.df['Time'].values[0]-1})\r\n \r\n if 'Finish' in self.df['States'].values:\r\n end = self.df[(self.df['Events'] == '') & (self.df['States'] == 'Finish')]['Time']\r\n else:\r\n end = pd.Series({'Finish': self.df['Time'].values[-1]+1}) #plus 1 second\r\n \r\n #get data from each event window\r\n event = self.df[(self.df['Time']>= start.iloc[0])&(self.df['Time']<= end.iloc[0])]\r\n \r\n result_dict = {}\r\n for k, v in target_dict.items():\r\n \r\n if k == 'Input1':\r\n result_dict [v[1]] = self.__counting (event, v, self.subject, IEI_timeRange, cal_IEI, cal_duration, cal_latency, cal_inputDistribution, num_bins, InputOnOff = input1OnOff)\r\n elif k == 'Input2':\r\n result_dict [v[1]] = self.__counting (event, v, self.subject, IEI_timeRange, cal_IEI, cal_duration, cal_latency, cal_inputDistribution, num_bins, InputOnOff = input2OnOff)\r\n elif k == 'Input3':\r\n result_dict [v[1]] = self.__counting (event, v, self.subject, IEI_timeRange, cal_IEI, cal_duration, cal_latency, cal_inputDistribution, num_bins, InputOnOff = input3OnOff)\r\n elif k == 'Input4':\r\n result_dict [v[1]] = self.__counting (event, v, self.subject, IEI_timeRange, cal_IEI, cal_duration, cal_latency, cal_inputDistribution, num_bins, InputOnOff = input4OnOff)\r\n else:\r\n result_dict [v[1]] = self.__counting (event, v, self.subject)\r\n \r\n return result_dict",
"def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models",
"def __calculate_agg_shap_scores(self):\n self.agg_stats_timer = SimbaTimer(start=True)\n for clf_state, clf_state_name in zip(range(2), [\"ABSENT\", \"PRESENT\"]):\n self.results = {}\n self.df_save_path = os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, clf_state_name, self.datetime\n ),\n )\n shap_clf_sliced = self.shap_df[\n self.shap_df[self.classifier_name] == clf_state\n ]\n for feature_category, feature_time_bin in itertools.product(\n self.unique_feature_category_names, self.unique_time_bin_names\n ):\n if feature_category not in self.results.keys():\n self.results[feature_category] = {}\n feature_names_sliced = list(\n self.feature_categories_df.loc[\n :, (feature_category, feature_time_bin)\n ]\n )\n feature_names_sliced = [\n x\n for x in feature_names_sliced\n if str(x) != \"nan\" and x in shap_clf_sliced\n ]\n self.results[feature_category][feature_time_bin] = round(\n shap_clf_sliced[feature_names_sliced].sum(axis=1).mean() * 100, 6\n )\n self.__save_aggregate_scores()\n self.agg_stats_timer.stop_timer()\n self.visualization_timer = SimbaTimer(start=True)\n\n stdout_success(\n msg=f\"Aggregate SHAP statistics saved in {self.shap_logs_path} directory\",\n elapsed_time=self.agg_stats_timer.elapsed_time_str,\n )",
"def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n\n # to be read and inserted into db\n totalads = 0 # total number of ads seen during this session\n totaluniqads = len(ads) # does not support multicategories at this point\n\n # for each category, for each test site, count total number of ads seen\n totalad_category = {} \n # for each category, for each test site, count total number of uniq ads seen\n uniqad_category = {}\n \n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:\n bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\\\n Site-Context, BugCount, BugSrc\\n')\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0 # for each site\n uniq_ads = [] # for each site\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(uuid, test_site,\n refresh_num, train_category, 'N/A', bugcount, bug.get_src()))\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads # global count for total ads\n\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:\n # write some metadata information about this session\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n\n for train_category, cat_dict in results.items(): \n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(train_category,\n test_site, num_of_visit, totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))",
"def do_report(input):\n for (session_id, group) in itertools.groupby(input, get_session_id):\n group = list(group)\n elapsed = [finfo.elapsed for finfo in group]\n n = float(len(elapsed))\n mean = sum(elapsed) / n\n s = math.sqrt(\n (1 / (n - 1)) * sum((e - mean) ** 2 for e in elapsed)\n )\n yield SummaryData(\n session_id,\n group[0].message,\n min(elapsed),\n max(elapsed),\n mean,\n s\n )",
"def aggregator(index_keynames, value_keynames, ts_keyname, func, interval = 60 * 5):\n data = {}\n ts = None\n #print ts_keyname\n for parsts, parsdata in func():\n #print parsdata\n #print parsdata[\"log_timestamp\"]\n if ts is None:\n ts = parsts\n key = tuple((parsdata[key] for key in index_keynames))\n values = tuple((int(parsdata[key]) for key in value_keynames))\n if key not in data:\n data[key] = values\n else:\n data[key] = tuple((data[key][index] + int(values[index]) for index in range(len(values))))\n if parsts > (ts + interval):\n for keys, values in data.items():\n yield \"%s\\t%s\\t%s\" % (ts, \"\\t\".join((str(index_key) for index_key in keys)), \"\\t\".join((str(value_key) for value_key in values)))\n ts = None\n data = {}",
"def qc_metrics(\n data: AnnData,\n mito_prefix: str = \"MT-\",\n min_genes: int = 500,\n max_genes: int = 6000,\n min_umis: int = 100,\n max_umis: int = 600000,\n percent_mito: float = 10.0,\n percent_cells: float = 0.05,\n) -> None:\n\n data.obs[\"passed_qc\"] = False\n\n data.obs[\"n_genes\"] = data.X.getnnz(axis=1)\n data.obs[\"n_counts\"] = data.X.sum(axis=1).A1\n\n mito_prefixes = mito_prefix.split(\",\")\n\n def startswith(name):\n for prefix in mito_prefixes:\n if name.startswith(prefix):\n return True\n return False\n\n mito_genes = data.var_names.map(startswith).values.nonzero()[0]\n data.obs[\"percent_mito\"] = (data.X[:, mito_genes].sum(axis=1).A1 / np.maximum(\n data.obs[\"n_counts\"].values, 1.0\n )) * 100\n\n # Assign passed_qc\n filters = [\n data.obs[\"n_genes\"] >= min_genes,\n data.obs[\"n_genes\"] < max_genes,\n data.obs[\"n_counts\"] >= min_umis,\n data.obs[\"n_counts\"] < max_umis,\n data.obs[\"percent_mito\"] < percent_mito,\n ]\n\n data.obs.loc[np.logical_and.reduce(filters), \"passed_qc\"] = True\n\n var = data.var\n data = data[\n data.obs[\"passed_qc\"]\n ] # compute gene stats in space of filtered cells only\n\n var[\"n_cells\"] = data.X.getnnz(axis=0)\n var[\"percent_cells\"] = (var[\"n_cells\"] / data.shape[0]) * 100\n var[\"robust\"] = var[\"percent_cells\"] >= percent_cells\n var[\"highly_variable_features\"] = var[\n \"robust\"\n ] # default all robust genes are \"highly\" variable",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def compute(self, download_data=None):\n if self.extractor is None:\n # If download_data is None, decide based on whether eid or session path was provided\n ensure_data = self.download_data if download_data is None else download_data\n self.load_data(download_data=ensure_data)\n self.log.info(f\"Session {self.session_path}: Running QC on habituation data...\")\n\n # Initialize checks\n prefix = '_task_'\n data = self.extractor.data\n metrics = {}\n passed = {}\n\n # Check all reward volumes == 3.0ul\n check = prefix + 'reward_volumes'\n metrics[check] = data['rewardVolume']\n passed[check] = metrics[check] == 3.0\n\n # Check session durations are increasing in steps >= 12 minutes\n check = prefix + 'habituation_time'\n if not self.one or not self.session_path:\n self.log.warning('unable to determine session trials without ONE')\n metrics[check] = passed[check] = None\n else:\n subject, session_date = self.session_path.parts[-3:-1]\n # compute from the date specified\n date_minus_week = (\n datetime.strptime(session_date, '%Y-%m-%d') - timedelta(days=7)\n ).strftime('%Y-%m-%d')\n sessions = self.one.alyx.rest('sessions', 'list', subject=subject,\n date_range=[date_minus_week, session_date],\n task_protocol='habituation')\n # Remove the current session if already registered\n if sessions and sessions[0]['start_time'].startswith(session_date):\n sessions = sessions[1:]\n metric = ([0, data['intervals'][-1, 1] - data['intervals'][0, 0]] +\n [(datetime.fromisoformat(x['end_time']) -\n datetime.fromisoformat(x['start_time'])).total_seconds() / 60\n for x in [self.one.alyx.get(s['url']) for s in sessions]])\n\n # The duration from raw trial data\n # duration = map(float, self.extractor.raw_data[-1]['elapsed_time'].split(':'))\n # duration = timedelta(**dict(zip(('hours', 'minutes', 'seconds'),\n # duration))).total_seconds() / 60\n metrics[check] = np.array(metric)\n passed[check] = np.diff(metric) >= 12\n\n # Check event orders: trial_start < stim on < stim center < feedback < stim off\n check = prefix + 'trial_event_sequence'\n nans = (\n np.isnan(data[\"intervals\"][:, 0]) | # noqa\n np.isnan(data[\"stimOn_times\"]) | # noqa\n np.isnan(data[\"stimCenter_times\"]) |\n np.isnan(data[\"valveOpen_times\"]) | # noqa\n np.isnan(data[\"stimOff_times\"])\n )\n a = np.less(data[\"intervals\"][:, 0], data[\"stimOn_times\"], where=~nans)\n b = np.less(data[\"stimOn_times\"], data[\"stimCenter_times\"], where=~nans)\n c = np.less(data[\"stimCenter_times\"], data[\"valveOpen_times\"], where=~nans)\n d = np.less(data[\"valveOpen_times\"], data[\"stimOff_times\"], where=~nans)\n\n metrics[check] = a & b & c & d & ~nans\n passed[check] = metrics[check].astype(float)\n\n # Check that the time difference between the visual stimulus center-command being\n # triggered and the stimulus effectively appearing in the center is smaller than 150 ms.\n check = prefix + 'stimCenter_delays'\n metric = np.nan_to_num(data[\"stimCenter_times\"] - data[\"stimCenterTrigger_times\"],\n nan=np.inf)\n passed[check] = (metric <= 0.15) & (metric > 0)\n metrics[check] = metric\n\n # Phase check\n check = prefix + 'phase'\n metric = data['phase']\n passed[check] = (metric <= 2 * np.pi) & (metric >= 0)\n metrics[check] = metric\n\n check = prefix + 'phase_distribution'\n metric, _ = np.histogram(data['phase'])\n _, p = chisquare(metric)\n passed[check] = p < 0.05\n metrics[check] = metric\n\n # Checks common to training QC\n checks = [check_goCue_delays, check_stimOn_goCue_delays,\n check_stimOn_delays, check_stimOff_delays]\n for 
fcn in checks:\n check = prefix + fcn.__name__[6:]\n metrics[check], passed[check] = fcn(data)\n\n self.metrics, self.passed = (metrics, passed)",
"def calc_sim_collector(self, key, values):\r\n (rest1, rest2), common_ratings = key, values\r\n\t #your code here\r\n yield (rest1, rest2), (rho, n_common)",
"def test_metric(self, qset: Iterator[Tuple[str, float]]) -> Dict[str, float]:\n res = dict(mks0=0.0, mks1=0.0, mks2=0.0, sum_weights=0.0, sum_wlen=0.0, n=0)\n hist = {k: {} for k in {\"mks0\", \"mks1\", \"mks2\", \"l\"}} # pylint: disable=C0208\n wei = {k: {} for k in hist}\n res[\"hist\"] = hist\n res[\"histnow\"] = wei\n\n for el, _ in self.enumerate_test_metric(qset):\n le = len(el.value)\n w = el.weight\n res[\"mks0\"] += w * el.mks0\n res[\"mks1\"] += w * el.mks1\n res[\"mks2\"] += w * el.mks2\n res[\"sum_weights\"] += w\n res[\"sum_wlen\"] += w * le\n res[\"n\"] += 1\n\n if el.mks0 not in hist[\"mks0\"]:\n hist[\"mks0\"][el.mks0] = w\n wei[\"mks0\"][el.mks0] = 1\n else:\n hist[\"mks0\"][el.mks0] += w\n wei[\"mks0\"][el.mks0] += 1\n if el.mks1 not in hist[\"mks1\"]:\n hist[\"mks1\"][el.mks1] = w\n wei[\"mks1\"][el.mks1] = 1\n else:\n hist[\"mks1\"][el.mks1] += w\n wei[\"mks1\"][el.mks1] += 1\n if el.mks2 not in hist[\"mks2\"]:\n hist[\"mks2\"][el.mks2] = w\n wei[\"mks2\"][el.mks2] = 1\n else:\n hist[\"mks2\"][el.mks2] += w\n wei[\"mks2\"][el.mks2] += 1\n if le not in hist[\"l\"]:\n hist[\"l\"][le] = w\n wei[\"l\"][le] = 1\n else:\n hist[\"l\"][le] += w\n wei[\"l\"][le] += 1\n return res",
"def _group_sessions(self, sessions):\n session_dict = collections.defaultdict(list)\n for session in sessions:\n session_dict[session.query].append(session)\n return session_dict",
"def aggregate_data(mts, feature, target):\r\n set_dict = dict()\r\n set_dict['mt'] = mts\r\n set_dict['feature'] = feature\r\n set_dict['target'] = target\r\n \r\n return set_dict",
"def summary(data, key=itemgetter(0), value=itemgetter(1)):\n\n for k, group in groupby(data, key):\n yield (k, sum(value(row) for row in group))",
"def get_job_metrics_summary_for_task(query):\n metric_list = ['hs06sec', 'gco2_global']\n metrics = {}\n for m in metric_list:\n metrics[m] = {'finished': 0, 'failed': 0, 'total': 0}\n\n hquery = copy.deepcopy(query)\n hquery['jobstatus__in'] = ('finished', 'failed')\n\n if 'jeditaskid' in hquery:\n\n hs06sec_sum = []\n # getting jobs. Can not use the .annotate() as there can be duplicates\n jobs = []\n jvalues = ['pandaid', 'jobstatus', ] + metric_list\n jobs.extend(Jobsarchived4.objects.filter(**hquery).values(*jvalues))\n jobs.extend(Jobsarchived.objects.filter(**hquery).values(*jvalues))\n jobs = drop_duplicates(jobs)\n\n for job in jobs:\n for m in metric_list:\n metrics[m]['total'] += job[m] if m in job and job[m] is not None else 0\n if job['jobstatus'] == 'finished':\n metrics[m]['finished'] += job[m] if m in job and job[m] is not None else 0\n elif job['jobstatus'] == 'failed':\n metrics[m]['failed'] += job[m] if m in job and job[m] is not None else 0\n\n # getting data from ATLARC DB, only hs06s\n pj_models = get_pandajob_arch_models_by_year(query['modificationtime__castdate__range'])\n if len(pj_models) > 0:\n for pjm in pj_models:\n try:\n hs06sec_sum.extend(pjm.objects.filter(**hquery).values('jobstatus').annotate(hs06secsum=Sum('hs06sec')))\n except Exception as ex:\n _logger.exception('Failed to get hs06sec from {} at ATLARC DB:\\n{}'.format(pjm, ex))\n\n if len(hs06sec_sum) > 0:\n for hs in hs06sec_sum:\n metrics['hs06sec']['total'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n if hs['jobstatus'] == 'finished':\n metrics['hs06sec']['finished'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n elif hs['jobstatus'] == 'failed':\n metrics['hs06sec']['failed'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n\n\n return metrics",
"def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics",
"def metrics_group():",
"def association_rules_baseline(train_sessions):\n comatrix = collections.defaultdict(list)\n for session in train_sessions:\n for (x, y) in itertools.permutations(session, 2):\n comatrix[x].append(y)\n return comatrix",
"def selectedComparisons(samples_dict):\n sectors=\"peripherals\",\"intermediaries\",\"hubs\"\n ks_measures={}\n for analyses_type in samples_dict[sectors[0]]:\n ks_measures[analyses_type]={}\n for analysis_grouping in samples_dict[sectors[0]][analyses_type]:\n ks_measures[analyses_type][analysis_grouping]={}\n if samples_dict[sectors[0]][analyses_type][analysis_grouping]==dict:\n ks_measures[analyses_type][analysis_grouping]={}\n for analysis in samples_dict[sectors[0]][analyses_type][analysis_grouping]:\n for var in samples_dict[sectors[0]][analyses_type][analysis_grouping][analysis]:\n samples_peripherals=samples_dict[sectors[0]][analyses_type][analysis_grouping][analysis]\n samples_intermediaries=samples_dict[sectors[1]][analyses_type][analysis_grouping][analysis]\n samples_hubs=samples_dict[sectors[2]][analyses_type][analysis_grouping][analysis]\n ks_measures[analysis][\"peripherals_intermediaries\"]=P.kolmogorovSmirnovTest(samples_peripherals,samples_intermediaries)\n ks_measures[analysis][\"peripherals_hubs\"]=P.kolmogorovSmirnovTest(samples_peripherals,samples_hubs)\n ks_measures[analysis][\"hubs_intermediaries\"]=P.kolmogorovSmirnovTest(samples_hubs,samples_intermediaries)\n else:\n for var in samples_dict[sectors[0]][analyses_type][analysis_grouping]:\n samples_peripherals=samples_dict[sectors[0]][analyses_type][analysis_grouping]\n samples_intermediaries=samples_dict[sectors[1]][analyses_type][analysis_grouping]\n samples_hubs=samples_dict[sectors[2]][analyses_type][analysis_grouping]\n\n\n\n samples[sector][analyses][analysis_grouping]=updateDict(samples[sector][analyses][analysis_grouping],getSamples(authors_analysis[analyses][author][analysis_grouping]))",
"def evaluate(self, click_model, search_sessions):\n\n # Only use queries that occur more than MINUMUM_OCCURENCES times and have a true relevance\n counter = collections.Counter([session.query for session in search_sessions])\n useful_sessions = [query_id for query_id in counter if counter[query_id] >= self.minimum_occurences and query_id in self.relevances]\n\n # Group sessions by query\n sessions_dict = self._group_sessions_if_useful(search_sessions, useful_sessions)\n total_ndcg = 0\n not_useful = 0\n\n # For every useful query get the predicted relevance and compute NDCG\n for query_id in useful_sessions:\n \n rel = self.relevances[query_id]\n ideal_ranking = sorted(rel.values(),reverse = True)[:5]\n \n # Only use query if there is a document with a positive ranking. (Otherwise IDCG will be 0 -> NDCG undetermined.)\n if not any(ideal_ranking):\n not_useful += 1\n continue\n \n current_sessions = sessions_dict[query_id]\n pred_rels = dict()\n for session in current_sessions:\n for rank, result in enumerate(session.web_results):\n if not result.id in pred_rels:\n pred_rels[result.id] = click_model.predict_relevance(session.query, result.id)\n ranking = sorted([doc for doc in pred_rels],key = lambda doc : pred_rels[doc], reverse = True)\n \n ranking_relevances = self.get_relevances(query_id, ranking[:5])\n \n dcg = self.dcg(ranking_relevances)\n idcg = self.dcg(ideal_ranking) \n ndcg = dcg / idcg\n total_ndcg += ndcg\n\n # If too few queries there might not be any useful queries that also have a ranking in the true_relevances.\n assert not len(useful_sessions)-not_useful is 0\n\n # Average NDCG over all queries\n return total_ndcg / (len(useful_sessions)-not_useful)",
"def test_get_qc_metrics(integrated_ff):\n\n key, ff_env = integrated_ff['ff_key'], integrated_ff['ff_env']\n uuid = '331106bc-8535-3338-903e-854af460b544'\n qc_metrics = ff_utils.get_associated_qc_metrics(uuid, key=key, ff_env=ff_env)\n assert len(qc_metrics.keys()) == 1\n assert '131106bc-8535-4448-903e-854abbbbbbbb' in qc_metrics\n target_qc = qc_metrics['131106bc-8535-4448-903e-854abbbbbbbb']\n assert 'QualityMetric' in target_qc['values']['@type']\n assert target_qc['organism'] == 'human'\n assert target_qc['experiment_type'] == 'Dilution Hi-C'\n assert target_qc['experiment_subclass'] == 'Hi-C'\n assert target_qc['source_file_association'] == 'processed_files'\n assert target_qc['source_experiment'] == '4DNEXO67APV1'\n assert target_qc['source_experimentSet'] == '4DNESOPFAAA1'\n assert target_qc['biosource_summary'] == \"GM12878\"\n\n kwargs = { # do same as above w/ kwargs, specify to include raw files this time\n 'key': key,\n 'ff_env': ff_env,\n 'include_raw_files': True\n }\n qc_metrics = ff_utils.get_associated_qc_metrics(uuid, **kwargs)\n assert len(qc_metrics.keys()) == 2\n assert '131106bc-8535-4448-903e-854abbbbbbbb' in qc_metrics\n assert '4c9dabc6-61d6-4054-a951-c4fdd0023800' in qc_metrics\n assert 'QualityMetric' in qc_metrics['131106bc-8535-4448-903e-854abbbbbbbb']['values']['@type']\n assert 'QualityMetric' in qc_metrics['4c9dabc6-61d6-4054-a951-c4fdd0023800']['values']['@type']",
"def qasmCircuitResults(self):\n returnedDictionary={}\n self.circutDrawing = self.draw()\n self.blochSpheres=self.separatedBlochSpheres()\n returnedDictionary[\"wires\"]=self.num_qubits\n returnedDictionary[\"probabilities\"] = self.separatedProbabilities()\n #returnedDictionary[\"blochSpheres\"] = self.separatedBlochSpheres()\n returnedDictionary[\"diracNotation\"] = self.diracNotation()\n returnedDictionary['chart'] = self.graph()\n returnedDictionary[\"link\"] = \"\"\n #returnedDictionary[\"qasmRows\"] = np.transpose(cols).tolist()\n \n if self.API_TOKEN != \"\":\n returnedDictionary[\"link\"] = self.runOnIBMQ()\n \n return returnedDictionary",
"def execute(self):\r\n factors = self.factors\r\n variables = list(self.variables)\r\n variables.remove(self.query)\r\n\r\n for fac in factors:\r\n self.set_observations(fac)\r\n\r\n for var in variables:\r\n if var not in self.obs:\r\n factors = self.eliminate_variable(var, factors)\r\n\r\n result = factor_module.multiply_batch(self.query, factors)\r\n denominator = sum(result.get_cpt())\r\n print(denominator)\r\n return {val: pr / denominator for val, pr in zip([True, False], result.get_cpt())}"
] | [
"0.5516195",
"0.5445356",
"0.5358602",
"0.5349585",
"0.5333742",
"0.5271777",
"0.52661145",
"0.5255679",
"0.5213781",
"0.5190634",
"0.51589733",
"0.51226103",
"0.51122093",
"0.50610095",
"0.5024973",
"0.501899",
"0.50044125",
"0.49149823",
"0.49074388",
"0.48728678",
"0.4871925",
"0.48680615",
"0.48478523",
"0.48456863",
"0.48396322",
"0.4832546",
"0.48309195",
"0.47907776",
"0.4782801",
"0.47802615"
] | 0.5601444 | 0 |
Compute and store the QC metrics. Runs the QC on the session and stores a map of the metrics for each datapoint for each test, and a map of which datapoints passed for each test | def compute(self, download_data=None):
if self.extractor is None:
# If download_data is None, decide based on whether eid or session path was provided
ensure_data = self.download_data if download_data is None else download_data
self.load_data(download_data=ensure_data)
self.log.info(f"Session {self.session_path}: Running QC on habituation data...")
# Initialize checks
prefix = '_task_'
data = self.extractor.data
metrics = {}
passed = {}
# Check all reward volumes == 3.0ul
check = prefix + 'reward_volumes'
metrics[check] = data['rewardVolume']
passed[check] = metrics[check] == 3.0
# Check session durations are increasing in steps >= 12 minutes
check = prefix + 'habituation_time'
if not self.one or not self.session_path:
self.log.warning('unable to determine session trials without ONE')
metrics[check] = passed[check] = None
else:
subject, session_date = self.session_path.parts[-3:-1]
# compute from the date specified
date_minus_week = (
datetime.strptime(session_date, '%Y-%m-%d') - timedelta(days=7)
).strftime('%Y-%m-%d')
sessions = self.one.alyx.rest('sessions', 'list', subject=subject,
date_range=[date_minus_week, session_date],
task_protocol='habituation')
# Remove the current session if already registered
if sessions and sessions[0]['start_time'].startswith(session_date):
sessions = sessions[1:]
metric = ([0, data['intervals'][-1, 1] - data['intervals'][0, 0]] +
[(datetime.fromisoformat(x['end_time']) -
datetime.fromisoformat(x['start_time'])).total_seconds() / 60
for x in [self.one.alyx.get(s['url']) for s in sessions]])
# The duration from raw trial data
# duration = map(float, self.extractor.raw_data[-1]['elapsed_time'].split(':'))
# duration = timedelta(**dict(zip(('hours', 'minutes', 'seconds'),
# duration))).total_seconds() / 60
metrics[check] = np.array(metric)
passed[check] = np.diff(metric) >= 12
# Check event orders: trial_start < stim on < stim center < feedback < stim off
check = prefix + 'trial_event_sequence'
nans = (
np.isnan(data["intervals"][:, 0]) | # noqa
np.isnan(data["stimOn_times"]) | # noqa
np.isnan(data["stimCenter_times"]) |
np.isnan(data["valveOpen_times"]) | # noqa
np.isnan(data["stimOff_times"])
)
a = np.less(data["intervals"][:, 0], data["stimOn_times"], where=~nans)
b = np.less(data["stimOn_times"], data["stimCenter_times"], where=~nans)
c = np.less(data["stimCenter_times"], data["valveOpen_times"], where=~nans)
d = np.less(data["valveOpen_times"], data["stimOff_times"], where=~nans)
metrics[check] = a & b & c & d & ~nans
passed[check] = metrics[check].astype(float)
# Check that the time difference between the visual stimulus center-command being
# triggered and the stimulus effectively appearing in the center is smaller than 150 ms.
check = prefix + 'stimCenter_delays'
metric = np.nan_to_num(data["stimCenter_times"] - data["stimCenterTrigger_times"],
nan=np.inf)
passed[check] = (metric <= 0.15) & (metric > 0)
metrics[check] = metric
# Phase check
check = prefix + 'phase'
metric = data['phase']
passed[check] = (metric <= 2 * np.pi) & (metric >= 0)
metrics[check] = metric
check = prefix + 'phase_distribution'
metric, _ = np.histogram(data['phase'])
_, p = chisquare(metric)
passed[check] = p < 0.05
metrics[check] = metric
# Checks common to training QC
checks = [check_goCue_delays, check_stimOn_goCue_delays,
check_stimOn_delays, check_stimOff_delays]
for fcn in checks:
check = prefix + fcn.__name__[6:]
metrics[check], passed[check] = fcn(data)
self.metrics, self.passed = (metrics, passed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute(self, **kwargs):\n if self.extractor is None:\n kwargs['download_data'] = kwargs.pop('download_data', self.download_data)\n self.load_data(**kwargs)\n self.log.info(f\"Session {self.session_path}: Running QC on behavior data...\")\n self.metrics, self.passed = get_bpodqc_metrics_frame(\n self.extractor.data,\n wheel_gain=self.extractor.settings['STIM_GAIN'], # The wheel gain\n photodiode=self.extractor.frame_ttls,\n audio=self.extractor.audio_ttls,\n re_encoding=self.extractor.wheel_encoding or 'X1',\n min_qt=self.extractor.settings.get('QUIESCENT_PERIOD') or 0.2\n )\n return",
"def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])",
"def compute_session_status(self):\n if self.passed is None:\n raise AttributeError('passed is None; compute QC first')\n # Get mean passed of each check, or None if passed is None or all NaN\n results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v)\n for k, v in self.passed.items()}\n session_outcome, outcomes = self.compute_session_status_from_dict(results)\n return session_outcome, results, outcomes",
"def get_bpodqc_metrics_frame(data, **kwargs):\n def is_metric(x):\n return isfunction(x) and x.__name__.startswith('check_')\n # Find all methods that begin with 'check_'\n checks = getmembers(sys.modules[__name__], is_metric)\n prefix = '_task_' # Extended QC fields will start with this\n # Method 'check_foobar' stored with key '_task_foobar' in metrics map\n qc_metrics_map = {prefix + k[6:]: fn(data, **kwargs) for k, fn in checks}\n\n # Split metrics and passed frames\n metrics = {}\n passed = {}\n for k in qc_metrics_map:\n metrics[k], passed[k] = qc_metrics_map[k]\n\n # Add a check for trial level pass: did a given trial pass all checks?\n n_trials = data['intervals'].shape[0]\n # Trial-level checks return an array the length that equals the number of trials\n trial_level_passed = [m for m in passed.values()\n if isinstance(m, Sized) and len(m) == n_trials]\n name = prefix + 'passed_trial_checks'\n metrics[name] = reduce(np.logical_and, trial_level_passed or (None, None))\n passed[name] = metrics[name].astype(float) if trial_level_passed else None\n\n return metrics, passed",
"def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests",
"def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()",
"def test_evaluate(self):\n # test normalized by 'bbox_size'\n jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.5, norm_item='bbox')\n jhmdb_pck_metric.process(self.data_batch, self.data_samples)\n pck_results = jhmdb_pck_metric.evaluate(self.batch_size)\n target = {\n 'Head PCK': 1.0,\n 'Sho PCK': 1.0,\n 'Elb PCK': 1.0,\n 'Wri PCK': 1.0,\n 'Hip PCK': 1.0,\n 'Knee PCK': 1.0,\n 'Ank PCK': 1.0,\n 'PCK': 1.0,\n }\n self.assertDictEqual(pck_results, target)\n\n # test normalized by 'torso_size'\n jhmdb_tpck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item='torso')\n jhmdb_tpck_metric.process(self.data_batch, self.data_samples)\n tpck_results = jhmdb_tpck_metric.evaluate(self.batch_size)\n target = {\n 'Head tPCK': 1.0,\n 'Sho tPCK': 1.0,\n 'Elb tPCK': 1.0,\n 'Wri tPCK': 1.0,\n 'Hip tPCK': 1.0,\n 'Knee tPCK': 1.0,\n 'Ank tPCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck_results, target)",
"def qc_metrics(\n data: AnnData,\n mito_prefix: str = \"MT-\",\n min_genes: int = 500,\n max_genes: int = 6000,\n min_umis: int = 100,\n max_umis: int = 600000,\n percent_mito: float = 10.0,\n percent_cells: float = 0.05,\n) -> None:\n\n data.obs[\"passed_qc\"] = False\n\n data.obs[\"n_genes\"] = data.X.getnnz(axis=1)\n data.obs[\"n_counts\"] = data.X.sum(axis=1).A1\n\n mito_prefixes = mito_prefix.split(\",\")\n\n def startswith(name):\n for prefix in mito_prefixes:\n if name.startswith(prefix):\n return True\n return False\n\n mito_genes = data.var_names.map(startswith).values.nonzero()[0]\n data.obs[\"percent_mito\"] = (data.X[:, mito_genes].sum(axis=1).A1 / np.maximum(\n data.obs[\"n_counts\"].values, 1.0\n )) * 100\n\n # Assign passed_qc\n filters = [\n data.obs[\"n_genes\"] >= min_genes,\n data.obs[\"n_genes\"] < max_genes,\n data.obs[\"n_counts\"] >= min_umis,\n data.obs[\"n_counts\"] < max_umis,\n data.obs[\"percent_mito\"] < percent_mito,\n ]\n\n data.obs.loc[np.logical_and.reduce(filters), \"passed_qc\"] = True\n\n var = data.var\n data = data[\n data.obs[\"passed_qc\"]\n ] # compute gene stats in space of filtered cells only\n\n var[\"n_cells\"] = data.X.getnnz(axis=0)\n var[\"percent_cells\"] = (var[\"n_cells\"] / data.shape[0]) * 100\n var[\"robust\"] = var[\"percent_cells\"] >= percent_cells\n var[\"highly_variable_features\"] = var[\n \"robust\"\n ] # default all robust genes are \"highly\" variable",
"def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n count_list = []\n for result in self._result_list:\n try:\n count_list.append(result.get_counts(circ_name))\n except (QiskitError, KeyError):\n pass\n\n circ_counts[circ_name] = \\\n build_counts_dict_from_list(count_list)\n\n self._circ_shots[circ_name] = \\\n sum(circ_counts[circ_name].values())\n\n # calculate the heavy output probability\n self._heavy_output_counts[circ_name] = \\\n self._subset_probability(\n self._heavy_outputs[circ_name],\n circ_counts[circ_name])",
"def compute_metrics(self):\n pass",
"def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes",
"def get_test_cases_coverage(session_id):\n tc_stats={}\n tc_stats_list=[]\n total_executed=0\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\"null\"'\n params={\"sid\":session_id}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n tests=c.fetchall()\n conn.close()\n if len(tests)>0:\n for t in tests:\n total_executed=0\n sql=\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n files=c.fetchall()\n conn.close()\n for f in files:\n line_count=get_executable_lines_count_for_file(f[0])\n # get executions\n sql=\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0],\"fid\":f[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n executed=c.fetchone()\n conn.close()\n total_executed+=executed[0]\n # save test case and it's executions\n tc_stats={}\n tc_stats[\"test_id\"]=t[0]\n tc_stats[\"total_executed\"]=total_executed\n tc_stats[\"total_executed\"]\n \n tc_stats_list.append(tc_stats)\n return tc_stats_list",
"def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self",
"def calculate_dataset_metrics(self):\n pass",
"def run_tests():\n with open(FILENAME) as file:\n # Loads testing parameters from the yaml file.\n tests = yaml.safe_load(file)\n\n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results['Last Average Score'] = \"\"\n results['No of Q-Learning episodes'] = \"\"\n\n # run experiments:\n for i, test in enumerate(test_dict):\n grid = Rooms(test[\"env_size\"], testing=True)\n learning = QLearning(grid, test[\"gamma\"], test[\"alpha\"], test[\"agent_start_pos\"])\n e_greedy = Policy(\"e-greedy\", test[\"epsilon\"], test[\"decay\"])\n greedy = Policy(policy_type=\"greedy\")\n experiment = Experiments(grid, learning, greedy, test[\"iters\"],\n test[\"agent_start_pos\"], test[\"test_no\"])\n\n for session in range(test[\"iters\"]):\n learning.run_multiple_episodes(test[\"batch_episodes\"], e_greedy)\n mean_reward = experiment.run_experiments(test[\"exp_per_batch\"])\n\n results.loc[i,'Last Average Score'] = mean_reward\n results.loc[i,'No of Q-Learning episodes'] = (session + 1) * test[\"batch_episodes\"]\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n # plot & save graphs\n experiment.generate_results(test[\"test_no\"], test)\n\n return results",
"def test_evaluate(self):\n # test normalized by 'bbox'\n pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox')\n pck_metric.process(self.data_batch, self.data_samples)\n pck = pck_metric.evaluate(self.batch_size)\n target = {'PCK': 1.0}\n self.assertDictEqual(pck, target)\n\n # test normalized by 'head_size'\n pckh_metric = PCKAccuracy(thr=0.3, norm_item='head')\n pckh_metric.process(self.data_batch, self.data_samples)\n pckh = pckh_metric.evaluate(self.batch_size)\n target = {'PCKh': 1.0}\n self.assertDictEqual(pckh, target)\n\n # test normalized by 'torso_size'\n tpck_metric = PCKAccuracy(thr=0.05, norm_item=['bbox', 'torso'])\n tpck_metric.process(self.data_batch, self.data_samples)\n tpck = tpck_metric.evaluate(self.batch_size)\n self.assertIsInstance(tpck, dict)\n target = {\n 'PCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck, target)",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def run_tests(self, validate=False):\n fitness = 0\n\n for test_set in [self.debugger.PASS, self.debugger.FAIL]:\n passed = self.run_test_set(test_set, validate=validate)\n ratio = passed / len(self.debugger.collectors[test_set])\n fitness += self.weight(test_set) * ratio\n\n return fitness",
"def compute_metrics(self, results: list) -> dict:",
"def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)",
"def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results",
"def _calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics",
"def run_tests():\n with open(FILENAME) as file:\n\n # Loads the test hyper-parameters as dictionaries.\n tests = yaml.safe_load(file)\n \n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results[\"Episode\"] = \"\"\n results['Max average score'] = \"\"\n\n for i, test in enumerate(tests['Tests']):\n\n env = gym.make(test['env'])\n env.reset()\n\n actor_critic = ActorCritic(env, test['episodes'], test['max_score'], \n test['hidden_size'], test['gamma'], test['save'])\n\n ## run training \n best_score, episode, rew_hist = actor_critic.train()\n\n results.loc[i,'Episode'] = episode\n results.loc[i,'Max average score'] = best_score\n\n plot_graphs(test, rew_hist)\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n return results",
"def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results",
"def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics",
"def run_faqc(self, **kwargs):\n if self.qc is True:\n build([FaQC.SummarizeQC(fastq_dic=self.fastq_dic,\n num_cpus=self.num_cpus,\n workdir=self.workdir)],\n local_scheduler=self.local_scheduler,\n workers=1)\n qc_dic = {}\n for samp, path in self.fastq_dic.items():\n trim_dir = os.path.join(self.workdir, \"processes\", \"qc\", samp)\n qc_dic[samp] = trim_dir + \"/\" + samp + \".1.trimmed.fastq\" + \":\" + \\\n trim_dir + \"/\" + samp + \".2.trimmed.fastq\" \n return qc_dic\n\n else:\n return self.fastq_dic",
"def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. Metrics were only calculated for the provided dose.')",
"def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))",
"def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)",
"def _cv_results(self, metrics):\n\n param_keys = self.param_combs[0].keys()\n param_dict = {k: [comb[k] for comb in self.param_combs]\n for k in param_keys}\n df_metric_result = {k: pd.DataFrame({'Metric mean': self.mean[k],\n 'Std. dev.': self.std[k],\n **param_dict})\n for k in metrics.keys()}\n self.cv_results_ = df_metric_result"
] | [
"0.61865723",
"0.61484236",
"0.6111889",
"0.6068964",
"0.59302425",
"0.5927652",
"0.5902292",
"0.5878314",
"0.5785274",
"0.5735238",
"0.57067233",
"0.56961745",
"0.56763333",
"0.5656826",
"0.5624611",
"0.5584238",
"0.55624455",
"0.55575174",
"0.5555539",
"0.5554058",
"0.55537844",
"0.554788",
"0.5516049",
"0.5514577",
"0.54895395",
"0.5478405",
"0.5478227",
"0.54697764",
"0.5443361",
"0.5443084"
] | 0.6186402 | 1 |
Evaluates all the QC metric functions in this module (those starting with 'check') and returns the results. The optional kwargs listed below are passed to each QC metric function. | def get_bpodqc_metrics_frame(data, **kwargs):
def is_metric(x):
return isfunction(x) and x.__name__.startswith('check_')
# Find all methods that begin with 'check_'
checks = getmembers(sys.modules[__name__], is_metric)
prefix = '_task_' # Extended QC fields will start with this
# Method 'check_foobar' stored with key '_task_foobar' in metrics map
qc_metrics_map = {prefix + k[6:]: fn(data, **kwargs) for k, fn in checks}
# Split metrics and passed frames
metrics = {}
passed = {}
for k in qc_metrics_map:
metrics[k], passed[k] = qc_metrics_map[k]
# Add a check for trial level pass: did a given trial pass all checks?
n_trials = data['intervals'].shape[0]
# Trial-level checks return an array whose length equals the number of trials
trial_level_passed = [m for m in passed.values()
if isinstance(m, Sized) and len(m) == n_trials]
name = prefix + 'passed_trial_checks'
metrics[name] = reduce(np.logical_and, trial_level_passed or (None, None))
passed[name] = metrics[name].astype(float) if trial_level_passed else None
return metrics, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_all(self):\n # TODO: this should use functions in execute.py to run tests in-sequence so that variable\n # name collisions are accounted for\n self._log_event(EventType.BEGIN_CHECK_ALL)\n\n # TODO: this is a janky way of resolving where the tests are. Formalize a method of \n # determining this and put it into a method in e.g. utils.py\n tests = [os.path.split(file)[1][:-3] for file in glob(os.path.join(self._path, \"*.py\")) \\\n if \"__init__.py\" not in file]\n if len(tests) == 0:\n nb_path = self._resolve_nb_path(None)\n with open(nb_path, encoding=\"utf-8\") as f:\n nb = json.load(f)\n tests = list(nb[\"metadata\"][NOTEBOOK_METADATA_KEY][\"tests\"].keys())\n\n global_env = inspect.currentframe().f_back.f_back.f_globals\n results = []\n if not _SHELVE:\n for test_name in sorted(tests):\n results.append(self.check(test_name, global_env))\n else:\n log = Log.from_file(_OTTER_LOG_FILENAME, ascending=False)\n for file in sorted(tests):\n if \"__init__.py\" not in file:\n test_name = os.path.splitext(os.path.split(file)[1])[0]\n\n entry = log.get_question_entry(test_name)\n env = entry.unshelve()\n global_env.update(env)\n del locals()[\"env\"]\n\n result = self.check(test_name, global_env)\n results.append((test_name, result))\n\n return GradingResults(results)",
"def test_evaluate(self):\n # test normalized by 'bbox_size'\n jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.5, norm_item='bbox')\n jhmdb_pck_metric.process(self.data_batch, self.data_samples)\n pck_results = jhmdb_pck_metric.evaluate(self.batch_size)\n target = {\n 'Head PCK': 1.0,\n 'Sho PCK': 1.0,\n 'Elb PCK': 1.0,\n 'Wri PCK': 1.0,\n 'Hip PCK': 1.0,\n 'Knee PCK': 1.0,\n 'Ank PCK': 1.0,\n 'PCK': 1.0,\n }\n self.assertDictEqual(pck_results, target)\n\n # test normalized by 'torso_size'\n jhmdb_tpck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item='torso')\n jhmdb_tpck_metric.process(self.data_batch, self.data_samples)\n tpck_results = jhmdb_tpck_metric.evaluate(self.batch_size)\n target = {\n 'Head tPCK': 1.0,\n 'Sho tPCK': 1.0,\n 'Elb tPCK': 1.0,\n 'Wri tPCK': 1.0,\n 'Hip tPCK': 1.0,\n 'Knee tPCK': 1.0,\n 'Ank tPCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck_results, target)",
"def run(self) -> None:\n self._does_apply = self.run_precondition()\n if not self._does_apply:\n self._check_result.status = CheckStatus.DOES_NOT_APPLY\n return\n\n self.calc_statistics()\n\n for statistic in self._check_result.statistics:\n\n if statistic.statistic_type == CheckStatisticType.UNDEFINED:\n capture_message('Warning: check statistics type is undefined')\n\n if self._check_result.status == CheckStatus.UNDEFINED:\n self._check_result.status = CheckStatus.PASS\n\n if statistic.value is not None:\n if statistic.thresholds.failure is not None and \\\n statistic.value > statistic.thresholds.failure:\n self._check_result.status = CheckStatus.FAIL\n if statistic.thresholds.warning is not None and \\\n statistic.value > statistic.thresholds.warning:\n if self._check_result.status != CheckStatus.FAIL:\n self._check_result.status = CheckStatus.WARNING",
"def perform_checks(self):\n retval = []\n retval.extend(self.check_slick_status())\n retval.extend(self.check_java_processes())\n retval.extend(self.check_firefox_processes())\n retval.extend(self.check_disk_space())\n return retval",
"def execute(self, context):\n redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n\n for data_check in self.data_checks:\n records = redshift_hook.get_records(data_check[\"sql\"])\n num_records = records[0][0]\n\n assert (num_records != data_check[\"expected_result\"]) , \\\n f\"Data quality test returned unexpected result.\"\n\n self.log.info('DataQualityOperator not implemented yet')",
"def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list",
"def _CommonChecks(input_api, output_api):\n result = []\n result.extend(_CheckChromeUpdateTriggerRule(input_api, output_api))\n result.extend(_CheckCurrentVersionIncreaseRule(input_api, output_api))\n result.extend(_CheckNoOverlappingFileNamesInResourceDirsRule(input_api,\n output_api))\n\n return result",
"def run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()",
"def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests",
"def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)",
"def test_evaluate(self):\n # test normalized by 'bbox'\n pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox')\n pck_metric.process(self.data_batch, self.data_samples)\n pck = pck_metric.evaluate(self.batch_size)\n target = {'PCK': 1.0}\n self.assertDictEqual(pck, target)\n\n # test normalized by 'head_size'\n pckh_metric = PCKAccuracy(thr=0.3, norm_item='head')\n pckh_metric.process(self.data_batch, self.data_samples)\n pckh = pckh_metric.evaluate(self.batch_size)\n target = {'PCKh': 1.0}\n self.assertDictEqual(pckh, target)\n\n # test normalized by 'torso_size'\n tpck_metric = PCKAccuracy(thr=0.05, norm_item=['bbox', 'torso'])\n tpck_metric.process(self.data_batch, self.data_samples)\n tpck = tpck_metric.evaluate(self.batch_size)\n self.assertIsInstance(tpck, dict)\n target = {\n 'PCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck, target)",
"def check(self,hostNames,getMetricValueForHost):\r\n \r\n #get all hosts which contributed to the metric\r\n filteredHostNames=set()\r\n for metricHostName in self.metricHostNames:\r\n filteredHostNames=filteredHostNames.union(\r\n set(fnmatch.filter(hostNames,metricHostName)))\r\n \r\n #combine values form the contributing hosts\r\n if self.metricCombine==\"max\":\r\n combinedMetric=-1.0*sys.float_info.max\r\n elif self.metricCombine==\"min\":\r\n combinedMetric=sys.float_info.max\r\n elif self.metricCombine==\"sum\" or metricCombine==\"ave\":\r\n combinedMetric=0.0\r\n if self.metricCombine==\"ave\":\r\n count=0\r\n for hostName in filteredHostNames:\r\n value=getMetricValueForHost(self.metricName,hostName)\r\n print(\"hostName=\",hostName)\r\n print(\"self.metricName=\",self.metricName)\r\n print(\"value=\",value)\r\n if value!=None:\r\n if self.metricCombine==\"max\":\r\n if float(value)>combinedMetric:\r\n combinedMetric=float(value)\r\n elif self.metricCombine==\"min\":\r\n if float(value)<combinedMetric:\r\n combinedMetric=float(value)\r\n elif self.metricCombine==\"sum\" or metricCombine==\"ave\":\r\n combinedMetric=combinedMetric+float(value)\r\n if self.metricCombine==\"ave\":\r\n count+=1\r\n if self.metricCombine==\"ave\":\r\n combinedMetric=combinedMetric/float(count)\r\n \r\n #test for triggering an action\r\n print(\"checking \"+self.metricCombine+\" of metric \\\"\"+self.metricName\r\n +\"\\\"=\"+str(combinedMetric)+\" is \"+self.comparison+\" reference=\"\r\n +str(self.reference)+\" ...\")\r\n if self.comparison==\"lt\":\r\n if combinedMetric < self.reference:\r\n return True\r\n elif self.comparison==\"gt\":\r\n if combinedMetric > self.reference:\r\n return True\r\n elif self.comparison==\"eq\":\r\n if combinedMetric == self.reference:\r\n return True\r\n elif self.comparison==\"ne\":\r\n if combinedMetric != self.reference:\r\n return True\r\n elif self.comparison==\"le\":\r\n if combinedMetric <= self.reference:\r\n return True\r\n elif self.comparison==\"ge\":\r\n if combinedMetric >= self.reference:\r\n return True\r\n return False",
"def test_run_all_sql_data_checks(self):\n\n # we should run all the data checks through the main function to support\n # different sql data checker classes we may create in the future\n results = sql_data_checker.main()\n\n # if any data check threw an exception, its value in the dict will be None\n failed_data_check_ids = []\n for data_check_type in sorted(results, key=lambda key: key.data_check_type_id):\n if results[data_check_type] is None:\n failed_data_check_ids.append(data_check_type.data_check_type_id)\n\n # I want it to display all failed checks so I'm not doing a self.assertEqual(0, len(failed_data_check_ids))\n if len(failed_data_check_ids) is not 0:\n self.fail('Failed SQL Data Check IDs: %s' % [str(s) for s in failed_data_check_ids])",
"def _CommonChecks(input_api, output_api):\n results = []\n old_sys_path = sys.path\n try:\n # Modules in tools/perf depend on telemetry.\n sys.path = [os.path.join(os.pardir, 'telemetry')] + sys.path\n results.extend(input_api.canned_checks.RunPylint(\n input_api, output_api,\n black_list=PYLINT_BLACKLIST,\n disabled_warnings=PYLINT_DISABLED_WARNINGS))\n results.extend(_CheckJson(input_api, output_api))\n finally:\n sys.path = old_sys_path\n return results",
"def compute(self, **kwargs):\n if self.extractor is None:\n kwargs['download_data'] = kwargs.pop('download_data', self.download_data)\n self.load_data(**kwargs)\n self.log.info(f\"Session {self.session_path}: Running QC on behavior data...\")\n self.metrics, self.passed = get_bpodqc_metrics_frame(\n self.extractor.data,\n wheel_gain=self.extractor.settings['STIM_GAIN'], # The wheel gain\n photodiode=self.extractor.frame_ttls,\n audio=self.extractor.audio_ttls,\n re_encoding=self.extractor.wheel_encoding or 'X1',\n min_qt=self.extractor.settings.get('QUIESCENT_PERIOD') or 0.2\n )\n return",
"def RunChecks(self):\n results = []\n\n affected_files = self.input_api.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n affected_js_files = filter(\n lambda f: f.LocalPath().endswith('.js'), affected_files)\n\n if affected_js_files:\n self.input_api.logging.info(\n 'Running appengine eslint on %d JS file(s)', len(affected_js_files))\n results += self.RunESLintChecks(affected_js_files)\n\n\n if results:\n results.append(self.output_api.PresubmitNotifyResult(\n 'See the JavaScript style guide at https://goo.gl/Ld1CqR.'))\n\n return results",
"def quality_checks(ds):\n parameters = ['barometric_pressure', 'relative_humidity', 'air_temperature', 'longwave_irradiance',\n 'precipitation', 'shortwave_irradiance', 'sea_surface_temperature', 'sea_surface_conductivity',\n 'sea_surface_salinity', 'eastward_wind_velocity', 'northward_wind_velocity']\n for p in parameters:\n # The primary failure mode of the METBK is to repeat the last value it received from a sensor.\n # Use the IOOS QARTOD flat line test to identify these cases (consider it suspect if it repeats\n # for 20+ minutes and failed if it repeats for 35+ minutes).\n flags = qartod.flat_line_test(ds[p].values, ds['time'].values, 1200, 2100, 0.00001)\n\n # The secondary failure mode occurs when the METBK logger sets values to a NaN if no sensor data is available.\n # In the case of the sea surface conductivity and temperature data, different values are used to represent\n # missing data. Specifically, the values are set to a 0.0 and -5.0, respectively. In either case, (NaNs or\n # 0.0 and -5.0) set the QC flag to 9 to indicate \"Missing\" data, and then convert the 0.0 and -5.0 values to\n # a NaN to avoid propagating false numbers into subsequent calculations (e.g. salinity or heat flux).\n if p == 'sea_surface_temperature':\n m = ds[p] < -4.0 # use a floating point value just above -5\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n elif p == 'sea_surface_conductivity':\n m = ds[p] < 0.5 # use a floating point value just above 0\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n else:\n m = np.isnan(ds[p])\n flags[m] = 9\n\n # add the qc_flags to the dataset, rolling up the results into a single value\n qc_summary = p + '_qc_summary_flag'\n if qc_summary in ds.variables:\n # add the new test results to the existing QC summary results\n qc = ds[qc_summary]\n flags = np.array([flags, qc.values])\n ds[qc_summary] = ('time', flags.max(axis=0, initial=1))\n else:\n # create a new QC summary variable\n ds[qc_summary] = ('time', flags)\n\n # set up the attributes for the new variable\n ds[qc_summary].attrs = dict({\n 'long_name': '%s QC Summary Flag' % ds[p].attrs['long_name'],\n 'standard_name': 'aggregate_quality_flag',\n 'comment': ('Summary quality flag combining the results of the instrument-specific quality tests with '\n 'existing OOI QC tests, if available, to create a single QARTOD style aggregate quality flag'),\n 'flag_values': np.array([1, 2, 3, 4, 9]),\n 'flag_meanings': 'pass not_evaluated suspect_or_of_high_interest fail missing'\n })",
"def evaluate(self, data, metric, classes=None):\n func_dict = {\n 'mutual_information': sklearn.metrics.mutual_info_score,\n 'normed_mutual_information': sklearn.metrics.normalized_mutual_info_score,\n 'square_error': sklearn.metrics.mean_squared_error,\n 't-test': scipy.stats.ttest_ind,\n 'wilcoxon': scipy.stats.wilcoxon,\n 'correlation': np.corrcoef\n }\n self.make_signature(data, classes)\n try:\n if metric in {'mutual_information', 'normed_mutual_information'}:\n self.score = func_dict[metric](classes, self.digit_signature()) \n elif metric == 'square_error':\n self.score = func_dict[metric](classes, self.signatures)\n elif metric in {'t-test', 'wilcoxon'} :\n self.score = np.abs(func_dict[metric](self.signatures[classes==1], \\\n self.signatures[classes==0])[0])\n \n elif metric == 'correlation':\n self.score = func_dict[metric](classes, self.signatures)[1,0]\n \n except: KeyError(\"no such a function\") \n \n return self.score",
"def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self",
"def evaluate(self, task, **kwargs):\n self.solver.clear()\n\n func_name = task.replace(\" \", \"_\")\n if not hasattr(self, func_name):\n raise ValueError(\"Unknown task `%s`\" % task)\n logger.info(\"evaluate on %s\" % task)\n result = getattr(self, func_name)(**kwargs)\n for metric, value in sorted(result.items()):\n logger.warning(\"%s: %g\" % (metric, value))",
"def run_and_check(self, *args, **kwargs) -> None:\n raise NotImplementedError",
"def check(self, data_input, debug_flag):\n self.results = [ [], [], [], False, [] ]\n _result = {}\n _wave1_t = data_input[0][0]\n _wave2_t = data_input[0][1]\n _wave3_t = data_input[0][2] \n _wave1_p = data_input[1][0]\n _wave2_p = data_input[1][1]\n _wave3_p = data_input[1][2]\n\n #Step1: b vs a\n #Step1.1: time_analaysis\n _result = {} \n _result[str(\"b_a_t\")] = EW_fibonacci.check_fibratio(_wave2_t, _wave1_t)\n self.results[0].append(_result)\n\n #Step1.2: price_analaysis\n _result = {} \n _result[str(\"b_a_p\")] = EW_fibonacci.check_fibratio(_wave2_p, _wave1_p)\n self.results[0].append(_result)\n\n\n #Step2: c vs b\n #Step2.1: time_analaysis\n _result = {} \n _result[str(\"c_b_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave2_t)\n self.results[1].append(_result)\n\n #Step2.2: price_analaysis\n _result = {} \n _result[str(\"c_b_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave2_p)\n self.results[1].append(_result)\n\n \n #Step3: c vs a\n #Step3.1: time_analaysis\n _result = {} \n _result[str(\"c_a_t\")] = EW_fibonacci.check_fibratio(_wave3_t, _wave1_t)\n self.results[2].append(_result)\n\n #Step3.2: price_analaysis\n _result = {} \n _result[str(\"c_a_p\")] = EW_fibonacci.check_fibratio(_wave3_p, _wave1_p)\n self.results[2].append(_result)\n\n\n #Step4: Check if this a-b-c is valid or not and which pattern can be chosen\n self.results[3], self.results[4] = self.check_type(data_input, debug_flag)\n\n\n #Step5: return the results\n return self.results",
"def perform_sensor_innov_checks(\n metrics: Dict[str, float], sensor_checks: List[str], innov_fail_checks: List[str],\n check_levels: Dict[str, float]) -> Dict[str, str]:\n\n sensor_status = dict()\n\n for result_id in ['hgt', 'mag', 'vel', 'pos', 'tas', 'hagl']:\n\n # only run sensor checks, if they apply.\n if result_id in sensor_checks:\n if metrics['{:s}_percentage_amber'.format(result_id)] > check_levels[\n '{:s}_amber_fail_pct'.format(result_id)]:\n sensor_status['{:s}_sensor_status'.format(result_id)] = 'Fail'\n print('{:s} sensor check failure.'.format(result_id))\n elif metrics['{:s}_percentage_amber'.format(result_id)] > check_levels[\n '{:s}_amber_warn_pct'.format(result_id)]:\n sensor_status['{:s}_sensor_status'.format(result_id)] = 'Warning'\n print('{:s} sensor check warning.'.format(result_id))\n else:\n sensor_status['{:s}_sensor_status'.format(result_id)] = 'Pass'\n\n # perform innovation checks.\n for signal_id, metric_name, result_id in [('posv', 'hgt_fail_percentage', 'hgt'),\n ('magx', 'magx_fail_percentage', 'mag'),\n ('magy', 'magy_fail_percentage', 'mag'),\n ('magz', 'magz_fail_percentage', 'mag'),\n ('yaw', 'yaw_fail_percentage', 'yaw'),\n ('velh', 'vel_fail_percentage', 'vel'),\n ('velv', 'vel_fail_percentage', 'vel'),\n ('posh', 'pos_fail_percentage', 'pos'),\n ('tas', 'tas_fail_percentage', 'tas'),\n ('hagl', 'hagl_fail_percentage', 'hagl'),\n ('ofx', 'ofx_fail_percentage', 'flow'),\n ('ofy', 'ofy_fail_percentage', 'flow')]:\n\n # only run innov fail checks, if they apply.\n if signal_id in innov_fail_checks:\n\n if metrics[metric_name] > check_levels['{:s}_fail_pct'.format(result_id)]:\n sensor_status['{:s}_sensor_status'.format(result_id)] = 'Fail'\n print('{:s} sensor check failure.'.format(result_id))\n else:\n if not ('{:s}_sensor_status'.format(result_id) in sensor_status):\n sensor_status['{:s}_sensor_status'.format(result_id)] = 'Pass'\n\n return sensor_status",
"def check(self, f=None, verbose=True, level=1, checktype=None):\n if checktype is None:\n checktype = mf6check\n # do general checks\n chk = super().check(f, verbose, level, checktype)\n\n # do mf6 specific checks\n if hasattr(self, \"auxiliary\"):\n # auxiliary variable check\n # check if auxiliary variables are defined\n aux_names = self.auxiliary.get_data()\n if aux_names is not None and len(aux_names[0]) > 1:\n num_aux_names = len(aux_names[0]) - 1\n # check for stress period data\n aux_data = self._get_aux_data(aux_names)\n if aux_data is not None and len(aux_data) > 0:\n # make sure the check object exists\n if chk is None:\n chk = self._get_check(f, verbose, level, checktype)\n if isinstance(aux_data, dict):\n aux_datasets = list(aux_data.values())\n else:\n aux_datasets = [aux_data]\n dataset_type = \"unknown\"\n for dataset in aux_datasets:\n if isinstance(dataset, np.recarray):\n dataset_type = \"recarray\"\n break\n elif isinstance(dataset, np.ndarray):\n dataset_type = \"ndarray\"\n break\n # if aux data is in a list\n if dataset_type == \"recarray\":\n # check for time series data\n time_series_name_dict = {}\n if hasattr(self, \"ts\") and hasattr(\n self.ts, \"time_series_namerecord\"\n ):\n # build dictionary of time series data variables\n ts_nr = self.ts.time_series_namerecord.get_data()\n if ts_nr is not None:\n for item in ts_nr:\n if len(item) > 0 and item[0] is not None:\n time_series_name_dict[item[0]] = True\n # auxiliary variables are last unless boundnames\n # defined, then second to last\n if self._boundnames_active():\n offset = 1\n else:\n offset = 0\n\n # loop through stress period datasets with aux data\n for data in aux_datasets:\n if isinstance(data, np.recarray):\n for row in data:\n row_size = len(row)\n aux_start_loc = (\n row_size - num_aux_names - offset\n )\n # loop through auxiliary variables\n for idx, var in enumerate(aux_names):\n # get index of current aux variable\n data_index = aux_start_loc + idx\n # verify auxiliary value is either\n # numeric or time series variable\n if (\n not datautil.DatumUtil.is_float(\n row[data_index]\n )\n and not row[data_index]\n in time_series_name_dict\n ):\n desc = (\n f\"Invalid non-numeric \"\n f\"value \"\n f\"'{row[data_index]}' \"\n f\"in auxiliary data.\"\n )\n chk._add_to_summary(\n \"Error\",\n desc=desc,\n package=self.package_name,\n )\n # else if stress period data is arrays\n elif dataset_type == \"ndarray\":\n # loop through auxiliary stress period datasets\n for data in aux_datasets:\n # verify auxiliary value is either numeric or time\n # array series variable\n if isinstance(data, np.ndarray):\n val = np.isnan(np.sum(data))\n if val:\n desc = (\n f\"One or more nan values were \"\n f\"found in auxiliary data.\"\n )\n chk._add_to_summary(\n \"Warning\",\n desc=desc,\n package=self.package_name,\n )\n return chk",
"def test_check(self):\n return self._testCheck()",
"def run(self):\n if self.verbose:\n print(f'Running {self.name} tests...')\n\n # try running setup if there is one\n if self.setup:\n self.__process_setup()\n\n final_report = [None] * len(self.tests)\n\n for test_in, test_out in sorted(self.tests.items()):\n # increment total num of tests\n self.total += 1\n\n if self.verbose:\n print(f'#{self.total}')\n\n # evaluate test input w/ setup vars, if any\n try:\n inp = eval(test_in, self.vars)\n except Exception as err:\n print(f'Issue during evaluation of test input: {err}')\n final_report[self.total - 1] = 'input eval error'\n if self.verbose:\n print(f'Test input was: {test_in}')\n print('Vars from execution: {}'.format({k : v for k, v in self.vars.items() if k != '__builtins__'}))\n continue\n\n \n # checking if function input has more than one arg\n if type(inp) in (list, tuple):\n try:\n student_out = self.student_function(*inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Inputs were: {inp}')\n continue\n else:\n try:\n student_out = self.student_function(inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Input was: {inp}')\n continue\n\n # ans alias for ease of answer checking\n self.vars['ans'] = student_out\n\n if self.schema:\n format_vals = eval(test_out, self.vars)\n results, maybe_failed_schema = self.__process_schema(format_vals)\n if all(results):\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed at least one of the tests\n failed_str = \" and \".join([\", \".join(maybe_failed_schema[:-1]),maybe_failed_schema[-1]] if len(maybe_failed_schema) > 2 else maybe_failed_schema)\n final_report[self.total - 1] = f'FAILED; failed following assertion(s): {failed_str}'\n else:\n expected_ans = eval(test_out, self.vars)\n if student_out == expected_ans:\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed the only test\n final_report[self.total - 1] = f'FAILED; got {repr(student_out)} but expected {repr(expected_ans)}'\n\n # run callback function, if there is one\n if self.callback:\n if self.verbose:\n print('Running callback...')\n print('call back is:', self.callback)\n\n # once done, put the final report on the queue\n self.queue.put((self.student_username, self.name, f'{self.correct}/{self.total}', final_report))",
"def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )",
"def jmx_metrics(check, verbose):\n\n checks = process_checks_option(check, source='integrations')\n integrations = sorted(check for check in checks if is_jmx_integration(check))\n echo_info(f\"Validating JMX metrics files for {len(integrations)} checks ...\")\n\n saved_errors = defaultdict(list)\n\n for check_name in integrations:\n validate_jmx_metrics(check_name, saved_errors, verbose)\n validate_config_spec(check_name, saved_errors)\n\n for key, errors in saved_errors.items():\n if not errors:\n continue\n check_name, filepath = key\n annotate_error(filepath, \"\\n\".join(errors))\n echo_info(f\"{check_name}:\")\n for err in errors:\n echo_failure(f\" - {err}\")\n\n echo_info(f\"{len(integrations)} total JMX integrations\")\n echo_success(f\"{len(integrations) - len(saved_errors)} valid metrics files\")\n if saved_errors:\n echo_failure(f\"{len(saved_errors)} invalid metrics files\")\n abort()",
"def run_evaluation(\n categories, groundtruth, detections, excluded_keys, verbose=True\n):\n\n pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(\n categories\n )\n\n boxes, labels, _ = groundtruth\n\n gt_keys = []\n pred_keys = []\n\n for image_key in boxes:\n if image_key in excluded_keys:\n logging.info(\n (\n \"Found excluded timestamp in ground truth: %s. \"\n \"It will be ignored.\"\n ),\n image_key,\n )\n continue\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key,\n {\n standard_fields.InputDataFields.groundtruth_boxes: np.array(\n boxes[image_key], dtype=float\n ),\n standard_fields.InputDataFields.groundtruth_classes: np.array(\n labels[image_key], dtype=int\n ),\n standard_fields.InputDataFields.groundtruth_difficult: np.zeros(\n len(boxes[image_key]), dtype=bool\n ),\n },\n )\n\n gt_keys.append(image_key)\n\n boxes, labels, scores = detections\n\n for image_key in boxes:\n if image_key in excluded_keys:\n logging.info(\n (\n \"Found excluded timestamp in detections: %s. \"\n \"It will be ignored.\"\n ),\n image_key,\n )\n continue\n pascal_evaluator.add_single_detected_image_info(\n image_key,\n {\n standard_fields.DetectionResultFields.detection_boxes: np.array(\n boxes[image_key], dtype=float\n ),\n standard_fields.DetectionResultFields.detection_classes: np.array(\n labels[image_key], dtype=int\n ),\n standard_fields.DetectionResultFields.detection_scores: np.array(\n scores[image_key], dtype=float\n ),\n },\n )\n\n pred_keys.append(image_key)\n\n metrics = pascal_evaluator.evaluate()\n\n pprint.pprint(metrics, indent=2)\n return metrics",
"def execute(self, context):\n if len(self.data_quality_checks) == 0:\n self.log.info('DataQualityOperator not implemented yet')\n return\n \n redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id) \n \n errors = 0\n failure_tests = []\n\n # process each sql stmt for data quality relevant to each dimension table:\n # Dimension tables : a. songplays, b. songs, c. artists, d. users, e. time\n \n for (curr_table, check_stmt) in zip(self.list_of_tables, self.data_quality_checks):\n curr_sql = check_stmt.get('check_sql')\n result1 = check_stmt.get('expected_result')\n \n self.log.info('Current Table Processed : ' + curr_table)\n\n try:\n records = redshift_hook.get_records(curr_sql)[0]\n except Exception as e:\n self.loginfo(f\"Error : {curr_table} : Query failed : {e}\")\n\n # record error when actual result is not the same as expected result\n if result1 != records[0]:\n errors += 1\n failure_tests.append(curr_table + ' : ' + curr_sql)\n \n # display Failure or Success Message \n if errors > 0:\n self.log.info(failure_tests)\n self.log.info('Failure Msg : Tests Failed')\n raise ValueError('Error : Failed : Data Quality check')\n else:\n self.log.info('Success : All Data Quality Tests passed')"
] | [
"0.5860062",
"0.5827285",
"0.5809412",
"0.57624185",
"0.5620037",
"0.5605969",
"0.5575548",
"0.55414146",
"0.54573965",
"0.5391575",
"0.5385385",
"0.53589267",
"0.5340336",
"0.53369385",
"0.53235877",
"0.53075016",
"0.53034043",
"0.5302152",
"0.52251285",
"0.52243036",
"0.5211908",
"0.52072436",
"0.51933736",
"0.5189894",
"0.5183716",
"0.51764494",
"0.5174707",
"0.51676804",
"0.5162758",
"0.51508564"
] | 0.6367398 | 0 |
Checks that the time difference between the onset of the visual stimulus and the onset of the go cue tone is positive and less than 10ms. | def check_stimOn_goCue_delays(data, **_):
# Calculate the difference between stimOn and goCue times.
# If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.
metric = np.nan_to_num(data["goCue_times"] - data["stimOn_times"], nan=np.inf)
passed = (metric < 0.01) & (metric > 0)
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def check_errorCue_delays(data, **_):\n metric = np.nan_to_num(data[\"errorCue_times\"] - data[\"errorCueTrigger_times\"], nan=np.inf)\n passed = ((metric <= 0.0015) & (metric > 0)).astype(float)\n passed[data[\"correct\"]] = metric[data[\"correct\"]] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_goCue_delays(data, **_):\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"goCueTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.0015) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)",
"def check_time(self, m, s):\r\n if m*60 + s > 5400:\r\n self.unit.s = 0\r\n self.unit.m = 90\r\n return\r\n if s < 0:\r\n s = 0\r\n if m < 0:\r\n m = 0\r\n self.unit.s = s\r\n self.unit.m = m",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def check_stimulus_move_before_goCue(data, photodiode=None, **_):\n if photodiode is None:\n _log.warning(\"No photodiode TTL input in function call, returning None\")\n return None\n photodiode_clean = ephys_fpga._clean_frame2ttl(photodiode)\n s = photodiode_clean[\"times\"]\n s = s[~np.isnan(s)] # Remove NaNs\n metric = np.array([])\n for i, c in zip(data[\"intervals\"][:, 0], data[\"goCue_times\"]):\n metric = np.append(metric, np.count_nonzero(s[s > i] < (c - 0.02)))\n\n passed = (metric == 0).astype(float)\n # Remove no go trials\n passed[data[\"choice\"] == 0] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def valid(t):\n return float(t) > time.time()",
"def check_stimOn_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOn_times\"] - data[\"stimOnTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length",
"def within_threshold(self, other):\n if abs(other.ts - self.ts) < TIME_THRESHOLD:\n return True\n return False",
"def is_time(self) -> bool:\n return self.times > 1",
"def check_audio_pre_trial(data, audio=None, **_):\n if audio is None:\n _log.warning(\"No BNC2 input in function call, retuning None\")\n return None\n s = audio[\"times\"][~np.isnan(audio[\"times\"])] # Audio TTLs with NaNs removed\n metric = np.array([], dtype=np.int8)\n for i, c in zip(data[\"intervals\"][:, 0], data[\"goCue_times\"]):\n metric = np.append(metric, sum(s[s > i] < (c - 0.02)))\n passed = metric == 0\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True",
"def check_stimOff_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOff_times\"] - data[\"stimOffTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res",
"def check_trial_length(data, **_):\n # NaN values are usually ignored so replace them with Inf so they fail the threshold\n metric = np.nan_to_num(data[\"feedback_times\"] - data[\"goCue_times\"], nan=np.inf)\n passed = (metric < 60.1) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii",
"def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-part check didn't freak out\n return True",
"def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return",
"def is_sampling_for_minmax(self):\n return (self._level_change_time is not None) and \\\n (get_time() - self._level_change_time) < self._duration_in_sec",
"def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False",
"def overtime(self):\n if self._overtime != '':\n return True\n return False",
"def is_over(self, time):\n over = (not self.enable_loop()) and (time >= self.get_duration())\n return over",
"def _badness(self, time):\n return (time - self.expected_time)**2",
"def check(ht, mt, st, pid):\n\n ns_ticks = 0\n shift = 0\n\n diff = (mt - ht + TOTAL_TICKS) % TOTAL_TICKS\n for rep in range(12):\n tmp = diff + rep * TOTAL_TICKS\n if tmp % 11 == 0:\n ns_ticks = tmp / 11\n shift = (ht - ns_ticks + TOTAL_TICKS) % TOTAL_TICKS\n\n if (ns_ticks + shift) % TOTAL_TICKS != ht:\n continue\n\n if (12*ns_ticks + shift) % TOTAL_TICKS != mt:\n continue\n\n if (720*ns_ticks + shift) % TOTAL_TICKS != st:\n continue\n\n # calc_st = (720*ns_ticks + shift) % TOTAL_TICKS\n # if calc_st == st:\n ns = ns_ticks % 1e9\n ns_ticks /= 1e9\n\n secs = ns_ticks % 60\n ns_ticks /= 60\n\n mins = ns_ticks % 60\n ns_ticks /= 60\n\n hrs = ns_ticks\n\n if hrs < 12:\n print(f\"Case #{pid}: {int(hrs)} {int(mins)} {int(secs)} {int(ns)}\")\n return True\n\n return False",
"def find_alert_time(self) -> None:\n \n # Also not clear from the paper how to doe this,\n # use the first 10 data points in the light curve to determine the magnitude\n # baseline\n\n mean_mag = np.mean(self.mags[:10])\n std_mag = np.std(self.mags[:10])\n\n num_above = 0 \n i = 9\n\n while num_above < 3 and i < len(self.times)-1:\n \n i += 1 \n\n if self.mags[i] < mean_mag - std_mag:\n num_above += 1\n else:\n num_above = 0.0\n\n if len(self.times) - 1 == i:\n print(\"Give me more training data, not alerted yet, this is probably going to fail\")\n \n return self.times[i-1]",
"def lessThan(self, t):\n if t is None:\n return False\n if isinstance(t, (float, int)):\n return self._micros < long(t * 1000000)\n else:\n return self._micros < t._micros",
"def quick_check(self):\n # loop three times and move the servo\n for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n self.servo(ang)\n time.sleep(.01)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False \n # if the three-part check didn't freak out\n return True",
"def quick_check(self):\n for ang in range(self.MIDPOINT-150, self.MIDPOINT+151, 150):\n self.servo(ang)\n if self.read_distance() < self.SAFE_DIST:\n return False\n return True"
] | [
"0.67452455",
"0.661119",
"0.66066587",
"0.6345932",
"0.6258288",
"0.6236096",
"0.6135885",
"0.610645",
"0.6103701",
"0.60535103",
"0.5981245",
"0.59316677",
"0.5911776",
"0.5881983",
"0.5873338",
"0.5859669",
"0.58114415",
"0.5803081",
"0.57706165",
"0.57561266",
"0.57425934",
"0.5704249",
"0.5700038",
"0.5689883",
"0.5687904",
"0.56801057",
"0.56603545",
"0.56385404",
"0.55805427",
"0.55737746"
] | 0.6955062 | 0 |
Checks that the time difference between the response and the feedback onset (error sound or valve) is positive and less than 10ms. | def check_response_feedback_delays(data, **_):
metric = np.nan_to_num(data["feedback_times"] - data["response_times"], nan=np.inf)
passed = (metric < 0.01) & (metric > 0)
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def check_errorCue_delays(data, **_):\n metric = np.nan_to_num(data[\"errorCue_times\"] - data[\"errorCueTrigger_times\"], nan=np.inf)\n passed = ((metric <= 0.0015) & (metric > 0)).astype(float)\n passed[data[\"correct\"]] = metric[data[\"correct\"]] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def valid(t):\n return float(t) > time.time()",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length",
"def _badness(self, time):\n return (time - self.expected_time)**2",
"def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))",
"def test_error_at_995tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.995))",
"def check_timeout(self, transport, earlier_time, interval, error_msg):\n now = datetime.datetime.now()\n secs = int((now - earlier_time).total_seconds())\n if secs >= interval:\n self.connection_lost(transport, f'{error_msg}: {secs} seconds')",
"def test_max_response_time():\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n assert type(cmd.max_response_time) is float\r\n assert cmd.max_response_time == 0.5",
"def remaining_ms():",
"def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid",
"def is_few_remaining(self) -> bool:\n return self.on_hand <= self.warn_limit",
"def check_stimOn_goCue_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"stimOn_times\"], nan=np.inf)\n passed = (metric < 0.01) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))",
"def check_response_stimFreeze_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"stimFreeze_times\"] - data[\"response_times\"], nan=np.inf)\n # Test for valid values\n passed = ((metric < 0.1) & (metric > 0)).astype(float)\n # Finally remove no_go trials (stimFreeze triggered differently in no_go trials)\n # These values are ignored in calculation of proportion passed\n passed[data[\"choice\"] == 0] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def isTimeRemaining(self):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n if remaining_time < 0:\n return False\n else:\n return True",
"def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True",
"def check_goCue_delays(data, **_):\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"goCueTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.0015) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True",
"def check_trial_length(data, **_):\n # NaN values are usually ignored so replace them with Inf so they fail the threshold\n metric = np.nan_to_num(data[\"feedback_times\"] - data[\"goCue_times\"], nan=np.inf)\n passed = (metric < 60.1) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0",
"async def test_evaluate_different_response_time(self):\n self.set_source_parameter(\"response_time_to_evaluate\", \"min_response_time\")\n self.set_source_parameter(\"target_response_time\", \"45\")\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"1\", entities=self.expected_entities[:1])",
"def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return",
"def checkTimeout(self):\n if TIMEOUT <= (datetime.now() - self.clockCheckStop).total_seconds():\n print('Didn\\'t received messages for 1 minute - Program ends')\n exit(0)",
"def _exceeds_hop_latency(self,ping_time):\n # remote ' ms' from ping time\n ping_as_float = float(ping_time.replace(\" ms\",\"\"))\n\tprint \"Compare {0} to {1}\".format(ping_as_float, self.LATENCY_THRESHOLD)\n\n return ping_as_float >= self.LATENCY_THRESHOLD",
"def check_time(self, m, s):\r\n if m*60 + s > 5400:\r\n self.unit.s = 0\r\n self.unit.m = 90\r\n return\r\n if s < 0:\r\n s = 0\r\n if m < 0:\r\n m = 0\r\n self.unit.s = s\r\n self.unit.m = m",
"def check_rate(self):\n rate = self.rate_measurer.rate()\n if rate < self.request_rate:\n self._fail(WRequestRateTooLow(rate))\n\n if self.rate_measurer.outstanding() > self.max_outstanding:\n self._fail(WRequestOverload())",
"def assert_timeout(self) -> None:",
"def _its_remaining_time(cls, prefix, timeout, from_start_time):\n already_passed = time.time() - from_start_time\n remain_time = timeout - already_passed\n if remain_time < 0.0:\n remain_time = 0.0\n msg = \"{} {:.3f} [sec], already passed {:.3f} [sec]\".format(prefix, remain_time, already_passed)\n return remain_time, msg"
] | [
"0.6539133",
"0.6228387",
"0.5913726",
"0.58829933",
"0.58373797",
"0.5837246",
"0.58290553",
"0.5787292",
"0.5782435",
"0.5740518",
"0.57268006",
"0.5696717",
"0.5689673",
"0.5688602",
"0.56735766",
"0.5667824",
"0.56554264",
"0.56536883",
"0.5650586",
"0.5647413",
"0.5622538",
"0.5596657",
"0.55772704",
"0.5552858",
"0.5525522",
"0.5522481",
"0.55206597",
"0.55003625",
"0.5468514",
"0.54554003"
] | 0.66533184 | 0 |
Checks that the time difference between the visual stimulus freezing and the response is positive and less than 100ms. | def check_response_stimFreeze_delays(data, **_):
# Calculate the difference between stimOn and goCue times.
# If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.
metric = np.nan_to_num(data["stimFreeze_times"] - data["response_times"], nan=np.inf)
# Test for valid values
passed = ((metric < 0.1) & (metric > 0)).astype(float)
# Finally remove no_go trials (stimFreeze triggered differently in no_go trials)
# These values are ignored in calculation of proportion passed
passed[data["choice"] == 0] = np.nan
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def remaining_ms():",
"def check_stimFreeze_delays(data, **_):\n metric = np.nan_to_num(data[\"stimFreeze_times\"] - data[\"stimFreezeTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def valid(t):\n return float(t) > time.time()",
"def check_response_feedback_delays(data, **_):\n metric = np.nan_to_num(data[\"feedback_times\"] - data[\"response_times\"], nan=np.inf)\n passed = (metric < 0.01) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def _check_sse_timing(self, ok_version, **kwargs):\n last_get_rd_sse = getattr(self, 'last_get_rd_sse', None)\n\n if last_get_rd_sse:\n last_elapsed = datetime.datetime.utcnow() - last_get_rd_sse\n if last_elapsed.seconds == 0 and not ok_version:\n m = \"You must wait at least one second between server side export requests!\".format\n raise pytan.exceptions.ServerSideExportError(m())\n\n self.last_get_rd_sse = datetime.datetime.utcnow()",
"def delay_checks(self):\n return False",
"def time_is_out(self):\n return self.get_simulation_time() > self.config.max_time",
"def _badness(self, time):\n return (time - self.expected_time)**2",
"def _check_pulse(self):\n timedelta = time.time() - self.heartbeat\n update_delay = float(1/self.qbpm.frequency)\n time_to_update = False\n if timedelta > update_delay:\n time_to_update = True\n self.heartbeat = time.time()\n return time_to_update",
"def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True",
"def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False",
"def isTimeRemaining(self):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n if remaining_time < 0:\n return False\n else:\n return True",
"async def test_evaluate_different_response_time(self):\n self.set_source_parameter(\"response_time_to_evaluate\", \"min_response_time\")\n self.set_source_parameter(\"target_response_time\", \"45\")\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"1\", entities=self.expected_entities[:1])",
"def test_max_response_time():\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n assert type(cmd.max_response_time) is float\r\n assert cmd.max_response_time == 0.5",
"def check_stimOn_goCue_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"stimOn_times\"], nan=np.inf)\n passed = (metric < 0.01) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_stimOn_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOn_times\"] - data[\"stimOnTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def should_sleep(self):\n return",
"def check_stimOff_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOff_times\"] - data[\"stimOffTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def _wait_for_response(self, millis):\n loops = millis / 10\n while not self.__atresponse_received:\n time.sleep(0.01)\n loops -= 1\n if loops == 0:\n return False\n return True",
"def haveTime(self):\n if self.timeout is None:\n return True\n return time.time() <= self._stop",
"def timedout(self):\n\n return self.duration() > self.check.timeout",
"def assert_timeout(self) -> None:",
"def test_notify_delay_not_elapsed(self):\n # self.client.force_authenticate(user=self.admin)\n\n self.wait_queue_notif = WaitQueueNotification.objects.create(\n user=self.user2,\n retreat=self.retreat,\n )\n\n response = self.client.get(\n '/'.join([\n reverse('retreat:waitqueuenotification-list'),\n 'notify',\n ])\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content,\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'detail': 'Last notification was sent less than 24h ago.'\n }\n\n self.assertEqual(response_data, content)",
"def check_timer():\n end = time.time()\n time_elapsed = end - target_time[0]\n durationMSG = fg.cyan + f\"Scans Completed for {args.target} in: \" + fg.rs\n print(durationMSG, display_time(time_elapsed))",
"def check_last_cycle_duration(self):\n min_pm_time = timedelta(seconds=self.args.min_pm_time)\n max_pm_time = timedelta(seconds=self.args.max_pm_time)\n if self.args.pm_timestamp:\n pm_timestamp = datetime.fromtimestamp(self.args.pm_timestamp)\n now = datetime.now()\n pm_time = now - pm_timestamp\n if pm_time < min_pm_time:\n raise TestFailed(\n \"{0} time less than expected: {1} < {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, min_pm_time\n )\n )\n if pm_time > max_pm_time:\n raise TestFailed(\n \"{0} time greater than expected: {1} > {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, max_pm_time\n )\n )\n\n logging.info(\n \"{0} time: {1}\".format(self.args.pm_operation.capitalize(), pm_time)\n )",
"def __sleep(self):\n if self.sleep_duration > 0:\n gevent.sleep(self.sleep_duration)\n else:\n self.__warn(\n f\"The average tick took longer than the set tick duration of {self.__tick_duration}. \"\n f\"Program is to heavy to run real time\")",
"def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length"
] | [
"0.6850367",
"0.66160536",
"0.6374804",
"0.6345569",
"0.6269667",
"0.6260532",
"0.6249905",
"0.6247848",
"0.62191147",
"0.6194862",
"0.6190176",
"0.61887175",
"0.6181643",
"0.61796886",
"0.6098504",
"0.60839707",
"0.6060317",
"0.6029891",
"0.6001722",
"0.5985031",
"0.59810483",
"0.59505826",
"0.5948616",
"0.5930368",
"0.5923588",
"0.59181577",
"0.5888869",
"0.58862025",
"0.5880144",
"0.587848"
] | 0.69304246 | 0 |
Check that the wheel does move within 100ms of the feedback onset (error sound or valve). | def check_wheel_move_before_feedback(data, **_):
# Get tuple of wheel times and positions within 100ms of feedback
traces = traces_by_trial(
data["wheel_timestamps"],
data["wheel_position"],
start=data["feedback_times"] - 0.05,
end=data["feedback_times"] + 0.05,
)
metric = np.zeros_like(data["feedback_times"])
# For each trial find the displacement
for i, trial in enumerate(traces):
pos = trial[1]
if pos.size > 1:
metric[i] = pos[-1] - pos[0]
# except no-go trials
metric[data["choice"] == 0] = np.nan # NaN = trial ignored for this check
nans = np.isnan(metric)
passed = np.zeros_like(metric) * np.nan
passed[~nans] = (metric[~nans] != 0).astype(float)
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-part check didn't freak out\n return True",
"def quick_check(self):\n # loop three times and move the servo\n for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n self.servo(ang)\n time.sleep(.01)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False \n # if the three-part check didn't freak out\n return True",
"def quick_check(self):\n for ang in range(self.MIDPOINT-150, self.MIDPOINT+151, 150):\n self.servo(ang)\n if self.read_distance() < self.SAFE_DIST:\n return False\n return True",
"def wait_untel_pos_eq(target_pos):\n global joints\n TriggerSimualtion()\n err = abs(np.array(target_pos) - np.array(joints))\n global err_old\n global position_geted\n while (err != err_old).all() or not position_geted:\n global err_old\n global position_geted\n global joints\n err_old = err\n TriggerSimualtion()\n #sleep(0.1)\n position_geted=False",
"def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)",
"def on_timeout(self, data):\n new_value = self.progressbar.get_fraction() + 0.01\n\n if new_value > 1:\n return False\n\n self.progressbar.set_fraction(new_value)\n return True",
"def test(self):\n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n \n pulses=1000*3\n winsound.Beep(200, 1000) # .Beep(1650Hz, (XXXXms)) #e.g 1000ms=1second\n self.run(pulses); self.run(pulses, ANTI_CLK_W)\n sleep(1)\n\n winsound.Beep(400, 1000)\n self.swing(128, count=30); self.stop() #0.9 degrees\n sleep(1)\n\n winsound.Beep(800, 1000)\n print('Testing I.....')\n self.swing(32, count=120); self.stop() #0.225 degrees \n sleep(1)\n\n winsound.Beep(1600, 1000)\n print('Testing II.....')\n self.swing(2, count=1800); self.stop() #0.05625 degrees\n \n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n print(' Testings Done! ')\n return self.stop() #set low before exist ",
"def _wait_for_pole_at_rest(self, thold_ang_vel: float = 0.1 / 180.0 * np.pi):\n cnt = 0\n while cnt < 1.5 / self._dt:\n # Get next measurement\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n\n if np.abs(meas[2]) < thold_ang_vel and np.abs(meas[3]) < thold_ang_vel:\n cnt += 1\n else:\n cnt = 0",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"async def movement_tick(self):\n self.movement_progress += self.sub.power.get_power(\"engines\")\n threshold = get_square(self.x, self.y).difficulty()\n if \"blessing\" in self.sub.upgrades.keywords:\n # Bound difficulty above by four (normal waters)\n threshold = min(4, threshold)\n if self.movement_progress >= threshold:\n self.movement_progress -= threshold\n direction = self.direction # Direction can change as result of movement.\n message = await self.move()\n move_status = (\n f\"Moved **{self.sub.name()}** in direction **{direction.upper()}**!\\n\"\n f\"**{self.sub.name()}** is now at position **{self.get_position()}**.\"\n )\n\n # Do all the puzzles stuff.\n await self.sub.puzzles.movement_tick()\n\n # Cancel trades, if necessary.\n trade_messages = self.sub.inventory.timeout_trade()\n\n # Finally, return our movement.\n if message:\n return f\"{message}\\n{move_status}\", trade_messages\n return move_status, trade_messages\n return None, {}",
"def nearest_test_pulse(self):",
"def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data['wheel_timestamps']\n position = data['wheel_position']\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)",
"def checkMotion(self):\n res = 0\n while(self.ser.inWaiting() > 0):\n res = self.ser.readline().strip()\n\n try:\n if self.state == 1 and time.time() - self.last_move > self.config[\"keep_on_time\"]:\n self.execOff()\n\n if res == \"1\":\n self.last_move = time.time()\n\n if res == \"1\" and self.state == 0:\n self.execOn()\n except Exception as e:\n self.logger.error(e)",
"def _wheel_move_during_closed_loop(re_ts, re_pos, data, wheel_gain=None, tol=1, **_):\n if wheel_gain is None:\n _log.warning(\"No wheel_gain input in function call, returning None\")\n return None, None\n\n # Get tuple of wheel times and positions over each trial's closed-loop period\n traces = traces_by_trial(re_ts, re_pos,\n start=data[\"goCueTrigger_times\"],\n end=data[\"response_times\"])\n\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the absolute displacement\n for i, trial in enumerate(traces):\n t, pos = trial\n if pos.size != 0:\n # Find the position of the preceding sample and subtract it\n idx = np.abs(re_ts - t[0]).argmin() - 1\n origin = re_pos[idx]\n metric[i] = np.abs(pos - origin).max()\n\n # Load wheel_gain and thresholds for each trial\n wheel_gain = np.array([wheel_gain] * len(data[\"position\"]))\n thresh = data[\"position\"]\n # abs displacement, s, in mm required to move 35 visual degrees\n s_mm = np.abs(thresh / wheel_gain) # don't care about direction\n criterion = cm_to_rad(s_mm * 1e-1) # convert abs displacement to radians (wheel pos is in rad)\n metric = metric - criterion # difference should be close to 0\n rad_per_deg = cm_to_rad(1 / wheel_gain * 1e-1)\n passed = (np.abs(metric) < rad_per_deg * tol).astype(float) # less than 1 visual degree off\n metric[data[\"choice\"] == 0] = passed[data[\"choice\"] == 0] = np.nan # except no-go trials\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def _update_cb(self, timeout, offset=0):\n if timeout <= 0:\n timeout = float(\"inf\")\n start = time.time()\n is_done = [False]\n def cb(e=None):\n if e is None and not is_done[0]:\n now = time.time()\n if now-start > timeout:\n is_done[0] = True\n blutil.notice(\"motor position: {1:{0}}\".format(self._prec(), self.wm() - offset))\n return cb",
"def delay_checks(self):\n return False",
"def check_detected_wheel_moves(data, min_qt=0, **_):\n # Depending on task version this may be a single value or an array of quiescent periods\n min_qt = np.array(min_qt)\n if min_qt.size > data[\"intervals\"].shape[0]:\n min_qt = min_qt[:data[\"intervals\"].shape[0]]\n\n metric = data['firstMovement_times']\n qevt_start = data['goCueTrigger_times'] - np.array(min_qt)\n response = data['response_times']\n # First movement time for each trial should be after the quiescent period and before feedback\n passed = np.array([a < m < b for m, a, b in zip(metric, qevt_start, response)], dtype=float)\n nogo = data['choice'] == 0\n passed[nogo] = np.nan # No go trial may have no movement times and that's fine\n return metric, passed",
"def tick(self):\n if time.time() - self.t > self.speed:\n self.t = time.time()\n return True\n else:\n return False",
"def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed",
"def check_wheel_freeze_during_quiescence(data, **_):\n assert np.all(np.diff(data[\"wheel_timestamps\"]) >= 0)\n assert data[\"quiescence\"].size == data[\"stimOnTrigger_times\"].size\n # Get tuple of wheel times and positions over each trial's quiescence period\n qevt_start_times = data[\"stimOnTrigger_times\"] - data[\"quiescence\"]\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=qevt_start_times,\n end=data[\"stimOnTrigger_times\"]\n )\n\n metric = np.zeros((len(data[\"quiescence\"]), 2)) # (n_trials, n_directions)\n for i, trial in enumerate(traces):\n t, pos = trial\n # Get the last position before the period began\n if pos.size > 0:\n # Find the position of the preceding sample and subtract it\n idx = np.abs(data[\"wheel_timestamps\"] - t[0]).argmin() - 1\n origin = data[\"wheel_position\"][idx if idx != -1 else 0]\n # Find the absolute min and max relative to the last sample\n metric[i, :] = np.abs([np.min(pos - origin), np.max(pos - origin)])\n # Reduce to the largest displacement found in any direction\n metric = np.max(metric, axis=1)\n metric = 180 * metric / np.pi # convert to degrees from radians\n criterion = 2 # Position shouldn't change more than 2 in either direction\n passed = metric < criterion\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def control_wheel(self, om_w, time_for_move, side):\n for i in range(1, len(time_for_move)):\n if side == 'left':\n print(\"OMEGA LEFT CONTROL\")\n self.set_speed(om_w[i - 1], 10)\n rospy.sleep(time_for_move[i] - time_for_move[i - 1])\n else:\n print(\"OMEGA RIGHT CONTROL\")\n self.set_speed(10, om_w[i - 1])\n rospy.sleep(time_for_move[i] - time_for_move[i - 1] - 0.5)\n self.flag = True\n self.set_speed(10, 10)",
"def wait(self, axis, timeout=10):\n if not self.enabled:\n return\n\n # Wait for the motor to stop moving\n moving = True\n seconds = int(round(time.time() * 1000))\n \n # check moving flag\n while moving:\n time.sleep(0.01)\n flags = self.status(axis)\n if (flags[0] and flags[1])==True:\n moving = False\n return False\n else: # Timeout\n moving = True\n if timeout == -1:\n pass\n elif (int(round(time.time() * 1000))-seconds)/1000 > timeout:\n return True",
"def _distanceCheck(self):\n\n # Catches the occasional polling error that occurs with the ultrasonic distance sensor\n try:\n # 3 point averager to smooth out distance data\n dist = self.u.distance\n sleep(0.05)\n dist += self.u.distance\n sleep(0.05)\n dist += self.u.distance\n dist = dist/3\n\n #print(\"Distance check reading: {0:1.3f}\".format(dist))\n\n if( dist <= self.detectDist ):\n if( self.birdHere == 0 ):\n self.statusWrite(\"in\")\n self.birdHere = 1\n\n else:\n if( self.birdHere == 1 ):\n self.statusWrite(\"out\")\n self.birdHere = 0\n\n except RuntimeError:\n pass",
"def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return",
"def wait_untel_pos(target_pos):\n mini = 0.003\n err0 = abs(target_pos[0] - joints[0])\n err1 = abs(target_pos[1] - joints[1])\n err2 = abs(target_pos[2] - joints[2])\n err3 = abs(target_pos[3] - joints[3])\n err4 = abs(target_pos[4] - joints[4])\n while err0 > mini or err1 > mini or err2 > mini or err3 > mini or err4 > mini:\n TriggerSimualtion()\n err0 = abs(target_pos[0] - joints[0])\n err1 = abs(target_pos[1] - joints[1])\n err2 = abs(target_pos[2] - joints[2])\n err3 = abs(target_pos[3] - joints[3])\n err4 = abs(target_pos[4] - joints[4])",
"async def test_increase_decrease_speed(hass: HomeAssistant, calls) -> None:\n await _register_components(hass, speed_count=3)\n\n await common.async_turn_on(hass, _TEST_FAN)\n for func, extra, state, value in [\n (common.async_set_percentage, 100, STATE_ON, 100),\n (common.async_decrease_speed, None, STATE_ON, 66),\n (common.async_decrease_speed, None, STATE_ON, 33),\n (common.async_decrease_speed, None, STATE_ON, 0),\n (common.async_increase_speed, None, STATE_ON, 33),\n ]:\n await func(hass, _TEST_FAN, extra)\n assert int(float(hass.states.get(_PERCENTAGE_INPUT_NUMBER).state)) == value\n _verify(hass, state, value, None, None, None)",
"def tick(self):\n return True",
"def cooldown_checker(self):\n self.cooldown_tick += 1\n if self.cooldown_tick == self.pattern_cooldown:\n self.wait = False\n self.cooldown_tick = 0",
"def solvestep(self):\n if self.__n>=self.__nt:\n return True\n \n for self.__n in range (self.__n,self.__n+self.__output):\n self.__timestep()\n\n self.__plotcallback(self.__u[self.pml_length:-self.pml_length,\n self.pml_length:-self.pml_length],\n self.__n*self.__dt)\n time.sleep(self.__slowdown)\n \n return False",
"def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)"
] | [
"0.66823375",
"0.6660327",
"0.6061239",
"0.6020045",
"0.60030603",
"0.59889495",
"0.5890945",
"0.58775824",
"0.58463895",
"0.58456963",
"0.58017486",
"0.57889277",
"0.574577",
"0.57411665",
"0.57179874",
"0.56755745",
"0.5675489",
"0.5675418",
"0.56641567",
"0.5627082",
"0.55876935",
"0.5587316",
"0.55785406",
"0.5574275",
"0.556071",
"0.5556522",
"0.5556154",
"0.5548831",
"0.5535873",
"0.5487044"
] | 0.68362546 | 0 |
Check that the wheel moves by approximately 35 degrees during the closedloop period on trials where a feedback (error sound or valve) is delivered. | def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):
# Get the Bpod extracted wheel data
timestamps = data['wheel_timestamps']
position = data['wheel_position']
return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_wheel_move_before_feedback(data, **_):\n # Get tuple of wheel times and positions within 100ms of feedback\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=data[\"feedback_times\"] - 0.05,\n end=data[\"feedback_times\"] + 0.05,\n )\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the displacement\n for i, trial in enumerate(traces):\n pos = trial[1]\n if pos.size > 1:\n metric[i] = pos[-1] - pos[0]\n\n # except no-go trials\n metric[data[\"choice\"] == 0] = np.nan # NaN = trial ignored for this check\n nans = np.isnan(metric)\n passed = np.zeros_like(metric) * np.nan\n\n passed[~nans] = (metric[~nans] != 0).astype(float)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def _wheel_move_during_closed_loop(re_ts, re_pos, data, wheel_gain=None, tol=1, **_):\n if wheel_gain is None:\n _log.warning(\"No wheel_gain input in function call, returning None\")\n return None, None\n\n # Get tuple of wheel times and positions over each trial's closed-loop period\n traces = traces_by_trial(re_ts, re_pos,\n start=data[\"goCueTrigger_times\"],\n end=data[\"response_times\"])\n\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the absolute displacement\n for i, trial in enumerate(traces):\n t, pos = trial\n if pos.size != 0:\n # Find the position of the preceding sample and subtract it\n idx = np.abs(re_ts - t[0]).argmin() - 1\n origin = re_pos[idx]\n metric[i] = np.abs(pos - origin).max()\n\n # Load wheel_gain and thresholds for each trial\n wheel_gain = np.array([wheel_gain] * len(data[\"position\"]))\n thresh = data[\"position\"]\n # abs displacement, s, in mm required to move 35 visual degrees\n s_mm = np.abs(thresh / wheel_gain) # don't care about direction\n criterion = cm_to_rad(s_mm * 1e-1) # convert abs displacement to radians (wheel pos is in rad)\n metric = metric - criterion # difference should be close to 0\n rad_per_deg = cm_to_rad(1 / wheel_gain * 1e-1)\n passed = (np.abs(metric) < rad_per_deg * tol).astype(float) # less than 1 visual degree off\n metric[data[\"choice\"] == 0] = passed[data[\"choice\"] == 0] = np.nan # except no-go trials\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-part check didn't freak out\n return True",
"def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)",
"def quick_check(self):\n # loop three times and move the servo\n for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n self.servo(ang)\n time.sleep(.01)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False \n # if the three-part check didn't freak out\n return True",
"def check_wheel_freeze_during_quiescence(data, **_):\n assert np.all(np.diff(data[\"wheel_timestamps\"]) >= 0)\n assert data[\"quiescence\"].size == data[\"stimOnTrigger_times\"].size\n # Get tuple of wheel times and positions over each trial's quiescence period\n qevt_start_times = data[\"stimOnTrigger_times\"] - data[\"quiescence\"]\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=qevt_start_times,\n end=data[\"stimOnTrigger_times\"]\n )\n\n metric = np.zeros((len(data[\"quiescence\"]), 2)) # (n_trials, n_directions)\n for i, trial in enumerate(traces):\n t, pos = trial\n # Get the last position before the period began\n if pos.size > 0:\n # Find the position of the preceding sample and subtract it\n idx = np.abs(data[\"wheel_timestamps\"] - t[0]).argmin() - 1\n origin = data[\"wheel_position\"][idx if idx != -1 else 0]\n # Find the absolute min and max relative to the last sample\n metric[i, :] = np.abs([np.min(pos - origin), np.max(pos - origin)])\n # Reduce to the largest displacement found in any direction\n metric = np.max(metric, axis=1)\n metric = 180 * metric / np.pi # convert to degrees from radians\n criterion = 2 # Position shouldn't change more than 2 in either direction\n passed = metric < criterion\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def _wait_for_pole_at_rest(self, thold_ang_vel: float = 0.1 / 180.0 * np.pi):\n cnt = 0\n while cnt < 1.5 / self._dt:\n # Get next measurement\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n\n if np.abs(meas[2]) < thold_ang_vel and np.abs(meas[3]) < thold_ang_vel:\n cnt += 1\n else:\n cnt = 0",
"def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)",
"def check_wheel_move_during_closed_loop_bpod(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data.get('wheel_timestamps_bpod', data['wheel_timestamps'])\n position = data.get('wheel_position_bpod', data['wheel_position'])\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=1)",
"def test_diff_analog_in_cal_5v_loop(self):\n for g in self.l.gains:\n for s,c,e in [(5, 11, .1), (2.5, 10, .03)]:\n v = self.l.input(channels=(c,c,c,c), gains=(g,g,g,g))\n r = v[0]\n if s*g > 20:\n if s*g > 25:\n self.assertTrue(v[3],\n \"%s should be overvoltage (%g, %g)\" % (v,s,g))\n continue\n for i in r:\n self.assertTrue(abs(s-i) < e,\n \"%g is not %g, channel %g, gain %g\" % (i,s,c,g))",
"def tick(self):\n time.sleep(self.sleep_time)\n self.time += 1\n print(\"[Turn \" + str(self.time) + \"] Tick tock...\")\n directions = [(0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1)]\n for i in range(len(self.robots)):\n self.robots[i][2] = (self.robots[i][2] + self.robots[i][3]) % 8\n self.robots[i][3] = 0\n self.robots[i][0] += directions[self.robots[i][2]][0]\n self.robots[i][1] += directions[self.robots[i][2]][1]\n if self.robots[i][0] < 0 or self.robots[i][0] >= self.width or \\\n self.robots[i][1] < 0 or self.robots[i][1] >= self.height:\n self.robots = []\n raise RobotWallCrashException # A robot crashed into a wall! Simulation over!\n for j in range(len(self.robots)):\n if i != j:\n if self.robots[i][0] == self.robots[j][0] and self.robots[i][1] == self.robots[j][1]:\n self.robots = []\n raise RobotCollisionException # A robot crashed into another robot! Simulation over!\n for j in range(len(self.items)):\n if self.robots[i][0] == self.items[j][0] and self.robots[i][1] == self.items[j][1]:\n if self.items[j][2] == 1:\n self.robots = []\n raise RobotFoundTreasureException # A robot found the treasure! You win!\n elif self.items[j][2] == 2:\n self.robots = []\n raise RobotObjectCrashException # A robot crashed into an object!\n if random.random() > self.reliability:\n print(\"*glug-glug-glug* Oil leak detected!\")\n self.items.append([self.robots[i][0], self.robots[i][1], 2])",
"def safe_to_dance(self):\n # check for all fail/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"NOT SAFE TO DANCE!\")\n return False\n else: \n self.turn_by_deg(90) \n\n #after all checks have been done. We deduce it's safe\n print(\"SAFE TO DANCE!\")\n return True\n\n for x in range(3): \n self.shake()",
"def test(self):\n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n \n pulses=1000*3\n winsound.Beep(200, 1000) # .Beep(1650Hz, (XXXXms)) #e.g 1000ms=1second\n self.run(pulses); self.run(pulses, ANTI_CLK_W)\n sleep(1)\n\n winsound.Beep(400, 1000)\n self.swing(128, count=30); self.stop() #0.9 degrees\n sleep(1)\n\n winsound.Beep(800, 1000)\n print('Testing I.....')\n self.swing(32, count=120); self.stop() #0.225 degrees \n sleep(1)\n\n winsound.Beep(1600, 1000)\n print('Testing II.....')\n self.swing(2, count=1800); self.stop() #0.05625 degrees\n \n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n print(' Testings Done! ')\n return self.stop() #set low before exist ",
"def test_correctness(self):\n M_win = 1024\n N_fft = 131072\n # Set norm=False for correctness as the values obtained from the\n # scientific publication do not normalize the values. Normalizing\n # changes the sidelobe level from the desired value.\n w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False)\n f = fft(w, N_fft)\n spec = 20 * np.log10(np.abs(f / np.amax(f)))\n\n first_zero = np.argmax(np.diff(spec) > 0)\n\n PSLL = np.amax(spec[first_zero:-first_zero])\n\n BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win\n BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win\n\n assert_allclose(PSLL, -35.1672, atol=1)\n assert_allclose(BW_3dB, 1.1822, atol=0.1)\n assert_allclose(BW_18dB, 2.6112, atol=0.1)",
"def wait_untel_pos_eq(target_pos):\n global joints\n TriggerSimualtion()\n err = abs(np.array(target_pos) - np.array(joints))\n global err_old\n global position_geted\n while (err != err_old).all() or not position_geted:\n global err_old\n global position_geted\n global joints\n err_old = err\n TriggerSimualtion()\n #sleep(0.1)\n position_geted=False",
"def test_single_analog_in_cal_5v_loop(self):\n for s,c,e in [(5, 6, .1), (0, 7, .05), (2.5, 4, .05), (0, 5, .05)]:\n for v in self.l.input(channels=(c,c,c,c), gains=(1,1,1,1))[0]:\n self.assertTrue(abs(s-v) < e,\n \"%g is not %g, channel %g\" % (v, s, c))",
"def checkRing(self, verbose = 0):\n angleSum = self.tfs.ANGLE.sum()\n if self.verbose: print (\"check\")\n print (\"---------------------------------- \\n Checking, if ring is closed: \\n\", \"angleSum = \", angleSum)\n twoPi = 2*pi\n \n if angleSum != twoPi:\n fudge = 2*pi - angleSum\n print (\" ** Ring not closed - offset of: \", fudge)",
"def step_solution(self):\n import time, random\n time.sleep(1.0)\n print '(step_solution) Implement me!'\n return True if random.random() < 0.25 else False",
"def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.97 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print \"You got it right! It took you \", ctr, \" steps to catch the target.\"\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n\n ctr += 1\n if ctr >= 1000:\n print \"It took too many steps to catch the target.\"\n return caught",
"def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)",
"def quick_check(self):\n for ang in range(self.MIDPOINT-150, self.MIDPOINT+151, 150):\n self.servo(ang)\n if self.read_distance() < self.SAFE_DIST:\n return False\n return True",
"def safe_to_dance(self):\n #check for all fil/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"not safe to dance!\")\n return False\n else:\n self.turn_by_deg(90)\n #after all checks have been done, we deduce its safe to dance\n print(\"Dance on!\")\n return True",
"def _distanceCheck(self):\n\n # Catches the occasional polling error that occurs with the ultrasonic distance sensor\n try:\n # 3 point averager to smooth out distance data\n dist = self.u.distance\n sleep(0.05)\n dist += self.u.distance\n sleep(0.05)\n dist += self.u.distance\n dist = dist/3\n\n #print(\"Distance check reading: {0:1.3f}\".format(dist))\n\n if( dist <= self.detectDist ):\n if( self.birdHere == 0 ):\n self.statusWrite(\"in\")\n self.birdHere = 1\n\n else:\n if( self.birdHere == 1 ):\n self.statusWrite(\"out\")\n self.birdHere = 0\n\n except RuntimeError:\n pass",
"async def movement_tick(self):\n self.movement_progress += self.sub.power.get_power(\"engines\")\n threshold = get_square(self.x, self.y).difficulty()\n if \"blessing\" in self.sub.upgrades.keywords:\n # Bound difficulty above by four (normal waters)\n threshold = min(4, threshold)\n if self.movement_progress >= threshold:\n self.movement_progress -= threshold\n direction = self.direction # Direction can change as result of movement.\n message = await self.move()\n move_status = (\n f\"Moved **{self.sub.name()}** in direction **{direction.upper()}**!\\n\"\n f\"**{self.sub.name()}** is now at position **{self.get_position()}**.\"\n )\n\n # Do all the puzzles stuff.\n await self.sub.puzzles.movement_tick()\n\n # Cancel trades, if necessary.\n trade_messages = self.sub.inventory.timeout_trade()\n\n # Finally, return our movement.\n if message:\n return f\"{message}\\n{move_status}\", trade_messages\n return move_status, trade_messages\n return None, {}",
"def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER=None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance,\n OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return ctr-1",
"def trajectory_error_correcter(trajectories):\r\n\r\n n_birds, n_paramaters, n_time_steps = np.shape(trajectories)\r\n\r\n for i in range(n_birds):\r\n if squared_distance_calculator(trajectories[i, :, 1],\r\n trajectories[i, :, 0]) > 1.5 * min(squared_distance_calculator(\r\n trajectories[i, :, 1], trajectories[i, :, 2]), squared_distance_calculator(\r\n trajectories[i, :, 2], trajectories[i, :, 3]), squared_distance_calculator(\r\n trajectories[i, :, 3], trajectories[i, :, 4])):\r\n for l in range(n_birds):\r\n if squared_distance_calculator(trajectories[i, :, 0],\r\n trajectories[l, :, 1]) < 1.5 * min(squared_distance_calculator(\r\n trajectories[i, :, 1], trajectories[i, :, 2]), squared_distance_calculator(\r\n trajectories[i, :, 2], trajectories[i, :, 3]), squared_distance_calculator(\r\n trajectories[i, :, 3], trajectories[i, :, 4])):\r\n trajectories[i, :, :], trajectories[l, :, :] = trajectory_switcher(trajectories[i, :, :],\r\n trajectories[l, :, :], 1)\r\n break\r\n for j in range(2, n_time_steps):\r\n if squared_distance_calculator(trajectories[i, :, j - 1],\r\n trajectories[i, :, j]) > 1.5 * squared_distance_calculator(\r\n trajectories[i, :, j - 1], trajectories[i, :, j - 2]):\r\n for l in range(n_birds):\r\n if squared_distance_calculator(trajectories[i, :, j - 1],\r\n trajectories[l, :, j]) < 2 * squared_distance_calculator(\r\n trajectories[i, :, j - 1], trajectories[i, :, j - 2]):\r\n trajectories[i, :, :], trajectories[l, :, :] = trajectory_switcher(trajectories[i, :, :],\r\n trajectories[l, :, :], j)\r\n break\r\n return trajectories",
"def right_twist(self):\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()",
"def test_steps(self, model):\r\n model.fs.unit.initialize()\r\n\r\n # Add disturbances\r\n for t in model.fs.time:\r\n if 300 <= t < 600:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 - 10)\r\n elif 600 <= t < 900:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n elif 900 <= t < 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 + 10)\r\n elif t >= 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n\r\n # Transient solution\r\n solver.solve(model)\r\n\r\n times = [0, 300, 600, 900, 1200, 1500]\r\n sco2_exp = [305.2, 304.9, 305.1, 306.5, 305.7, 305.2]\r\n air_exp = [370.4, 373.1, 370.3, 365.9, 370.7, 370.4]\r\n wall_exp = [339.4, 338.7, 339.1, 340.7, 339.9, 339.4]\r\n\r\n self.check_temperatures(model, times, sco2_exp, air_exp, wall_exp)",
"def _ftolCheck(self):\n oldLoss = biggestRecentLoss(self.loss, self.lookback)\n newLoss = float(self.loss[-1])\n fracDiff = 2 * (oldLoss - newLoss)/(oldLoss + newLoss)\n \n if fracDiff < self.ftol:\n \n self.converged = True",
"def wait(self):\n self.set_vals(spin=.2)\n nearest_deg = 0\n nearest_deg_dist = self.perim_dist + 1\n for i, x in enumerate(self.ranges):\n if (x != 0) and (x < nearest_deg_dist):\n nearest_deg = i\n nearest_deg_dist = x\n if nearest_deg_dist < self.perim_dist:\n nearest_deg = ((nearest_deg + 180) % 360) - 180\n self.center(degree=nearest_deg)\n self.current_state = \"follow\""
] | [
"0.68954474",
"0.6710869",
"0.6472989",
"0.6405544",
"0.6370359",
"0.6348814",
"0.6189378",
"0.61632794",
"0.61214834",
"0.60362446",
"0.59838736",
"0.59421074",
"0.5902024",
"0.5865627",
"0.5801896",
"0.5794535",
"0.5791539",
"0.576547",
"0.5760083",
"0.57588446",
"0.5731025",
"0.5693097",
"0.56851715",
"0.5667445",
"0.56524956",
"0.5650005",
"0.56402564",
"0.56317246",
"0.56075674",
"0.55848557"
] | 0.6723247 | 1 |
Check that the wheel moves by approximately 35 degrees during the closedloop period on trials where a feedback (error sound or valve) is delivered. This check uses the Bpod wheel data (measured at a lower resolution) with a stricter tolerance (1 visual degree). | def check_wheel_move_during_closed_loop_bpod(data, wheel_gain=None, **_):
# Get the Bpod extracted wheel data
timestamps = data.get('wheel_timestamps_bpod', data['wheel_timestamps'])
position = data.get('wheel_position_bpod', data['wheel_position'])
return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data['wheel_timestamps']\n position = data['wheel_position']\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)",
"def check_wheel_move_before_feedback(data, **_):\n # Get tuple of wheel times and positions within 100ms of feedback\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=data[\"feedback_times\"] - 0.05,\n end=data[\"feedback_times\"] + 0.05,\n )\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the displacement\n for i, trial in enumerate(traces):\n pos = trial[1]\n if pos.size > 1:\n metric[i] = pos[-1] - pos[0]\n\n # except no-go trials\n metric[data[\"choice\"] == 0] = np.nan # NaN = trial ignored for this check\n nans = np.isnan(metric)\n passed = np.zeros_like(metric) * np.nan\n\n passed[~nans] = (metric[~nans] != 0).astype(float)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_wheel_freeze_during_quiescence(data, **_):\n assert np.all(np.diff(data[\"wheel_timestamps\"]) >= 0)\n assert data[\"quiescence\"].size == data[\"stimOnTrigger_times\"].size\n # Get tuple of wheel times and positions over each trial's quiescence period\n qevt_start_times = data[\"stimOnTrigger_times\"] - data[\"quiescence\"]\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=qevt_start_times,\n end=data[\"stimOnTrigger_times\"]\n )\n\n metric = np.zeros((len(data[\"quiescence\"]), 2)) # (n_trials, n_directions)\n for i, trial in enumerate(traces):\n t, pos = trial\n # Get the last position before the period began\n if pos.size > 0:\n # Find the position of the preceding sample and subtract it\n idx = np.abs(data[\"wheel_timestamps\"] - t[0]).argmin() - 1\n origin = data[\"wheel_position\"][idx if idx != -1 else 0]\n # Find the absolute min and max relative to the last sample\n metric[i, :] = np.abs([np.min(pos - origin), np.max(pos - origin)])\n # Reduce to the largest displacement found in any direction\n metric = np.max(metric, axis=1)\n metric = 180 * metric / np.pi # convert to degrees from radians\n criterion = 2 # Position shouldn't change more than 2 in either direction\n passed = metric < criterion\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_correctness(self):\n M_win = 1024\n N_fft = 131072\n # Set norm=False for correctness as the values obtained from the\n # scientific publication do not normalize the values. Normalizing\n # changes the sidelobe level from the desired value.\n w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False)\n f = fft(w, N_fft)\n spec = 20 * np.log10(np.abs(f / np.amax(f)))\n\n first_zero = np.argmax(np.diff(spec) > 0)\n\n PSLL = np.amax(spec[first_zero:-first_zero])\n\n BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win\n BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win\n\n assert_allclose(PSLL, -35.1672, atol=1)\n assert_allclose(BW_3dB, 1.1822, atol=0.1)\n assert_allclose(BW_18dB, 2.6112, atol=0.1)",
"def _wheel_move_during_closed_loop(re_ts, re_pos, data, wheel_gain=None, tol=1, **_):\n if wheel_gain is None:\n _log.warning(\"No wheel_gain input in function call, returning None\")\n return None, None\n\n # Get tuple of wheel times and positions over each trial's closed-loop period\n traces = traces_by_trial(re_ts, re_pos,\n start=data[\"goCueTrigger_times\"],\n end=data[\"response_times\"])\n\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the absolute displacement\n for i, trial in enumerate(traces):\n t, pos = trial\n if pos.size != 0:\n # Find the position of the preceding sample and subtract it\n idx = np.abs(re_ts - t[0]).argmin() - 1\n origin = re_pos[idx]\n metric[i] = np.abs(pos - origin).max()\n\n # Load wheel_gain and thresholds for each trial\n wheel_gain = np.array([wheel_gain] * len(data[\"position\"]))\n thresh = data[\"position\"]\n # abs displacement, s, in mm required to move 35 visual degrees\n s_mm = np.abs(thresh / wheel_gain) # don't care about direction\n criterion = cm_to_rad(s_mm * 1e-1) # convert abs displacement to radians (wheel pos is in rad)\n metric = metric - criterion # difference should be close to 0\n rad_per_deg = cm_to_rad(1 / wheel_gain * 1e-1)\n passed = (np.abs(metric) < rad_per_deg * tol).astype(float) # less than 1 visual degree off\n metric[data[\"choice\"] == 0] = passed[data[\"choice\"] == 0] = np.nan # except no-go trials\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)",
"def test_diff_analog_in_cal_5v_loop(self):\n for g in self.l.gains:\n for s,c,e in [(5, 11, .1), (2.5, 10, .03)]:\n v = self.l.input(channels=(c,c,c,c), gains=(g,g,g,g))\n r = v[0]\n if s*g > 20:\n if s*g > 25:\n self.assertTrue(v[3],\n \"%s should be overvoltage (%g, %g)\" % (v,s,g))\n continue\n for i in r:\n self.assertTrue(abs(s-i) < e,\n \"%g is not %g, channel %g, gain %g\" % (i,s,c,g))",
"def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-part check didn't freak out\n return True",
"def _wait_for_pole_at_rest(self, thold_ang_vel: float = 0.1 / 180.0 * np.pi):\n cnt = 0\n while cnt < 1.5 / self._dt:\n # Get next measurement\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n\n if np.abs(meas[2]) < thold_ang_vel and np.abs(meas[3]) < thold_ang_vel:\n cnt += 1\n else:\n cnt = 0",
"def quick_check(self):\n # loop three times and move the servo\n for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n self.servo(ang)\n time.sleep(.01)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False \n # if the three-part check didn't freak out\n return True",
"def test_single_analog_in_cal_5v_loop(self):\n for s,c,e in [(5, 6, .1), (0, 7, .05), (2.5, 4, .05), (0, 5, .05)]:\n for v in self.l.input(channels=(c,c,c,c), gains=(1,1,1,1))[0]:\n self.assertTrue(abs(s-v) < e,\n \"%g is not %g, channel %g\" % (v, s, c))",
"def check_wheel_integrity(data, re_encoding='X1', enc_res=None, **_):\n if isinstance(re_encoding, str):\n re_encoding = int(re_encoding[-1])\n # The expected difference between samples in the extracted units\n resolution = 1 / (enc_res or ephys_fpga.WHEEL_TICKS\n ) * np.pi * 2 * ephys_fpga.WHEEL_RADIUS_CM / re_encoding\n # We expect the difference of neighbouring positions to be close to the resolution\n pos_check = np.abs(np.diff(data['wheel_position']))\n # Timestamps should be strictly increasing\n ts_check = np.diff(data['wheel_timestamps']) <= 0.\n metric = pos_check + ts_check.astype(float) # all values should be close to zero\n passed = metric < 1.5 * resolution\n return metric, passed",
"def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)",
"def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)",
"def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))",
"def did_something_go_wrong_with_dumbells(error, dumbbell_deltax, new_dumbbell_deltax, explosion_protection):\r\n \r\n for i in range(new_dumbbell_deltax.shape[0]):\r\n if np.arccos(np.round(np.dot(dumbbell_deltax[i], new_dumbbell_deltax[i]) / (np.linalg.norm(dumbbell_deltax[i]) * np.linalg.norm(new_dumbbell_deltax[i])), 4)) > np.pi / 2:\r\n print(\" \")\r\n print(f\"Code point H# reached on dumbbell {str(i)}\")\r\n print(f\"Old delta x: {str(dumbbell_deltax[i])}\")\r\n print(f\"New delta x: {str(new_dumbbell_deltax[i])}\")\r\n if explosion_protection and np.linalg.norm(new_dumbbell_deltax[i]) > 5:\r\n print(\"ERROR\")\r\n print(\r\n f\"Dumbbell {str(i)} length ({str(np.linalg.norm(new_dumbbell_deltax[i]))}) has exceeded 5.\"\r\n )\r\n print(\"Something has probably gone wrong (normally your timestep is too large).\")\r\n print(\"Code exiting gracefully.\")\r\n error = True\r\n return error",
"def test_error_at_995tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.995))",
"def nearest_test_pulse(self):",
"def correct(self):\n stop_flag = False\n self.orbit_class.online_calc = False\n # read orbit devs\n for elem in self.orbit.corrs:\n try:\n elem.kick_mrad = elem.mi.get_value()\n except Exception as e:\n stop_flag = True\n logger.warning(elem.id + \" reading error: \" + str(e))\n return stop_flag\n \n elem.angle_read = elem.kick_mrad*1e-3\n elem.i_kick = elem.kick_mrad\n elem.ui.set_init_value(elem.kick_mrad)\n elem.ui.set_value(elem.kick_mrad)\n elem.transfer_map = self.parent.lat.method.create_tm(elem)\n if elem.ui.alarm:\n stop_flag = True\n logger.warning(\"correct - STOP: corrector shows alarm: \" + elem.id)\n\n\n #self.parent.lat.update_transfer_maps()\n\n for elem in self.orbit.bpms:\n if elem.id not in self.new_ref_orbit.keys():\n logger.warning(\"correct - STOP: BPM is not in new ref orbit: \" + elem.id)\n stop_flag = True\n return stop_flag\n elem.x = self.new_ref_orbit[elem.id][0]\n elem.y = self.new_ref_orbit[elem.id][1]\n elem.ui.set_value((elem.x*1000., elem.y*1000.))\n if elem.ui.alarm:\n logger.warning(\"correct - STOP: BPM shows alarm: \" + elem.id)\n stop_flag = True\n if stop_flag:\n return stop_flag\n self.orbit_class.online_calc = True\n\n\n if not self.orbit_class.is_rm_ok(self.orbit):\n logger.error(\" correct: Calculate Response Matrix\")\n self.parent.error_box(\"Calculate Response Matrix\")\n return 0\n\n self.orbit_class.golden_orbit.dict2golden_orbit()\n\n if self.orbit_class.ui.cb_close_orbit.isChecked():\n self.orbit_class.close_orbit()\n\n self.calc_correction = {}\n for cor in self.orbit.corrs:\n cor.angle = 0.\n self.calc_correction[cor.id] = cor.angle\n\n alpha = 0.\n\n self.orbit.correction(alpha=alpha, p_init=None, beta=0, print_log=False)\n for cor in self.orbit.corrs:\n self.calc_correction[cor.id] = cor.angle\n\n self.set_values2correctors()\n return stop_flag",
"def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))",
"def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2",
"def checkRing(self, verbose = 0):\n angleSum = self.tfs.ANGLE.sum()\n if self.verbose: print (\"check\")\n print (\"---------------------------------- \\n Checking, if ring is closed: \\n\", \"angleSum = \", angleSum)\n twoPi = 2*pi\n \n if angleSum != twoPi:\n fudge = 2*pi - angleSum\n print (\" ** Ring not closed - offset of: \", fudge)",
"def check_detected_wheel_moves(data, min_qt=0, **_):\n # Depending on task version this may be a single value or an array of quiescent periods\n min_qt = np.array(min_qt)\n if min_qt.size > data[\"intervals\"].shape[0]:\n min_qt = min_qt[:data[\"intervals\"].shape[0]]\n\n metric = data['firstMovement_times']\n qevt_start = data['goCueTrigger_times'] - np.array(min_qt)\n response = data['response_times']\n # First movement time for each trial should be after the quiescent period and before feedback\n passed = np.array([a < m < b for m, a, b in zip(metric, qevt_start, response)], dtype=float)\n nogo = data['choice'] == 0\n passed[nogo] = np.nan # No go trial may have no movement times and that's fine\n return metric, passed",
"def test_high_voltage_passing_signal(self):\n data = gen_random_data(-0.5, 0.5, self.channels)\n self.assertFalse(self.highvoltage_rule.is_broken(data))",
"def test_base_period_tolerance(delta, expected):\n result = wrap(180 - delta)\n print(result, np.isclose(result, -180))\n assert np.isclose(result, -180)[0] == expected",
"def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))",
"def test2(self): \n bt = np.loadtxt('test/btimes')\n mt = np.loadtxt('test/mtimes')\n \n fit = EventSync.sync(bt, mt, min_acceptable_error=.5, \n gross_delay_scales_start=None, gross_delay_scales_factor=2.0,\n clock_sync_guess=.9966361, n_scales=4)\n \n self.assertTrue(len(fit) > 40)\n self.assertTrue(fit.xi2yi[42] == 0)\n self.assertTrue(fit.xi2yi[580] == 51)",
"def test_WIMP_cut_region_on_true_data(bolo_name, mass, analysis):\n\t\n\n\t#Load 2D PDF\n\tfWIMP2D, f = PyRPl.open_ROOT_object(\"./ROOT_files/WIMP_PDF2D_\" + analysis + \".root\", \"WIMP_\" + mass + \"_GeV\")\n\n\t#Load cut value on PDF for 95% WIMP box\n\tcut_val_90, cut_val_99 = 0,0\n\twith open (\"./Text_files/WIMP_PDF_90_and_99_cut_value_\" + analysis + \".txt\", \"r\") as fcut:\n\t\tstuff = [elem.rstrip().split(\",\") for elem in fcut.readlines()]\n\t\tfor elem in stuff:\n\t\t\tmass_val = elem[0]\n\t\t\tif int(mass)==int(mass_val):\n\t\t\t\tcut_val_90 = float(elem[1])\n\t\t\t\tcut_val_99 = float(elem[2])\n\t\n\n\tdata_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_Analyse_ERA/Fond_ERA_merged/\"\n\tfilou = TFile(data_path + bolo_name + \"_\" + analysis + \"_fond.root\", \"read\")\n\ttree = filou.Get(\"data\")\n\tnum_pass_cut =0\n\n\thpass = TH2F(\"hpass\", \"hpass\", 100, 0, 15, 100, 0, 15)\n\n\t# #T Check that the events are found where expected\n\t# arr1 = np.random.uniform(0,15,size=(200000,2))\n\t# for i in range(arr1.shape[0]):\n\t# \tPDF_val = fWIMP2D.Eval(arr1[i][0], arr1[i][1])\n\t# \tif (cut_val_99<PDF_val<cut_val_90):\n\t# \t# if (cut_val_99<PDF_val<cut_val_90):\n\t# \t\tnum_pass_cut+=1\n\t# \t\thpass.Fill(arr1[i][0], arr1[i][1])\t\t\n\n\t# hpass.Draw()\n\t# raw_input()\n\n\tfor k in range(tree.GetEntries()):\n\t\ttree.GetEntry(k)\n\t\tER=(1+8./3)*0.5*(tree.EC1+tree.EC2)-0.33*(1.5*tree.EIA+4*tree.EIB+1.5*tree.EIC+4*tree.EID)\n\t\tPDF_val = fWIMP2D.Eval(ER, 0.5*(tree.EIB+tree.EID))\n\t\tif (cut_val_99<PDF_val<cut_val_90 and 0.5*(tree.EIB+tree.EID)>0.7):\n\t\t# if (cut_val_99<PDF_val<cut_val_90):\n\t\t\tnum_pass_cut+=1\n\t\t\thpass.Fill(0.5*(tree.EC1+tree.EC2), 0.5*(tree.EIB+tree.EID))\n\n\tprint num_pass_cut\n\thpass.Draw()\n\traw_input()",
"def test_full_chelyabinsk(self, tolerance=0.1):\n conditions = [8.5, 19.2e3, 3300, 4e6, 18.3*np.pi/180]\n system = asteroid(*conditions)\n system.solve_ode()\n peak = [system.burst['height (km)'], system.burst['ke Lost (kt/km)']]\n peak = np.array(peak)\n\n obs_peak = np.array([29.5578, 81.505])\n diff = abs(peak - obs_peak)\n assert np.all(diff < obs_peak * tolerance), \\\n \"chelyabinsk simulation does not match imperical data\"",
"def distance_between_wheels():"
] | [
"0.70852953",
"0.6954929",
"0.67112726",
"0.6621059",
"0.65253836",
"0.61799824",
"0.60759044",
"0.60224724",
"0.5899143",
"0.58587676",
"0.5816214",
"0.57901406",
"0.5771904",
"0.5665009",
"0.5647753",
"0.564011",
"0.5637705",
"0.5631486",
"0.562445",
"0.558077",
"0.5572283",
"0.55678546",
"0.554737",
"0.5518476",
"0.55046856",
"0.5501134",
"0.54926264",
"0.54864603",
"0.5473101",
"0.5471604"
] | 0.6956577 | 1 |
Check that the wheel does not move more than 2 degrees in each direction during the quiescence interval before the stimulus appears. | def check_wheel_freeze_during_quiescence(data, **_):
assert np.all(np.diff(data["wheel_timestamps"]) >= 0)
assert data["quiescence"].size == data["stimOnTrigger_times"].size
# Get tuple of wheel times and positions over each trial's quiescence period
qevt_start_times = data["stimOnTrigger_times"] - data["quiescence"]
traces = traces_by_trial(
data["wheel_timestamps"],
data["wheel_position"],
start=qevt_start_times,
end=data["stimOnTrigger_times"]
)
metric = np.zeros((len(data["quiescence"]), 2)) # (n_trials, n_directions)
for i, trial in enumerate(traces):
t, pos = trial
# Get the last position before the period began
if pos.size > 0:
# Find the position of the preceding sample and subtract it
idx = np.abs(data["wheel_timestamps"] - t[0]).argmin() - 1
origin = data["wheel_position"][idx if idx != -1 else 0]
# Find the absolute min and max relative to the last sample
metric[i, :] = np.abs([np.min(pos - origin), np.max(pos - origin)])
# Reduce to the largest displacement found in any direction
metric = np.max(metric, axis=1)
metric = 180 * metric / np.pi # convert to degrees from radians
    criterion = 2  # Position shouldn't change more than 2 degrees in either direction
passed = metric < criterion
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quick_check(self):\n # loop three times and move the servo\n for ang in range(self.MIDPOINT - 100, self.MIDPOINT + 101, 100):\n self.servo(ang)\n time.sleep(.01)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False \n # if the three-part check didn't freak out\n return True",
"def quick_check(self):\n #loop three times and moce the servo \n for ang in range(self.MIDPOINT - 115, self.MIDPOINT+116, 115):\n self.servo(ang)\n time.sleep(.05)\n if self.read_distance() < self.SAFE_DISTANCE:\n return False\n #if the three-part check didn't freak out\n return True",
"def quick_check(self):\n for ang in range(self.MIDPOINT-150, self.MIDPOINT+151, 150):\n self.servo(ang)\n if self.read_distance() < self.SAFE_DIST:\n return False\n return True",
"def _check_for_suicide(self, x, y):\n if self.count_liberties(x, y) == 0:\n self._pop_history()\n raise BoardError('Cannot play on location with no liberties!')",
"def check_wheel_move_before_feedback(data, **_):\n # Get tuple of wheel times and positions within 100ms of feedback\n traces = traces_by_trial(\n data[\"wheel_timestamps\"],\n data[\"wheel_position\"],\n start=data[\"feedback_times\"] - 0.05,\n end=data[\"feedback_times\"] + 0.05,\n )\n metric = np.zeros_like(data[\"feedback_times\"])\n # For each trial find the displacement\n for i, trial in enumerate(traces):\n pos = trial[1]\n if pos.size > 1:\n metric[i] = pos[-1] - pos[0]\n\n # except no-go trials\n metric[data[\"choice\"] == 0] = np.nan # NaN = trial ignored for this check\n nans = np.isnan(metric)\n passed = np.zeros_like(metric) * np.nan\n\n passed[~nans] = (metric[~nans] != 0).astype(float)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def safe_to_dance(self):\n # check for all fail/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"NOT SAFE TO DANCE!\")\n return False\n else: \n self.turn_by_deg(90) \n\n #after all checks have been done. We deduce it's safe\n print(\"SAFE TO DANCE!\")\n return True\n\n for x in range(3): \n self.shake()",
"def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1",
"def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True",
"def right_twist(self):\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()",
"def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):\n # Get the Bpod extracted wheel data\n timestamps = data['wheel_timestamps']\n position = data['wheel_position']\n\n return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)",
"def need_changes(self, wheel, angle):\n if self.current_angle.get(wheel) == angle:\n return False\n\n self.current_angle[wheel] = angle\n return True",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def test_function_continuity(self):\n self.check_function_continuity()",
"def checkRing(self, verbose = 0):\n angleSum = self.tfs.ANGLE.sum()\n if self.verbose: print (\"check\")\n print (\"---------------------------------- \\n Checking, if ring is closed: \\n\", \"angleSum = \", angleSum)\n twoPi = 2*pi\n \n if angleSum != twoPi:\n fudge = 2*pi - angleSum\n print (\" ** Ring not closed - offset of: \", fudge)",
"def safe_to_dance(self):\n #check for all fil/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"not safe to dance!\")\n return False\n else:\n self.turn_by_deg(90)\n #after all checks have been done, we deduce its safe to dance\n print(\"Dance on!\")\n return True",
"def check_position(self, desired_pos):\n new_pos = self.get_current_position()\n for i, pos in enumerate(new_pos):\n if abs(float(pos) - float(desired_pos[i])) > 0.5: # up to a half micrometer\n self.log.error(\n \"Table movement failed. Position: \"\n + str(new_pos)\n + \" is not equal to desired position: \"\n + str(desired_pos)\n )\n return False\n return True"
] | [
"0.6584762",
"0.6560514",
"0.5976932",
"0.59019744",
"0.5843425",
"0.57786036",
"0.5677382",
"0.5633235",
"0.55985075",
"0.55899405",
"0.55809784",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5558802",
"0.5531892",
"0.5531429",
"0.54987335"
] | 0.7057081 | 0 |
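For orientation, a minimal numeric sketch of the unit handling in the record above: the per-trial wheel displacement is computed in radians and converted to degrees before the 2-degree criterion is applied. The values below are invented; only the conversion and threshold mirror the document field.

    import numpy as np

    # Invented per-trial maximum displacements during quiescence, in radians of wheel rotation
    metric_rad = np.array([0.01, 0.05, 0.002])
    metric_deg = 180 * metric_rad / np.pi      # same conversion as in the record above
    passed = metric_deg < 2                    # the 2-degree quiescence criterion
    print(metric_deg)                          # roughly 0.57, 2.86 and 0.11 degrees
    print(passed)                              # only the 0.05 rad trial fails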
Check that the number of events per trial is correct. Within every trial interval there should be one of each trial event, except for errorCueTrigger_times, which should only be defined for incorrect trials | def check_n_trial_events(data, **_):
intervals = data['intervals']
correct = data['correct']
err_trig = data['errorCueTrigger_times']
# Exclude these fields; valve and errorCue times are the same as feedback_times and we must
# test errorCueTrigger_times separately
# stimFreeze_times fails often due to TTL flicker
exclude = ['camera_timestamps', 'errorCueTrigger_times', 'errorCue_times',
'firstMovement_times', 'peakVelocity_times', 'valveOpen_times',
'wheel_moves_peak_amplitude', 'wheel_moves_intervals', 'wheel_timestamps',
'wheel_intervals', 'stimFreeze_times']
events = [k for k in data.keys() if k.endswith('_times') and k not in exclude]
metric = np.zeros(data["intervals"].shape[0], dtype=bool)
# For each trial interval check that one of each trial event occurred. For incorrect trials,
# check the error cue trigger occurred within the interval, otherwise check it is nan.
for i, (start, end) in enumerate(intervals):
metric[i] = (all([start < data[k][i] < end for k in events]) and
(np.isnan(err_trig[i]) if correct[i] else start < err_trig[i] < end))
passed = metric.astype(bool)
assert intervals.shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_goCue_delays(data, **_):\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"goCueTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.0015) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_errorCue_delays(data, **_):\n metric = np.nan_to_num(data[\"errorCue_times\"] - data[\"errorCueTrigger_times\"], nan=np.inf)\n passed = ((metric <= 0.0015) & (metric > 0)).astype(float)\n passed[data[\"correct\"]] = metric[data[\"correct\"]] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_trial_length(data, **_):\n # NaN values are usually ignored so replace them with Inf so they fail the threshold\n metric = np.nan_to_num(data[\"feedback_times\"] - data[\"goCue_times\"], nan=np.inf)\n passed = (metric < 60.1) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_all_repetition_frequency_have_timedelta(self):\n for value in EventRepetitionFrequency:\n if value is EventRepetitionFrequency.not_repeated:\n self.assertIsNone(value.to_timedelta())\n else:\n self.assertIsNotNone(value.to_timedelta())",
"def check_stimOn_goCue_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"stimOn_times\"], nan=np.inf)\n passed = (metric < 0.01) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_time_supp_length_matches_no_timesteps(self):\n for no_timesteps in [5, 578, 993, 300072]:\n for dt in [0.1, 0.5, 3.0]:\n test_rec = rt.Recording(np.empty([6, no_timesteps, 1]), dt=dt)\n self.assertEqual(\n len(test_rec.time_supp),\n no_timesteps,\n 'Expected length of time_supp {} to match no_timesteps of '\n 'input {}.'.format(len(test_rec.time_supp), no_timesteps),\n )",
"def num_trials(self):",
"def testTrialEndedEarly2(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(\n mock_runner, t, result(stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_complete(mock_runner, trials[-1], result(100, 12))\n self.assertEqual(len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))",
"def testTrialErrored2(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(\n mock_runner, t, result(stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_error(mock_runner, trials[-1])\n self.assertEqual(len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))",
"def test_check_trialpubs_nctids(self):\n pmids = {29037101, 28735855, 12214118, 28697569, 15380154, 26294005, 21539488, 23680940, 23720230, 24164735,\n 25599006, 25681666, 26086182, 21514250, 19621072, 25961184, 26384466, 24134194, 24495355, 25996285,\n 26265727, 24374288, 25771249, 28359749, 24045855, 24880197, 26640437, 26682691, 27895474, 23796946,\n 25264972, 24507770, 26305649, 25565485, 25891115, 26890759, 26867200, 27529771, 26812512, 24899709,\n 28054939, 27102361, 25344629, 24617349, 25733635, 25733639, 29141041, 25391305, 26135351, 24938711,\n 28319243, 15205295, 20858954, 25352453, 26213339, 25414047, 24334113, 19643207, 28676015, 27570766,\n 17569205, 25002849, 26690214, 18709889, 22232016, 16210710, 22122400, 19204158, 21506929, 22449789,\n 22794138, 27738491, 19641487, 9149659, 28213052, 12663275, 10374811, 17101822, 22371413, 28861684,\n 26652155, 16614482, 27624276, 28925645, 22170358, 25061569, 28980404, 26740832, 26286890, 28448083,\n 29562543, 25928696, 26253520, 26003546, 20810976}\n res = bot.check_trialpubs_nctids(29865058, '10.3233/JAD-179940')\n pmids1 = set([int(pmid) for pmid in res.pmids])\n self.assertEqual(pmids1, pmids)\n pmids = {24491689, 23741057, 15265849, 12409541, 26673558, 23616602, 21080835, 21444883, 21931078, 26984864,\n 26857383, 25131977, 23680885, 21080836, 9921604, 22433752, 21187258, 21315441, 26560249, 25286913,\n 18342224, 12598066, 20176990, 25921522, 21906250, 26874388, 20562255, 18794390, 27207191}\n res = bot.check_trialpubs_nctids(27634736, '10.1002/ejhf.638')\n pmids1 = set([int(pmid) for pmid in res.pmids])\n self.assertEqual(pmids1, pmids)",
"def check_iti_delays(data, **_):\n # Initialize array the length of completed trials\n metric = np.full(data[\"intervals\"].shape[0], np.nan)\n passed = metric.copy()\n # Get the difference between stim off and the start of the next trial\n # Missing data are set to Inf, except for the last trial which is a NaN\n metric[:-1] = \\\n np.nan_to_num(data[\"intervals\"][1:, 0] - data[\"stimOff_times\"][:-1] - 0.5, nan=np.inf)\n passed[:-1] = np.abs(metric[:-1]) < .5 # Last trial is not counted\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def valid_clocks(self) -> int:\n pass",
"def check_audio_pre_trial(data, audio=None, **_):\n if audio is None:\n _log.warning(\"No BNC2 input in function call, retuning None\")\n return None\n s = audio[\"times\"][~np.isnan(audio[\"times\"])] # Audio TTLs with NaNs removed\n metric = np.array([], dtype=np.int8)\n for i, c in zip(data[\"intervals\"][:, 0], data[\"goCue_times\"]):\n metric = np.append(metric, sum(s[s > i] < (c - 0.02)))\n passed = metric == 0\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_stimOn_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOn_times\"] - data[\"stimOnTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_consecutive_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_consecutive_events()\n woodshop.start_event(event1)\n woodshop.log_conflicts(event1.start_time)\n woodshop.start_event(event2)\n woodshop.end_event(event1)\n woodshop.log_conflicts(event1.end_time)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n assert caplog.text == \"\"",
"def verify_event_timing(self, event, item):\n return True",
"def testTrialEndedEarly(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_complete(mock_runner, t3, result(1, 12))\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(\n mock_runner, t1, result(stats[str(1)][\"r\"], 10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(\n mock_runner, t2, result(stats[str(1)][\"r\"], 10)))",
"def check_trigger_amount(self, thr=None, num_timepoints_expected=0, tr=0):\n LGR.info(\"Counting trigger points\")\n # Use the trigger channel to find the TRs,\n # comparing it to a given threshold.\n trigger = self.timeseries[self.trigger_idx]\n time = self.timeseries[0]\n LGR.info(f\"The trigger is in channel {self.trigger_idx}\")\n # Check that trigger and time channels have the same length.\n # If not, resample time to the length of the trigger\n if len(time) != len(trigger):\n LGR.warning(\n \"The trigger channel has a different sampling \"\n \"from the registered time. Using a resampled version \"\n \"of time to find the starting time.\"\n )\n time = np.linspace(time[0], time[-1], len(trigger))\n\n self._time_resampled_to_trigger = time\n\n flag = 0\n if thr is None:\n # If trigger channels are binary\n # (i.e., \"on\" is a higher value and \"off\" is a lower value)\n # and each \"on\" and \"off\" are each always approzimately the same value\n # then any value above the mean is \"on\" and every value below the mean\n # is \"off\".\n thr = np.mean(trigger)\n flag = 1\n timepoints = trigger > thr\n num_timepoints_found = np.count_nonzero(np.ediff1d(timepoints.astype(np.int8)) > 0)\n if flag == 1:\n LGR.info(\n f\"The number of timepoints according to the std_thr method \"\n f\"is {num_timepoints_found}. The computed threshold is {thr:.4f}\"\n )\n else:\n LGR.info(\n f\"The number of timepoints found with the manual threshold of {thr:.4f} \"\n f\"is {num_timepoints_found}\"\n )\n time_offset = time[timepoints.argmax()]\n\n if num_timepoints_expected:\n LGR.info(\"Checking number of timepoints\")\n if num_timepoints_found > num_timepoints_expected:\n timepoints_extra = num_timepoints_found - num_timepoints_expected\n LGR.warning(\n f\"Found {timepoints_extra} timepoints\"\n \" more than expected!\\n\"\n \"Assuming extra timepoints are at the end \"\n \"(try again with a more liberal thr)\"\n )\n\n elif num_timepoints_found < num_timepoints_expected:\n timepoints_missing = num_timepoints_expected - num_timepoints_found\n LGR.warning(f\"Found {timepoints_missing} timepoints\" \" less than expected!\")\n if tr:\n LGR.warning(\n \"Correcting time offset, assuming missing \"\n \"timepoints are at the beginning (try again \"\n \"with a more conservative thr)\"\n )\n time_offset -= timepoints_missing * tr\n else:\n LGR.warning(\"Can't correct time offset - you should \" \"specify the TR\")\n\n else:\n LGR.info(\"Found just the right amount of timepoints!\")\n\n else:\n LGR.warning(\n \"The necessary options to find the amount of timepoints \" \"were not provided.\"\n )\n self.thr = thr\n self.time_offset = time_offset\n self.timeseries[0] = self.timeseries[0] - time_offset\n self.num_timepoints_found = num_timepoints_found",
"def test_next_window_time_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n # Value 15 will be filtered as it ranges between lower and upper bound limits\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)\n # Let next window time elapse\n time.sleep(4)\n filtered_value = test_window_scheme.filter(self.more_than_upper_bound)\n # None is expected as filtered value because at least one sample has been already passed and\n # value ranges outside lower and upper bound limits\n self.assertEquals(filtered_value, None)",
"def onetrial(trialnum):\r\n eventsexist = False\r\n anglelist = csvread(\"Angles/%s.csv\" % (trialnum, ))\r\n for n in range(len(anglelist)):\r\n for m in range(len(anglelist[n])):\r\n if anglelist[n][m] == 'Events':\r\n eventsexist = True\r\n if eventsexist == False:\r\n print(\"WARNING: no events in angles file, aborting with empty output.\")\r\n return {}\r\n angles = readangles(anglelist)\r\n trajlist = csvread(\"Trajectories/%s.csv\" % (trialnum, ))\r\n trajectories = readtrajectories(trajlist, angles['Frames'])\r\n output = {**trajectories, **angles}\r\n output['TrialUsed'] = trialnum\r\n #import pdb; pdb.set_trace()\r\n return output",
"def test_online_pred_contemporaneous_events(self):\n learner, held_out_student_idx = self.set_up_learner(False, False, 1.0)\n num_test_interactions = len(held_out_student_idx)\n unique_student_idx = set(held_out_student_idx)\n\n # test without providing times\n prob_correct = get_online_rps(learner, held_out_student_idx, max_iterations=1000,\n compute_first_interaction_rps=True)\n for student_idx in unique_student_idx:\n # expect all predicted RPs to be different\n student_prob_correct = prob_correct[held_out_student_idx == student_idx]\n self.assertTrue(np.all(np.diff(student_prob_correct)))\n\n # test with all unique times\n unique_times = np.arange(num_test_interactions)\n prob_correct = get_online_rps(learner, held_out_student_idx, max_iterations=1000,\n compute_first_interaction_rps=True,\n test_student_time_idx=unique_times)\n for student_idx in unique_student_idx:\n # expect all predicted RPs to be different\n student_prob_correct = prob_correct[held_out_student_idx == student_idx]\n self.assertTrue(np.all(np.diff(student_prob_correct)))\n\n # test with contemporaneous interactions. last two of each student's interactions share time\n contemp_times = np.arange(num_test_interactions)\n for student_idx in unique_student_idx:\n contemp_times[np.flatnonzero(held_out_student_idx == student_idx)[-2:]] = -1\n prob_correct = get_online_rps(learner, held_out_student_idx, max_iterations=1000,\n compute_first_interaction_rps=True,\n test_student_time_idx=contemp_times)\n for student_idx in unique_student_idx:\n # expect last two predicted RPs to be same\n student_prob_correct = prob_correct[held_out_student_idx == student_idx]\n if len(student_prob_correct) > 1:\n self.assertFalse(np.any(np.diff(student_prob_correct[-2:])))",
"def test_check_freq_ts_crash(self):\n self.assertEqual(check_freq(self.jobset3), 'ocrashed')",
"def isolate_self_reporting_cases(self, time: int):",
"def testTrialErrored(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_error(mock_runner, t3)\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(\n mock_runner, t1, result(stats[str(1)][\"r\"], 10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(\n mock_runner, t2, result(stats[str(1)][\"r\"], 10)))",
"def strong_collision_trials(max_number_of_trials):\n\n num_trials_strong_list = [] # list to hold number of trials\n\n # Iterate through the number of experiments for\n # strong collision\n for i in range(1, 20):\n # Call function to run strong collision\n num_trials_strong = strong_collision_breaking()\n\n # Append num_trials_weak to list to keep track\n num_trials_strong_list.append(num_trials_strong)\n\n # Take the average\n avg_num_trials_strong = sum(num_trials_strong_list) / len(num_trials_strong_list)\n\n return avg_num_trials_strong",
"def testAllTimesExists(self):\n times = []\n for ref in self.coal.get_community_references():\n times.append(self.coal.get_community_parameters(ref)[\"time\"])\n for time in times:\n self.assertTrue(time in self.times, msg=\"Time {} not in times.\".format(time))\n for time in self.times:\n self.assertTrue(time in times, msg=\"Time {} not in times.\".format(time))",
"def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return",
"def audio_event_detection(self):\n # Test if trials already exist\n if 'TimeIntervals_speaker' not in self.model.nwb.intervals:\n # Test if file contains audio signals\n if any(name in self.model.nwb.stimulus for name in ['speaker1', 'speaker2']):\n AudioEventDetection(parent=self)\n else:\n NoAudioDialog()\n else:\n ExistIntervalsDialog()",
"def test_issue_tracked_times(self):\n pass",
"def test_bad_time_repeat(self):\n repeated = np.concatenate([np.repeat(self.times[0], 3),\n self.times[3:]])\n self.assertFalse(utils.check_timestamps(repeated))"
] | [
"0.6352797",
"0.6281508",
"0.6265516",
"0.6174661",
"0.610466",
"0.6047479",
"0.598376",
"0.58821785",
"0.58792245",
"0.58375305",
"0.5833678",
"0.5823899",
"0.5821095",
"0.57591885",
"0.57549226",
"0.5688481",
"0.56665254",
"0.5654957",
"0.56445843",
"0.5622237",
"0.5621385",
"0.5620579",
"0.56091774",
"0.5563927",
"0.5558801",
"0.55262166",
"0.5521653",
"0.54977006",
"0.5491604",
"0.5485876"
] | 0.79935586 | 0 |
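To make the per-trial containment logic of check_n_trial_events concrete, here is a toy two-trial example. The key names follow the record above, the numbers are invented, and the loop is a trimmed restatement of the document's logic rather than the function itself: trial 0 is correct (error-cue trigger must be NaN), trial 1 is incorrect (the trigger must fall inside the trial interval).

    import numpy as np

    intervals = np.array([[0.0, 5.0], [5.0, 10.0]])         # trial start/stop times
    correct = np.array([True, False])
    err_trig = np.array([np.nan, 7.0])                      # errorCueTrigger_times
    events = {"goCue_times": np.array([1.0, 6.0]),
              "feedback_times": np.array([4.0, 9.0])}

    metric = np.zeros(len(intervals), dtype=bool)
    for i, (start, end) in enumerate(intervals):
        in_window = all(start < events[k][i] < end for k in events)
        err_ok = np.isnan(err_trig[i]) if correct[i] else start < err_trig[i] < end
        metric[i] = in_window and err_ok
    print(metric)                                           # both toy trials pass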
Check that the time difference between the visual stimulus offset command being triggered and the visual stimulus effectively turning off on the screen is smaller than 150 ms. | def check_stimOff_delays(data, **_):
metric = np.nan_to_num(data["stimOff_times"] - data["stimOffTrigger_times"], nan=np.inf)
passed = (metric <= 0.15) & (metric > 0)
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def time_is_out(self):\n return self.get_simulation_time() > self.config.max_time",
"def check_stimOn_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOn_times\"] - data[\"stimOnTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def offset_capture():\n Clock()",
"def check_stimOn_goCue_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"stimOn_times\"], nan=np.inf)\n passed = (metric < 0.01) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4",
"def check_timer():\n end = time.time()\n time_elapsed = end - target_time[0]\n durationMSG = fg.cyan + f\"Scans Completed for {args.target} in: \" + fg.rs\n print(durationMSG, display_time(time_elapsed))",
"def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True",
"def __check_mouse_offset(self, task):\n\n mouse_pointer = Mgr.get(\"mouse_pointer\", 0)\n mouse_x = mouse_pointer.get_x()\n mouse_y = mouse_pointer.get_y()\n mouse_start_x, mouse_start_y = self._mouse_start_pos\n\n if max(abs(mouse_x - mouse_start_x), abs(mouse_y - mouse_start_y)) > 3:\n if self._picked_point:\n Mgr.do(\"init_transform\", self._picked_point)\n return task.done\n\n return task.cont",
"def remaining_ms():",
"def display_timeoffset_statubar(self, timeOffset):\n\n if timeOffset:\n self.lbTimeOffset.setText(\n \"Time offset: <b>{}</b>\".format(timeOffset if self.timeFormat == S else seconds2time(timeOffset)))\n else:\n self.lbTimeOffset.clear()",
"def has_ontime_pane(self):\n pass",
"def test_custom_time(self):\n interval = 0.5\n M = simulation.StateMonitor(self.G, 'v', interval=interval)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, interval)))",
"def off_screen(self):\n return self._x < 0",
"def check_last_cycle_duration(self):\n min_pm_time = timedelta(seconds=self.args.min_pm_time)\n max_pm_time = timedelta(seconds=self.args.max_pm_time)\n if self.args.pm_timestamp:\n pm_timestamp = datetime.fromtimestamp(self.args.pm_timestamp)\n now = datetime.now()\n pm_time = now - pm_timestamp\n if pm_time < min_pm_time:\n raise TestFailed(\n \"{0} time less than expected: {1} < {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, min_pm_time\n )\n )\n if pm_time > max_pm_time:\n raise TestFailed(\n \"{0} time greater than expected: {1} > {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, max_pm_time\n )\n )\n\n logging.info(\n \"{0} time: {1}\".format(self.args.pm_operation.capitalize(), pm_time)\n )",
"def test_check_sun_above_horizon():\n pass",
"def test_only_one_delay_in_window(self):\n sliding_window = SlidingWindow(SLIDING_WINDOW_SIZE)\n sliding_window.delays = [100]\n self.assertEqual(sliding_window.get_median(), -1)",
"def overtime(self):\n if self._overtime != '':\n return True\n return False",
"def set_display_time(log_mes,displaytime = 1800000):\n kill_adb_uiautomator_block_old()\n if int(get_screen_off_time(log_mes)) == displaytime:\n if int(displaytime) >= 60000:\n log_mes.info( 'screen off time is already %s mins'%(displaytime/60000))\n else:\n log_mes.info('screen off time is already %s secs'%(displaytime/1000))\n else:\n os.system('adb shell am start -a android.settings.DISPLAY_SETTINGS')\n device(text=\"Sleep\").click()\n kill_adb_uiautomator_block_old()\n if int(displaytime) >= 60000:\n device(text=\"%s minutes\"%(displaytime/60000)).click()\n else:\n device(text=\"%s seconds\"%(displaytime/1000)).click()\n time.sleep(1)\n os.system(\"adb shell am force-stop com.android.settings\")",
"def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r",
"def is_on(self):\n return self.device.override_time != 0",
"def GAME_TIME_ADVANCE(dt):",
"def is_dropped(upd_time, time_before):\n if (upd_time - time_before) / float(Config.BOUNDARY) >= 1.5:\n return True\n return False",
"def test_time(self):\n M = simulation.StateMonitor(self.G, 'v')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))",
"def lightleep(time_ms: int = None) -> None:",
"def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length",
"def check_goCue_delays(data, **_):\n metric = np.nan_to_num(data[\"goCue_times\"] - data[\"goCueTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.0015) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def check_time_borders(self, sam_ev, ):\n mask = np.logical_and(sam_ev['timeMJD'] > self.DataStart,\n sam_ev['timeMJD'] < self.DataEnd)\n return sam_ev[mask]",
"def end_stimulus(win,end_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show end stimulus\n #if 50 seconds pass, then stop showing end stimulus\n end_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n break\n end_stim.setAutoDraw(False)",
"def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0"
] | [
"0.58006537",
"0.56599766",
"0.5618854",
"0.5499769",
"0.5478083",
"0.53942436",
"0.53775084",
"0.5348891",
"0.5329084",
"0.53289485",
"0.5304241",
"0.5275082",
"0.52612203",
"0.52390426",
"0.5216157",
"0.5191221",
"0.51908827",
"0.5189167",
"0.51579493",
"0.51578766",
"0.51496315",
"0.5147549",
"0.51450396",
"0.5143258",
"0.5140493",
"0.51270986",
"0.5118876",
"0.5116448",
"0.51155025",
"0.51129055"
] | 0.58672446 | 0 |
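A minimal self-contained sketch of how the delay check above behaves on synthetic trials; the function body is copied from the record's document field, while the trial times are invented (one on-time offset, one too slow, one missing detection).

    import numpy as np

    def check_stimOff_delays(data, **_):
        # Body copied from the record's document field
        metric = np.nan_to_num(data["stimOff_times"] - data["stimOffTrigger_times"], nan=np.inf)
        passed = (metric <= 0.15) & (metric > 0)
        assert data["intervals"].shape[0] == len(metric) == len(passed)
        return metric, passed

    trigger = np.array([10.0, 20.0, 30.0])                  # invented off-trigger times
    data = {
        "intervals": np.c_[trigger - 1.0, trigger + 1.0],   # (n_trials, 2) trial start/stop
        "stimOffTrigger_times": trigger,
        "stimOff_times": trigger + np.array([0.05, 0.20, np.nan]),
    }
    metric, passed = check_stimOff_delays(data)
    print(metric)   # 0.05 s, 0.20 s, inf for the missing detection
    print(passed)   # only the first trial is within the 150 ms limit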
Check that the time difference between the visual stimulus freeze command being triggered and the visual stimulus effectively freezing on the screen is smaller than 150 ms. | def check_stimFreeze_delays(data, **_):
metric = np.nan_to_num(data["stimFreeze_times"] - data["stimFreezeTrigger_times"], nan=np.inf)
passed = (metric <= 0.15) & (metric > 0)
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _checkUiFreeze(self):\r\n\r\n motionCountBefore = core.FW_conf['blackbox'].getCountMotionFrames()\r\n\r\n # swipe a bit to see if it causes motion\r\n yCoordinate = int(self.phone.uiState.getScreenHeight()/1.5)\r\n self.phone._touch.drawLine((self.phone.uiState.getScreenWidth()-2, yCoordinate), (self.phone.uiState.getScreenWidth() - 100, yCoordinate), stepSize = 30)\r\n self.phone._run('Move screen a bit to see is UI freezed', testStepReporting = False)\r\n\r\n if motionCountBefore == core.FW_conf['blackbox'].getCountMotionFrames():\r\n #self.phonecomment('Possible UI Freeze. KBD_KEY_KEYLOCK_TOGGLE pressed to check if phone UI is freezed or not')\r\n\r\n try:\r\n self.phone.select('KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone.delay(3000, False)\r\n motionCountBefore = core.FW_conf['blackbox'].getCountMotionFrames() # screen lock might work, get motion count here\r\n self.phone.select('KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone.delay(3000, False)\r\n except:\r\n self.phone.comment('Failed to press keylock in %s mode' % (self.phone.isFullBlackBox() and 'full blackbox' or core.FW_conf['blackbox_enabled'] and 'blackbox' or 'whitebox'))\r\n\r\n if core.FW_conf['blackbox'].getCountMotionFrames() == motionCountBefore:\r\n\r\n dumpCheckOk = core.FW_conf['current_testcase'].checkQCRamdumps(True)[0] # releases ramdump mode, which corresponds to a freeze.\r\n if not dumpCheckOk: # if unable to fetch dumps, wait for boot up\r\n if core.FW_conf['ramdump_method'] == \"SDCARD\": # if dumps are fetched from SDCARD, it might take more than 5 minutes\r\n timeout = 360\r\n else:\r\n timeout = 50\r\n\r\n self.phone.comment('Nothing happened with keylock toggle. Waiting %s seconds for motion..' % str(timeout))\r\n for i in range(timeout):\r\n self.phone.sendWaitMessageToGraniteGUI(1, 'Waiting UI motion for %s second' % 1)\r\n self.phone.delay(1000, False)\r\n if core.FW_conf['blackbox'].getCountMotionFrames() != motionCountBefore: # if motion detected, release loop\r\n break\r\n\r\n if core.FW_conf['blackbox'].getCountMotionFrames() == motionCountBefore:\r\n if core.FW_conf['should_stop']:\r\n return\r\n try:\r\n self.phone.resetPhone('UI Freeze detected with external camera')\r\n finally:\r\n self.phone.comment('Sleeping %s seconds after phone reset' % str(50))\r\n self.phone.sendWaitMessageToGraniteGUI(1, 'Waiting phone to boot up for %s second' % 1)\r\n self.phone.delay(50000)\r\n self.phone.fail('UI Freeze detected with external camera')\r\n else:\r\n self.phone.comment('Motion detected, not a freeze. Continue..')\r\n else:\r\n self.phone.comment('Not a freeze. Continue..')",
"def check_response_stimFreeze_delays(data, **_):\n # Calculate the difference between stimOn and goCue times.\n # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold.\n metric = np.nan_to_num(data[\"stimFreeze_times\"] - data[\"response_times\"], nan=np.inf)\n # Test for valid values\n passed = ((metric < 0.1) & (metric > 0)).astype(float)\n # Finally remove no_go trials (stimFreeze triggered differently in no_go trials)\n # These values are ignored in calculation of proportion passed\n passed[data[\"choice\"] == 0] = np.nan\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def remaining_ms():",
"def getRenderingDelay():\n\treturn 10000",
"def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True",
"def is_delayed(self) -> bool:\n if self.periodic and self.is_attached():\n return self.runtime.cost > self.runtime.tasklet.tick\n\n return False",
"def delay_checks(self):\n return False",
"def please_wait_should_appear_while_settings_are_being_applied(driver):\n assert wait_on_element_disappear(driver, 60, '//h6[contains(.,\"Please wait\")]')\n assert wait_on_element(driver, 10, f'//span[text()=\"{nameserver_1}\"]')",
"def _busy_wait_ms(self, ms):\n start = time.time()\n delta = ms/1000.0\n while (time.time() - start) <= delta:\n pass",
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def BeFrameNice(ms = 15):\n try:\n if not stackless.current.is_main:\n if ms < 1.0:\n ms = 1.0\n while blue.os.GetWallclockTimeNow() - blue.os.GetWallclockTime() > ms * 10000:\n blue.synchro.Yield()\n ms *= 1.02\n\n return True\n return False\n except:\n raise",
"def wait(self):\n time.sleep(0.010)",
"def wait_second(self, time_wait):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Waiting {time_wait}s',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.wait_in_second(time_wait)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")",
"def cap_frame_rate(self):\n now = pygame.time.get_ticks()\n milliseconds_since_last_update = now - self.last_update_completed\n\n time_to_sleep = self.desired_milliseconds_between_updates - milliseconds_since_last_update\n if time_to_sleep > 0:\n pygame.time.delay(int(time_to_sleep))\n self.last_update_completed = now",
"def wait(self, ms=None):\r\n util.raiseNotDefined()",
"def wait_minute(self, time_wait):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Waiting {time_wait} minute/s',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.wait_in_minute(time_wait)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")",
"def test_time(self):\n M = simulation.StateMonitor(self.G, 'v')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))",
"def __sleep(self):\n if self.sleep_duration > 0:\n gevent.sleep(self.sleep_duration)\n else:\n self.__warn(\n f\"The average tick took longer than the set tick duration of {self.__tick_duration}. \"\n f\"Program is to heavy to run real time\")",
"def before_sweep(self):\r\n _debug('GUISignalGenerator: before_sweep()')\r\n self.window.sleep(0.05)",
"def T_elapsed(T_amount: BlockHeight) -> bool:\n T_now = getCurrentBlockHeight()\n return T_now - T_init >= T_amount",
"def should_sleep(self):\n return",
"def interface_sleeptime(sleep_time, puzzle, command_color=\"#ff8800\", arg_color=\"#5588ff\", error_color=\"#ff0000\"):\n try:\n print(puzzle.sleep_time)\n puzzle.sleep_time = float(sleep_time)\n print(puzzle.sleep_time)\n except:\n print(f\"{colored('Error:', error_color)} Given time is not a float.\")",
"def busyWait(self):\n time.sleep(0.0)",
"def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0",
"def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length",
"def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))",
"def wait(self):\n time.sleep(self.pause_time)",
"def _check_pulse(self):\n timedelta = time.time() - self.heartbeat\n update_delay = float(1/self.qbpm.frequency)\n time_to_update = False\n if timedelta > update_delay:\n time_to_update = True\n self.heartbeat = time.time()\n return time_to_update",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def check_timelimit_slot__(self):\n timerange = self.valkkafs_manager.getTimeRange()\n \n if len(timerange) < 1: # empty tuple implies no frames\n print(\"PlaybackController: check_timelimit_slot__ : WARNING! no timerange from ValkkaFS\")\n # fabricate a dummy time : this exact moment\n current_time = int(time.time() * 1000)\n timerange = (\n current_time,\n current_time + 1\n )\n print(\"check_timelimits_slot__ : timerange =\", timerange)\n print(\"check_timelimits_slot__ : %s -> %s\" % ( formatMstimestamp(timerange[0]), formatMstimestamp(timerange[1]) ) )\n self.signals.set_fs_time_limits.emit(timerange)"
] | [
"0.6285543",
"0.6078267",
"0.59383905",
"0.58365345",
"0.5743292",
"0.57134765",
"0.5674565",
"0.5663597",
"0.5644523",
"0.5638384",
"0.55540234",
"0.5553151",
"0.55458677",
"0.5534003",
"0.55099857",
"0.5495784",
"0.54823756",
"0.54745513",
"0.547419",
"0.54643154",
"0.5441384",
"0.5389485",
"0.53857094",
"0.53516364",
"0.5328387",
"0.53109527",
"0.5309855",
"0.5296929",
"0.5287878",
"0.5281148"
] | 0.6246702 | 1 |
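The freeze-delay check, like the others, returns a per-trial metric and pass array; a plausible way to summarise such an array into one session-level number (an assumption about downstream use, not something stated in the record) is the NaN-aware proportion of passing trials:

    import numpy as np

    # Invented per-trial outcomes from a delay check; NaN marks trials that the check ignored
    passed = np.array([1.0, 1.0, 0.0, np.nan, 1.0])
    proportion_passed = np.nanmean(passed)
    print(proportion_passed)   # 0.75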
Check that the reward volume is between 1.5 and 3 uL for correct trials, 0 for incorrect. | def check_reward_volumes(data, **_):
metric = data['rewardVolume']
correct = data['correct']
passed = np.zeros_like(metric, dtype=bool)
# Check correct trials within correct range
passed[correct] = (1.5 <= metric[correct]) & (metric[correct] <= 3.)
# Check incorrect trials are 0
passed[~correct] = metric[~correct] == 0
assert data["intervals"].shape[0] == len(metric) == len(passed)
return metric, passed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_reward_volume_set(data, **_):\n metric = data[\"rewardVolume\"]\n passed = 0 < len(set(metric)) <= 2 and 0. in metric\n return metric, passed",
"def reward_threshold(self) -> Optional[float]:",
"def acquisition_function_expected_volume_removal(\n gp_reward_model: BasicGPRewardModel,\n) -> int:\n assert gp_reward_model.use_comparisons\n\n # DL: This assumes the same observation model for each query which we might\n # want to change at some point\n query0 = gp_reward_model.candidate_queries[0]\n response = query0.response\n\n (\n candidate_queries_gp_repr,\n candidate_queries_linear_combination,\n candidate_queries_gp_repr_idx,\n ) = gp_reward_model.get_candidate_queries_gp_repr()\n # mu_diff, _ = gp_reward_model.get_candidate_queries_reward_predictions()\n mu_diff, _ = gp_reward_model.gp_model.predict_multiple(\n candidate_queries_gp_repr,\n linear_combination=candidate_queries_linear_combination,\n )\n\n if response == \"bernoulli\":\n prob = (1 + np.clip(mu_diff, -1, 1)) / 2\n elif response == \"deterministic\":\n prob = np.sign(mu_diff)\n elif response == \"probit\":\n prob = norm.cdf(mu_diff / (np.sqrt(2) * query0.sigma))\n else:\n raise NotImplementedError(f\"evr for {response}\")\n\n volume_removal = np.minimum(1 - prob, prob)\n\n argmax_volume_removal = argmax_over_index_set(\n volume_removal, range(len(candidate_queries_gp_repr_idx))\n )\n return candidate_queries_gp_repr_idx[np.random.choice(argmax_volume_removal)]",
"def penalty_reward(reward):\n if reward < 0:\n return True\n return False",
"def check(self, reward_new, reward, iteration):\n return reward_new > reward",
"def reward(self, reward):\n return float(np.sign(reward))",
"def _get_reward(self):\n if self.status():\n return self.current_step/self.ep_length # the reward is proportional to the duration \n else:\n return 0",
"def checkCorrectLumisEventGEN(dataset):\n numlumis = dbs3Client.getLumiCountDataSet(dataset)\n numEvents = dbs3Client.getEventCountDataSet(dataset)\n # numEvents / numLumis >= 300\n if numlumis >= numEvents / 300.0:\n return True\n else:\n return False",
"def test_volume():\n structure = Material(input)\n assert (structure.volume == 90.725624999999965)",
"def test_staff_inputs_expressions(self):\r\n problem = self.build_problem(answer=\"1/3\", tolerance=1e-3)\r\n correct_responses = [\"1/3\", \"0.333333\"]\r\n incorrect_responses = []\r\n self.assert_multiple_grade(problem, correct_responses, incorrect_responses)",
"def test_reward(self):\n success = True\n old_sim = self.sim\n old_robot_num = self.robot_num\n old_agents = copy.deepcopy(self.agents)\n old_obstacles = copy.deepcopy(self.obstacles)\n old_goals = copy.deepcopy(self.goals)\n old_action_list = copy.deepcopy(self.last_actions)\n\n # Test collision penalties and overtaking penalty\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0,0)\n )\n self.obstacles = []\n self.goals = []\n self.last_actions = []\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.1, 0.1)))\n self.agents.append(self.sim.addAgent((-0.1, 0.1)))\n self.agents.append(self.sim.addAgent((0.1, -0.1)))\n self.agents.append(self.sim.addAgent((-0.1, -0.1)))\n r = self.reward()[0].item()\n exp = -4.22\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -4 for 4 collisions, -0.2 for 4 predicted \"\n \"collisions, -0.02 for overtake penalty with top right agent\")\n\n # Test closeness penalties and overtaking penalty\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0,0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.35, 0.35)))\n self.agents.append(self.sim.addAgent((0.35, -0.35)))\n self.agents.append(self.sim.addAgent((-0.35, 0.35)))\n self.agents.append(self.sim.addAgent((-0.35, -0.35)))\n r = self.reward()[0].item()\n exp = -1.02\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -1 for 4 closeness violations, -0.02 for \"\n \"overtake penalty with top right agent\")\n\n # Test passing penalty\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.7, -0.5), 1.0, 10, 5.0, 5.0,\n 0.2, 1.5, (-0.5, 0)))\n r = self.reward()[0].item()\n exp = -0.02\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.02 for passing violation\")\n\n # Test crossing penalty\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.35, 0.3), 1.0, 10, 5.0, 5.0,\n 0.2, 1.5, (0, -0.5)))\n r = self.reward()[0].item()\n exp = -0.27\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.02 for crossing violation, -0.25 for \"\n \"closeness violation\")\n\n # Test action penalty (moving)\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.last_actions = [1, 1]\n self.last_action_ind = 0\n r = self.reward()[0].item()\n exp = -0.01\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.01 for moving\")\n\n # Test action penalty (changing actions)\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.last_actions = [1, 0]\n self.last_action_ind = 0\n r = self.reward()[0].item()\n exp = -0.01\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.01 for 
changing actions\")\n\n self.sim = old_sim\n self.robot_num = old_robot_num\n self.agents = old_agents\n self.obstacles = old_obstacles\n self.goals = old_goals\n self.last_actions = old_action_list\n return success",
"def volume_error(self) -> float:\n # TODO written formula and executed formula are different.\n ve = np.sum(self.predicted - self.true) / np.sum(self.true)\n return float(ve)",
"def test_absolute_volume(self):\n\n assert self.test_shape.volume() == pytest.approx(50 * 60 * math.pi * 2 * 1000)",
"def model_error(self):\n return self.premium() / self.data['premium'] - 1",
"def check_for_float(check):",
"def test_uniformity_values(self):\n for key, exp_val in self.unif_values.items():\n meas_val = self.quart.uniformity_module.rois[key].pixel_value\n self.assertAlmostEqual(exp_val, meas_val, delta=5)",
"def test_special_U3(self):\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, -0.1).to_matrix(), \"U3\", {})\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, 0.2).to_matrix(), \"U3\", {\"u3\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.2, 0.0).to_matrix(), \"U3\", {\"u3\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.0, 0.2).to_matrix(), \"U3\", {\"u3\": 1})\n self.check_oneq_special_cases(U3Gate(0.11, 0.27, 0.3).to_matrix(), \"U3\", {\"u3\": 1})",
"def bet_check(m):\n try:\n value = float(m.content)\n if 0 <= value <= player.coins:\n return True\n else:\n return False\n except:\n return False",
"def reward(self, reward):\r\n return np.sign(reward)",
"def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer",
"def normalizedvolumeerror():\r\n volume_simulation = 0\r\n volume_observation = 0\r\n for sim_index in range(1, len(hydrograph)):\r\n volume_simulation_trapezoid = (hydrograph[sim_index-1]+hydrograph[sim_index])*simulation_timestep/2\r\n volume_simulation = volume_simulation + volume_simulation_trapezoid\r\n for data_index in range(1, len(obs_data)):\r\n volume_observation_trapezoid = (obs_data[data_index-1]+obs_data[data_index])*time_difference.total_seconds()/2\r\n volume_observation = volume_observation + volume_observation_trapezoid\r\n volume_error = abs(volume_simulation-volume_observation)/(volume_simulation + volume_observation)\r\n return volume_error",
"def confirm_Alex_fast(p, q, r):\n if p*q*r < 0:\n return False\n if q*r + p*r + p*q -1 == 0:\n return True\n else:\n return False",
"def test_SemiF47_level_0_5(self):\n self.assertEqual(viol_check(self.vol,5), [[131, 143]])",
"def check_sum_three(agent):\n return sum(agent.received[-3:]) == 3",
"def raise3():\r\n\r\n value = int(RAISE_BUTTONS[2].get_text().split(\" $\")[1])\r\n raise_bet(value)",
"def assert_valid_volume(wells,exception_info='invalid volume'):\n wells = ensure_list(wells)\n \n assert all([well.volume >= get_well_dead_volume(well) for well in wells]), exception_info\n assert all([well.volume <= get_well_max_volume(well) for well in wells]), exception_info",
"def test_uniform_in_comoving_volume_from_uniform_in_volume():\n original_samples = SamplesDict(\n {param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}\n )\n new_samples = uniform_in_comoving_volume_from_uniform_in_volume(\n original_samples\n )\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert all(\n new_sample in original_samples.samples.T for new_sample in\n new_samples.samples.T\n )\n # check that if there are no redshift samples it still reweights\n original_samples.pop(\"redshift\")\n new_samples = uniform_in_comoving_volume_from_uniform_in_volume(\n original_samples\n )\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert all(\n new_sample in original_samples.samples.T for new_sample in\n new_samples.samples.T\n )\n # check that if there are no distance samples it still reweights\n original_samples = SamplesDict(\n {param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}\n )\n original_samples.pop(\"luminosity_distance\")\n new_samples = uniform_in_comoving_volume_from_uniform_in_volume(\n original_samples\n )\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert all(\n new_sample in original_samples.samples.T for new_sample in\n new_samples.samples.T\n )\n # check that if there are no redshift or distance samples it fails\n original_samples.pop(\"redshift\")\n with pytest.raises(Exception):\n new_samples = uniform_in_comoving_volume_from_uniform_in_volume(\n original_samples\n )",
"def succeeded(self):\n return self.current_reward == 300",
"def check_volatility(sr,time_unit='quarter'):\n\n if time_unit.lower()=='year':\n time_columns = [\"Year\"]\n elif time_unit.lower()=='quarter':\n time_columns = [\"Year\",\"Quarter\"]\n else:\n raise ValueError('invalid time unit {}. Must be \"quarter\" or \"year\"'.format(time_unit))\n turnover_columns = ['TurnoverCentral','TurnoverGross','TurnoverLocal','AmountDepositedByDealer','TaxCreditBeforeAdjustment']\n gr = sr.dropna(subset=turnover_columns+time_columns).groupby(['DealerTIN']+time_columns)\n dealer_time_level = gr.first()\n dealer_time_level[turnover_columns] = gr[turnover_columns].mean() #.sum() # If we used older years, summing turnovers for each year to have constant benchmark\n dealer_level = dealer_time_level.reset_index().drop_duplicates('DealerTIN')\n for c in turnover_columns:\n dealer_level[c+'_rsd'] = div(dealer_time_level[c].std(level=0),dealer_time_level[c].mean(level=0))\n for c in turnover_columns:\n attr_cumulative(dealer_level,c+'_rsd')\n\n # these seem to give almost no predictive power\n\n # Now I can add the results from \"red\" to the returns DB - but careful, it uses the future.",
"def test_negative_volume(self):\n with pytest.raises(StateError):\n State(substance=\"water\", T=Q_(300, \"K\"), v=Q_(-10.13, \"m**3/kg\"))"
] | [
"0.74345225",
"0.6066089",
"0.60463685",
"0.5764846",
"0.56886953",
"0.56773823",
"0.5655682",
"0.557835",
"0.557422",
"0.5477881",
"0.54463166",
"0.5427771",
"0.5410125",
"0.5407789",
"0.53777486",
"0.5369534",
"0.5368286",
"0.53387535",
"0.5324725",
"0.5320856",
"0.5311284",
"0.5309389",
"0.52981544",
"0.5289969",
"0.5276346",
"0.52678025",
"0.52671313",
"0.52540475",
"0.52518857",
"0.5250131"
] | 0.7724097 | 0 |