Dataset columns (per-record fields and their observed sizes):
  query            string, 9 to 9.05k characters
  document         string, 10 to 222k characters
  metadata         dict
  negatives        sequence, length 30
  negative_scores  sequence, length 30
  document_score   string, 4 to 10 characters
  document_rank    string, 2 distinct values
Plots the free energy change computed using the equilibrated snapshots between the proper target time frames (f_ts and r_ts) in both forward (data points are stored in F_df and F_ddf) and reverse (data points are stored in R_df and R_ddf) directions.
def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):
    fig = pl.figure(figsize=(8,6))
    ax = fig.add_subplot(111)
    pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)
    pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)
    for dire in ['top', 'right']:
        ax.spines[dire].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    max_fts = max(f_ts)
    rr_ts = [aa/max_fts for aa in f_ts[::-1]]
    f_ts = [aa/max_fts for aa in f_ts]
    r_ts = [aa/max_fts for aa in r_ts]
    line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0],
                            color='#D2B9D3', zorder=-5)
    for i in range(len(f_ts)):
        line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]],
                        color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1)
    line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o',
                     mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2)
    for i in range(len(rr_ts)):
        line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]],
                        color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3)
    line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o',
                     mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4)
    pl.xlim(r_ts[0], f_ts[-1])
    pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10)
    pl.yticks(fontsize=10)
    leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=1,
                    prop=FP(size=18), frameon=False)
    pl.xlabel(r'$\mathrm{Fraction\/of\/the\/simulation\/step}$', fontsize=16, color='#151B54')
    pl.ylabel(r'$\mathrm{\Delta G\/%s}$' % P.units, fontsize=16, color='#151B54')
    pl.xticks(f_ts, ['%.2f' % i for i in f_ts])
    pl.tick_params(axis='x', color='#D2B9D3')
    pl.tick_params(axis='y', color='#D2B9D3')
    pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf'))
    pl.close(fig)
    return
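The function above reads several names from module scope rather than taking them as arguments: pl (the plotting module), FP (a font-properties class), os, and an options object P with units and output_directory attributes. A minimal, hypothetical harness that stubs those globals and feeds the function synthetic forward/reverse convergence data is sketched below; the stub names, P attributes, and numbers are assumptions for illustration, not part of the original source, and the function itself is assumed to be defined in the same module.

# Hypothetical harness: stub the globals plotdFvsTime() expects, then call it
# with synthetic forward/reverse free-energy convergence data.
import os
import types
import matplotlib
matplotlib.use('Agg')                              # render off-screen
import matplotlib.pyplot as pl                     # the function uses this module as `pl`
from matplotlib.font_manager import FontProperties as FP  # assumed meaning of `FP`

# Assumed stand-in for the script's options object; only the attributes
# touched by plotdFvsTime() are provided.
P = types.SimpleNamespace(units='(kcal/mol)', output_directory='.')

# Synthetic data: five forward and five reverse checkpoints with shrinking error bars.
f_ts = [2000, 4000, 6000, 8000, 10000]             # forward snapshot counts
r_ts = [2000, 4000, 6000, 8000, 10000]             # reverse snapshot counts
F_df, F_ddf = [1.90, 2.10, 2.00, 2.05, 2.02], [0.30, 0.20, 0.15, 0.10, 0.08]
R_df, R_ddf = [2.10, 2.00, 2.04, 2.01, 2.02], [0.30, 0.20, 0.15, 0.10, 0.08]

plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf)  # writes dF_t.pdf to the cwd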
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def sysPLQF(mirror, blkFlag=True):\n import matplotlib.pyplot as plt\n import numpy as np # to ndarray.flatten ax\n\n mir = mirror\n xend = max(mir.r_t)\n\n fig, ax = plt.subplots(nrows=2, ncols=2,)\n ax = np.ndarray.flatten(ax)\n ax[0].set_title('Real Power Generated')\n for mach in mir.Machines:\n ax[0].plot(mir.r_t, mach.r_Pe, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Pe Gen '+ mach.Busnam)\n ax[0].set_xlabel('Time [sec]')\n ax[0].set_ylabel('MW')\n\n ax[2].set_title('Reactive Power Generated')\n for mach in mir.Machines:\n ax[2].plot(mir.r_t, mach.r_Q, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Q Gen '+ mach.Busnam)\n ax[2].set_xlabel('Time [sec]')\n ax[2].set_ylabel('MVAR')\n\n ax[1].set_title('Total System P 
Loading')\n ax[1].plot(mir.r_t, mir.r_ss_Pload, \n marker = 11,\n #fillstyle='none',\n #linestyle = ':',\n label = 'Pload')\n ax[1].set_xlabel('Time [sec]')\n ax[1].set_ylabel('MW')\n\n ax[3].set_title('System Mean Frequency')\n ax[3].plot(mir.r_t, mir.r_f,\n marker = '.',\n #linestyle = ':',\n label = r'System Frequency')\n ax[3].set_xlabel('Time [sec]')\n ax[3].set_ylabel('Frequency [PU]')\n\n # Global Plot settings\n for x in np.ndarray.flatten(ax):\n x.set_xlim(0,xend)\n x.legend()\n x.grid(True)\n\n fig.tight_layout()\n\n plt.show(block = blkFlag)", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for 
z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def plot_fvc_distances(\n configuration: BaseConfiguration,\n fibre_data_F: pandas.DataFrame,\n path: str | pathlib.Path | None = None,\n):\n\n data = configuration.assignment_data.fibre_table.copy()\n data = data.reset_index().set_index([\"positioner_id\", \"fibre_type\"])\n\n data_F = fibre_data_F.copy()\n data_F = data_F.reset_index().set_index([\"positioner_id\", \"fibre_type\"])\n\n is_dither = configuration.is_dither\n\n data = data.loc[data_F.index, :]\n\n data_F[\"xwok_distance\"] = data.xwok - data_F.xwok\n data_F[\"ywok_distance\"] = data.ywok - data_F.ywok\n data_F[\"wok_distance\"] = numpy.hypot(data_F.xwok_distance, data_F.ywok_distance)\n\n deccen = configuration.assignment_data.boresight[0][1]\n cos_dec = numpy.cos(numpy.deg2rad(float(deccen)))\n\n data_F[\"ra_distance\"] = (data.ra_epoch - data_F.ra_epoch) * cos_dec * 3600.0\n data_F[\"dec_distance\"] = (data.dec_epoch - data_F.dec_epoch) * 3600.0\n data_F[\"sky_distance\"] = numpy.hypot(data_F.ra_distance, data_F.dec_distance)\n\n if not is_dither:\n data_F[\"racat_distance\"] = (data_F.ra_icrs - data_F.ra_epoch) * cos_dec * 3600.0\n data_F[\"deccat_distance\"] = (data_F.dec_icrs - data_F.dec_epoch) * 3600.0\n data_F[\"skycat_distance\"] = numpy.hypot(\n data_F.racat_distance, data_F.deccat_distance\n )\n\n with plt.ioff(): # type: ignore\n seaborn.set_theme()\n\n fig, axes = plt.subplots(1, 3, figsize=(30, 10))\n\n if not is_dither:\n data_F = data_F.groupby(\"positioner_id\").filter(\n lambda g: g.assigned.any() & g.on_target.any() & g.valid.all()\n )\n\n assert isinstance(axes, numpy.ndarray)\n\n _plot_wok_distance(data_F, axes[0])\n\n _plot_sky_distance(\n data_F,\n axes[1],\n \"sky_distance\",\n is_dither=is_dither,\n plot_metrology=True,\n title=\"Sky distance (ra/dec vs ra/dec)\",\n )\n\n # if not is_dither:\n # _plot_sky_distance(\n # data_F,\n # axes[1, 0],\n # \"skycat_distance\",\n # is_dither=is_dither,\n # plot_metrology=False,\n # title=\"Sky distance (ra/dec vs racat/deccat)\",\n # )\n\n _plot_sky_quiver(data_F, axes[2], is_dither=is_dither)\n\n fig.suptitle(\n f\"Configuration ID: {configuration.configuration_id}\"\n + (\" (dithered)\" if is_dither else 
\"\")\n )\n\n plt.tight_layout()\n\n if path:\n fig.savefig(str(path))\n plt.close(fig)\n\n seaborn.reset_defaults()\n\n return fig", "def plot_delta(fname, temp, delta_ts_list, delta_rxn_list, labels, var='G'):\n max_y_lines = 5\n x_axis = np.array([0, 1, 3, 4, 6, 7])\n y_axis = []\n y_labels = []\n # y_min = np.floor(np.array(g_rxn_list).min())\n # y_max = np.ceil(np.array(g_ts_list).max())\n for index in range(max_y_lines):\n try:\n y_axis.append(np.array([0.0, 0.0, delta_ts_list[index], delta_ts_list[index],\n delta_rxn_list[index], delta_rxn_list[index]]))\n except IndexError:\n y_axis.append(None)\n try:\n y_labels.append(labels[index])\n except IndexError:\n y_labels.append(None)\n\n make_fig(fname, x_axis, y_axis[0],\n x_label='reaction coordinate', y_label='\\u0394' + var + ' at {} K (kcal/mol)'.format(temp),\n y1_label=y_labels[0], y2_label=y_labels[1], y3_label=y_labels[2], y4_label=y_labels[3],\n y5_label=y_labels[4], y2_array=y_axis[1], y3_array=y_axis[2], y4_array=y_axis[3], y5_array=y_axis[4],\n ls2='-', ls3='-', ls4='-', ls5='-',\n # y_lima=y_min, y_limb=y_max,\n hide_x=True,\n )", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', 
x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()", "def show_derivative(self):\n for trace in self.plotWidget.plotDataItems:\n dt = float(trace.attrs['dt'])\n dtrace = np.diff(trace.data)\n x = pgplot.make_xvector(dtrace, dt)\n self.plotWidget.plot(x, dtrace, pen=pg.mkPen('r'))", "def plot_rfs(self):\n self.xe = self.data['XE']\n self.ye = self.data['YE']\n# self.IE = self.data['IE']\n self.Var = self.data['Var']\n std = np.sqrt(np.mean(self.Var))\n fig = plt.gcf()\n ax = plt.gca()\n ax.set_xlim((np.min(self.xe), np.max(self.xe)))\n ax.set_ylim((np.min(self.ye), np.max(self.ye)))\n for xe, ye in zip(self.xe, self.ye):\n circ = plt.Circle((xe, ye), std, color='b', alpha=0.4)\n fig.gca().add_artist(circ)", "def volatility_factor_plot(prices: list, dates: list, vf_data: VFStopsResultType,\n green_zone_x_values: List[list], red_zone_x_values: List[list],\n yellow_zone_x_values: List[list], y_range: float, minimum: float,\n text_str: str = \"\", str_color: str = \"\", **kwargs):\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n register_matplotlib_converters()\n\n title = kwargs.get('title', '')\n save_fig = kwargs.get('save_fig', False)\n filename = kwargs.get('filename', 'temp_candlestick.png')\n\n stop_loss_objects = vf_data.data_sets\n\n shown_stop_loss = f\"VF: {np.round(vf_data.vf.curated, 3)}\\n\"\n if vf_data.current_status.status.value != 'stopped_out':\n shown_stop_loss += f\"Stop Loss: ${np.round(vf_data.stop_loss.curated, 2)}\"\n else:\n shown_stop_loss += \"Stop Loss: n/a\"\n\n fig, ax_handle = plt.subplots()\n\n date_indexes = [datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n ax_handle.plot(date_indexes, prices, color='black')\n\n # Set the tick spacing (this is because dates crowd easily)\n mid_tick_size = int(len(date_indexes) / 4)\n ax_handle.xaxis.set_ticks([\n date_indexes[0], date_indexes[mid_tick_size], date_indexes[mid_tick_size * 2],\n date_indexes[mid_tick_size * 3], date_indexes[-1]\n ])\n\n y_start = minimum - (y_range * 0.05)\n height = y_range * 0.02\n\n for stop in stop_loss_objects:\n sub_dates = [date_indexes[index] for index in stop.time_index_list]\n ax_handle.plot(sub_dates, stop.caution_line, color='gold')\n ax_handle.plot(sub_dates, stop.stop_loss_line, color='red')\n\n for green_zone in green_zone_x_values:\n start = mdates.date2num(date_indexes[green_zone[0]])\n end = mdates.date2num(date_indexes[green_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='green',\n facecolor='green',\n fill=True\n )\n )\n\n for red_zone in red_zone_x_values:\n start = mdates.date2num(date_indexes[red_zone[0]])\n end = mdates.date2num(date_indexes[red_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='red',\n facecolor='red',\n fill=True\n )\n )\n\n for yellow_zone in yellow_zone_x_values:\n start = mdates.date2num(date_indexes[yellow_zone[0]])\n end = mdates.date2num(date_indexes[yellow_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='yellow',\n facecolor='yellow',\n fill=True\n )\n )\n\n ax_handle.set_title(title)\n\n if len(text_str) > 0 and len(str_color) > 0:\n new_start = minimum - (y_range * 0.2)\n new_end = minimum + (y_range * 1.02)\n ax_handle.set_ylim(new_start, new_end)\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n 
ax_handle.text(\n 0.02,\n 0.02,\n text_str,\n color=str_color,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n if len(shown_stop_loss) > 0:\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.90,\n shown_stop_loss,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n try:\n if save_fig:\n temp_path = os.path.join(\"output\", \"temp\")\n if not os.path.exists(temp_path):\n # For functions, this directory may not exist.\n plt.close(fig)\n plt.clf()\n return\n\n filename = os.path.join(temp_path, filename)\n if os.path.exists(filename):\n os.remove(filename)\n plt.savefig(filename)\n\n else:\n plt.show()\n\n except: # pylint: disable=bare-except\n print(\n f\"{utils.WARNING}Warning: plot failed to render in 'volatility factor plot' of \" +\n f\"title: {title}{utils.NORMAL}\")\n\n plt.close('all')\n plt.clf()", "def v_positions_history(self, end=yesterdaydash(), rendered=True):\n start = self.totcftable.iloc[0].date\n times = pd.date_range(start, end)\n tdata = []\n for date in times:\n sdata = sorted(\n [\n (\n date,\n fob.briefdailyreport(date).get(\"currentvalue\", 0),\n fob.name,\n )\n for fob in self.fundtradeobj\n ],\n key=lambda x: x[1],\n reverse=True,\n )\n tdata.extend(sdata)\n\n tr = ThemeRiver()\n tr.add(\n series_name=[foj.name for foj in self.fundtradeobj],\n data=tdata,\n label_opts=opts.LabelOpts(is_show=False),\n singleaxis_opts=opts.SingleAxisOpts(type_=\"time\", pos_bottom=\"10%\"),\n )\n if rendered:\n return tr.render_notebook()\n else:\n return tr", "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def plot_fits_and_residuals(all_fits_df, dfs_list, expt_name, **kwargs):\n\n colors = cm.rainbow(np.linspace(0, 1, len(dfs_list)))\n fig = plt.figure(figsize=(5, 5), tight_layout=True)\n fig.set_dpi(300)\n\n filename = f'{expt_name}_fits_and_residuals'\n fileformat = '.png'\n \n # Set parameters for decay traces plot\n xlabel_traces = kwargs.get('xlabel_traces', 'Time after Chase (Hrs.)')\n ylabel_traces = kwargs.get('ylabel_traces', 'YFP(t)/YFP(0)')\n ylim_traces = kwargs.get('ylim_traces', (0, 1.2))\n xticks_traces = kwargs.get('xticks_traces', make_ticks(all_fits_df.x_input, decimals=0))\n yticks_traces = kwargs.get('y_ticks_traces', make_ticks((0, 1), decimals=1, n_ticks=7))\n xlim_traces = kwargs.get('xlim_traces', (xticks_traces.min(), xticks_traces.max())) \n # Set parameters for decay fit residuals plot\n xlabel_resids = kwargs.get('xlabel_resids', xlabel_traces)\n ylabel_resids = kwargs.get('ylabel_resids', 'Residuals')\n xlim_resids = kwargs.get('xlim_resids', xlim_traces)\n xticks_resids = xticks_traces\n yticks_resids = kwargs.get('yticks_resids', make_yticks_0cent(all_fits_df.residual))\n ylim_resids = kwargs.get('ylim_resids', (yticks_resids.min(), yticks_resids.max()))\n \n # Set parameters for decay fit residuals kernel density estimate\n # plot \n xlabel_kde = kwargs.get('xlabel_kde', ylabel_resids)\n ylabel_kde = kwargs.get('ylabel_kde', 'Density')\n xlim_kde = kwargs.get('xlim_kde', ylim_resids)\n ylim_kde = kwargs.get('ylim_kde', None)\n 
xticks_kde = yticks_resids\n # yticks_kde will get set below during \n # density calcuation\n \n # Set parameters used across all plots\n hidden_spines = kwargs.get('hidden_spine', ['top', 'right'])\n labelfontsize = kwargs.get('labelfontsize', 12)\n linewidth = kwargs.get('linewidth', 1)\n linealpha = kwargs.get('linealpha', 1)\n scatteralpha = kwargs.get('scatteralpha', 0.8)\n scattersize = kwargs.get('scattersize', 5)\n \n # Make the residuals scatter plot\n ax = fig.add_subplot(222)\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n ax.scatter(cell_df.x_input, cell_df.residual,\n s=scattersize, alpha=scatteralpha,\n facecolor='white', edgecolor=colors[cell_index])\n\n ax.axhline(0, linewidth=linewidth, alpha=linealpha, color='black')\n for spine in [ax.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n try:\n ax.set_xticks(xticks_resids)\n except:\n pass\n try:\n ax.set_yticks(yticks_resids)\n except:\n pass\n try:\n ax.set_ylim(ylim_resids)\n except:\n pass\n if xlabel_resids:\n ax.set_xlabel(xlabel_resids, fontsize=labelfontsize)\n if ylabel_resids:\n ax.set_ylabel(ylabel_resids, fontsize=labelfontsize) \n\n ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box')\n\n # Scatter plot of traces and line plot of fitted decays\n ax2 = fig.add_subplot(221)\n\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n ax2.plot(cell_df.x_input, cell_df.y_pred_norm/cell_df.y_pred_norm.max(),\n linewidth=linewidth, alpha=linealpha, color=colors[cell_index])\n ax2.scatter(cell_df.x_input, cell_df.y_input_norm/cell_df.y_input_norm.max(),\n s=scattersize, alpha=scatteralpha, color=colors[cell_index])\n\n if ylim_traces:\n ax2.set_ylim(ylim_traces)\n if xlim_traces:\n ax2.set_xlim(xlim_traces) \n try:\n ax2.set_xticks(xticks_traces)\n except:\n pass\n try:\n ax2.set_yticks(yticks_traces)\n except:\n pass \n if xlabel_traces:\n ax2.set_xlabel(xlabel_traces, fontsize=labelfontsize)\n if ylabel_traces:\n ax2.set_ylabel(ylabel_traces, fontsize=labelfontsize) \n\n ax2.set_aspect(1.0/ax2.get_data_ratio(), adjustable='box')\n for spine in [ax2.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n\n # Smoothed hist of residuals for each cell (KDE plot)\n ax3 = fig.add_subplot(223)\n \n densities = []\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n density = gaussian_kde(cell_df.residual)\n xs = np.linspace(all_fits_df.residual.min(),all_fits_df.residual.max(),200)\n ax3.plot(xs,density(xs), color=colors[cell_index],\n alpha=linealpha, linewidth=linewidth)\n densities.append(density(xs))\n\n # Also plot total residuals\n density = gaussian_kde(all_fits_df.residual)\n xs = np.linspace(all_fits_df.residual.min(),all_fits_df.residual.max(),200)\n ax3.plot(xs,density(xs), color='black',\n alpha=linealpha*2, linewidth=linewidth)\n densities.append(density(xs))\n \n # Figure out whech density outuput array has the highest y value and\n # set the yticks of the plot using that density array\n max_dens = np.array([np.max(arr) for arr in densities])\n longest_range_den = densities[max_dens.argmax()]\n yticks_kde = make_ticks(longest_range_den)\n\n if ylim_kde:\n ax3.set_ylim(ylim)\n if xlim_kde:\n ax3.set_xlim(xlim_kde)\n if xlabel_kde:\n ax3.set_xlabel(xlabel_kde, fontsize=labelfontsize)\n if ylabel_kde:\n 
ax3.set_ylabel(ylabel_kde, fontsize=labelfontsize)\n try:\n ax3.set_yticks(yticks_kde)\n except:\n pass\n try:\n ax3.set_xticks(xticks_kde)\n except:\n pass\n\n ax3.set_aspect(1.0/ax3.get_data_ratio(), adjustable='box')\n for spine in [ax3.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n\n if filename and fileformat:\n fig.savefig(f'{filename}{fileformat}', transparent=True)\n print(f'Saved plot at {filename}{fileformat}')", "def plot_image_and_rfs(self, fig=None, ax=None, legend=True, q=None,\n alpha_rf=0.5, cmap=plt.cm.gray_r):\n if fig is None:\n fig, ax = plt.subplots(1, 1)\n\n if q is None:\n dx, dy = 0., 0.\n xr, yr = self.xr, self.yr\n\n else:\n dx = self.xr[q]\n dy = self.yr[q]\n xr, yr = self.xr[0:q], self.yr[0:q]\n\n m = max(max(self.data['XE']), max(self.data['YE']))\n ax.set_xlim([-m, m])\n ax.set_ylim([-m, m])\n\n if self.s_range == 'sym':\n ax.imshow(np.zeros((1, 1)), cmap=cmap, vmin=-0.5, vmax=0.5,\n extent=[-m, m, -m, m])\n\n self.plot_base_image(\n fig, ax, colorbar=False, alpha=1., cmap=cmap, dx=dx, dy=dy)\n\n _plot_rfs(\n ax, self.data['XE'], self.data['YE'], self.data['de'],\n legend, alpha=alpha_rf)\n\n ax.plot(-xr, yr, label='Eye path', c='g')", "def plot(self,displayplt = True,saveplt = False,savepath='',polarplt=True, dbdown = False):\n plt.figure()\n\n #legacy beamprofile data is a 1-D array of the peak negative pressure\n if len(self.hydoutput.shape)<2:\n pnp = self.hydoutput\n else:\n sensitivity = hyd_calibration(self.cfreq)\n pnp = -1*np.min(self.hydoutput,1)/sensitivity\n\n if dbdown:\n pnp = 20.0*np.log10(pnp/np.max(pnp))\n else:\n pnp = pnp*1e-6\n\n figure1 = plt.plot(self.depth, pnp)\n #the latest beamprofile data should be a 2-D array of the hydrophone output\n plt.xlabel('Depth (mm)')\n if dbdown:\n plt.ylabel('Peak Negative Pressure (dB Max)')\n else:\n plt.ylabel('Peak Negative Pressure (MPa)')\n plt.title(self.txdr)\n if displayplt:\n plt.show()\n if saveplt:\n if savepath=='':\n #prompt for a save path using a default filename\n defaultfn = self.txdr+'_'+self.collectiondate+'_'+self.collectiontime+'_depthprofile.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def plot_snapshot(snap):\n\n q = snap['q'][:]\n phi = snap['phi'][:]\n t = snap['t'][()]\n\n fig = plt.figure(figsize=(10.5,5))\n cv = np.linspace(-.2,.2,30)\n cphi = np.linspace(0,4.,30)\n Ew = epsilon_w/muw/2\n Q = (2*np.pi)**-2 * epsilon/(mu**2 / kf**2)\n PHI = np.sqrt(epsilon_w/muw)\n PHI2 = PHI**2\n\n ax = fig.add_subplot(121,aspect=1)\n im1 = plt.contourf(x[:nmax,:nmax]/Lf,y[:nmax,:nmax]/Lf,q[:nmax,:nmax]/Q,cv,\\\n cmin=-0.2,cmax=0.2,extend='both',cmap=cmocean.cm.curl)\n plt.xlabel(r'$x\\, k_f$')\n plt.ylabel(r'$y\\, k_f$')\n\n plt.text(25.25,25.5,r\"$t\\,\\,\\gamma = %3.2f$\" % (t*muw))\n\n cbaxes = fig.add_axes([0.15, 1., 0.3, 0.025]) \n plt.colorbar(im1, cax = cbaxes, ticks=[-.2,-.1,0,.1,.2],orientation='horizontal',label=r'Potential vorticity $[q/Q]$')\n\n ax = fig.add_subplot(122,aspect=1)\n im2 = plt.contourf(x[:nmax,:nmax]/Lf,y[:nmax,:nmax]/Lf,np.abs(phi[:nmax,:nmax])**2/PHI2,cphi,\\\n cmin=0,cmax=4,extend='max',cmap=cmocean.cm.ice_r)\n\n plt.xlabel(r'$x\\, k_f$')\n plt.yticks([])\n\n cbaxes = fig.add_axes([0.575, 1., 0.3, 0.025]) \n plt.colorbar(im2,cax=cbaxes,ticks=[0,1,2,3,4],orientation='horizontal',label=r'Wave action density $[\\mathcal{A}/A]$')\n\n figname = \"figs2movie/qgniw/\"+ fni[-18:-3]+\".png\"\n\n plt.savefig(figname, dpi=80, 
pad_inces=0, bbox_inches='tight')\n\n plt.close(\"all\")", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def show(self):\n fixed_cols = [\"Recovered\", \"Actual\"]\n # Predict the future with curve fitting\n df_list = [\n self.curve_fit(df, num).drop(fixed_cols, axis=1)\n for (num, df) in enumerate(self.subsets)\n ]\n pred_df = pd.concat(df_list, axis=1)\n pred_df[fixed_cols] = self.all_df[fixed_cols]\n if \"1st_phase\" in pred_df.columns:\n phase0_name = \"Initial_phase\"\n else:\n phase0_name = \"Regression\"\n pred_df = pred_df.rename({\"0th_phase\": phase0_name}, axis=1)\n # The list of change points\n day_list = [df.index.min() for df in df_list]\n # Show figure and the list of change points in string\n str_dates = self._show_figure(pred_df, day_list)\n return str_dates", "def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()", "def plot_derivatives(self, show=False):\n\n fig, ax = plt.subplots(4, 2, figsize = (15, 10))\n # plt.subplots_adjust(wspace = 0, hspace = 0.1)\n plt.subplots_adjust(hspace=0.5)\n training_index = np.random.randint(0,self.n_train * self.n_p)\n \n if self.flatten:\n print ('Plotting derivatives... reshaping the flattened data to %s'%str(input_shape))\n # TODO\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting derivatives... 
reshaping the flattened data to power spectra')\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n # Cl has shape (1,10) since it is the data vector for the \n # upper training image for both params\n labels =[r'$θ_1$ ($\\Omega_M$)']\n\n # we loop over them in this plot to assign labels\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 0].plot(ells, Cl[i],label=labels[i])\n else:\n ax[0, 0].loglog(ells, ells*(ells+1)*Cl[i],label=labels[i])\n ax[0, 0].set_title('One upper training example, Cl 0,0')\n ax[0, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n ax[0, 0].set_xscale('log')\n\n ax[0, 0].legend(frameon=False)\n\n if self.flatten:\n # TODO\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 0].plot(ells, Cl[i])\n else:\n ax[1, 0].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 0].set_title('One lower training example, Cl 0,0')\n ax[1, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n ax[1, 0].set_xscale('log')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m\"][training_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p\"][training_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n\n for i in range(Cl_lower.shape[0]):\n ax[2, 0].plot(ells, (Cl_upper[i]-Cl_lower[i]))\n ax[2, 0].set_title('Upper - lower input data: train sample');\n ax[2, 0].set_xlabel(r'$\\ell$')\n ax[2, 0].set_ylabel(r'$C_\\ell (u) - C_\\ell (m) $')\n ax[2, 0].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 0].set_xscale('log')\n\n for i in range(Cl_lower.shape[0]):\n ax[3, 0].plot(ells, (Cl_upper[i]-Cl_lower[i])/(2*delta_theta[i]))\n ax[3, 0].set_title('Numerical derivative: train sample');\n ax[3, 0].set_xlabel(r'$\\ell$')\n ax[3, 0].set_ylabel(r'$\\Delta C_\\ell / 2\\Delta \\theta$')\n ax[3, 0].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[3, 0].set_xscale('log')\n\n test_index = np.random.randint(self.n_p)\n\n if self.flatten:\n # TODO\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 1].plot(ells, Cl[i])\n else:\n ax[0, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[0, 1].set_title('One upper test example Cl 0,0')\n ax[0, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = 
self.data['x_m_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n\n ax[0, 1].set_xscale('log')\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 1].plot(ells, Cl[i])\n else:\n ax[1, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 1].set_title('One lower test example Cl 0,0')\n ax[1, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n \n ax[1, 1].set_xscale('log')\n\n for i in range(Cl_lower.shape[0]):\n ax[2, 1].plot(ells, (Cl_upper[i]-Cl_lower[i]))\n ax[2, 1].set_title('Upper - lower input data: test sample');\n ax[2, 1].set_xlabel(r'$\\ell$')\n ax[2, 1].set_ylabel(r'$C_\\ell (u) - C_\\ell (m) $')\n ax[2, 1].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 1].set_xscale('log')\n\n\n for i in range(Cl_lower.shape[0]):\n ax[3, 1].plot(ells, (Cl_upper[i]-Cl_lower[i])/(2*delta_theta[i]))\n ax[3, 1].set_title('Numerical derivative: train sample');\n ax[3, 1].set_xlabel(r'$\\ell$')\n ax[3, 1].set_ylabel(r'$\\Delta C_\\ell / \\Delta \\theta $')\n ax[3, 1].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[3, 1].set_xscale('log')\n\n plt.savefig(f'{self.figuredir}derivatives_visualization_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def _create_velocity_figure(dataframe, color_key, title, color_mapper,\n legend_loc='top_right', plot_width=None, plot_height=None):\n \n # these markers are nearly indistinguishble\n markers = [marker for marker in MarkerType if marker not in ['circle_cross', 'circle_x']]\n fig = figure(title=title)\n _set_plot_wh(fig, plot_width, plot_height)\n\n for i, (marker, (path, df)) in enumerate(zip(markers, dataframe.iterrows())):\n ds = dict(df)\n source = ColumnDataSource(ds)\n fig.scatter('dpt', 'expr', source=source, color={'field': color_key, 'transform': color_mapper},\n marker=marker, size=10, legend=f'{path}', muted_alpha=0)\n\n fig.xaxis.axis_label = 'dpt'\n fig.yaxis.axis_label = 'expression'\n if legend_loc is not None:\n fig.legend.location = legend_loc\n\n if ds.get('x_test') is not None:\n if ds.get('x_mean') is not None:\n fig.line('x_test', 'x_mean', source=source, muted_alpha=0, legend=path)\n if all(map(lambda val: val is not None, ds.get('x_cov', [None]))):\n x_mean = ds['x_mean']\n x_cov = ds['x_cov']\n band_x = np.append(ds['x_test'][::-1], ds['x_test'])\n # black magic, known only to the most illustrious of wizards\n band_y = np.append((x_mean - np.sqrt(np.diag(x_cov)))[::-1], (x_mean + np.sqrt(np.diag(x_cov))))\n fig.patch(band_x, band_y, alpha=0.1, line_color='black', fill_color='black',\n legend=path, line_dash='dotdash', muted_alpha=0)\n\n if ds.get('x_grad') is not None:\n fig.line('x_test', 'x_grad', source=source, muted_alpha=0)\n\n\n fig.legend.click_policy = 'mute'\n\n return fig", "def 
plot(self):\n\t\tself.plotOfLoopVoltage()", "def plot_frenet_serret(self, fig, ax, frame_number=5, frame_scale=0.10):\n\n # Compute the tangent, normal and binormal unitary vectors\n h = 1e-12\n u = np.linspace(0+h, 1-h, frame_number)\n position = np.real(self.get_value(u))\n tangent = np.real(self.get_tangent(u))\n normal = np.real(self.get_normal(u))\n binormal = np.real(self.get_binormal(u))\n\n # Two dimensions (plane curve)\n if self.ndim == 2:\n\n # Plot the frames of reference\n for k in range(frame_number):\n\n # Plot the tangent vector\n x, y = position[:, k]\n u, v = tangent[:, k]\n ax.quiver(x, y, u, v, color='red', scale=7.5)\n\n # Plot the normal vector\n x, y = position[:, k]\n u, v = normal[:, k]\n ax.quiver(x, y, u, v, color='blue', scale=7.5)\n\n # Plot the origin of the vectors\n x, y = position\n points, = ax.plot(x, y)\n points.set_linestyle(' ')\n points.set_marker('o')\n points.set_markersize(5)\n points.set_markeredgewidth(1.25)\n points.set_markeredgecolor('k')\n points.set_markerfacecolor('w')\n points.set_zorder(4)\n # points.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n\n # Compute a length scale (fraction of the curve arc length)\n scale = frame_scale * self.get_arclength(0, 1)\n\n # Plot the frames of reference\n for k in range(frame_number):\n\n # Plot the tangent vector\n x, y, z = position[:, k]\n u, v, w = tangent[:, k]\n ax.quiver(x, y, z, u, v, w, color='red', length=scale, normalize=True)\n\n # Plot the norma vector\n x, y, z = position[:, k]\n u, v, w = normal[:, k]\n ax.quiver(x, y, z, u, v, w, color='blue', length=scale, normalize=True)\n\n # Plot the binormal vector\n x, y, z = position[:, k]\n u, v, w = binormal[:, k]\n ax.quiver(x, y, z, u, v, w, color='green', length=scale, normalize=True)\n\n # Plot the origin of the vectors\n x, y, z = position\n points, = ax.plot(x, y, z)\n points.set_linestyle(' ')\n points.set_marker('o')\n points.set_markersize(5)\n points.set_markeredgewidth(1.25)\n points.set_markeredgecolor('k')\n points.set_markerfacecolor('w')\n points.set_zorder(4)\n # points.set_label(' ')\n\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax", "def parameter_forecast_plot(model_obj,time_index,start,end,num_samples = 100,cached_samples=None,col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']):\n \n f = plt.figure(figsize = (8,10))\n num_components = len(col_labels)\n gs = gridspec.GridSpec(8+2*num_components,6)\n ax0 = plt.subplot(gs[-8:-6,:])\n ax1 = plt.subplot(gs[-6::,:])\n col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']\n ffbs = model_obj # 120 is French Broad River at Blantyre, NC\n if cached_samples is None:\n samples = ffbs.backward_sample(num_samples=num_samples)\n else: \n samples = cached_samples\n for i in range(7):\n ax_new = plt.subplot(gs[2*i:2*i+2,:])\n\n upper = np.percentile(samples[start:end,i,:],75,axis = 1)\n mid = np.percentile(samples[start:end,i,:],50,axis = 1)\n lower = np.percentile(samples[start:end,i,:],25,axis = 1)\n\n ax_new.plot(time_index[start:end],mid,color='k')\n ax_new.fill_between(time_index[start:end],upper,lower,color='0.8')\n ax_new.tick_params(labelbottom=False,direction='in')\n ax_new.text(0.02, 0.82,col_labels[i],\n horizontalalignment='left',\n verticalalignment='center',transform=ax_new.transAxes)\n\n ax1.plot(time_index[start:end],ffbs.f[start:end],color='k',label='1-step forecast')\n ax1.plot(time_index[start:end],ffbs.Y[start:end],color='k',linestyle='',marker='+',\n 
markersize = 10,label='Observed streamflow')\n\n ax1.fill_between(time_index[start:end],\n np.squeeze(ffbs.f[start:end] + 2*ffbs.Q[start:end,0]),\n np.squeeze(ffbs.f[start:end] - 2*ffbs.Q[start:end,0]),color='0.8',\n label = 'Forecast $\\pm 2V_t$')\n ax1.tick_params(direction='in')\n ax1.legend(loc='upper right',ncol=1,frameon=True)\n #ax1.set_ylabel('Standardized streamflow')\n ax1.set_xlabel('Date',fontsize=16)\n ax1.get_yaxis().set_label_coords(-0.1,0.5)\n ax1.text(0.02, 0.92,'Standardized streamflow',\n horizontalalignment='left',\n verticalalignment='center',transform=ax1.transAxes,)\n ax0.plot(time_index[start:end],ffbs.s[start:end],color='k')\n ax0.text(0.02, 0.82,'$E[V_t]$',\n horizontalalignment='left',\n verticalalignment='center',transform=ax0.transAxes,)\n ax0.get_yaxis().set_label_coords(-0.1,0.5)\n return f,samples", "def _plot_rfs(ax, xe, ye, de, legend, alpha=0.5):\n # ax = plt.axes()\n ax.set_aspect('equal')\n # FIXME: HARD CODED 2x\n r = 0.203 * de\n for i, (x, y) in enumerate(zip(xe, ye)):\n if i == 0:\n label = None # 'One SDev of Neuron RF'\n else:\n label = None\n ax.add_patch(plt.Circle((x, -y), r, color='red', fill=True,\n alpha=alpha, label=label))\n\n if legend:\n plt.legend()\n ax.set_xlabel('x (arcmin)')\n ax.set_ylabel('y (arcmin)')", "def plot_ef2(self,er,cov,n_points):\n \n if er.shape[0] != 2:\n raise ValueError('Plot ef2 can only plot two asset efficient frontier')\n \n \n weights = [np.array([w, 1-w]) for w in np.linspace(0,1,n_points)]\n \n rets = [self.portfolio_returns(w,er) for w in weights]\n \n vols = [self.portfolio_vol(w,cov) for w in weights]\n \n ef = pd.DataFrame({\n 'Returns':rets,\n 'Volatility':vols\n })\n \n return ef.plot.line(x='Volatility',y='Returns',style='.-')", "def get_temp():\n epts = [\"cage_coldPlate_temp\", \"cage_pressure\"]\n # t_earlier_aug = '2019-10-02T00:00'\n # t_later_aug = datetime.utcnow().isoformat()\n t_earlier_aug = '2019-09-27T13:00'\n t_later_aug = '2019-09-28T19:49'\n dfs = pandas_db_query(epts, t_earlier_aug, t_later_aug)\n print(dfs[epts[0]].tail())\n\n exit()\n\n xv = dfs[epts[0]][\"timestamp\"]\n yv = dfs[epts[0]][epts[0]]\n plt.plot(xv, yv, '-b')\n plt.ylabel(epts[0], ha='right', y=1)\n\n p1a = plt.gca().twinx()\n xv = dfs[epts[1]][\"timestamp\"]\n yv = dfs[epts[1]][epts[1]]\n p1a.set_ylabel(epts[1], color='r', ha='right', y=1)\n p1a.tick_params('y', colors='r')\n p1a.semilogy(xv, yv, '-r')\n\n plt.gcf().autofmt_xdate()\n plt.tight_layout()\n plt.show()", "def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n 
cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return", "def plot_ef(self,er,cov,n_points):\n \n weights = self.optimal_weights(n_points,er,cov)\n \n rets = [self.portfolio_returns(w,er) for w in weights]\n \n vols = [self.portfolio_vol(w,cov) for w in weights]\n \n ef = pd.DataFrame({\n 'Returns':rets,\n 'Volatility':vols\n })\n \n return ef.plot.line(x='Volatility',y='Returns',style='.-')", "def show_observables(rf, logical_pops_file='qubit_pop.dat',\n beta_pops_file='beta_pop.dat', exc_file='cavity_excitation.dat',\n pulse1_file='pulse1.dat', pulse2_file='pulse2.dat'):\n fig = plt.figure(figsize=(16,3.5), dpi=70)\n\n qubit_pop = np.genfromtxt(join(rf, logical_pops_file)).transpose()\n beta_pop = np.genfromtxt(join(rf, beta_pops_file)).transpose()\n exc = np.genfromtxt(join(rf, exc_file)).transpose()\n p1 = QDYN.pulse.Pulse.read(join(rf, pulse1_file))\n p2 = QDYN.pulse.Pulse.read(join(rf, pulse2_file))\n\n ax = fig.add_subplot(131)\n tgrid = qubit_pop[0] # microsecond\n ax.plot(tgrid, qubit_pop[1], label=r'00')\n ax.plot(tgrid, qubit_pop[2], label=r'01')\n ax.plot(tgrid, qubit_pop[3], label=r'10')\n ax.plot(tgrid, qubit_pop[4], label=r'11')\n ax.plot(tgrid, beta_pop[1], label=r'0010')\n ax.plot(tgrid, beta_pop[2], label=r'0001')\n analytical_pop = qubit_pop[1] + qubit_pop[2] + qubit_pop[3] \\\n + beta_pop[1] + beta_pop[2]\n ax.plot(tgrid, analytical_pop, label=r'ana. subsp.')\n ax.legend(loc='best', fancybox=True, framealpha=0.5)\n ax.set_xlabel(\"time (microsecond)\")\n ax.set_ylabel(\"population\")\n\n ax = fig.add_subplot(132)\n ax.plot(tgrid, exc[1], label=r'<n> (cav 1)')\n ax.plot(tgrid, exc[2], label=r'<n> (cav 2)')\n ax.plot(tgrid, exc[3], label=r'<L>')\n ax.legend(loc='best', fancybox=True, framealpha=0.5)\n ax.set_xlabel(\"time (microsecond)\")\n ax.set_ylabel(\"cavity excitation\")\n\n ax = fig.add_subplot(133)\n p1.render_pulse(ax, label='pulse 1')\n p2.render_pulse(ax, label='pulse 2')\n ax.legend(loc='best', fancybox=True, framealpha=0.5)\n\n #ax.set_xlim(-4, -1)" ]
[ "0.6313317", "0.6141398", "0.6133596", "0.61213934", "0.58762604", "0.5865189", "0.5831045", "0.5801431", "0.57747483", "0.5743009", "0.57139415", "0.56994927", "0.56850445", "0.5679581", "0.5657056", "0.5632359", "0.56177044", "0.5578176", "0.5577838", "0.55500615", "0.5545676", "0.5531571", "0.55263895", "0.5517456", "0.5490388", "0.54902756", "0.5473974", "0.5468654", "0.5463605", "0.54427654" ]
0.65924156
0
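The objective metadata above marks this as a triplet retrieval example over (query, document, negatives), and each record carries 30 scored negatives. A minimal sketch of expanding one such record into (anchor, positive, negative) training triplets follows; it assumes records are available as plain Python dicts with the field names from the schema, and the toy record shown is illustrative only.

# Sketch: turn one record of this dataset into contrastive training triplets.
from typing import Dict, List, Tuple

def build_triplets(record: Dict) -> List[Tuple[str, str, str]]:
    """Pair the query with its positive document and each mined negative."""
    query = record["query"]
    positive = record["document"]
    negatives = record["negatives"]        # 30 negative code snippets per record
    return [(query, positive, neg) for neg in negatives]

# Toy record with the same field names as the schema (real rows hold full functions).
record = {
    "query": "Plots the free energy change ...",
    "document": "def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf): ...",
    "negatives": ["def data_vis(): ...", "def sysPLQF(mirror, blkFlag=True): ..."],
    "negative_scores": [0.6313317, 0.6141398],
    "document_score": "0.65924156",
    "document_rank": "0",
}

triplets = build_triplets(record)
print(len(triplets))                       # one triplet per negative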
Plots the free energy differences evaluated for each pair of adjacent states for all methods. The layout is approximately 'nb' bars per subplot.
def plotdFvsLambda2(nb=10):
    x = numpy.arange(len(df_allk))
    if len(x) < nb:
        return
    xs = numpy.array_split(x, len(x)/nb+1)
    mnb = max([len(i) for i in xs])
    fig = pl.figure(figsize=(8,6))
    width = 1./(len(P.methods)+1)
    elw = 30*width
    colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030',
              'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339',
              'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
    ndx = 1
    for x in xs:
        lines = tuple()
        ax = pl.subplot(len(xs), 1, ndx)
        for name in P.methods:
            y = [df_allk[i][name]/P.beta_report for i in x]
            ye = [ddf_allk[i][name]/P.beta_report for i in x]
            line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye,
                          lw=0.05*elw,
                          error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
            lines += (line[0],)
        for dir in ['left', 'right', 'top', 'bottom']:
            if dir == 'left':
                ax.yaxis.set_ticks_position(dir)
            else:
                ax.spines[dir].set_color('none')
        pl.yticks(fontsize=10)
        ax.xaxis.set_ticks([])
        for i in x+0.5*width*len(P.methods):
            ax.annotate('$\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0),
                        xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10,
                        textcoords='offset points', va='top', ha='center')
        pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))
        ndx += 1
    leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8),
                    title='$\mathrm{\Delta G\/%s\/}\mathit{vs.}\/\mathrm{lambda\/pair}$' % P.units,
                    fancybox=True)
    leg.get_frame().set_alpha(0.5)
    pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')
    pl.close(fig)
    return
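Like the previous document, this function takes its data from module-level globals: df_allk and ddf_allk are assumed to be per-lambda-pair dictionaries of free energy differences and uncertainties keyed by method name, and P supplies methods, beta_report, units, and output_directory. A hypothetical harness with synthetic values, assuming the function above is defined in the same module, could look like the sketch below; every stub in it is an assumption made for illustration.

# Hypothetical harness: stub the globals plotdFvsLambda2() expects and call it.
import os
import types
import numpy
import matplotlib
matplotlib.use('Agg')                      # render off-screen
import matplotlib.pyplot as pl
from matplotlib.font_manager import FontProperties as FP  # assumed meaning of `FP`

methods = ['TI', 'BAR', 'MBAR']            # subset of the keys in the colors dict
P = types.SimpleNamespace(methods=methods, beta_report=1.0,
                          units='(kcal/mol)', output_directory='.')

# Synthetic per-lambda-pair free energies: 12 adjacent-state pairs,
# one (dF, ddF) entry per method for each pair.
n_pairs = 12
df_allk  = [{m: 0.5 + 0.1*i for m in methods} for i in range(n_pairs)]
ddf_allk = [{m: 0.05 for m in methods} for i in range(n_pairs)]

plotdFvsLambda2(nb=10)                     # writes dF_state.pdf, 12 pairs split over two subplots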
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n 
plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def plot_variables(self, n, show=False, diagnostics=False):\n\n if diagnostics:\n fig, ax = plt.subplots(5, 1, sharex = True, figsize = (10, 10))\n else:\n fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10))\n\n plt.subplots_adjust(hspace = 0)\n end = len(n.history[\"det F\"])\n epochs = np.arange(end)\n a, = ax[0].plot(epochs, n.history[\"det F\"], label = 'Training data')\n b, = ax[0].plot(epochs, n.history[\"det test F\"], label = 'Test data')\n # ax[0].axhline(y=5,ls='--',color='k')\n ax[0].legend(frameon = False)\n ax[0].set_ylabel(r'$|{\\bf F}_{\\alpha\\beta}|$')\n ax[0].set_title('Final Fisher info on test data: %.3f'%n.history[\"det test F\"][-1])\n ax[1].plot(epochs, n.history[\"loss\"])\n ax[1].plot(epochs, n.history[\"test loss\"])\n # ax[1].set_xlabel('Number of epochs')\n ax[1].set_ylabel(r'$\\Lambda$')\n ax[1].set_xlim([0, len(epochs)]);\n \n if diagnostics:\n ax[2].plot(epochs, n.history[\"det C\"])\n ax[2].plot(epochs, n.history[\"det test C\"])\n # ax[2].set_xlabel('Number of epochs')\n ax[2].set_ylabel(r'$|{\\bf C}|$')\n ax[2].set_xlim([0, len(epochs)]);\n \n # Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, 
np.array(n.history[\"dμdθ\"])[:,0,0]\n , color = 'C0', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,1]\n , color = 'C0', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n \"\"\"\n\n # Test Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,0]\n , color = 'C1', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Test Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,1]\n , color = 'C1', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n ax[3].legend(frameon=False)\n \"\"\"\n\n ax[3].set_ylabel(r'$\\partial\\mu/\\partial\\theta$')\n # ax[3].set_xlabel('Number of epochs')\n ax[3].set_xlim([0, len(epochs)])\n\n # Mean of network output summary 1\n ax[4].plot(epochs, np.array(n.history[\"μ\"])[:,0],alpha=0.5)\n # Mean of test output network summary 1\n ax[4].plot(epochs, np.array(n.history[\"test μ\"])[:,0],alpha=0.5)\n ax[4].set_ylabel('μ')\n ax[4].set_xlabel('Number of epochs')\n ax[4].set_xlim([0, len(epochs)])\n \n\n print ('Maximum Fisher info on train data:',np.max(n.history[\"det F\"]))\n print ('Final Fisher info on train data:',(n.history[\"det F\"][-1]))\n \n print ('Maximum Fisher info on test data:',np.max(n.history[\"det test F\"]))\n print ('Final Fisher info on test data:',(n.history[\"det test F\"][-1]))\n\n if np.max(n.history[\"det test F\"]) == n.history[\"det test F\"][-1]:\n print ('Promising network found, possibly more epochs needed')\n\n plt.tight_layout()\n plt.savefig(f'{self.figuredir}variables_vs_epochs_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def plot_derivatives_divided(self, show=False):\n\n fig, ax = plt.subplots(3, 2, figsize = (15, 10))\n # plt.subplots_adjust(wspace = 0, hspace = 0.1)\n plt.subplots_adjust(hspace=0.5)\n training_index = np.random.randint(self.n_train * self.n_p)\n \n if self.flatten:\n print ('Plotting derivatives... reshaping the flattened data to %s'%str(input_shape))\n # TODO\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting derivatives... 
reshaping the flattened data to power spectra')\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n # Cl has shape (1,10) since it is the data vector for the \n # upper training image for both params\n labels =[r'$θ_1$ ($\\Omega_M$)']\n\n # we loop over them in this plot to assign labels\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 0].plot(ells, Cl[i],label=labels[i])\n else:\n ax[0, 0].loglog(ells, ells*(ells+1)*Cl[i],label=labels[i])\n ax[0, 0].set_title('One upper training example, Cl 0,0')\n ax[0, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n ax[0, 0].legend(frameon=False)\n\n if self.flatten:\n # TODO\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 0].plot(ells, Cl[i])\n else:\n ax[1, 0].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 0].set_title('One lower training example, Cl 0,0')\n ax[1, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m\"][training_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p\"][training_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n\n for i in range(Cl_lower.shape[0]):\n ax[2, 0].plot(ells, (Cl_upper[i]-Cl_lower[i])/self.Cl_noiseless)\n ax[2, 0].set_title('Difference between upper and lower training examples');\n ax[2, 0].set_xlabel(r'$\\ell$')\n ax[2, 0].set_ylabel(r'$\\Delta C_\\ell$ / $C_{\\ell,thr}$')\n ax[2, 0].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 0].set_xscale('log')\n\n # also plot sigma_cl / CL\n sigma_cl = np.sqrt(self.covariance)\n ax[2, 0].plot(ells, sigma_cl/self.Cl_noiseless, label=r'$\\sigma_{Cl} / C_{\\ell,thr}$')\n ax[2, 0].legend(frameon=False)\n\n test_index = np.random.randint(self.n_p)\n\n if self.flatten:\n # TODO\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 1].plot(ells, Cl[i])\n else:\n ax[0, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[0, 1].set_title('One upper test example Cl 0,0')\n ax[0, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) 
autocorrelation bin\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 1].plot(ells, Cl[i])\n else:\n ax[1, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 1].set_title('One lower test example Cl 0,0')\n ax[1, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n \n for i in range(Cl_lower.shape[0]):\n ax[2, 1].plot(ells, (Cl_upper[i]-Cl_lower[i]) / self.Cl_noiseless)\n ax[2, 1].set_title('Difference between upper and lower test samples');\n ax[2, 1].set_xlabel(r'$\\ell$')\n ax[2, 1].set_ylabel(r'$\\Delta C_\\ell$ / $C_{\\ell,thr}$')\n ax[2, 1].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 1].set_xscale('log')\n\n # also plot sigma_cl / CL\n sigma_cl = np.sqrt(self.covariance)\n ax[2, 1].plot(ells, sigma_cl/self.Cl_noiseless, label=r'$\\sigma_{Cl} / C_{\\ell,thr}$')\n\n plt.savefig(f'{self.figuredir}derivatives_visualization_divided_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def main():\n # Load properties that will be needed\n store = [Storage.Storage(2), Storage.Storage(4)] \n pre_energy = [s.get(\"free_energy\") for s in store]\n post_energy = [s.get(\"post_energy\") for s in store]\n x_range = store[0].get(\"x_range\")\n xlocs = np.arange(x_range[0], x_range[1], x_range[2])\n y_range = store[0].get(\"y_range\")\n ylocs = np.arange(y_range[0], y_range[1], y_range[2])\n # Calculate step size\n xb2steps = stepsize(pre_energy[0], post_energy[0], xlocs) \n xb4steps = stepsize(pre_energy[1], post_energy[1], xlocs) \n # Set up the figure\n fig = plt.figure(1, figsize=(7.5,2.5)) \n axe = (fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2))\n # Plot the results\n axe[0].plot(ylocs, xb4steps, color='#FF466F', lw=4)\n axe[1].plot(ylocs, xb2steps, color='#76D753', lw=4)\n # Annotate the plots\n axe[0].set_title(\"4sXB step size\")\n axe[0].set_xlabel(\"Lattice spacing (nm)\") \n axe[0].set_ylabel(\"Step size (nm)\")\n axe[0].set_xlim((25.5, 39))\n axe[0].set_ylim((1, 8))\n axe[1].set_title(\"2sXB step size\")\n axe[1].set_xlabel(\"Lattice spacing (nm)\") \n axe[1].set_ylabel(\"Step size (nm)\")\n axe[1].set_xlim((25.5, 39))\n axe[1].set_ylim((1, 8))\n # Display the plots\n fig.subplots_adjust(wspace=0.25, hspace=0.48,\n left=0.08, right=0.98,\n top=0.85, bottom=0.21)\n plt.show()", "def _plot_comparison(xs, pan, other_program_name, **kw):\n\n pans = ['Bmax', 'Emax']\n units = ['(mG)', '(kV/m)']\n title_app = [', Max Magnetic Field', ', Max Electric Field']\n save_suf = ['-%s-comparison-Bmax' % other_program_name,\n '-%s-comparison-Emax' % other_program_name]\n\n for p,u,t,s in zip(pans, units, title_app, save_suf):\n #figure object and axes\n fig = plt.figure()\n ax_abs = fig.add_subplot(2,1,1)\n ax_per = ax_abs.twinx()\n ax_mag = fig.add_subplot(2,1,2)\n #Bmax\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n _plot_comparison_repeatables(ax_abs, ax_per, ax_mag, pan, p, u,\n other_program_name, **kw)\n _plot_wires(ax_mag, xs.hot, xs.gnd, 
pan['emf.fields-results'][p], **kw)\n _check_und_conds([xs], [ax_mag], **kw)\n ax_abs.set_title('Absolute and Percent Difference' + t)\n ax_mag.set_ylabel(p + ' ' + u)\n ax_mag.set_title('Model Results' + t)\n ax_mag.legend(kw['H'], kw['L'], **_leg_kw)\n _color_twin_axes(ax_abs, mpl.rcParams['axes.labelcolor'], ax_per, 'firebrick')\n _format_line_axes_legends(ax_abs, ax_per, ax_mag)\n #_format_twin_axes(ax_abs, ax_per)\n _save_fig(xs.sheet + s, fig, **kw)", "def plot(self, nsteps_max=10):\r\n fig = plt.figure()\r\n ax1 = plt.subplot(221)\r\n ax2 = plt.subplot(222)\r\n ax3 = plt.subplot(224)\r\n\r\n if 'fig' in locals(): # assures tight layout even when plot is manually resized\r\n def onresize(event): plt.tight_layout()\r\n try: cid = fig.canvas.mpl_connect('resize_event', onresize) # tighten layout on resize event\r\n except: pass\r\n\r\n self.plot_px_convergence(nsteps_max=nsteps_max, ax=ax1)\r\n\r\n if getattr(self.px_spec, 'ref_tree', None) is None:\r\n self.calc_px(method='LT', nsteps=nsteps_max, keep_hist=True)\r\n\r\n self.plot_bt(bt=self.px_spec.ref_tree, ax=ax2, title='Binary tree of stock prices; ' + self.specs)\r\n self.plot_bt(bt=self.px_spec.opt_tree, ax=ax3, title='Binary tree of option prices; ' + self.specs)\r\n # fig, ax = plt.subplots()\r\n # def onresize(event): fig.tight_layout()\r\n # cid = fig.canvas.mpl_connect('resize_event', onresize) # tighten layout on resize event\r\n # self.plot_px_convergence(nsteps_max=nsteps_max, ax=ax)\r\n\r\n try: plt.tight_layout()\r\n except: pass\r\n plt.show()", "def plot_2nd(self, mod = 'F'):\n if not mpl: raise \"Problem with matplotib: Plotting not possible.\"\n f = plt.figure(figsize=(5,4), dpi=100)\n \n A2 = []\n \n strainList= self.__structures.items()[0][1].strainList\n \n if len(strainList)<=5:\n kk=1\n ll=len(strainList)\n grid=[ll]\n elif len(strainList)%5 == 0:\n kk=len(strainList)/5\n ll=5\n grid=[5 for i in range(kk)]\n else:\n kk=len(strainList)/5+1\n ll=5\n grid=[5 for i in range(kk)]\n grid[-1]=len(strainList)%5\n \n \n n=1\n m=1\n for stype in strainList:\n atoms = self.get_atomsByStraintype(stype)\n self.__V0 = atoms[0].V0\n strainList = atoms[0].strainList\n if self.__thermodyn and mod == 'F':\n energy = [i.gsenergy+i.phenergy[-1] for i in atoms]\n elif self.__thermodyn and mod=='E0':\n energy = [i.gsenergy for i in atoms]\n elif self.__thermodyn and mod=='Fvib':\n energy = [i.phenergy[-1] for i in atoms]\n else:\n energy = [i.gsenergy for i in atoms]\n \n strain = [i.eta for i in atoms]\n \n spl = '1'+str(len(strainList))+str(n)\n #plt.subplot(int(spl))\n #a = f.add_subplot(int(spl))\n if (n-1)%5==0: m=0\n \n \n a = plt.subplot2grid((kk,ll), ((n-1)/5,m), colspan=1)\n #print (kk,ll), ((n-1)/5,m)\n j = 0\n for i in [2,4,6]:\n ans = Energy()\n ans.energy = energy\n ans.strain = strain\n ans.V0 = self.__V0\n \n fitorder = i\n ans.set_2nd(fitorder)\n A2.append(ans.get_2nd())\n \n strains = sorted(map(float,A2[j+3*(n-1)].keys()))\n \n try:\n dE = [A2[j+3*(n-1)][str(s)] for s in strains]\n except:\n continue\n a.plot(strains, dE, label=str(fitorder))\n a.set_title(stype)\n a.set_xlabel('strain')\n a.set_ylabel(r'$\\frac{d^2E}{d\\epsilon^2}$ in eV')\n \n j+=1\n \n n+=1\n m+=1\n \n a.legend(title='Order of fit')\n return f", "def test_run_beta_diversity_through_plots(self):\r\n run_beta_diversity_through_plots(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n parallel=False,\r\n 
status_update_callback=no_status_updates)\r\n\r\n unweighted_unifrac_dm_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_dm.txt')\r\n weighted_unifrac_dm_fp = join(self.test_out, 'weighted_unifrac_dm.txt')\r\n unweighted_unifrac_pc_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_pc.txt')\r\n weighted_unifrac_pc_fp = join(self.test_out, 'weighted_unifrac_pc.txt')\r\n weighted_unifrac_html_fp = join(self.test_out,\r\n 'weighted_unifrac_emperor_pcoa_plot', 'index.html')\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(unweighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def n27_and_sidebands():\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5, 4))\n # n=26 through n=29\n folder = os.path.join(\"..\", \"..\", \"2018-09-06\")\n fname = \"1_dye_fscan.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n ax.axhline(0, color='grey')\n data.plot(x='fpoly', y='sig', label=\"MW Off\", c='k', ax=ax)\n # sidebands\n folder = os.path.join(\"..\", \"..\", \"2018-09-09\")\n fname = \"1_freq_dye.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['asig'] = data['sig'] - 0.3\n ax.axhline(-0.3, color='grey')\n data.plot(x='fpoly', y='asig', label=\"MW On\", c='k', ax=ax)\n # pretty figure\n ax.legend().remove()\n ax.set_ylabel(r\"$e^-$ Signal\")\n ax.set_yticks([])\n ax.set_xlabel(\"Frequency (GHz from Limit)\")\n ax.set_xticks([-4863, -4511, -4195, -3908])\n ax.text(-4400, -0.15, \"MW On\")\n ax.text(-4400, 0.3, \"MW Off\")\n # save\n fig.tight_layout()\n fig.savefig(\"n27_and_sidebands.pdf\")\n return", "def plot_balance_list(balance_list, b_scale='linear', progress = False, n_lims = None):\n if n_lims == None:\n n_min = 0\n n_max=len(balance_list)\n else:\n n_min = n_lims[0]\n n_max = n_lims[1]\n ncols=2\n nrows=int(np.ceil((n_max-n_min)/ncols))\n _, axes = plt.subplots(nrows, ncols, figsize=(22, 5*nrows))\n for n in range(n_min, n_max):\n ni = n-n_min \n i = int(np.floor(ni / ncols))\n j=ni % ncols\n r_p = []\n if progress: \n for k in range(len(balance_list[n])-1):\n r_p.append(np.abs(balance_list[n][k]-balance_list[n][k+1])/balance_list[n][k])\n\n axes[i,j].plot(balance_list[n], label='balance', color='b')#we put [1:] because want not to show drop in the beginning: TODO: understand fully and explain\n axes[i,j].set_title('Outer loop # '+ str(n))\n # axes[i,j].set_yscale('log')\n # axes[i,j].set_xscale('log')\n axes[i,j].grid(True)\n axes[i,j].set_ylabel('balance norm')\n 
axes[i,j].legend()\n axes[i,j].set_yscale(b_scale)\n if progress: \n ax_2=axes[i,j].twinx()\n ax_2.plot(r_p, 'g')\n ax_2.set_yscale('log')\n ax_2.set_ylabel('relative change')", "def vis_difference(self):\n print(self.init_vec)\n\n init = self.init_output.numpy()\n\n alphas = np.linspace(0, 1, 20)\n for i, alpha in enumerate(alphas):\n\n display.clear_output(wait=True)\n norm = [torch.linalg.norm(torch.tensor(\n self.init_vec + alpha*self.eigen[i]), axis=1).detach().numpy() for i in range(2)]\n\n diff = np.array([self.compute_difference(\n alpha, self.eigen[i]) for i in range(2)])\n\n fig = plt.figure(figsize=(14, 12), tight_layout=True)\n fig.suptitle(\"Latent direction variation\", fontsize=20)\n gs = gridspec.GridSpec(2, 2)\n\n ax_temp = plt.subplot(gs[0, :])\n ax_temp.scatter(\n init[:, 0], init[:, 1])\n ax_temp.set_title(\"Initial Dataset\")\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n for j in range(2):\n ax_temp = plt.subplot(gs[1, j])\n sc = ax_temp.quiver(\n init[:, 0], init[:, 1], diff[j, :, 0], diff[j, :, 1], norm[j])\n sc.set_clim(np.min(norm[j]), np.max(norm[j]))\n plt.colorbar(sc)\n ax_temp.set_title(\n \"Direction: {}, alpha: {}\".format(j+1, alpha))\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n plt.savefig(\"frames_dir/fig_{}\".format(i))\n plt.show()", "def plotdFvsLambda1():\n x = numpy.arange(len(df_allk))\n if x[-1]<8:\n fig = pl.figure(figsize = (8,6))\n else:\n fig = pl.figure(figsize = (len(x),6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n lines = tuple()\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.1*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n pl.xlabel('States', fontsize=12, color='#151B54')\n pl.ylabel('$\\Delta G$ '+P.units, fontsize=12, color='#151B54')\n pl.xticks(x+0.5*width*len(P.methods), tuple(['%d--%d' % (i, i+1) for i in x]), fontsize=8)\n pl.yticks(fontsize=8)\n pl.xlim(x[0], x[-1]+len(lines)*width)\n ax = pl.gca()\n for dir in ['right', 'top', 'bottom']:\n ax.spines[dir].set_color('none')\n ax.yaxis.set_ticks_position('left')\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n\n leg = pl.legend(lines, tuple(P.methods), loc=3, ncol=2, prop=FP(size=10), fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.title('The free energy change breakdown', fontsize = 12)\n pl.savefig(os.path.join(P.output_directory, 'dF_state_long.pdf'), bbox_inches='tight')\n pl.close(fig)\n return", "def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, 
label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()", "def plot_bs(self, show=False, density=True, pcolor=\"r\", mcolor=\"b\", lw=1.0, subtract = 0):\n cm = matplotlib.cm.jet\n\n if (subtract !=0):\n geqbispectra = subtract\n else:\n geqbispectra = np.zeros(np.shape(self.eqbispectra))\n\n if (density):\n \"\"\" also read the local overdensity value and plot line colors according to\n the density value, + = red, - = blue; adjust alpha accordingly\n \"\"\"\n if len(self.ds)<self.Nsubs:\n print (\"no density data\")\n return 0\n\n ads=np.abs(self.ds)\n meands=np.mean(self.ds)\n mads=np.max(ads)\n normds=np.array([ads[i]/mads for i in range(len(ads))])\n self.normds=normds\n\n cNorm = colors.Normalize(min(self.ds), vmax=max(self.ds))\n scalarMap = cmap.ScalarMappable(norm=cNorm, cmap=cm)\n scalarMap.set_array([])\n\n fig, ax = self.plt.subplots()\n\n for sub in range(self.Nsubs):\n #print sub\n if not(density):\n lplot=ax.plot(self.klist, self.fNLeq[sub])\n else:\n colorVal = scalarMap.to_rgba(self.ds[sub])\n lplot = ax.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=colorVal, alpha=normds[sub], linewidth=lw)\n \"\"\"\n if self.ds[sub]>meands:\n self.plt.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=pcolor, alpha=normds[sub], linewidth=lw)\n else:\n self.plt.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=mcolor, alpha=normds[sub], linewidth=lw)\n \"\"\"\n\n ax.set_xlabel(r\"$k {\\rm (h/Mpc)}$\")\n ax.set_ylabel(r\"${\\rm Q}(k)$\")\n ax.set_xscale('log')\n cbar = fig.colorbar(scalarMap, format='%.0e')\n #self.plt.yscale('log')\n if (show):\n self.plt.show()", "def plot_explorer_panels(self, param_val, photonnumber, initial_index, final_index, qbt_index, osc_index):\n def fig_ax(index):\n return fig, axes_list_flattened[index]\n\n param_index = np.searchsorted(self.param_vals, param_val)\n param_val = self.param_vals[param_index]\n\n initial_bare = self.sweep.lookup.bare_index(initial_index, param_index)\n final_bare = self.sweep.lookup.bare_index(final_index, param_index)\n energy_ground = self.sweep.lookup.energy_dressed_index(0, param_index)\n energy_initial = self.sweep.lookup.energy_dressed_index(initial_index, param_index) - energy_ground\n energy_final = self.sweep.lookup.energy_dressed_index(final_index, param_index) - energy_ground\n qbt_subsys = self.sweep.hilbertspace[qbt_index]\n\n nrows = 3\n ncols = 2\n fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)\n axes_list_flattened = [elem for sublist in axs for elem in sublist]\n\n # Panel 1 ----------------------------------\n panels.display_bare_spectrum(self.sweep, qbt_subsys, param_val, fig_ax(0))\n\n # Panels 2 and 6----------------------------\n if type(qbt_subsys).__name__ 
in ['Transmon', 'Fluxonium']: # do not plot wavefunctions if multi-dimensional\n panels.display_bare_wavefunctions(self.sweep, qbt_subsys, param_val, fig_ax(1))\n panels.display_charge_matrixelems(self.sweep, initial_bare, qbt_subsys, param_val, fig_ax(5))\n\n # Panel 3 ----------------------------------\n panels.display_dressed_spectrum(self.sweep, initial_bare, final_bare, energy_initial, energy_final, param_val,\n fig_ax(2))\n\n # Panel 4 ----------------------------------\n panels.display_n_photon_qubit_transitions(self.sweep, photonnumber, initial_bare, param_val, fig_ax(3))\n\n # Panel 5 ----------------------------------\n panels.display_chi_01(self.sweep, qbt_index, osc_index, param_index, fig_ax(4))\n\n fig.tight_layout()\n return fig, axs", "def test_run_beta_diversity_through_plots_even_sampling(self):\r\n\r\n run_beta_diversity_through_plots(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n sampling_depth=20,\r\n tree_fp=self.test_data['tree'][0],\r\n parallel=False,\r\n status_update_callback=no_status_updates)\r\n\r\n unweighted_unifrac_dm_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_dm.txt')\r\n weighted_unifrac_dm_fp = join(self.test_out, 'weighted_unifrac_dm.txt')\r\n unweighted_unifrac_pc_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_pc.txt')\r\n weighted_unifrac_pc_fp = join(self.test_out, 'weighted_unifrac_pc.txt')\r\n weighted_unifrac_html_fp = join(self.test_out,\r\n 'weighted_unifrac_emperor_pcoa_plot', 'index.html')\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(unweighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] - overscan[i, 15] +\n 500 * (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 0):\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] - overscan[i, 7] +\n 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', 
ncol=4)\n if(x != 2):\n ax1.set_yticklabels([])\n if(x == 2):\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n #\tax1.set_title('S-'+f[7:9]+' (seg 0-7)')\n\n fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')\n plt.close(fig)", "def plot_bus_load(self):\n stops = {key: 0 for key, _ in self.route.timetable().items()}\n for passenger in self.passengers:\n trip = self.passenger_trip(passenger)\n stops[trip[0][1]] += 1\n stops[trip[1][1]] -= 1\n prev = None\n for i, stop in enumerate(stops):\n if i > 0:\n stops[stop] += stops[prev]\n prev = stop\n fig, ax = plt.subplots()\n ax.step(range(len(stops)), list(stops.values()), where=\"post\")\n ax.set_xticks(range(len(stops)))\n ax.set_xticklabels(list(stops.keys()))\n return fig, ax", "def plot(self):\n # plot the data for checking\n fig, [[ax1,ax2],[ax3,ax4], [ax5,ax6]] = plt.subplots(\n 3,2, figsize=(10,8))\n\n # Relative height\n self.board_reference.plot(\n column='z_reference', cmap='GnBu_r', legend=True, ax=ax1)\n self.board_intervention.plot(\n column='z_reference', cmap='GnBu_r', legend=True, ax=ax2)\n\n # Landuse\n self.board_reference.plot(\n column='landuse', legend=True, ax=ax3, cmap='viridis',\n scheme='equal_interval', k=11)\n self.board_intervention.plot(\n column='landuse', legend=True, ax=ax4, cmap='viridis',\n scheme='equal_interval', k=11)\n\n index = np.arange(7)\n xticks = self.PotTax_reference.index.values\n bar_width = 0.3\n\n # plot the initial and new situation comparison\n label = (\"reference: \" +\n str(round(self.PotTax_reference.sum().TFI, 2)))\n reference = ax5.bar(\n index, self.PotTax_reference.values.flatten(), bar_width,\n label=label, tick_label=xticks)\n label = (\"intervention: \" +\n str(round(self.PotTax_intervention.sum().TFI, 2)))\n intervention = ax5.bar(\n index+bar_width, self.PotTax_intervention.values.flatten(),\n bar_width, label=label, tick_label=xticks)\n ax5.set_ylabel(\"total value\")\n ax5.legend(loc='best')\n for tick in ax5.get_xticklabels():\n tick.set_rotation(90)\n\n # plot the percentage increase/decrease between the initial and new\n # situation\n data = self.PotTax_percentage.values.flatten()\n percentage = ax6.bar(\n index, data, bar_width, label=\"percentage\", tick_label=xticks)\n ax6.set_ylabel(\"increase (%)\")\n minimum = min(data)\n maximum = max(data)\n size = len(str(int(round(maximum))))\n maximum = int(str(maximum)[:1])\n maximum = (maximum + 1) * (10**(size-1))\n ax6.set_ylim([min(0, minimum), maximum])\n for tick in ax6.get_xticklabels():\n tick.set_rotation(90)", "def plot_balance_list_output(balance_list, path, nframes=4):\n import matplotlib\n font = {'size' : 15, 'weight': 'normal'}\n matplotlib.rc('font', **font)\n\n nplots=len(balance_list)\n ncols=2\n nrows=int(np.ceil(nframes/ncols))\n _, axes = plt.subplots(nrows, ncols, figsize=(25, 5*nrows))\n for n in range(nframes):\n i = int(np.floor(n / ncols))\n j=n % ncols\n r_p = []\n for k in range(len(balance_list[n])-1):\n r_p.append(np.abs(balance_list[n][k]-balance_list[n][k+1])/balance_list[n][k])\n\n axes[i,j].plot(balance_list[n], label='balance', color='b')\n axes[i,j].set_title('Outer loop # '+ str(n))\n # axes[i,j].set_yscale('log')\n # axes[i,j].set_xscale('log')\n axes[i,j].grid(True)\n axes[i,j].set_ylabel('Balance norm')\n # axes[i,j].legend()\n axes[i,j].set_ylim([0, 10])\n # ax_2=axes[i,j].twinx()\n # ax_2.plot(r_p, 'g')\n # 
ax_2.set_yscale('log')\n # ax_2.set_ylabel('relative change')\n\n plt.savefig(path,transparent=True, dpi=400)", "def test_run_beta_diversity_through_plots_parallel(self):\r\n run_beta_diversity_through_plots(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n parallel=True,\r\n status_update_callback=no_status_updates)\r\n\r\n unweighted_unifrac_dm_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_dm.txt')\r\n weighted_unifrac_dm_fp = join(self.test_out, 'weighted_unifrac_dm.txt')\r\n unweighted_unifrac_pc_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_pc.txt')\r\n weighted_unifrac_pc_fp = join(self.test_out, 'weighted_unifrac_pc.txt')\r\n weighted_unifrac_html_fp = join(self.test_out,\r\n 'weighted_unifrac_emperor_pcoa_plot', 'index.html')\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(unweighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)", "def plotResults(results):\n e = results['eclean'] - results['eCTI']\n e1 = results['e1clean'] - results['e1CTI']\n e2 = results['e2clean'] - results['e2CTI']\n\n print 'Delta e, e_1, e_2:', np.mean(e), np.mean(e1), np.mean(e2)\n print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(e, bins=15, label='$e$', alpha=0.5)\n ax.hist(e1, bins=15, label='$e_{2}$', alpha=0.5)\n ax.hist(e2, bins=15, label='$e_{1}$', alpha=0.5)\n ax.set_xlabel(r'$\\delta e$ [no CTI - CDM03 corrected]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig('ellipticityDelta.pdf')\n plt.close()\n\n r2 = (results['R2clean'] - results['R2CTI'])/results['R2clean']\n print 'delta R2 / R2: mean, std ', np.mean(r2), np.std(r2)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(r2, bins=15, label='$R^{2}$')\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [no CTI - CDM03 corrected]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig('sizeDelta.pdf')\n plt.close()", "def main():\n Nrep = 8 # number of repetition of EM steps\n nm = 3 # number of mixed gaussians.\n ns = 300 # number of samples.\n \n mu, sg, lm, lm_ind, smp, L_true = generate_synthetic_data(nm, ns)\n plt.figure(1, figsize=(5,4))\n plt.clf()\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n \n mue, sge, lme = generate_initial_state(nm, ns)\n axi = 0 # subplot number\n plt.figure(2, figsize=(12,9))\n plt.clf()\n for rep in range(Nrep):\n # 
E-step\n r, L_infer = e_step(smp, mue, sge, lme, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('E-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # M-step\n mue, sge, lme = m_step(smp, r, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('M-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # plot the ground truth for comparison\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n ax.set_title('grn_truth')\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n print('L_infer = %2.6f , L_true = %2.6f' % (L_infer, L_true))", "def overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n 
self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS 
OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind = 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n 
scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return", "def plotResultsComparison(monthlyData1, monthlyData2, indices, arg):\n \n energyType = arg[0] \n \n dummyRange = np.asarray(range(len(indices['E_tot1'])))\n \n fig = plt.figure(figsize=(16, 8))\n \n# plt.suptitle('Heating Demand (COP=' + str(usedEfficiencies['H_COP']) + ')')\n if energyType == 'PV':\n multiplier = -1\n else:\n multiplier = 1\n \n ax1 = plt.subplot(2,1,1)\n \n plt.plot(multiplier*monthlyData1[energyType][indices['E_tot1'], dummyRange], label = 'Results1', color='b')\n plt.plot(multiplier*monthlyData2[energyType][indices['E_tot2'], dummyRange], label = 'Results2', color='g')\n \n plt.ylabel('Energy [kWh]')\n plt.legend()\n \n majorLocator = MultipleLocator(24)\n majorFormatter = FormatStrFormatter('%d')\n minorLocator = MultipleLocator(24)\n minorFormatter = FormatStrFormatter('%d')\n\n ax1.xaxis.set_major_locator(majorLocator)\n ax1.xaxis.set_major_formatter(majorFormatter)\n ax1.xaxis.set_minor_locator(minorLocator)\n# ax1.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which='both')\n \n ax2 = plt.subplot(2,1,2, sharex=ax1)\n \n plt.plot(multiplier*monthlyData1[energyType][indices['E_tot1'], dummyRange]-multiplier*monthlyData2[energyType][indices['E_tot2'], dummyRange], label = '1-2', color='b')\n\n plt.ylabel('Energy Difference [kWh]')\n plt.legend()\n\n ax2.xaxis.set_major_locator(majorLocator)\n ax2.xaxis.set_major_formatter(majorFormatter)\n ax2.xaxis.set_minor_locator(minorLocator)\n# ax2.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which='both')\n \n return fig", "def plot_all(show=True):\n fig, axes = plt.subplots(max_iterations, 1, figsize=(6, 12))\n for t in range(max_iterations):\n with open('results/%s/df_%d.pkl' % (id, t), 'rb') as f:\n df = pickle.load(f)\n with open('results/%s/w_%d.pkl' % (id, t), 'rb') as f:\n w = pickle.load(f)\n axes[t].hist2d(x=df['vision'], y=df['metab'], weights=w, density=True,\n bins=((xticks, yticks)), cmap='magma')\n axes[t].set_ylabel('max metabolism')\n axes[t].set_xticks(vision_domain)\n axes[t].set_yticks((2, 3, 4))\n axes[3].set_xlabel('max vision')\n fig.tight_layout()\n if show:\n plt.show()\n else:\n plt.savefig('results/%s/abc_results.pdf' % id)", "def plot(self, noTLS, path_plots, interactive):\n fig = plt.figure(figsize=(10,12))\n ax1 = fig.add_subplot(4, 1, 1)\n ax2 = fig.add_subplot(4, 1, 2)\n ax3 = fig.add_subplot(4, 2, 5)\n ax4 = fig.add_subplot(4, 2, 6)\n ax5 = fig.add_subplot(4, 2, 7)\n ax6 = fig.add_subplot(4, 2, 8)\n\n # First panel: data from each sector\n colors = self._get_colors(self.nlc)\n for i, lci in enumerate(self.alllc):\n p = lci.normalize().remove_outliers(sigma_lower=5.0, sigma_upper=5.0)\n p.bin(5).scatter(ax=ax1, label='Sector %d' % self.sectors[i], color=colors[i])\n self.trend.plot(ax=ax1, color='orange', lw=2, label='Trend')\n ax1.legend(fontsize='small', ncol=4)\n\n # Second panel: Detrended light curve\n self.lc.remove_outliers(sigma_lower=5.0, sigma_upper=5.0).bin(5).scatter(ax=ax2,\n color='black',\n label='Detrended')\n\n # Third panel: BLS\n self.BLS.bls.plot(ax=ax3, label='_no_legend_', color='black')\n mean_SR = np.mean(self.BLS.power)\n std_SR = np.std(self.BLS.power)\n best_power = self.BLS.power[np.where(self.BLS.period.value == self.BLS.period_max)[0]]\n SDE = (best_power - mean_SR)/std_SR\n ax3.axvline(self.BLS.period_max, alpha=0.4, 
lw=4)\n for n in range(2, 10):\n if n*self.BLS.period_max <= max(self.BLS.period.value):\n ax3.axvline(n*self.BLS.period_max, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax3.axvline(self.BLS.period_max / n, alpha=0.4, lw=1, linestyle=\"dashed\")\n sx, ex = ax3.get_xlim()\n sy, ey = ax3.get_ylim()\n ax3.text(ex-(ex-sx)/3, ey-(ey-sy)/3,\n 'P$_{MAX}$ = %.3f d\\nT0 = %.2f\\nDepth = %.4f\\nDuration = %.2f d\\nSDE = %.3f' %\n (self.BLS.period_max, self.BLS.t0_max,\n self.BLS.depth_max, self.BLS.duration_max, SDE))\n\n\n # Fourth panel: lightcurve folded to the best period from the BLS\n self.folded.bin(1*self.nlc).scatter(ax=ax4, label='_no_legend_', color='black',\n marker='.', alpha=0.5)\n l = max(min(4*self.BLS.duration_max/self.BLS.period_max, 0.5), 0.02)\n nbins = int(50*0.5/l)\n r1, dt1 = binningx0dt(self.folded.phase, self.folded.flux, x0=-0.5, nbins=nbins)\n ax4.plot(r1[::,0], r1[::,1], marker='o', ls='None',\n color='orange', markersize=5, markeredgecolor='orangered', label='_no_legend_')\n\n lc_model = self.BLS.bls.get_transit_model(period=self.BLS.period_max,\n duration=self.BLS.duration_max,\n transit_time=self.BLS.t0_max)\n lc_model_folded = lc_model.fold(self.BLS.period_max, t0=self.BLS.t0_max)\n ax4.plot(lc_model_folded.phase, lc_model_folded.flux, color='green', lw=2)\n ax4.set_xlim(-l, l)\n h = max(lc_model.flux)\n l = min(lc_model.flux)\n ax4.set_ylim(l-4.*(h-l), h+5.*(h-l))\n del lc_model, lc_model_folded, r1, dt1\n\n\n if not noTLS:\n # Fifth panel: TLS periodogram\n ax5.axvline(self.tls.period, alpha=0.4, lw=3)\n ax5.set_xlim(np.min(self.tls.periods), np.max(self.tls.periods))\n for n in range(2, 10):\n ax5.axvline(n*self.tls.period, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax5.axvline(self.tls.period / n, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax5.set_ylabel(r'SDE')\n ax5.set_xlabel('Period (days)')\n ax5.plot(self.tls.periods, self.tls.power, color='black', lw=0.5)\n ax5.set_xlim(0, max(self.tls.periods))\n\n period_tls = self.tls.period\n T0_tls = self.tls.T0\n depth_tls = self.tls.depth\n duration_tls = self.tls.duration\n FAP_tls = self.tls.FAP\n\n sx, ex = ax5.get_xlim()\n sy, ey = ax5.get_ylim()\n ax5.text(ex-(ex-sx)/3, ey-(ey-sy)/3,\n 'P$_{MAX}$ = %.3f d\\nT0 = %.1f\\nDepth = %.4f\\nDuration = %.2f d\\nFAP = %.4f' %\n (period_tls, T0_tls, 1.-depth_tls, duration_tls, FAP_tls))\n\n # Sixth panel: folded light curve to the best period from the TLS\n ax6.plot(self.tls.folded_phase, self.tls.folded_y, color='black', marker='.',\n alpha=0.5, ls='None', markersize=0.7)\n l = max(min(4*duration_tls/period_tls, 0.5), 0.02)\n nbins = int(50*0.5/l)\n r1, dt1 = binningx0dt(self.tls.folded_phase, self.tls.folded_y,\n x0=0.0, nbins=nbins, useBinCenter=True)\n ax6.plot(r1[::,0], r1[::,1], marker='o', ls='None', color='orange',\n markersize=5, markeredgecolor='orangered', label='_no_legend_')\n ax6.plot(self.tls.model_folded_phase, self.tls.model_folded_model, color='green', lw=2)\n ax6.set_xlim(0.5-l, 0.5+l)\n h = max(self.tls.model_folded_model)\n l = min(self.tls.model_folded_model)\n ax6.set_ylim(l-4.*(h-l), h+5.*(h-l))\n ax6.set_xlabel('Phase')\n ax6.set_ylabel('Relative flux')\n del r1, dt1\n\n fig.subplots_adjust(top=0.98, bottom=0.05, wspace=0.25, left=0.1, right=0.97)\n fig.savefig(os.path.join(path_plots, 'TIC%d.pdf' % self.TIC))\n if interactive:\n plt.show()\n plt.close('all')\n del fig", "def runExpt_and_makePlots(n, d, grid_size, reps, tho_scale=0.1, is_classification=True):\n\n args = [n, d, grid_size, reps]\n df_std_signal, df_tho_signal = repeatexp(*args,\n 
is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=False)\n \n df_std_nosignal, df_tho_nosignal = repeatexp(*args,\n is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=True)\n\n f, ax = plt.subplots(2, 2, figsize=(8,10), sharex=True, sharey=False)\n sb.set_style('whitegrid')\n \n kw_params = {'x':'dataset',\n 'y':'performance',\n 'units':'perm'}\n \n sb.barplot(data=df_std_signal,\n ax=ax[0,0],\n **kw_params)\n ax[0,0].set_title('Standard, HAS Signal')\n \n sb.barplot(data=df_tho_signal,\n ax=ax[0,1],\n **kw_params)\n ax[0,1].set_title('Thresholdout, HAS Signal')\n\n sb.barplot(data=df_std_nosignal,\n ax=ax[1,0],\n **kw_params)\n ax[1,0].set_title('Standard, NO Signal')\n\n sb.barplot(data=df_tho_nosignal,\n ax=ax[1,1],\n **kw_params)\n ax[1,1].set_title('Thresholdout, NO Signal')\n \n return f, ax", "def plot_derivatives(self, show=False):\n\n fig, ax = plt.subplots(4, 2, figsize = (15, 10))\n # plt.subplots_adjust(wspace = 0, hspace = 0.1)\n plt.subplots_adjust(hspace=0.5)\n training_index = np.random.randint(0,self.n_train * self.n_p)\n \n if self.flatten:\n print ('Plotting derivatives... reshaping the flattened data to %s'%str(input_shape))\n # TODO\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting derivatives... reshaping the flattened data to power spectra')\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n # Cl has shape (1,10) since it is the data vector for the \n # upper training image for both params\n labels =[r'$θ_1$ ($\\Omega_M$)']\n\n # we loop over them in this plot to assign labels\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 0].plot(ells, Cl[i],label=labels[i])\n else:\n ax[0, 0].loglog(ells, ells*(ells+1)*Cl[i],label=labels[i])\n ax[0, 0].set_title('One upper training example, Cl 0,0')\n ax[0, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n ax[0, 0].set_xscale('log')\n\n ax[0, 0].legend(frameon=False)\n\n if self.flatten:\n # TODO\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 0].plot(ells, Cl[i])\n else:\n ax[1, 0].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 0].set_title('One lower training example, Cl 0,0')\n ax[1, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n ax[1, 0].set_xscale('log')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m\"][training_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p\"][training_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n\n for i in range(Cl_lower.shape[0]):\n ax[2, 0].plot(ells, (Cl_upper[i]-Cl_lower[i]))\n ax[2, 0].set_title('Upper - 
lower input data: train sample');\n ax[2, 0].set_xlabel(r'$\\ell$')\n ax[2, 0].set_ylabel(r'$C_\\ell (u) - C_\\ell (m) $')\n ax[2, 0].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 0].set_xscale('log')\n\n for i in range(Cl_lower.shape[0]):\n ax[3, 0].plot(ells, (Cl_upper[i]-Cl_lower[i])/(2*delta_theta[i]))\n ax[3, 0].set_title('Numerical derivative: train sample');\n ax[3, 0].set_xlabel(r'$\\ell$')\n ax[3, 0].set_ylabel(r'$\\Delta C_\\ell / 2\\Delta \\theta$')\n ax[3, 0].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[3, 0].set_xscale('log')\n\n test_index = np.random.randint(self.n_p)\n\n if self.flatten:\n # TODO\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 1].plot(ells, Cl[i])\n else:\n ax[0, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[0, 1].set_title('One upper test example Cl 0,0')\n ax[0, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n\n ax[0, 1].set_xscale('log')\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 1].plot(ells, Cl[i])\n else:\n ax[1, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 1].set_title('One lower test example Cl 0,0')\n ax[1, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n \n ax[1, 1].set_xscale('log')\n\n for i in range(Cl_lower.shape[0]):\n ax[2, 1].plot(ells, (Cl_upper[i]-Cl_lower[i]))\n ax[2, 1].set_title('Upper - lower input data: test sample');\n ax[2, 1].set_xlabel(r'$\\ell$')\n ax[2, 1].set_ylabel(r'$C_\\ell (u) - C_\\ell (m) $')\n ax[2, 1].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 1].set_xscale('log')\n\n\n for i in range(Cl_lower.shape[0]):\n ax[3, 1].plot(ells, (Cl_upper[i]-Cl_lower[i])/(2*delta_theta[i]))\n ax[3, 1].set_title('Numerical derivative: train sample');\n ax[3, 1].set_xlabel(r'$\\ell$')\n ax[3, 1].set_ylabel(r'$\\Delta C_\\ell / \\Delta \\theta $')\n ax[3, 1].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[3, 1].set_xscale('log')\n\n plt.savefig(f'{self.figuredir}derivatives_visualization_{self.modelversion}.png')\n if show: plt.show()\n plt.close()" ]
[ "0.6250733", "0.6131744", "0.61037934", "0.6092076", "0.60732573", "0.59801644", "0.59770113", "0.5963969", "0.59115875", "0.58646387", "0.5833313", "0.58220226", "0.5787901", "0.57865995", "0.5741407", "0.5739193", "0.57294893", "0.57286334", "0.5718547", "0.57135636", "0.5679305", "0.5664708", "0.5647784", "0.5643514", "0.56236506", "0.5604072", "0.557861", "0.5566563", "0.5537968", "0.5537442" ]
0.62457854
1
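The two float lists and the rank above appear to hold retrieval similarity scores for the 30 mined negatives and for the positive document, on the same scale. A minimal sketch of how such scores are commonly produced, assuming dense embeddings compared by cosine similarity; the embedding dimension, the random vectors, and the helper name are illustrative only, not taken from this dataset:

import numpy as np

def cosine_scores(query_vec, doc_vecs):
    # Cosine similarity between one query embedding and a stack of
    # document embeddings (one row per document).
    q = query_vec / np.linalg.norm(query_vec)
    d = doc_vecs / np.linalg.norm(doc_vecs, axis=1, keepdims=True)
    return d @ q

# Illustrative shapes: one query, one positive document, 30 negatives.
rng = np.random.default_rng(0)
query = rng.normal(size=384)
positive = rng.normal(size=(1, 384))
negatives = rng.normal(size=(30, 384))

document_score = float(cosine_scores(query, positive)[0])
negative_scores = cosine_scores(query, negatives).tolist()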
Plots the ave_dhdl array as a function of the lambda value. If both TI and TI-CUBIC are among the requested methods, it plots the TI integration area together with the TI-CUBIC interpolation curve; if only one of them is requested, it plots the integration area of that method alone.
def plotTI(): min_dl = dlam[dlam != 0].min() S = int(0.4/min_dl) fig = pl.figure(figsize = (8,6)) ax = fig.add_subplot(1,1,1) ax.spines['bottom'].set_position('zero') ax.spines['top'].set_color('none') ax.spines['right'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') for k, spine in ax.spines.items(): spine.set_zorder(12.2) xs, ndx, dx = [0], 0, 0.001 colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y'] min_y, max_y = 0, 0 lines = tuple() ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper lv_names2 = [] for j in range(n_components): y = ave_dhdl[:,j] if not (y == 0).all(): lv_names2.append(r'$%s$' % P.lv_names[j].capitalize()) for j in range(n_components): y = ave_dhdl[:,j] if not (y == 0).all(): # Get the coordinates. lj = lchange[:,j] x = lv[:,j][lj] y = y[lj]/P.beta_report if 'TI' in P.methods: # Plot the TI integration area. ss = 'TI' for i in range(len(x)-1): min_y = min(y.min(), min_y) max_y = max(y.max(), max_y) #pl.plot(x,y) if i%2==0: pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0) else: pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5) xlegend = [-100*wnum for wnum in range(len(lv_names2))] pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper if 'TI-CUBIC' in P.methods and not cubspl[j]==0: # Plot the TI-CUBIC interpolation curve. ss += ' and TI-CUBIC' xnew = numpy.arange(0, 1+dx, dx) ynew = cubspl[j].interpolate(y, xnew) min_y = min(ynew.min(), min_y) max_y = max(ynew.max(), max_y) pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0) else: # Plot the TI-CUBIC integration area. ss = 'TI-CUBIC' for i in range(len(x)-1): xnew = numpy.arange(x[i], x[i+1]+dx, dx) ynew = cubspl[j].interpolate(y, xnew) ynew[0], ynew[-1] = y[i], y[i+1] min_y = min(ynew.min(), min_y) max_y = max(ynew.max(), max_y) if i%2==0: pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0) else: pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5) # Store the abscissa values and update the subplot index. xs += (x+ndx).tolist()[1:] ndx += 1 # Make sure the tick labels are not overcrowded. xs = numpy.array(xs) dl_mat = numpy.array([xs-i for i in xs]) ri = range(len(xs)) def getInd(r=ri, z=[0]): primo = r[0] min_dl=ndx*0.02*2**(primo>10) if dl_mat[primo].max()<min_dl: return z for i in r: for j in range(len(xs)): if dl_mat[i,j]>min_dl: z.append(j) return getInd(ri[j:], z) xt = [i if (i in getInd()) else '' for i in range(K)] pl.xticks(xs[1:], xt[1:], fontsize=10) pl.yticks(fontsize=10) #ax = pl.gca() #for label in ax.get_xticklabels(): # label.set_bbox(dict(fc='w', ec='None', alpha=0.5)) # Remove the abscissa ticks and set up the axes limits. 
for tick in ax.get_xticklines(): tick.set_visible(False) pl.xlim(0, ndx) min_y *= 1.01 max_y *= 1.01 pl.ylim(min_y, max_y) for i,j in zip(xs[1:], xt[1:]): pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54') if ndx>1: lenticks = len(ax.get_ymajorticklabels()) - 1 if min_y<0: lenticks -= 1 if lenticks < 5: from matplotlib.ticker import AutoMinorLocator as AML ax.yaxis.set_minor_locator(AML()) pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12) pl.ylabel(r'$\mathrm{\langle{\frac{ \partial U } { \partial \lambda }}\rangle_{\lambda}\/%s}$' % P.units, fontsize=20, color='#151B54') pl.annotate('$\mathit{\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54') if not P.software.title()=='Sire': lege = ax.legend(prop=FP(size=14), frameon=False, loc=1) for l in lege.legendHandles: l.set_linewidth(10) pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf')) pl.close(fig) return
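For reference, the TI estimate whose integration area the function above shades is simply the numerical integral of the ⟨∂U/∂λ⟩ averages over λ from 0 to 1. A minimal trapezoid-rule sketch; `lambdas` and `dhdl_means` are illustrative stand-ins for the per-state λ values and averaged derivatives, not variables defined in the code above:

import numpy as np

def ti_estimate(lambdas, dhdl_means):
    # Thermodynamic integration: dG = integral_0^1 <dU/dlambda> dlambda,
    # approximated with the trapezoid rule over the sampled lambda states.
    x = np.asarray(lambdas, dtype=float)
    y = np.asarray(dhdl_means, dtype=float)
    order = np.argsort(x)
    x, y = x[order], y[order]
    return float(np.sum(0.5 * (y[1:] + y[:-1]) * np.diff(x)))

# Illustrative values only: a smooth <dU/dlambda> curve over 11 states.
lambdas = np.linspace(0.0, 1.0, 11)
dhdl_means = 10.0 * (1.0 - lambdas) ** 2
dG = ti_estimate(lambdas, dhdl_means)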
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotdFvsLambda1():\n x = numpy.arange(len(df_allk))\n if x[-1]<8:\n fig = pl.figure(figsize = (8,6))\n else:\n fig = pl.figure(figsize = (len(x),6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n lines = tuple()\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.1*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n pl.xlabel('States', fontsize=12, color='#151B54')\n pl.ylabel('$\\Delta G$ '+P.units, fontsize=12, color='#151B54')\n pl.xticks(x+0.5*width*len(P.methods), tuple(['%d--%d' % (i, i+1) for i in x]), fontsize=8)\n pl.yticks(fontsize=8)\n pl.xlim(x[0], x[-1]+len(lines)*width)\n ax = pl.gca()\n for dir in ['right', 'top', 'bottom']:\n ax.spines[dir].set_color('none')\n ax.yaxis.set_ticks_position('left')\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n\n leg = pl.legend(lines, tuple(P.methods), loc=3, ncol=2, prop=FP(size=10), fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.title('The free energy change breakdown', fontsize = 12)\n pl.savefig(os.path.join(P.output_directory, 'dF_state_long.pdf'), bbox_inches='tight')\n pl.close(fig)\n return", "def deap_plot_hyp(stats, colour=\"blue\"):\n plt.ion()\n # plot hypervolumes\n hyp = []\n for gen in stats:\n hyp.append(gen['hypervolume'])\n plt.figure()\n plt.plot(hyp, color=colour)\n plt.xlabel(\"Function Evaluations\")\n plt.ylabel(\"Hypervolume\")", "def plot_el(df, el, D, widths, lmbdas, to_pdf, is_sparse):\n df_cut = df[df.elect == el]\n fig, ax = plt.subplots(figsize=(8, 6))\n ymax = 0.0\n\n for d in widths:\n lbl = \"%i nm\" % d\n sel = np.array(df_cut[[\"lmbda\", D]][df_cut.d == d])\n\n if el == \"Carbon\":\n Nl = len(lmbdas)\n eb = [1.0/i for i in range(1, Nl+1)]\n plt.errorbar(sel[:, 0], sel[:, 1] * 1e9, yerr=eb, \\\n fmt=\"D-\", lw=4, ms=10, mew=0, label=lbl)\n else:\n plt.plot(sel[:, 0], sel[:, 1] * 1e9, \\\n \"D-\", lw=4, ms=10, mew=0, label=lbl)\n\n ymax_temp = (max(sel[:, 1] * 1e9) // 10 + 1) * 10 # round to 10s\n if ymax_temp > ymax: ymax = ymax_temp\n plt.ylim([0.0, ymax])\n\n plt.xlim([lmbdas[0]-1, lmbdas[-1]+1])\n plt.xticks([4, 8, 12, 16, 20, 24])\n plt.yticks(np.arange(0.0, ymax+1, 5))\n plt.xlabel(\"$\\lambda$\")\n ylbl = \"Normal\" if len(D) == 2 else \"Parralel\"\n plt.ylabel(\"$D_{\\mathrm{%s}} \\; (10^{-9} \\; \\mathrm{m^2/s}) $\" % ylbl)\n\n plt.legend(loc=\"best\", fontsize=22)\n# if el == \"Carbon\" and D == \"Dx\":\n# plt.legend(loc=2, fontsize=22)\n# if el == \"Carbon\" and D == \"Dyz\":\n# plt.legend(loc=(0.5, 0.5), fontsize=22)\n\n plt.grid()\n plt.title(el)\n fmt = \"pdf\" if to_pdf else \"png\"\n sp = \"_sp\" if is_sparse else \"\"\n plotname = \"%s/%s_%s%s.%s\" % \\\n (default_path, D, el.lower(), sp, fmt)\n# plotname = default_path + D + \"_\" + el.lower() + fmt\n plt.savefig(plotname, bbox_inches='tight')", "def investigate4DRepeatability():\n parentdir = '/home/rallured/Dropbox/Interferometer/SolarBFlat/Repeatability/'\n avgs = [1,2,4,8,16,32]\n\n #Temporal with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTiltTemporal*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make 
progressive averaging plot\n plt.figure('TemporalTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptemptilt = d[-1]-d[-2]\n figtemptilt = d[-1]\n\n #Dynamic with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTilt_*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = [met.readFlatScript(fi.split('.')[0])[0] for fi in fn]\n #Make progressive averaging plot\n plt.figure('DynamicTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdyntilt = d[-1]-d[-2]\n figdyntilt = d[-1]\n \n #Temporal with fringes nulled\n fn = glob.glob(parentdir+'Nulled/17*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalNulledFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptempnull = d[-1]-d[-2]\n figtempnull = d[-1]\n \n #Dynamic with fringes nulled\n d = pyfits.getdata('/home/rallured/Dropbox/Interferometer/'\n 'SolarBFlat/Repeatability/'\n 'Nulled/170103_Processed.fits')\n rep = np.array([d[i,0]-d[i,1] for i in range(32)])\n #Make progressive averaging plot\n plt.figure('DynamicNulledFigure')\n for i in [0,1,3,7,15,31]:\n f,p = fourier.meanPSD(d[i,0],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(i+1))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdynnull = d[-1][0]-d[-1][1]\n figdynnull = d[-1][0]\n\n #Make comparative repeatability plots with 32 averages\n plt.figure('CompareRepeatability')\n f,p = fourier.meanPSD(repdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(repdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(reptemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(reptempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Repeatability - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make comparative figure plots with 32 averages\n plt.figure('CompareFigure')\n f,p = fourier.meanPSD(figdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(figdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n 
minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(figtemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(figtempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Figure - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make parroting repeatability plots\n fig = plt.figure('Parroting')\n fig.add_subplot(2,2,1)\n plt.imshow(repdyntilt)\n plt.title('Dynamic Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,2)\n plt.imshow(reptemptilt)\n plt.title('Temporal Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,3)\n res = legendre2d(repdyntilt,xo=3,yo=3)[0]\n plt.imshow(repdyntilt-res)\n plt.title('Dynamic Repeatability Filtered')\n plt.colorbar()\n fig.add_subplot(2,2,4)\n res = legendre2d(reptemptilt,xo=3,yo=3)[0]\n plt.imshow(reptemptilt-res)\n plt.title('Temporal Repeatability Filtered')\n plt.colorbar()", "def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def plot_tild_integrands(self):\n fig, ax = plt.subplots()\n lambdas = self.get_lambdas()\n thermal_average, standard_error = self.get_tild_integrands()\n ax.plot(lambdas, thermal_average, marker=\"o\")\n ax.fill_between(\n lambdas,\n thermal_average - standard_error,\n thermal_average + standard_error,\n alpha=0.3,\n )\n ax.set_xlabel(\"Lambda\")\n ax.set_ylabel(\"dF/dLambda\")\n return fig, ax", "def _plot_dists_figure_2a(self, indexes = None, domain=None, two_landes=None, yrange=None, long=None, vline=None, params=None, different_stat =None, nolegend =None):\n\n bstat_stat = 'mean'\n bstat_er = 'se'\n\n\n if two_landes is None:\n two_landes = False\n\n if nolegend is None:\n nolegend = False\n\n if different_stat is None:\n different_stat = False\n elif not isinstance(different_stat, str):\n different_stat = 'var'\n\n if indexes == None:\n indexes = self.indexes\n\n if int is None:\n long = False\n\n if int:\n manyb = True\n bstats = ['dist','dist_guess']\n else:\n manyb = False\n bstats = ['dist']\n\n if different_stat:\n bstats = [different_stat]\n\n if vline is None:\n vline = False\n\n if params is None:\n params = False\n\n indexes = indexes[:2]\n\n\n undertext_params = [['N', 'U'], ['shift_s0', 'sigma_0_del'], ['f1', 's1', 's2']]\n if not params:\n undertext_params = []\n params = []\n else:\n params =undertext_params\n\n\n fewticks = False\n the_colors = ['darkgreen', 'blue','cyan','limegreen']\n the_colors_different = ['blue', 'cyan', 'darkgreen','limegreen']\n\n data_classes = [self.data_classes[indi] for indi in indexes]\n\n if self.data_classes[0].units_s0:\n myunits = self.name_class.units\n delti = False\n myunits = ' in trait SD'\n else:\n myunits = r' (units $\\delta =\\omega/\\sqrt{2N}$)'\n # myunits = r' (units $\\delta$)'\n delti = True\n\n\n plot_dict = dict()\n plot_dict['xlabel'] = 'Generations after shift'\n plot_dict['savedir'] = os.path.join(self.base_dir)\n plot_dict['domain'] = domain\n plot_dict['yrange'] = yrange\n\n plotspecs = dict()\n if int:\n plotspecs['fsize'] = (22, 5)\n else:\n plotspecs['fsize'] = (26, 14)\n plotspecs['fsize'] = (26, 14)\n\n if 
different_stat:\n plotspecs['fsize'] = (26, 8)\n\n plotspecs['dpi'] = 200\n plotspecs['linewidth'] = 3.5\n\n plotspecs['ticksize'] = 34\n\n # plotspecs['nxticks'] = 3\n\n if fewticks:\n plotspecs['nxticks'] = 2\n plotspecs['nyticks'] = 2\n\n plotspecs['linewidth'] = 8\n\n _mylabel = ''\n\n #if bstats[0] == 'dist':\n plotspecs['legend_loc'] = (0.98, 0.98)\n plotspecs['legend_anchor'] = 'upper right'\n if not delti:\n plot_dict['ylabel'] = 'Distance from optimum ' + myunits + ' \\n'\n else:\n plot_dict['ylabel'] = myunits\n if different_stat:\n if different_stat == 'var':\n plot_dict['ylabel'] = 'Variance'\n plotspecs['ylabelspace'] = 1\n elif different_stat == 'skewness':\n plot_dict['ylabel'] = 'Skewness ' + r'($\\mu_{3}(t)/\\sigma^3 (t)$)'\n else:\n plot_dict['ylabel'] = different_stat\n\n\n plotspecs['axis_font'] = {'fontname': 'Arial', 'size': '46'}\n plotspecs['legend_font'] = {'size': '42'}\n\n #\n # plotspecs['text_loc'] = [0.62, 0.5]\n # plotspecs['text_color'] = 'indianred'\n # plotspecs['text_size'] = 40\n\n\n if vline:\n #vlines = [data_classes[0].phase_2_time, data_classes[0].phase_3_time]\n vlines = [data_classes[0].get_phase_two_time()]\n plot_dict['vlines'] = vlines\n plotspecs['vlinecolor'] = 'black'\n plotspecs['vlineswidth'] = 4\n\n\n if fewticks:\n plotspecs['nyticks'] = 2\n\n plot_dict['plotspecs'] = plotspecs\n\n x = []\n y = []\n yer = []\n ynames = []\n linestyles = []\n colors = []\n len_y = 0\n\n\n col_num = 0\n for bstati in bstats:\n first_data_class = True\n for data_class in data_classes:\n if bstati not in data_class._bstats:\n print(bstati + \"not in\" + str(data_class.index))\n return\n\n _mylabel += bstati\n\n for data_class in data_classes:\n data = data_class.read_bstats(bstati)\n times = sorted(data[bstati].keys())\n yi = [data[bstati][tim][bstat_stat] for tim in times]\n\n style = '-'\n if different_stat:\n coli = the_colors_different[col_num]\n else:\n coli = the_colors[col_num]\n\n name = ''\n lib = ''\n if manyb:\n nametemp = self.name_class.yname(bstati).split(\"(units\")\n name += nametemp[0]\n if bstati in self._theory_stats:\n name = 'Simulations'\n if len(data_classes) == 2:\n if first_data_class:\n name = 'Simulations Lande'\n else:\n name = 'Simulations non-Lande'\n if params is None:\n\n if len(data_classes) > 1:\n name += ' I:' + str(data_class.index)\n lib += str(data_class.index)\n else:\n for param in params:\n try:\n name += param + ' = ' + '{0:.2f}'.format(data_class.param_dict[param]) + ' '\n lib += param + '{0:.0f}'.format(data_class.param_dict[param]) + '_'\n except KeyError:\n print('KeyError: ' + param)\n\n if bstati == 'dist_guess':\n if int:\n name = 'Closed form approximation'\n name = 'Quasi static approximation: ' + r'$\\mu_3 (t)/(2\\sigma^2 (t))$'\n style = '--'\n\n if not (bstati == 'dist_guess' and first_data_class and len(data_classes) == 2):\n col_num += 1\n linestyles.append(style)\n colors.append(coli)\n len_y+=1\n x.append(times)\n y.append(yi)\n ynames.append(name)\n _mylabel = _mylabel + lib\n first_data_class = False\n\n for bstati in bstats:\n first_data_class = True\n for data_class in data_classes:\n if different_stat:\n coli = the_colors_different[-1]\n else:\n coli = the_colors[-1]\n if first_data_class:\n style = '--'\n else:\n style = '-'\n if first_data_class or two_landes:\n if bstati =='var' or bstati == 'skewness' or bstati == 'mu3' or bstati in self._theory_stats and bstati != 'dist_guess':\n if bstati =='var':\n times = x[0]\n var_0 = data_class.param_dict['var_0']\n var_0 = 1.0\n yi = [var_0 for _ in 
times]\n ynami = 'Equilibrium variance'\n elif bstati == 'skewness' or bstati == 'mu3':\n times = x[0]\n mu0 = 0\n yi = [mu0 for _ in times]\n if bstati == 'skewness':\n ynami = 'Equilibrium skewness'\n else:\n ynami = 'Equilibrium third moment'\n\n elif bstati in self._theory_stats:\n times, yi = self.theory_stat(data_class, bstati)\n ynami = self.name_class.theory_yname(bstati)\n\n x.append(times)\n y.append(yi)\n len_y += 1\n ynames.append(ynami)\n colors.append(coli)\n linestyles.append(style)\n\n first_data_class = False\n\n plot_dict['x'] = x\n plot_dict['y'] = y\n plot_dict['colors'] = colors\n\n\n\n if len(_mylabel) < 30:\n plot_dict['label'] = _mylabel + '_many_cl'\n else:\n plot_dict['label'] = \"_\".join(bstats)\n\n # List with [text_top, text_bottom] containing relevant parameters\n undertext = []\n for listi in undertext_params:\n text_list = self._plot_text(index_list=indexes, params_list=listi)\n if text_list:\n text_string = ', '.join(text_list)\n undertext.append(text_string)\n if len(ynames) > 1 and not nolegend:\n plot_dict['ynames'] = ynames\n\n plot_dict['undertext'] = undertext\n plot_dict['linestyles'] = linestyles\n\n plot_many_y(**plot_dict)", "def evidence_tuning_plots(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\",\n x_name=\"Mean Predicted\",\n y_name=\"Empirical Probability\"):\n\n def lineplot(x, y, trials, methods, **kwargs):\n \"\"\"method_lineplot.\n\n Args:\n y:\n methods:\n kwargs:\n \"\"\"\n uniq_methods = set(methods.values)\n method_order = sorted(uniq_methods)\n\n method_new_names = [f\"$\\lambda={i:0.4f}$\" for i in method_order]\n method_df = []\n for method_idx, (method, method_new_name) in enumerate(zip(method_order,\n method_new_names)):\n lines_y = y[methods == method]\n lines_x = x[methods == method]\n for index, (xx, yy,trial) in enumerate(zip(lines_x, lines_y, trials)):\n\n to_append = [{x_name : x,\n y_name: y,\n \"Method\": method_new_name,\n \"Trial\" : trial}\n for i, (x,y) in enumerate(zip(xx,yy))]\n method_df.extend(to_append)\n method_df = pd.DataFrame(method_df)\n x = np.linspace(0,1,100)\n plt.plot(x, x, linestyle='--', color=\"black\")\n sns.lineplot(x=x_name, y=y_name, hue=\"Method\",\n alpha=0.8,\n hue_order=method_new_names, data=method_df,)\n # estimator=None, units = \"Trial\")\n\n df = df.copy()\n # Query methods that have evidence_new_reg_2.0\n df = df[[\"evidence\" in i for i in\n df['method_name']]].reset_index()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n g = sns.FacetGrid(df, col=\"Data\", height=6, sharex = False, sharey = False)\n g.map(lineplot, x_input, y_input, \"trial_number\",\n methods=df[\"Method\"]).add_legend()", "def makeaplot(events,\n sensitivities,\n hrf_estimates,\n roi_pair,\n fn=True):\n import matplotlib.pyplot as plt\n\n # take the mean and transpose the sensitivities\n sensitivities_stacked = mv.vstack(sensitivities)\n\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.bilat_ROIs)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.all_ROIs)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n\n mean_sens_transposed = 
mean_sens.get_mapped(mv.TransposeMapper())\n\n # some parameters\n # get the conditions\n block_design = sorted(np.unique(events['trial_type']))\n reorder = [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11]\n block_design = [block_design[i] for i in reorder]\n # end indices to chunk timeseries into runs\n run_startidx = np.array([0, 157, 313, 469])\n run_endidx = np.array([156, 312, 468, 624])\n\n runs = np.unique(mean_sens_transposed.sa.chunks)\n\n for j in range(len(hrf_estimates.fa.bilat_ROIs_str)):\n comparison = hrf_estimates.fa.bilat_ROIs[j][0]\n if (roi_pair[0] in comparison) and (roi_pair[1] in comparison):\n roi_pair_idx = j\n roi_betas_ds = hrf_estimates[:, roi_pair_idx]\n roi_sens_ds = mean_sens_transposed[:, roi_pair_idx]\n\n for run in runs:\n fig, ax = plt.subplots(1, 1, figsize=[18, 10])\n colors = ['#7b241c', '#e74c3c', '#154360', '#3498db', '#145a32', '#27ae60',\n '#9a7d0a', '#f4d03f', '#5b2c6f', '#a569bd', '#616a6b', '#ccd1d1']\n plt.suptitle('Timecourse of sensitivities, {} versus {}, run {}'.format(roi_pair[0],\n roi_pair[1],\n run + 1),\n fontsize='large')\n plt.xlim([0, max(mean_sens_transposed.sa.time_coords)])\n plt.ylim([-5, 7])\n plt.xlabel('Time in sec')\n plt.legend(loc=1)\n plt.grid(True)\n # for each stimulus, plot a color band on top of the plot\n for stimulus in block_design:\n onsets = events[events['trial_type'] == stimulus]['onset'].values\n durations = events[events['trial_type'] == stimulus]['duration'].values\n stimulation_end = np.sum([onsets, durations], axis=0)\n r_height = 1\n color = colors[0]\n y = 6\n\n # get the beta corresponding to the stimulus to later use in label\n beta = roi_betas_ds.samples[hrf_estimates.sa.condition == stimulus.replace(\" \", \"\"), 0]\n\n for i in range(len(onsets)):\n r_width = durations[i]\n x = stimulation_end[i]\n rectangle = plt.Rectangle((x, y),\n r_width,\n r_height,\n fc=color,\n alpha=0.5,\n label='_'*i + stimulus.replace(\" \", \"\") + '(' + str('%.2f' % beta) + ')')\n plt.gca().add_patch(rectangle)\n plt.legend(loc=1)\n del colors[0]\n\n times = roi_sens_ds.sa.time_coords[run_startidx[run]:run_endidx[run]]\n\n ax.plot(times, roi_sens_ds.samples[run_startidx[run]:run_endidx[run]], '-', color='black', lw=1.0)\n glm_model = hrf_estimates.a.model.results_[0.0].predicted[run_startidx[run]:run_endidx[run], roi_pair_idx]\n ax.plot(times, glm_model, '-', color='#7b241c', lw=1.0)\n model_fit = hrf_estimates.a.model.results_[0.0].R2[roi_pair_idx]\n plt.title('R squared: %.2f' % model_fit)\n if fn:\n plt.savefig(results_dir + 'timecourse_localizer_glm_sens_{}_vs_{}_run-{}.svg'.format(roi_pair[0], roi_pair[1], run + 1))", "def plot_DA(filename):\n\n # Set up an array of redshift values.\n dz = 0.1\n z = numpy.arange(0., 10. + dz, dz)\n\n # Set up a cosmology dictionary, with an array of matter density values.\n cosmo = {}\n dom = 0.01\n om = numpy.atleast_2d(numpy.linspace(0.1, 1.0, (1.-0.1)/dom)).transpose()\n cosmo['omega_M_0'] = om\n cosmo['omega_lambda_0'] = 1. 
- cosmo['omega_M_0']\n cosmo['h'] = 0.701\n cosmo['omega_k_0'] = 0.0\n\n # Calculate the hubble distance.\n dh = cd.hubble_distance_z(0, **cosmo)\n # Calculate the angular diameter distance.\n da = cd.angular_diameter_distance(z, **cosmo)\n\n # Make plots.\n plot_dist(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',\n filename)\n plot_dist_ony(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',\n filename)", "def plotLambdaDependency(folder='results/', analysis='good', sigma=3):\n matplotlib.rc('text', usetex=True)\n if 'ind' in analysis:\n print 'Individual Results'\n data800 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm*.pkl')]\n data600 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm54*.pkl')]\n data700 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm52*.pkl')]\n data890 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm50*.pkl')]\n data = (data600, data700, data800, data890)\n datacontainer = []\n for x in data:\n wx = np.median([d['wx'] for d in x])\n wxerr = np.median([d['wxerr'] for d in x])\n wy = np.median([d['wy'] for d in x])\n wyerr = np.median([d['wyerr'] for d in x])\n dat = dict(wx=wx, wy=wy, wxerr=wxerr, wyerr=wyerr)\n datacontainer.append(dat)\n data = datacontainer\n waves = [600, 700, 800, 890]\n elif 'join' in analysis:\n print 'Joint Results'\n data800nm = fileIO.cPicleRead(folder+'J800nm.pkl')\n data600nm = fileIO.cPicleRead(folder+'J600nm54k.pkl')\n data700nm = fileIO.cPicleRead(folder+'J700nm52k.pkl')\n data890nm = fileIO.cPicleRead(folder+'J890nm50k.pkl')\n data = (data600nm, data700nm, data800nm, data890nm)\n waves = [int(d['wavelength'].replace('nm', '')) for d in data]\n else:\n print 'Using subset of data'\n #data600nm = fileIO.cPicleRead(folder+'G600nm0.pkl')\n data600nm = fileIO.cPicleRead(folder+'J600nm54k.pkl')\n #data700nm = fileIO.cPicleRead(folder+'G700nm0.pkl')\n data700nm = fileIO.cPicleRead(folder+'J700nm52k.pkl')\n #data800nm = fileIO.cPicleRead(folder+'G800nm0.pkl')\n data800nm = fileIO.cPicleRead(folder+'J800nm.pkl')\n #data890nm = fileIO.cPicleRead(folder+'G890nm0.pkl')\n data890nm = fileIO.cPicleRead(folder+'J890nm50k.pkl')\n data = (data600nm, data700nm, data800nm, data890nm)\n waves = [600, 700, 800, 890]\n\n wx = np.asarray([_FWHMGauss(d['wx']) for d in data])\n wxerr = np.asarray([_FWHMGauss(d['wxerr']) for d in data])\n wypix = np.asarray([d['wy'] for d in data])\n wy = _FWHMGauss(wypix)\n wyerrpix = np.asarray([d['wyerr'] for d in data])\n wyerr = _FWHMGauss(wyerrpix)\n waves = np.asarray(waves)\n\n w = np.sqrt(wx*wy)\n werr = np.sqrt(wxerr*wyerr)\n\n print zip(waves, w)\n\n #plot FWHM\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.11, right=0.95)\n ax1.set_title('CCD273 PSF Wavelength Dependency')\n\n ax1.errorbar(waves, wx, yerr=sigma*wxerr/3., fmt='o', label='Data')\n ax2.errorbar(waves, wy, yerr=sigma**wyerr/3., fmt='o', label='Data')\n ax3.errorbar(waves, w, yerr=sigma*werr, fmt='o', label='Data')\n\n #fit a power law\n fitfunc = lambda p, x: p[0] * x ** p[1]\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n fit1, success = optimize.leastsq(errfunc, [1, -0.2], args=(waves, wx))\n fit2, success = optimize.leastsq(errfunc, [1, -0.2], args=(waves, wy))\n fit3, success = optimize.leastsq(errfunc, [1, -0.2], args=(waves, w))\n\n #requirement\n alpha=0.2\n x = np.arange(500, 950, 1)\n y = 37*x**-alpha\n # compute the best fit function from the best 
fit parameters\n corrfit1 = fitfunc(fit1, x)\n corrfit2 = fitfunc(fit2, x)\n corrfit3 = fitfunc(fit3, x)\n print 'Slope:', fit1[1]\n print 'Slope:', fit2[1]\n print 'Slope [requirement < -0.2]:', fit3[1]\n\n #ax1.plot(x, corrfit1, 'k-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (fit1[1]))\n #ax2.plot(x, corrfit2, 'k-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (fit2[1]))\n ax3.plot(x, y, 'r-', label=r'Requirement: $\\alpha \\leq - %.1f$' % alpha)\n #ax3.plot(x, corrfit3, 'k-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (fit3[1]))\n\n # Bayesian\n shift = 0.\n waves -= shift\n px, paramsx, errorsx, outliersx = powerlawFitWithOutliers(waves, wx, wxerr, outtriangle='WFWHMx.png')\n py, paramsy, errorsy, outliersy = powerlawFitWithOutliers(waves, wy, wyerr, outtriangle='WFWHMy.png')\n p, params, errors, outliers = powerlawFitWithOutliers(waves, w, werr, outtriangle='WFWHM.png')\n print paramsx[::-1], errorsx[::-1]\n print paramsy[::-1], errorsy[::-1]\n print params[::-1], errors[::-1]\n\n ax1.plot(x, paramsx[0]*(x-shift)**paramsx[1], 'g-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (paramsx[1]))\n ax2.plot(x, paramsy[0]*(x-shift)**paramsy[1], 'g-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (paramsy[1]))\n ax3.plot(x, params[0]*(x-shift)**params[1], 'g-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (params[1]))\n\n plt.sca(ax1)\n plt.xticks(visible=False)\n plt.sca(ax2)\n plt.xticks(visible=False)\n plt.sca(ax3)\n\n ax1.set_ylim(6.6, 13.5)\n ax2.set_ylim(6.6, 13.5)\n ax3.set_ylim(6.6, 13.5)\n ax1.set_xlim(550, 900)\n ax2.set_xlim(550, 900)\n ax3.set_xlim(550, 900)\n\n ax1.set_ylabel(r'FWHM$_{X} \\, [\\mu$m$]$')\n ax2.set_ylabel(r'FWHM$_{Y} \\, [\\mu$m$]$')\n ax3.set_ylabel(r'FWHM$\\, [\\mu$m$]$')\n ax3.set_xlabel('Wavelength [nm]')\n ax1.legend(shadow=True, fancybox=True, loc='best', numpoints=1)\n ax2.legend(shadow=True, fancybox=True, loc='best', numpoints=1)\n ax3.legend(shadow=True, fancybox=True, loc='best', numpoints=1)\n plt.savefig('LambdaDependency.pdf')\n plt.close()\n\n print 'R2:'\n R2 = _R2FromGaussian(wxpix, wypix)*1e3\n print zip(waves, R2)\n errR2 = _R2err(wxpix, wypix, wxerrpix, wyerrpix)*1e3\n p, params, errors, outliers = powerlawFitWithOutliers(waves, R2, errR2, outtriangle='WR2.png')\n print params[::-1], errors[::-1]\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.11, right=0.93)\n ax1.set_title('CCD273 PSF Wavelength Dependency')\n ax1.errorbar(waves, R2, yerr=sigma*errR2, fmt='o', label='Data')\n ax1.plot(x, params[0] * (x - shift)**params[1], 'm-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (params[1]))\n #ax1.plot(waves[outliers], R2[outliers], 'ro', ms=20, mfc='none', mec='red')\n ax1.set_ylabel(r'R^{2} \\, [$mas$^{2}]$')\n ax1.legend(shadow=True, fancybox=True, numpoints=1, loc='lower right')\n\n print 'Ellipticity:'\n ell = _ellipticityFromGaussian(wxpix, wypix) + 1\n print zip(waves, ell)\n ellerr = _ellipticityerr(wxpix, wypix, wxerrpix, wyerrpix)\n p, params, errors, outliers = powerlawFitWithOutliers(waves, ell, ellerr, outtriangle='Well.png')\n print params[::-1], errors[::-1]\n\n fitfunc = lambda p, x: p[0] * x ** p[1]\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n fit1, success = optimize.leastsq(errfunc, [2., -0.1], args=(waves, ell), maxfev=100000)\n print fit1[::-1]\n\n ax2.errorbar(waves, ell, yerr=sigma*ellerr, fmt='o', label='Data')\n ax2.plot(x, params[0] * (x - shift)**params[1], 'm-', label=r'Power Law Fit: $\\alpha 
\\sim %.2f $' % (params[1]))\n #ax2.plot(waves[outliers], ell[outliers], 'ro', ms=20, mfc='none', mec='red')\n ax1.legend(shadow=True, fancybox=True, numpoints=1, loc='lower right')\n ax2.legend(shadow=True, fancybox=True, numpoints=1)\n ax2.set_ylabel('Ellipticity')\n\n ax1.set_ylim(0.65, 2.5)\n ax2.set_ylim(1+-0.01, 1+0.16)\n\n plt.sca(ax1)\n plt.xticks(visible=False)\n\n plt.savefig('LambdaR2ell.pdf')\n plt.close()", "def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return", "def EDA(data):\n\t# mean value curve\n\n\tfig,axs = plt.subplots(5,1, sharey='all')\n\tfig.set_size_inches(10, 15)\n\tdata.groupby('weather').mean().plot(y='count', marker='o', ax=axs[0])\n\tdata.groupby('humidity').mean().plot(y='count', marker='o', ax=axs[1])\n\tdata.groupby('temp').mean().plot(y='count', marker='o', ax=axs[2])\n\tdata.groupby('windspeed').mean().plot(y='count', marker='o', ax=axs[3])\n\tprint('\\n')\n\tdata.groupby('hour').mean().plot(y='count', marker='o', ax=axs[4])\n\tplt.title('mean count per hour')\n\tplt.tight_layout()\n\tplt.show()\n\n\t# grouping scatter\n\tfig,axs = plt.subplots(2,3, sharey='all')\n\tfig.set_size_inches(12, 8)\n\tdata.plot(x='temp', y='count', kind='scatter', ax=axs[0,0], color='magenta')\n\tdata.plot(x='humidity', y='count', kind='scatter', ax=axs[0,1], color='bisque')\n\tdata.plot(x='windspeed', y='count', kind='scatter', ax=axs[0,2], color='coral')\n\tdata.plot(x='month', y='count', kind='scatter', ax=axs[1,0], color='darkblue')\n\tdata.plot(x='day', y='count', kind='scatter', ax=axs[1,1], color='cyan')\n\tdata.plot(x='hour', y='count', kind='scatter', ax=axs[1,2], color='deeppink')\n\tplt.tight_layout()\n\tplt.show()\n\n\t# correlation analysis\n\tcorrMatt = data[[\"temp\",\"atemp\",\"casual\",\"registered\",\"humidity\",\"windspeed\",\"count\"]].corr()\n\tmask = np.array(corrMatt)\n\tmask[np.tril_indices_from(mask)] = False\n\n\tfig,ax= plt.subplots()\n\tfig.set_size_inches(20,10)\n\tsn.heatmap(corrMatt, mask=mask, vmax=.8, square=True, 
annot=True, cmap=\"Greens\")\n\tplt.show()", "def main(\n error_band_dir,\n output_dir,\n indep_var,\n ivar_start,\n ivar_stop,\n ivar_step,\n param_list,\n observable_list,\n Lambda_b,\n lambda_mult,\n p_decimal_list,\n orders,\n ignore_orders,\n interaction,\n X_ref_hash,\n prior_set,\n h,\n cbar_lower,\n cbar_upper,\n sigma,\n convention\n ):\n\n color_dict = {\n \"LOp\": plt.get_cmap(\"Greys\"),\n \"LO\": plt.get_cmap(\"Purples\"),\n \"NLO\": plt.get_cmap(\"Oranges\"),\n \"N2LO\": plt.get_cmap(\"Greens\"),\n \"N3LO\": plt.get_cmap(\"Blues\"),\n \"N4LO\": plt.get_cmap(\"Reds\")\n }\n\n fill_transparency = 1\n x = np.arange(ivar_start, ivar_stop, ivar_step)\n\n fig = plt.figure(figsize=(3.4, 3.4))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_zorder(15)\n ax.set_axisbelow(False)\n\n if indep_var == \"theta\":\n param_var = \"energy\"\n indep_var_label = r\"$\\theta$ (deg)\"\n param_var_label = r\"$E_{\\mathrm{lab}}\"\n param_var_units = r\"$ MeV\"\n xmin = 0\n xmax = 180\n else:\n param_var = \"theta\"\n indep_var_label = r\"$E$ (MeV)\"\n param_var_label = r\"$\\theta\"\n param_var_units = r\"^\\circ$\"\n xmin = 0\n xmax = 350\n\n for observable in observable_list:\n for param in param_list:\n if observable == [\"t\", \"t\", \"t\", \"t\"] or \\\n (observable == [\"0\", \"0\", \"0\", \"0\"] and indep_var == \"energy\"):\n ax.set_yscale(\"log\", nonposy='clip')\n\n ax.set_xlabel(indep_var_label)\n # ax.set_ylabel('')\n\n # if indep_var == \"theta\":\n # major_tick_spacing = 60\n # minor_tick_spacing = 20\n # ax.xaxis.set_major_locator(\n # ticker.MultipleLocator(major_tick_spacing))\n # ax.xaxis.set_minor_locator(\n # ticker.MultipleLocator(minor_tick_spacing))\n\n if indep_var == \"energy\":\n major_ticks = np.arange(0, 351, 100)\n # minor_ticks = np.arange(50, 351, 100)\n x_minor_locator = AutoMinorLocator(n=2)\n elif indep_var == \"theta\":\n major_ticks = np.arange(0, 181, 60)\n # minor_ticks = np.arange(0, 181, 20)\n x_minor_locator = AutoMinorLocator(n=3)\n\n ax.xaxis.set_minor_locator(x_minor_locator)\n ax.set_xticks(major_ticks)\n\n # Create the description box\n # text_str = r\"$C_{\" + observable[0] + observable[1] + \\\n # observable[2] + observable[3] + r\"}$\" + \", \" # + \"\\n\"\n text_str = indices_to_observable_name(observable)\n if observable == ['t', 't', 't', 't']:\n text_str += \" (mb)\"\n elif observable == ['0', '0', '0', '0']:\n text_str += \" (mb/sr)\"\n\n # Probably don't include this extra info. 
Leave for caption.\n #\n # text_str += \", \"\n # if observable != ['t', 't', 't', 't']:\n # text_str += \", \" + param_var_label + r\" = \" + str(param) + param_var_units + \", \" # + \"\\n\"\n # text_str += r\"$\\Lambda_b = \" + str(lambda_mult*Lambda_b) + r\"$ MeV\"\n\n # Don't put in a text box\n #\n # ax.text(.5, .96, text_str,\n # horizontalalignment='center',\n # verticalalignment='top',\n # multialignment='center',\n # transform=ax.transAxes,\n # bbox=dict(facecolor='white', alpha=1, boxstyle='square', pad=.5),\n # zorder=20)\n\n # Don't put in the title\n # plt.title(text_str, fontsize=10)\n\n # Instead use y axis\n ax.set_ylabel(text_str, fontsize=10)\n legend_patches = []\n\n try:\n npwa_name = npwa_filename(observable, param_var, param)\n npwa_file = DataFile().read(os.path.join(\"../npwa_data/\", npwa_name))\n npwa_plot, = ax.plot(npwa_file[0], npwa_file[1],\n color=\"black\", linewidth=1,\n label=\"NPWA\", zorder=10,\n linestyle=\"--\")\n except FileNotFoundError:\n npwa_plot = None\n\n # First get global min/max of all orders\n for i, order in enumerate(orders):\n # obs_name = observable_filename(\n # observable, indep_var, ivar_start, ivar_stop,\n # ivar_step, param_var, param, order)\n # dob_name = dob_filename(p_decimal_list[0], Lambda_b, obs_name)\n dob_name = dob_filename(\n observable, indep_var, ivar_start, ivar_stop,\n ivar_step, param_var, param, order, ignore_orders,\n Lambda_b, lambda_mult, X_ref_hash,\n p_decimal_list[0], prior_set, h, convention, None,\n cbar_lower, cbar_upper, sigma,\n potential_info=None)\n dob_file = DataFile().read(os.path.join(error_band_dir, dob_name))\n if i == 0:\n obs = dob_file[1]\n obs_min = np.min(obs)\n obs_max = np.max(obs)\n else:\n old_obs = obs\n obs = dob_file[1]\n # Probably the worst way to do this.\n obs_min = min(np.min(np.minimum(old_obs, obs)), obs_min)\n obs_max = max(np.max(np.maximum(old_obs, obs)), obs_max)\n\n # Decide the padding above/below the lines\n # This weights values far from 0 more heavily.\n # ymin = obs_min - .25 * abs(obs_min)\n # ymax = obs_max + .25 * abs(obs_max)\n ymin = -1\n ymax = 20\n ax.set_ylim([ymin, ymax])\n # ax.set_xlim([ivar_start, ivar_stop-1])\n ax.set_xlim([xmin, xmax])\n\n # Start layering the plots\n for i, order in enumerate(orders):\n # obs_name = observable_filename(\n # observable, indep_var, ivar_start, ivar_stop,\n # ivar_step, param_var, param, order)\n # dob_name = dob_filename(p_decimal_list[0], Lambda_b, obs_name)\n dob_name = dob_filename(\n observable, indep_var, ivar_start, ivar_stop,\n ivar_step, param_var, param, order, ignore_orders,\n Lambda_b, lambda_mult, X_ref_hash,\n p_decimal_list[0], prior_set, h, convention, None,\n cbar_lower, cbar_upper, sigma,\n potential_info=None)\n dob_file = DataFile().read(os.path.join(error_band_dir, dob_name))\n\n # Plot the lines\n obs = dob_file[1]\n ax.plot(x, obs, color=color_dict[order](.99), zorder=i)\n\n # Plot the error bands\n for band_num, p in enumerate(sorted(p_decimal_list, reverse=True)):\n # dob_name = dob_filename(p, Lambda_b, obs_name)\n dob_name = dob_filename(\n observable, indep_var, ivar_start, ivar_stop,\n ivar_step, param_var, param, order, ignore_orders,\n Lambda_b, lambda_mult, X_ref_hash,\n p, prior_set, h, convention, None,\n cbar_lower, cbar_upper, sigma,\n potential_info=None)\n dob_file = DataFile().read(os.path.join(error_band_dir, dob_name))\n obs_lower = dob_file[2]\n obs_upper = dob_file[3]\n ax.fill_between(\n x, obs_lower, obs_upper,\n facecolor=color_dict[order](\n (band_num + 1) / 
(len(p_decimal_list) + 1)\n ),\n color=color_dict[order](\n (band_num + 1) / (len(p_decimal_list) + 1)\n ),\n alpha=fill_transparency, interpolate=True, zorder=i)\n\n # Use block patches instead of lines\n # Use innermost \"dark\" color of bands for legend\n # legend_patches.append(\n # mp.patches.Patch(\n # color=color_dict[order](len(p_decimal_list) / (len(p_decimal_list) + 1)),\n # label=order,\n # ))\n legend_patches.append(\n mpatches.Rectangle(\n (1, 1), 0.25, 0.25,\n # color=color_dict[order](len(p_decimal_list) / (len(p_decimal_list) + 1)),\n edgecolor=color_dict[order](.9),\n facecolor=color_dict[order](len(p_decimal_list) / (len(p_decimal_list) + 1)),\n label=order,\n linewidth=1\n ))\n\n if npwa_plot is None:\n my_handles = legend_patches\n handler_dict = dict(zip(my_handles, [HandlerSquare() for i in legend_patches]))\n else:\n my_handles = [npwa_plot, *legend_patches]\n squares = [HandlerSquare() for i in legend_patches]\n line = HandlerLine2D(marker_pad=1, numpoints=None)\n handler_dict = dict(zip(my_handles, [line] + squares))\n\n ax.legend(loc=\"best\", handles=my_handles,\n handler_map=handler_dict,\n handletextpad=.8,\n handlelength=.6,\n fontsize=8)\n\n # Squeeze and save it\n plt.tight_layout()\n # plot_name = plot_obs_error_bands_filename(\n # observable, indep_var, ivar_start, ivar_stop,\n # ivar_step, param_var, param, orders[:i+1],\n # Lambda_b, p_decimal_list)\n plot_name = plot_obs_error_bands_filename(\n observable, indep_var, ivar_start, ivar_stop, ivar_step,\n param_var, param, orders[:i+1], ignore_orders, Lambda_b,\n lambda_mult, X_ref_hash, p_decimal_list,\n prior_set, h, convention, None, cbar_lower, cbar_upper,\n sigma, potential_info=None)\n fig.savefig(os.path.join(output_dir, plot_name), bbox_inches=\"tight\")\n\n call([\"epstopdf\", os.path.join(output_dir, plot_name)])\n call([\"rm\", os.path.join(output_dir, plot_name)])\n\n # Clear the axes for the next observable/parameter.\n plt.cla()", "def test_plot_hid(self):\n # also produce a light curve with the same binning\n command = ('{0} -b 100 --e-interval {1} {2}').format(\n os.path.join(self.datadir, 'monol_testA_nustar_fpma_ev_calib' +\n HEN_FILE_EXTENSION), 3, 10)\n\n hen.lcurve.main(command.split())\n lname = os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_E3-10_lc') + \\\n HEN_FILE_EXTENSION\n os.path.exists(lname)\n cname = os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_E_10-5_over_5-3') + \\\n HEN_FILE_EXTENSION\n hen.plot.main([cname, lname, '--noplot', '--xlog', '--ylog', '--HID',\n '-o', 'dummy.qdp'])", "def make_area_plots(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\"):\n\n df = df.copy()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) if \"evidence\" in i else i for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n trials = 'trial_number'\n methods = 'Method'\n\n # Make area plot\n uniq_methods = set(df[\"Method\"].values)\n method_order = sorted(uniq_methods,\n key=lambda x : x if isinstance(x, float) else -1)\n method_df = []\n datasets = set()\n for data, sub_df in df.groupby(\"Data\"):\n # Add datasets\n datasets.add(data)\n x_vals = sub_df[x_input]\n y_vals = sub_df[y_input]\n methods_sub = sub_df[\"Method\"]\n trials_sub= sub_df['trial_number']\n for method_idx, method in enumerate(method_order):\n # Now summarize these lines\n bool_select = (methods_sub == method)\n lines_y = 
y_vals[bool_select]\n lines_x = x_vals[bool_select]\n trials_temp = trials_sub[bool_select]\n areas = []\n # create area!\n for trial, line_x, line_y in zip(trials_sub, lines_x, lines_y):\n new_y = np.abs(np.array(line_y) - np.array(line_x))\n area = simps(new_y, line_x)\n to_append = {\"Area from parity\": area,\n \"Regularizer Coeff, $\\lambda$\": method,\n \"method_name\": method,\n \"Data\": data,\n \"Trial\" : trial}\n method_df.append(to_append)\n method_df = pd.DataFrame(method_df)\n method_df_evidence = method_df[[isinstance(i, float) for i in\n method_df['method_name']]].reset_index()\n method_df_ensemble = method_df[[\"ensemble\" in str(i) for i in\n method_df['method_name']]].reset_index()\n data_colors = {\n dataset : sns.color_palette()[index]\n for index, dataset in enumerate(datasets)\n }\n\n min_x = np.min(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n max_x= np.max(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n\n sns.lineplot(x=\"Regularizer Coeff, $\\lambda$\", y=\"Area from parity\",\n hue=\"Data\", alpha=0.8, data=method_df_evidence,\n palette = data_colors)\n\n for data, subdf in method_df_ensemble.groupby(\"Data\"):\n\n color = data_colors[data]\n area = subdf[\"Area from parity\"].mean()\n std = subdf[\"Area from parity\"].std()\n plt.hlines(area, min_x, max_x, linestyle=\"--\", color=color, alpha=0.8)\n\n ensemble_line = plt.plot([], [], color='black', linestyle=\"--\",\n label=\"Ensemble\")\n # Now make ensemble plots\n plt.legend(bbox_to_anchor=(1.1, 1.05))", "def plot_hill_func(self,sim_run=None,trace=0,synapse=0,average=False):\n\n if sim_run is None:\n sim_run = self.default_runs[0]\n\n cav_hits = sim_run.data[\"Ca_t\"][:,trace,synapse]\n\n p_v_func = hill(np.arange(200)/100.,S=1,ec50=sim_run.params[\"ca_ec50\"],n=sim_run.params[\"ca_coop\"])\n plt.plot(np.arange(200)/100.,p_v_func)\n for i in range(len(cav_hits)):\n plt.plot((cav_hits[i],cav_hits[i]),(0,1))\n plt.ylabel('Probbility of Vesicle Release')\n plt.xlabel('Calcium Concentration (arb. 
units)')\n plt.title('Location of [Ca] response on Hill Function for sequential APs')\n plt.show()", "def view_datashade(self):\n # Select only sufficient data\n if self.x in self.y:\n self.y.remove(self.x)\n if self.y == []:\n return self.gif\n\n df = self.dataframe[[self.x] + self.y].copy()\n plot_opts = {\n 'Scatter': {'color': self.color_key, 'marker': self.marker_keys, 'size':10},\n 'Curve': {'color': self.color_key}\n }\n lines_overlay = df.hvplot.scatter(**self.plot_options).options(plot_opts)\n\n def hover_curve(x_range=[df.index.min(), df.index.max()]): # , y_range):\n # Compute\n dataframe = df.copy()\n if x_range is not None:\n dataframe = dataframe[(dataframe[self.x] > x_range[0]) & (dataframe[self.x] < x_range[1])]\n data_length = len(dataframe) * len(dataframe.columns)\n step = 1 if data_length < self.max_step else data_length // self.max_step\n \n plot_df = dataframe[::step].hvplot.line(**self.plot_options) * \\\n dataframe[::step*60].hvplot.scatter(**self.plot_options) \n plot_opts = {\n 'Scatter': {'color': 'k', 'marker': self.marker_keys, 'size':10},\n 'Curve': {'color': self.color_key}\n }\n if len(self.y) != 1:\n plot_opts['Scatter']['color'] = self.color_key\n return plot_df.options(plot_opts)\n\n # Define a RangeXY stream linked to the image\n rangex = hv.streams.RangeX(source=lines_overlay)\n data_shade_plot = hv.DynamicMap(hover_curve, streams=[rangex])\n if len(self.y) == 1:\n data_shade_plot *= datashade(lines_overlay)\n else:\n data_shade_plot *= datashade(lines_overlay, aggregator=ds.count_cat('Variable'))\n return pn.panel(data_shade_plot)", "def dline_tdepl_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n dline_tdepl(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add=True,cb=True)\n dline_tdepl(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add=True,cb=False)\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/dlines_tdepl_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def plot_lfads(x_txd, avg_lfads_dict, data_dict=None, dd_bidx=None,\n renorm_fun=None):\n print(\"bidx: \", dd_bidx)\n ld = avg_lfads_dict\n\n def remove_outliers(A, nstds=3):\n clip = nstds * onp.std(A)\n A_mean = onp.mean(A)\n A_show = onp.where(A < A_mean - clip, A_mean - clip, A)\n return onp.where(A_show > A_mean + clip, A_mean + clip, A_show)\n \n f = plt.figure(figsize=(12,12))\n plt.subplot(361)\n plt.imshow(x_txd.T)\n plt.title('x')\n\n plt.subplot(362)\n x_enc = remove_outliers(ld['xenc_t'])\n plt.imshow(x_enc.T)\n plt.title('x enc')\n\n plt.subplot(363)\n gen = remove_outliers(ld['gen_t'])\n plt.imshow(gen.T)\n plt.title('generator')\n\n plt.subplot(364)\n factors = remove_outliers(ld['factor_t'])\n plt.imshow(factors.T)\n plt.title('factors')\n\n if data_dict is not None:\n true_rates = renorm_fun(data_dict['hiddens'][dd_bidx])\n plt.subplot(366)\n plt.imshow(true_rates.T)\n plt.title('True rates')\n\n plt.subplot(365)\n rates = remove_outliers(onp.exp(ld['lograte_t']))\n plt.imshow(rates.T)\n plt.title('rates') \n\n plt.subplot(334)\n ic_mean = ld['ic_mean']\n ic_std = onp.exp(0.5*ld['ic_logvar'])\n plt.stem(ic_mean)\n plt.title('g0 
mean')\n\n plt.subplot(335)\n con = remove_outliers(ld['c_t'])\n plt.imshow(con.T)\n plt.title('controller')\n\n plt.subplot(336)\n ii_mean = ld['ii_mean_t']\n plt.plot(ii_mean, 'b')\n if data_dict is not None:\n true_input = data_dict['inputs'][dd_bidx]\n slope, intercept, r_value, p_value, std_err = \\\n stats.linregress(true_input.T, ii_mean.T)\n plt.plot(slope*true_input + intercept, 'm', lw=2)\n #plt.plot(ld['ii_t'], 'k')\n plt.title('inferred input mean')\n plt.legend(('LFADS inferred input', 'rescaled true input to integrator RNN'))\n \n plt.subplot(313)\n ntoplot=8\n a = 0.25\n plt.plot(rates[:, 0:ntoplot] + a*onp.arange(0, ntoplot, 1), 'b')\n plt.plot(true_rates[:, 0:ntoplot] + a*onp.arange(0, ntoplot, 1), 'r')\n plt.title('LFADS rates (blue), True rates (red)')\n plt.xlabel('timesteps')\n \n return f", "def add_plot(self, state, data, y_axis, function=lambda x: x, x_axis='freq', ax=None, title=None, log_axis='x', save=True, show=False):\n\n functions_dict = {'x': plt.semilogx, 'y': plt.semilogx, 'both': plt.loglog, 'none': plt.plot}\n #TODO: Unfinished\n\n if ax is None:\n fig = plt.figure()\n ax = plt.gca()\n\n if title is None:\n title = y_axis\n\n # import IPython\n sweep_kwrds = data['sweep_params'][y_axis]\n sweep_kwrds = [kwrd for kwrd in sweep_kwrds if kwrd != x_axis]\n # combos = itertools.product(*(list(range(len(data[swp_kwrd]))) for swp_kwrd in sweep_kwrds))\n\n # IPython.embed()\n # if combos:\n # for index in combos:\n # functions_dict[log_axis](data[x_axis], np.abs(data[y_axis][index, :]))\n # else:\n functions_dict[log_axis](data[x_axis], function(data[y_axis]))\n plt.ylabel(y_axis)\n plt.xlabel(x_axis)\n ax.grid()\n if save:\n fname = os.path.join(self.data_dir, title + \".png\")\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n if show:\n plt.show()", "def dtw_plot_appendix(output = 'output/img/series/dtw'):\n # regions\n regs = _src.regions()\n regsL = [(v) for v in regs.values()]\n \n # iterate\n for r1 in range(len(regs)):\n for r2 in range(r1+1, len(regs)):\n dtw_plot(regsL[r1]['NUTS3'], regsL[r2]['NUTS3'],\n output = output, font_size = 12)", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # 
plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def descriptive_plot(data_onlyDV):\n outcome = data_onlyDV.columns.values[0] # get the outcome column name\n\n fig = plt.figure()\n # TODO: subplots appear in same frame instead of 3 separate ones (!!!)\n ax1 = fig.add_subplot(121)\n ax1 = data_onlyDV.plot(kind='hist', title=\"Histogram: \"+outcome, by=outcome)\n ax1.locator_params(axis='x', nbins=4)\n ax1.set_xlabel(outcome+\" bins\")\n ax1.set_ylabel(\"Num Instances\")\n\n ax2 = fig.add_subplot(122)\n ax2 = data_onlyDV.plot(kind='kde', title=\"KDE Density Plot: \"+outcome)\n\n fig.tight_layout()\n plt.show()", "def _2d_plot_samples(self, **kwargs):\n\n from pesummary.core.plots.bounded_2d_kde import Bounded_2d_kde\n\n # get bounds\n lows = []\n highs = []\n methods = []\n for param in self.parameters[0:2]:\n if param in DEFAULT_BOUNDS:\n lows.append(\n DEFAULT_BOUNDS[param][\"low\"]\n if \"low\" in DEFAULT_BOUNDS[param]\n else None\n )\n highs.append(\n DEFAULT_BOUNDS[param][\"high\"]\n if \"high\" in DEFAULT_BOUNDS[param]\n else None\n )\n methods.append(\n DEFAULT_BOUNDS[param][\"method\"]\n if \"method\" in DEFAULT_BOUNDS[param]\n else \"Reflection\"\n )\n\n if self.plottype == \"triangle\":\n from pesummary.core.plots.publication import triangle_plot as plotfunc\n elif self.plottype == \"reverse_triangle\":\n from pesummary.core.plots.publication import (\n reverse_triangle_plot as plotfunc,\n )\n else:\n # contour plot\n from pesummary.core.plots.publication import (\n comparison_twod_contour_plot as plotfunc,\n )\n\n # set KDE information\n kwargs.update(\n {\n \"kde\": Bounded_2d_kde,\n \"kde_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n }\n )\n\n # default to not showing data points\n if \"plot_datapoints\" not in kwargs:\n kwargs[\"plot_datapoints\"] = False\n\n if \"triangle\" in self.plottype:\n from pesummary.core.plots.bounded_1d_kde import bounded_1d_kde\n\n # set KDE informaiton\n kwargs.update(\n {\n \"kde_2d\": Bounded_2d_kde,\n \"kde_2d_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n \"kde\": bounded_1d_kde,\n }\n )\n\n kwargs[\"kde_kwargs\"] = {\n 
\"x_axis\": {\"xlow\": lows[0], \"xhigh\": highs[0], \"method\": methods[0]},\n \"y_axis\": {\"xlow\": lows[1], \"xhigh\": highs[1], \"method\": methods[1]},\n }\n\n args = [\n [samps[self.parameters[0]].values for samps in self._samples.values()],\n [samps[self.parameters[1]].values for samps in self._samples.values()],\n ]\n\n if \"xlabel\" not in kwargs:\n kwargs[\"xlabel\"] = self.latex_labels[self.parameters[0]]\n if \"ylabel\" not in kwargs:\n kwargs[\"ylabel\"] = self.latex_labels[self.parameters[1]]\n\n if \"labels\" not in kwargs and len(self.results) > 1:\n kwargs[\"labels\"] = list(self._samples.keys())\n\n # set injection parameter values\n if self.injection_parameters is not None:\n if (\n self.injection_parameters[self.parameters[0]] is not None\n and self.injection_parameters[self.parameters[1]] is not None\n ):\n kwargname = \"truths\" if self.plottype == \"corner\" else \"truth\"\n kwargs[kwargname] = [\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]],\n self.injection_parameters[self.parameters[1]]\n - self.parameter_offsets[self.parameters[1]],\n ]\n\n # create plot\n with DisableLogger():\n fig = plotfunc(*args, **kwargs)\n\n return fig", "def plot_calibration(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\",\n x_name=\"Mean Predicted\",\n y_name=\"Empirical Probability\",\n method_order = METHOD_ORDER, \n avg_x = False):\n\n methods = df['method_name']\n uniq_methods = pd.unique(methods)\n method_order = [j for j in METHOD_ORDER if j in uniq_methods]\n method_df = []\n\n if avg_x: \n df_copy = df.copy()\n new_list = [0]\n new_x_map = {}\n for method in uniq_methods: \n temp_vals = df[df['method_name'] == method][x_input]\n new_ar = np.vstack(temp_vals)\n new_ar = np.nanmean(new_ar, 0) # avg columnwise\n new_x_map[method] = new_ar\n df_copy[x_input] = [new_x_map[method] for method in methods]\n df = df_copy\n\n x, y = df[x_input].values, df[y_input].values\n\n\n method_df = [{x_name : xx, y_name : yy, \"Method\" : method}\n for x_i, y_i, method in zip(x, y, methods)\n for xx,yy in zip(x_i,y_i)]\n method_df = pd.DataFrame(method_df)\n sns.lineplot(x=x_name, y=y_name, hue=\"Method\", alpha=0.8,\n hue_order=method_order,\n data=method_df,\n palette = METHOD_COLORS)\n x = np.linspace(0,1,100)\n plt.plot(x, x, linestyle='--', color=\"black\")", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], 
linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def view_datashade(self):\n # Select only sufficient data\n if self.x in self.y:\n self.y.remove(self.x)\n if self.y == []:\n return self.gif\n\n df = self.dataframe[[self.x] + self.y].copy()\n lines_overlay = df.hvplot(**self.plot_options).options({'Curve': {'color': self.color_key}})\n\n def hover_curve(x_range=[df.index.min(), df.index.max()]): # , y_range):\n # Compute\n dataframe = df.copy()\n if x_range is not None:\n dataframe = dataframe[(dataframe[self.x] > x_range[0]) & (dataframe[self.x] < x_range[1])]\n data_length = len(dataframe) * len(dataframe.columns)\n step = 1 if data_length < self.max_step else data_length // self.max_step\n plot_df = dataframe[::step].hvplot(**self.plot_options)\n if len(self.y) == 1:\n return plot_df.options({'Curve': {'color': '#377eb8'}})\n else:\n return plot_df.options({'Curve': {'color': self.color_key}})\n\n # Define a RangeXY stream linked to the image\n rangex = hv.streams.RangeX(source=lines_overlay)\n data_shade_plot = hv.DynamicMap(hover_curve, streams=[rangex])\n if len(self.y) == 1:\n data_shade_plot *= datashade(lines_overlay)\n else:\n data_shade_plot *= datashade(lines_overlay, aggregator=ds.count_cat('Variable'))\n return pn.panel(data_shade_plot)", "def plot(self, noTLS, path_plots, interactive):\n fig = plt.figure(figsize=(10,12))\n ax1 = fig.add_subplot(4, 1, 1)\n ax2 = fig.add_subplot(4, 1, 2)\n ax3 = fig.add_subplot(4, 2, 5)\n ax4 = fig.add_subplot(4, 2, 6)\n ax5 = fig.add_subplot(4, 2, 7)\n ax6 = fig.add_subplot(4, 2, 8)\n\n # First panel: data from each sector\n colors = self._get_colors(self.nlc)\n for i, lci in enumerate(self.alllc):\n p = lci.normalize().remove_outliers(sigma_lower=5.0, sigma_upper=5.0)\n p.bin(5).scatter(ax=ax1, label='Sector %d' % self.sectors[i], color=colors[i])\n self.trend.plot(ax=ax1, color='orange', lw=2, label='Trend')\n ax1.legend(fontsize='small', ncol=4)\n\n # Second panel: Detrended light curve\n self.lc.remove_outliers(sigma_lower=5.0, sigma_upper=5.0).bin(5).scatter(ax=ax2,\n color='black',\n label='Detrended')\n\n # Third panel: BLS\n self.BLS.bls.plot(ax=ax3, label='_no_legend_', color='black')\n mean_SR = np.mean(self.BLS.power)\n std_SR = np.std(self.BLS.power)\n best_power = self.BLS.power[np.where(self.BLS.period.value == self.BLS.period_max)[0]]\n SDE = (best_power - mean_SR)/std_SR\n ax3.axvline(self.BLS.period_max, alpha=0.4, lw=4)\n for n in range(2, 10):\n if n*self.BLS.period_max <= max(self.BLS.period.value):\n ax3.axvline(n*self.BLS.period_max, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax3.axvline(self.BLS.period_max / n, alpha=0.4, lw=1, linestyle=\"dashed\")\n sx, ex = ax3.get_xlim()\n sy, ey = ax3.get_ylim()\n ax3.text(ex-(ex-sx)/3, ey-(ey-sy)/3,\n 'P$_{MAX}$ = %.3f d\\nT0 = %.2f\\nDepth = %.4f\\nDuration = %.2f d\\nSDE = %.3f' %\n (self.BLS.period_max, self.BLS.t0_max,\n self.BLS.depth_max, self.BLS.duration_max, SDE))\n\n\n # Fourth panel: lightcurve folded to the best period from the BLS\n self.folded.bin(1*self.nlc).scatter(ax=ax4, label='_no_legend_', color='black',\n marker='.', alpha=0.5)\n l = max(min(4*self.BLS.duration_max/self.BLS.period_max, 0.5), 0.02)\n nbins = int(50*0.5/l)\n r1, dt1 = binningx0dt(self.folded.phase, 
self.folded.flux, x0=-0.5, nbins=nbins)\n ax4.plot(r1[::,0], r1[::,1], marker='o', ls='None',\n color='orange', markersize=5, markeredgecolor='orangered', label='_no_legend_')\n\n lc_model = self.BLS.bls.get_transit_model(period=self.BLS.period_max,\n duration=self.BLS.duration_max,\n transit_time=self.BLS.t0_max)\n lc_model_folded = lc_model.fold(self.BLS.period_max, t0=self.BLS.t0_max)\n ax4.plot(lc_model_folded.phase, lc_model_folded.flux, color='green', lw=2)\n ax4.set_xlim(-l, l)\n h = max(lc_model.flux)\n l = min(lc_model.flux)\n ax4.set_ylim(l-4.*(h-l), h+5.*(h-l))\n del lc_model, lc_model_folded, r1, dt1\n\n\n if not noTLS:\n # Fifth panel: TLS periodogram\n ax5.axvline(self.tls.period, alpha=0.4, lw=3)\n ax5.set_xlim(np.min(self.tls.periods), np.max(self.tls.periods))\n for n in range(2, 10):\n ax5.axvline(n*self.tls.period, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax5.axvline(self.tls.period / n, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax5.set_ylabel(r'SDE')\n ax5.set_xlabel('Period (days)')\n ax5.plot(self.tls.periods, self.tls.power, color='black', lw=0.5)\n ax5.set_xlim(0, max(self.tls.periods))\n\n period_tls = self.tls.period\n T0_tls = self.tls.T0\n depth_tls = self.tls.depth\n duration_tls = self.tls.duration\n FAP_tls = self.tls.FAP\n\n sx, ex = ax5.get_xlim()\n sy, ey = ax5.get_ylim()\n ax5.text(ex-(ex-sx)/3, ey-(ey-sy)/3,\n 'P$_{MAX}$ = %.3f d\\nT0 = %.1f\\nDepth = %.4f\\nDuration = %.2f d\\nFAP = %.4f' %\n (period_tls, T0_tls, 1.-depth_tls, duration_tls, FAP_tls))\n\n # Sixth panel: folded light curve to the best period from the TLS\n ax6.plot(self.tls.folded_phase, self.tls.folded_y, color='black', marker='.',\n alpha=0.5, ls='None', markersize=0.7)\n l = max(min(4*duration_tls/period_tls, 0.5), 0.02)\n nbins = int(50*0.5/l)\n r1, dt1 = binningx0dt(self.tls.folded_phase, self.tls.folded_y,\n x0=0.0, nbins=nbins, useBinCenter=True)\n ax6.plot(r1[::,0], r1[::,1], marker='o', ls='None', color='orange',\n markersize=5, markeredgecolor='orangered', label='_no_legend_')\n ax6.plot(self.tls.model_folded_phase, self.tls.model_folded_model, color='green', lw=2)\n ax6.set_xlim(0.5-l, 0.5+l)\n h = max(self.tls.model_folded_model)\n l = min(self.tls.model_folded_model)\n ax6.set_ylim(l-4.*(h-l), h+5.*(h-l))\n ax6.set_xlabel('Phase')\n ax6.set_ylabel('Relative flux')\n del r1, dt1\n\n fig.subplots_adjust(top=0.98, bottom=0.05, wspace=0.25, left=0.1, right=0.97)\n fig.savefig(os.path.join(path_plots, 'TIC%d.pdf' % self.TIC))\n if interactive:\n plt.show()\n plt.close('all')\n del fig", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n\n sumtable = dtables['biasoscorr_sum']\n runtable = dtables['runs']\n\n yvals_s = sumtable['s_correl_mean'].flatten().clip(0., 1.)\n yvals_p = sumtable['p_correl_mean'].flatten().clip(0., 1.)\n runs = runtable['runs']\n\n figs.plot_run_chart(\"mean-s\", runs, yvals_s,\n ylabel=\"Correlation between serial overscan and imaging\")\n figs.plot_run_chart(\"mean-p\", runs, yvals_p,\n ylabel=\"Correlation between parallel overscan and imaging\")" ]
[ "0.5631575", "0.56224716", "0.55810195", "0.5569204", "0.544854", "0.54324406", "0.5401097", "0.5397317", "0.53738916", "0.53561234", "0.5292456", "0.52792794", "0.52540356", "0.52267444", "0.5214065", "0.52021885", "0.5194347", "0.5180309", "0.5170628", "0.51551074", "0.5147858", "0.5143375", "0.5120442", "0.5089592", "0.5088283", "0.5071791", "0.5065929", "0.5061781", "0.50574464", "0.50520223" ]
0.5738737
0
Searches for a winning sequence in the columns.
def check_columns(self, win: list) -> bool:
    for row in range(self.size):
        column = [self.tags[x][row] for x in range(self.size)]
        for j in range(len(column) - len(win) + 1):
            if win == column[j:j+self.win_condition]:
                return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def col_win(board, player):\n for row in board.T:\n if check_row(row, player):\n return True\n return False", "def check_columns():\n global game_still_going\n # Check if any of the rows have all the same value.\n column1 = board[0] == board[3] == board[6] != '_'\n column2 = board[1] == board[4] == board[7] != '_'\n column3 = board[2] == board[5] == board[8] != '_'\n # If any column does have a match, then game still going to False.\n if column1 or column2 or column3:\n game_still_going = False\n # Return winner 'X' or 'O'.\n if column1:\n return board[0]\n if column2:\n return board[1]\n if column3:\n return board[2]", "def win_column(playerid):\n\n if board[0][0] is playerid and board[1][0] is playerid and board[2][0] is playerid:\n return (True, \"Column 1\")\n\n if board[0][1] is playerid and board[1][1] is playerid and board[2][1] is playerid:\n return (True, \"Column 2\")\n\n if board[0][2] is playerid and board[1][2] is playerid and board[2][2] is playerid:\n return (True, \"Column 3\")\n\n return False", "def col_win(board):\n\tfor col in range(3):\n\t\tif board[0][col] != EMPTY and board[0][col] == board[1][col] == board[2][col]:\n\t\t\treturn True\n\treturn False", "def _check_column(self):\n for column in np.transpose(self._board):\n col_string = ''.join(column)\n match = re.search(WIN_REGEX, col_string)\n if match:\n return match.group()[0]\n return None", "def search_next_win(self, player):\n for i, j, k in self.winning_cases:\n if self.game_board[i] == player and \\\n self.game_board[j] == player and \\\n self.game_board[k] == ' ':\n return k\n elif self.game_board[j] == player and \\\n self.game_board[k] == player and \\\n self.game_board[i] == ' ':\n return i\n elif self.game_board[i] == player and \\\n self.game_board[k] == player and \\\n self.game_board[j] == ' ':\n return j\n return None", "def check_columns():\n global ongoing_game\n column_1 = board[0] == board[3] == board[6] != \"*\"\n column_2 = board[1] == board[4] == board[7] != \"*\"\n column_3 = board[2] == board[5] == board[8] != \"*\"\n if column_1 or column_2 or column_3:\n ongoing_game = False\n if column_1:\n return board[0]\n elif column_2:\n return board[1]\n elif column_3:\n return board[2]\n else:\n return None", "def check_column(self, column, symbol):\r\n\r\n tally = 0\r\n for row in range(3):\r\n if self.board[row][column][1] == symbol:\r\n tally += 1\r\n if tally == 3:\r\n self.winner = symbol", "def test_row_col(board, rows):\n for i in range(len(board)):\n cur_player = board[i][0] if rows else board[0][i]\n in_a_row = 0\n for j in range(len(board)):\n symbol = board[i][j] if rows else board[j][i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1\n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0", "def column_wise_checking(player_):\n if board[0] == board[3] == player_:\n return 6\n elif board[3] == board[6] == player_:\n return 0\n elif board[1] == board[4] == player_:\n return 7\n elif board[4] == board[7] == player_:\n return 1\n elif board[2] == board[5] == player_:\n return 8\n elif board[5] == board[8] == player_:\n return 2\n else:\n return -1", "def column_similarity (self, row, col):\n my_number = self.board[row][col]\n \n for i in range (9):\n if (i,col) == (row,col):\n continue\n elif self.board[i][col] == my_number:\n return [i, col, False] \n else:\n continue", "def find_column_index(self, columns):\n for i in range(len(columns)):\n if self.match(columns[i]):\n return i\n return None", "def 
check_for_win(self, row, col, player): \n\n count = 0\n for i in range(0, len(self.board[0])):\n # Check vertical\n if self.board[row][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n\n count = 0\n for i in range(0, len(self.board)):\n # Check horisontal\n if self.board[:, col][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n \n count = 0\n totoffset = col - row\n for i in np.diagonal(self.board, offset=totoffset):\n # Check diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True\n\n count = 0\n mirrorboard = np.fliplr(self.board)\n col = self.colswitch[col]\n totoffset = col - row\n for i in np.diagonal(mirrorboard, offset=totoffset):\n # Check other diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True", "def winner_found(self):\n\n first_row = self.find_three_in_row([self._board[0][0], self._board[0][1], self._board[0][2]])\n second_row = self.find_three_in_row([self._board[1][0], self._board[1][1], self._board[1][2]])\n third_row = self.find_three_in_row([self._board[2][0], self._board[2][1], self._board[2][2]])\n winner_in_rows = first_row or second_row or third_row\n\n first_column = self.find_three_in_row([self._board[0][0], self._board[1][0], self._board[2][0]])\n second_column = self.find_three_in_row([self._board[0][1], self._board[1][1], self._board[2][1]])\n third_column = self.find_three_in_row([self._board[0][2], self._board[1][2], self._board[2][2]])\n winner_in_columns = first_column or second_column or third_column\n\n first_diagonal = self.find_three_in_row([self._board[0][0], self._board[1][1], self._board[2][2]])\n second_diagonal = self.find_three_in_row([self._board[2][0], self._board[1][1], self._board[0][2]])\n winner_in_diagonals = first_diagonal or second_diagonal\n\n return winner_in_rows or winner_in_columns or winner_in_diagonals", "def winner(board):\n columns = []\n for row in board:\n xcount = row.count(X)\n ocount = row.count(O)\n if xcount == 3:\n return X\n if ocount == 3:\n return O\n\n for j in range(len(board)):\n column = [row[j] for row in board]\n columns.append(column)\n \n for j in columns:\n xcounter = j.count(X)\n ocounter = j.count(O)\n if xcounter == 3:\n return X\n if ocounter == 3:\n return O\n \n if board[0][0] == O and board[1][1] == O and board[2][2] == O:\n return O\n if board[0][0] == X and board[1][1] == X and board[2][2] == X:\n return X\n if board[0][2] == O and board[1][1] == O and board[2][0] == O:\n return O\n if board[0][2] == X and board[1][1] == X and board[2][0] == X:\n return X\n\n return None", "def check_cols(self):\r\n for i in range(3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+3][-1] and self.grid[i+3][-1] == self.grid[i+6][-1]:\r\n return (i, (self.grid[i], self.grid[i+6]))\r\n return (-1, None)", "def columnWin( self ):\n\n for x in list(range(0,3)):\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+3]\n thirdVal = self.__grid[x+6]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if compiledVal.lower() == 'xxx':\n return 'X'\n\n elif compiledVal.lower() == 'ooo':\n return 'O'\n\n return None", "def check_columns(self):\n\t\ti=0\n\t\tfor i in range(len(self.board[i])):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board)):\n\t\t\t\tif self.board[j][i] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True", "def 
find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def check_for_win_lose(b):\r\n win_move = None\r\n block_win = None\r\n # check for wins based on row\r\n for ri in range(3):\r\n row = b[ri]\r\n if single_move(row):\r\n if row==[1,1,0]:\r\n win_move = (ri+1,3)\r\n elif row==[2,2,0]:\r\n block_win = (ri+1,3)\r\n elif row==[1,0,1]:\r\n win_move = (ri+1,2)\r\n elif 
row==[2,0,2]:\r\n block_win = (ri+1,2)\r\n elif row==[0,1,1]:\r\n win_move = (ri+1,1)\r\n elif row==[0,2,2]:\r\n block_win = (ri+1,1)\r\n else:\r\n print '144 ERROR!'\r\n print single_move(row)\r\n print row\r\n print ' '\r\n\r\n # check for win based on column\r\n for ci in range(3):\r\n col = get_col(b,ci)\r\n if single_move(col):\r\n if col==[1,1,0]:\r\n win_move = (3,ci+1)\r\n elif col==[2,2,0]:\r\n block_win = (3,ci+1)\r\n elif col==[1,0,1]:\r\n win_move = (2,ci+1)\r\n elif col==[2,0,2]:\r\n block_win = (2,ci+1)\r\n elif col==[0,1,1]:\r\n win_move = (1,ci+1)\r\n elif col==[0,2,2]:\r\n block_win = (1,ci+1)\r\n else:\r\n print '166 ERROR!'\r\n print single_move(col)\r\n print col\r\n print ' '\r\n\r\n # check for win on backward diagonal\r\n diag = get_bw_diag(b)\r\n if single_move(diag):\r\n if diag==[1,1,0]:\r\n win_move = (3,3)\r\n elif diag==[2,2,0]:\r\n block_win (3,3)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,1)\r\n elif diag==[0,2,2]:\r\n block_win = (1,1)\r\n \r\n # check for win on forward diagonal\r\n diag = get_fwd_diag(b)\r\n if single_move(diag):\r\n if diag == [1,1,0]:\r\n win_move = (3,1)\r\n elif diag==[2,2,0]:\r\n block_win = (3,1)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,3)\r\n elif diag==[0,2,2]:\r\n block_win = (1,3)\r\n\r\n if win_move is not None:\r\n return (win_move, True)\r\n elif block_win is not None:\r\n return (block_win, False)\r\n else:\r\n return (None, False)", "def searchcols(self,fctn,cols,*args):\n goodkeys=[]\n for key in self.allrowkeys:\n temp=[]\n for c in cols:\n temp.append(self.getentry(key,c))\n for i in range(len(args)):\n temp.append(args[i])\n\n if fctn(*tuple(temp)):\n goodkeys.append(key)\n return(goodkeys)", "def look_through_rows(board, column, player):\n if board.shape[1] > column:\n count = board.shape[0] - 1\n count2 = 1\n while count >= 0 and count2 == 1:\n if board[count,column] == 0:\n board[count,column] = player\n count2 = count2 - 1\n else:\n count = count - 1\n return board\n else:\n print('Improper Column Given')", "def check_rows(self, win: list) -> bool:\r\n for row in self.tags:\r\n for j in range(len(row) - len(win) + 1):\r\n if win == row[j:j+self.win_condition]:\r\n return True", "def check_col(sudoku):\r\n for col in range(9):\r\n for row in range(8):\r\n test = sudoku[row][col]\r\n for i in range(row+1,9):\r\n if sudoku[i][col] == test:\r\n return True #returns True is there is more than two of the same numbers in a column\r", "def check_diag(self, row, column, symbol):\r\n\r\n # get the current state of buttons..\r\n # tl -> top left; mm -> middle middle; etc...\r\n tl = self.board[0][0][1]\r\n mm = self.board[1][1][1]\r\n br = self.board[2][2][1]\r\n bl = self.board[0][2][1]\r\n tr = self.board[0][2][1]\r\n\r\n # we know if mm isn't on then we can return early\r\n if mm == symbol:\r\n if tl == symbol and br == symbol:\r\n self.winner = symbol\r\n return\r\n if tr == symbol and bl == symbol:\r\n self.winner = symbol\r\n return\r\n else:\r\n return\r\n else:\r\n return", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and 
third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def winner(board):\n x_in_board = []\n o_in_board = []\n winning_positions = [\n [[0, 0], [0, 1], [0, 2]],\n [[1, 0], [1, 1], [1, 2]],\n [[2, 0], [2, 1], [2, 2]],\n [[0, 0], [1, 0], [2, 0]],\n [[0, 1], [1, 1], [2, 1]],\n [[0, 2], [1, 2], [2, 2]],\n [[0, 0], [1, 1], [2, 2]],\n [[0, 2], [1, 1], [2, 0]]\n ]\n\n for i in range(len(board)):\n for j in range(len(board)):\n if board[i][j] == X:\n x_in_board.append([i, j])\n elif board[i][j] == O:\n o_in_board.append([i, j])\n\n for i in winning_positions:\n if i[0] in x_in_board and i[1] in x_in_board and i[2] in x_in_board:\n return X\n elif i[0] in o_in_board and i[1] in o_in_board and i[2] in o_in_board:\n return O\n\n return None", "def row_win(board):\n\tfor row in range(3):\n\t\tif board[row][0] != EMPTY and board[row][0] == board[row][1] == board[row][2]:\n\t\t\treturn True\n\treturn False", "def player(board):\n X_count = 0\n O_count = 0\n\n for row in board:\n X_count += row.count(X)\n O_count += row.count(O)\n\n if X_count <= O_count:\n return X\n else:\n return O", "def winner(board):\n # Hard code winning moves\n # row0\n if board[0][0] == board[0][1] == board[0][2] == X:\n return X\n elif board[0][0] == board[0][1] == board[0][2] == O:\n return O\n # row1\n elif board[1][0] == board[1][1] == board[1][2] == X:\n return X\n elif board[1][0] == board[1][1] == board[1][2] == O:\n return O\n # row2\n elif board[2][0] == board[2][1] == board[2][2] == X:\n return X\n elif board[2][0] == board[2][1] == board[2][2] == O:\n return O\n # col0\n elif board[0][0] == board[1][0] == board[2][0] == X:\n return X\n elif board[0][0] == board[1][0] == board[2][0] == O:\n return O\n # col1\n elif board[0][1] == board[1][1] == board[2][1] == X:\n return X\n elif board[0][1] == board[1][1] == board[2][1] == O:\n return O\n # col2\n elif board[0][2] == board[1][2] == board[2][2] == X:\n return X\n elif board[0][2] == board[1][2] == board[2][2] == O:\n return O\n # diagonal\n elif board[0][0] == board[1][1] == board[2][2] == X:\n return X\n elif board[0][0] == board[1][1] == board[2][2] == O:\n return O\n # inverse diagonal\n elif board[0][2] == board[1][1] == board[2][0] == X:\n return X\n elif board[0][2] == board[1][1] == board[2][0] == O:\n return O\n\n return None" ]
[ "0.6576831", "0.6555307", "0.64840585", "0.6481743", "0.6447641", "0.63892657", "0.6347227", "0.63392514", "0.629483", "0.62593347", "0.6223977", "0.6137701", "0.60959685", "0.6069051", "0.60535437", "0.6050567", "0.60471857", "0.603175", "0.59898293", "0.5885481", "0.58327746", "0.58294433", "0.57293683", "0.5718989", "0.5708146", "0.57061464", "0.56966716", "0.5685899", "0.5679373", "0.5672083" ]
0.6735366
0
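A minimal standalone sketch of the column-scan technique this record describes, assuming a square list-of-lists grid with None for empty cells; the function and variable names (column_has_win, tags, win) are illustrative, not taken from the dataset:

def column_has_win(tags, win):
    # Return True if any column of the square grid contains `win` as a contiguous run.
    size = len(tags)
    win_len = len(win)
    for col in range(size):
        column = [tags[row][col] for row in range(size)]
        for start in range(size - win_len + 1):
            if column[start:start + win_len] == win:
                return True
    return False

# usage: 'x' wins down the second column of a 4x4 board
board = [
    [None, 'x', None, None],
    ['o',  'x', 'o',  None],
    [None, 'x', None, None],
    ['o',  'x', None, None],
]
assert column_has_win(board, ['x'] * 4)
assert not column_has_win(board, ['o'] * 4)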
Checks for a winning sequence in all possible diagonals that are at least as long as the winning condition.
def check_diagonals(self, win: list) -> bool:
    for i in range(self.size - self.win_condition + 1):
        # [x x  ]
        # [ x x ]
        # [  x x]
        # [    x]
        diagonal = []
        x = i
        y = 0
        for j in range(self.size - i):
            diagonal.append(self.tags[x][y])
            x += 1
            y += 1
        for j in range(len(diagonal) - len(win) + 1):
            if win == diagonal[j:j + self.win_condition]:
                return True

        # [x    ]
        # [x x  ]
        # [ x x ]
        # [  x x]
        diagonal = []
        x = 0
        y = i
        for j in range(self.size - i):
            diagonal.append(self.tags[x][y])
            x += 1
            y += 1
        for j in range(len(diagonal) - len(win) + 1):
            if win == diagonal[j:j + self.win_condition]:
                return True

        # [  x x]
        # [ x x ]
        # [x x  ]
        # [x    ]
        diagonal = []
        x = self.size - 1 - i
        y = 0
        for j in range(self.size - i):
            diagonal.append(self.tags[x][y])
            x -= 1
            y += 1
        for j in range(len(diagonal) - len(win) + 1):
            if win == diagonal[j:j + self.win_condition]:
                return True

        # [    x]
        # [  x x]
        # [ x x ]
        # [x x  ]
        diagonal = []
        x = self.size - 1
        y = 0 + i
        for j in range(self.size - i):
            diagonal.append(self.tags[x][y])
            x -= 1
            y += 1
        for j in range(len(diagonal) - len(win) + 1):
            if win == diagonal[j:j + self.win_condition]:
                return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_winning(self, curr_state):\n rows = [[0,1,2], [3,4,5], [6,7,8]]\n columns = [[0,3,6], [1,4,7], [2,5,8]]\n diagonal = [[0,4,8], [2,4,6]]\n total_checks = rows + columns + diagonal\n for row in total_checks:\n sum = 0\n count = 0\n for pos in row:\n if np.isnan(curr_state[pos]):\n break\n else:\n sum = sum + curr_state[pos]\n count = count + 1\n if sum == 15 and count == 3:\n return True\n return False", "def diag_win(board):\n\tif board[1][1] != EMPTY and (board[1][1] == board[0][2] == board[2][0] or board[1][1] == board[0][0] == board[2][2]):\n\t\treturn True\n\treturn False", "def is_winning(self):\n\n current_board = self.current_board\n\n # check rows\n for row in current_board:\n row = set(row)\n if (\"X\" not in row and \"-\" not in row) or (\"O\" not in row and \"-\" not in row):\n return True\n\n # check columns\n for i in range(len(current_board)):\n column_to_check = set()\n \n for j in range(len(current_board)):\n column_to_check.add(current_board[j][i])\n\n if (\"X\" not in column_to_check and \"-\" not in column_to_check) or (\"O\" not in column_to_check and \"-\" not in column_to_check):\n return True\n \n # check diagonals\n forward_diagonal_check = set()\n backward_diagonal_check = set()\n \n for i in range(len(current_board)):\n forward_diagonal_check.add(current_board[i][i])\n backward_diagonal_check.add(current_board[i][len(current_board)-1-i])\n\n if forward_diagonal_check == {\"X\"} or forward_diagonal_check == {\"O\"}:\n return True\n\n if backward_diagonal_check == {\"X\"} or backward_diagonal_check == {\"O\"}:\n return True", "def check_diagonals():\n global game_still_going\n # Check if any of the rows have all the same value.\n diagonal1 = board[0] == board[4] == board[8] != '_'\n diagonal2 = board[6] == board[4] == board[2] != '_'\n # If any diagonals does have a match, then game still going to False.\n if diagonal1 or diagonal2:\n game_still_going = False\n # Return winner 'X' or 'O'.\n if diagonal1:\n return board[0]\n if diagonal2:\n return board[6]", "def check_diagonals(self):\n\t\tdiags = [[(0,0), (1,1), (2,2)], [(0,2), (1,1), (2,0)]]\n\n\t\tfor diag in diags:\n\t\t\tpts = 0\n\t\t\tfor loc in diag:\n\t\t\t\tif self.board[loc[0]][loc[1]] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('WE WON')\n\t\t\t\treturn True", "def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def is_down_diagonal_win(self, checker):\n for row in range(self.height-3):\n for col in range(self.width-3):\n if self.slots[row][col] == checker and \\\n self.slots[row+1][col+1] == checker and \\\n self.slots[row+2][col+2] == checker and \\\n self.slots[row+3][col+3] == checker:\n return True\n return False", "def is_winning(self, curr_state):\n winning_combinations = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]\n # We will check only for the above 8 combinations to see any of them sums up to 15 which implies winning\n for combination in winning_combinations:\n #print('Combination:',combination)\n if not np.isnan(curr_state[combination[0]]) and not np.isnan(curr_state[combination[1]]) and not np.isnan(curr_state[combination[2]]) :\n if curr_state[combination[0]] + curr_state[combination[1]] + curr_state[combination[2]] == 15 :\n return True\n \n #If none of the above condition is True return False \n return False", "def diagonal_win():\n\n 
diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True", "def winGame(sub_state):\n for i in range(sub_state.shape[0] - 4):\n for j in range(sub_state.shape[1] - 4):\n\n horizontal = sub_state[i][j: j+5]\n if (horizontal == 1).all():\n return True\n\n vertical = [sub_state[i+k, j] for k in range(5)]\n if (np.array(vertical) == 1).all():\n return True\n\n diagonal = [sub_state[(i+k, j+k)] for k in range(5)]\n if (np.array(diagonal) == 1).all():\n return True\n\n return False", "def is_up_diagonal_win(self, checker):\n for row in range(3, self.height):\n for col in range(self.width-3):\n if self.slots[row][col] == checker and \\\n self.slots[row-1][col+1] == checker and \\\n self.slots[row-2][col+2] == checker and \\\n self.slots[row-3][col+3] == checker:\n return True\n return False", "def check_winner(self):\n if self.history:\n last_move = self.history[-1]\n last_player = self.get_last_player()\n\n connected_token = last_player*self.n_in_row\n # check for row\n if connected_token in [sum(self.state[last_move[0]][j:j+self.n_in_row]) for j in range(0, self.width-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for column\n if connected_token in [sum(self.state.T[last_move[1]][i:i+self.n_in_row]) for i in range(0, self.height-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for diagonal with slope 1\n diagonal = np.diag(self.state, last_move[1]-last_move[0])\n if connected_token in [sum(diagonal[i:i+self.n_in_row]) for i in range(0, len(diagonal)-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for diagonal with slope -1\n diagonal = np.diag(self.state[:,::-1], self.width-1-last_move[1]-last_move[0])\n if connected_token in [sum(diagonal[i:i+self.n_in_row]) for i in range(0, len(diagonal)-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for draw game\n if len(np.argwhere(self.state==0)) == 0:\n self.winner = [True, 0]\n return self.winner\n return self.winner", "def is_down_diagonal_win(self, checker):\r\n for row in range(self.height - self.win_condition + 1):\r\n for col in range(self.width - self.win_condition + 1):\r\n num_checkers = 0\r\n for i in range(self.win_condition):\r\n if self.grid[row + i][col + i] == checker:\r\n num_checkers += 1\r\n\r\n if num_checkers == self.win_condition:\r\n return True\r\n\r\n # if we get here, there's no horizontal win\r\n return False", "def terminal(self):\n # Horizontal check\n for i in range(3):\n b_ = True\n for j in range(2):\n if self.board[i][j] == None or self.board[i][j] != self.board[i][j + 1]:\n b_ = False\n \n if b_:\n self.winner = self.board[i][0]\n return True\n \n # Vertical check\n for j in range(3):\n b_ = True\n for i in range(2):\n if self.board[i][j] == None or self.board[i][j] != self.board[i + 1][j]:\n b_ = False\n \n if b_:\n self.winner = self.board[0][j]\n return True\n \n # Diagonal check\n if self.board[1][1] != None:\n if self.board[0][0] == self.board[1][1] == self.board[2][2]:\n self.winner = self.board[1][1]\n return True\n\n if self.board[2][0] == self.board[1][1] == 
self.board[0][2]:\n self.winner = self.board[1][1]\n return True\n\n # Draw check\n if sum([row.count(None) for row in self.board]) == 0:\n self.winner = None\n return True\n \n return False", "def check_for_win(self, row, col, player): \n\n count = 0\n for i in range(0, len(self.board[0])):\n # Check vertical\n if self.board[row][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n\n count = 0\n for i in range(0, len(self.board)):\n # Check horisontal\n if self.board[:, col][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n \n count = 0\n totoffset = col - row\n for i in np.diagonal(self.board, offset=totoffset):\n # Check diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True\n\n count = 0\n mirrorboard = np.fliplr(self.board)\n col = self.colswitch[col]\n totoffset = col - row\n for i in np.diagonal(mirrorboard, offset=totoffset):\n # Check other diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True", "def check_won (grid):\r\n w=False\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n w=True\r\n break\r\n return w", "def check_win(self):\n lines = []\n\n # rows\n lines.extend(self._board)\n\n # cols\n cols = [[self._board[rowidx][colidx] for rowidx in range(self._dim)]\n for colidx in range(self._dim)]\n lines.extend(cols)\n\n # diags\n diag1 = [self._board[idx][idx] for idx in range(self._dim)]\n diag2 = [self._board[idx][self._dim - idx -1]\n for idx in range(self._dim)]\n lines.append(diag1)\n lines.append(diag2)\n\n # check all lines\n for line in lines:\n if len(set(line)) == 1 and line[0] != EMPTY:\n if self._reverse:\n return switch_player(line[0])\n else:\n return line[0]\n\n # no winner, check for draw\n if len(self.get_empty_squares()) == 0:\n return DRAW\n\n # game is still in progress\n return None", "def check_win(self, board, move):\n for i, j, k in self.winning_cases:\n if board[i] == move and board[j] == move and board[k] == move:\n return True\n return False", "def check_for_win_lose(b):\r\n win_move = None\r\n block_win = None\r\n # check for wins based on row\r\n for ri in range(3):\r\n row = b[ri]\r\n if single_move(row):\r\n if row==[1,1,0]:\r\n win_move = (ri+1,3)\r\n elif row==[2,2,0]:\r\n block_win = (ri+1,3)\r\n elif row==[1,0,1]:\r\n win_move = (ri+1,2)\r\n elif row==[2,0,2]:\r\n block_win = (ri+1,2)\r\n elif row==[0,1,1]:\r\n win_move = (ri+1,1)\r\n elif row==[0,2,2]:\r\n block_win = (ri+1,1)\r\n else:\r\n print '144 ERROR!'\r\n print single_move(row)\r\n print row\r\n print ' '\r\n\r\n # check for win based on column\r\n for ci in range(3):\r\n col = get_col(b,ci)\r\n if single_move(col):\r\n if col==[1,1,0]:\r\n win_move = (3,ci+1)\r\n elif col==[2,2,0]:\r\n block_win = (3,ci+1)\r\n elif col==[1,0,1]:\r\n win_move = (2,ci+1)\r\n elif col==[2,0,2]:\r\n block_win = (2,ci+1)\r\n elif col==[0,1,1]:\r\n win_move = (1,ci+1)\r\n elif col==[0,2,2]:\r\n block_win = (1,ci+1)\r\n else:\r\n print '166 ERROR!'\r\n print single_move(col)\r\n print col\r\n print ' '\r\n\r\n # check for win on backward diagonal\r\n diag = get_bw_diag(b)\r\n if single_move(diag):\r\n if diag==[1,1,0]:\r\n win_move = (3,3)\r\n elif diag==[2,2,0]:\r\n block_win (3,3)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,1)\r\n elif diag==[0,2,2]:\r\n block_win = (1,1)\r\n \r\n # check for win 
on forward diagonal\r\n diag = get_fwd_diag(b)\r\n if single_move(diag):\r\n if diag == [1,1,0]:\r\n win_move = (3,1)\r\n elif diag==[2,2,0]:\r\n block_win = (3,1)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,3)\r\n elif diag==[0,2,2]:\r\n block_win = (1,3)\r\n\r\n if win_move is not None:\r\n return (win_move, True)\r\n elif block_win is not None:\r\n return (block_win, False)\r\n else:\r\n return (None, False)", "def win_check(table: list) -> (bool, str):\n # Combinations that would lead to a win\n win_list = [\n [0,1,2], [3,4,5],\n [6,7,8], [0,3,6],\n [1,4,7], [2,5,8],\n [0,4,8], [6,4,2],\n ]\n for line in win_list:\n # Check rows, columns, and diagonals\n combination = set([table[line[0]], table[line[1]], table[line[2]]])\n\n if len(combination) == 1 and combination != {\"-\"}: # Which mean we have a straight line of either X or O\n #unpack comb (which is 1 item), which is either \"X\" or \"O\" to know who won\n return True, *combination\n else:\n return False, None", "def check_diag(self, row, column, symbol):\r\n\r\n # get the current state of buttons..\r\n # tl -> top left; mm -> middle middle; etc...\r\n tl = self.board[0][0][1]\r\n mm = self.board[1][1][1]\r\n br = self.board[2][2][1]\r\n bl = self.board[0][2][1]\r\n tr = self.board[0][2][1]\r\n\r\n # we know if mm isn't on then we can return early\r\n if mm == symbol:\r\n if tl == symbol and br == symbol:\r\n self.winner = symbol\r\n return\r\n if tr == symbol and bl == symbol:\r\n self.winner = symbol\r\n return\r\n else:\r\n return\r\n else:\r\n return", "def is_up_diagonal_win(self, checker):\r\n for row in range(self.height - self.win_condition + 1):\r\n for col in range(self.width - self.win_condition + 1):\r\n num_checkers = 0\r\n for i in range(self.win_condition):\r\n if self.grid[self.height - row - 1 - i][col+i] == checker:\r\n num_checkers += 1\r\n\r\n if num_checkers == self.win_condition:\r\n return True\r\n\r\n # if we get here, there's no horizontal win\r\n return False", "def __win(self, a):\n for i in range(len(a)-self.k+1):\n flag = True\n for j in range(self.k):\n if not a[i+j]:\n flag = False\n break\n if flag: return True", "def check_game_status2(board):\n board = np.array(board)\n for i in range(7):\n for j in range(6):\n if checkWin(board, j, i, 1):\n return 1\n if checkWin(board, j, i, 2):\n return 2\n if isfull(board):\n return 0\n return -1", "def _check_winning_combinations(board, player):\n winning_combinations = (\n ((0, 0), (0, 1), (0, 2)),\n ((1, 0), (1, 1), (1, 2)),\n ((2, 0), (2, 1), (2, 2)),\n ((0, 0), (1, 0), (2, 0)),\n ((0, 1), (1, 1), (2, 1)),\n ((0, 2), (1, 2), (2, 2)),\n ((0, 0), (1, 1), (2, 2)),\n ((0, 2), (1, 1), (2, 0))\n )\n\n if any(combination for combination in winning_combinations if _is_winning_combination(board, combination, player)):\n return player\n\n return None", "def checkAll(self, player, board):\n #retrieve current moves of the player who made the last move\n currentMoves = self.getPlayerMoves(player,board)\n\n #check column win\n is_col_win = self.checkWin(currentMoves, self.columnWins)\n if is_col_win != False:\n return True\n\n #check row win\n is_row_win = self.checkWin(currentMoves, self.rowWins)\n if is_row_win != False:\n return True\n\n #check diagonal win\n is_diag_win = self.checkWin(currentMoves, self.diagonalWins)\n if is_diag_win != False:\n return True\n else:\n return False", "def row_win(board):\n\tfor row in range(3):\n\t\tif board[row][0] != EMPTY and board[row][0] 
== board[row][1] == board[row][2]:\n\t\t\treturn True\n\treturn False", "def check_diagonals():\n global ongoing_game\n diagonal_1 = board[0] == board[4] == board[8] != \"*\"\n diagonal_2 = board[2] == board[4] == board[6] != \"*\"\n if diagonal_1 or diagonal_2:\n ongoing_game = False\n if diagonal_1:\n return board[0]\n elif diagonal_2:\n return board[2]\n else:\n return None", "def check_won (grid):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False", "def is_winning(self, curr_state):\n \n winning_idx_lists = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6]\n ]\n is_winning = False\n for winning_idx_list in winning_idx_lists:\n result_list = [curr_state[index] for index in winning_idx_list]\n if (sum(result_list) == 15):\n is_winning = True \n break\n\n return is_winning" ]
[ "0.69447654", "0.68922657", "0.6877511", "0.6859907", "0.6785897", "0.6660476", "0.66416174", "0.66363674", "0.6619749", "0.6578498", "0.65732425", "0.65712273", "0.653169", "0.6508", "0.6497785", "0.64893246", "0.64755654", "0.6452486", "0.6433075", "0.64247596", "0.64144456", "0.6397901", "0.6393368", "0.6380367", "0.63419425", "0.63253975", "0.63222104", "0.6309371", "0.63081", "0.6307517" ]
0.73726594
0
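A compact standalone sketch of the same diagonal scan, under the same assumptions (square list-of-lists grid, explicit win sequence); instead of the four explicit corner-based scans in the record above, it enumerates every "\" and "/" diagonal by its constant column-minus-row or column-plus-row index and checks each for the winning run (all names here are illustrative):

def diagonal_has_win(tags, win):
    size = len(tags)
    win_len = len(win)
    diagonals = []
    # "\" diagonals: column index minus row index is constant
    for d in range(-(size - 1), size):
        diagonals.append([tags[r][r + d] for r in range(size) if 0 <= r + d < size])
    # "/" diagonals: column index plus row index is constant
    for s in range(2 * size - 1):
        diagonals.append([tags[r][s - r] for r in range(size) if 0 <= s - r < size])
    # scan every diagonal long enough to hold the winning sequence
    for diag in diagonals:
        for start in range(len(diag) - win_len + 1):
            if diag[start:start + win_len] == win:
                return True
    return False

# usage: both main diagonals of this 4x4 board are winning runs
board = [
    ['x', None, None, 'o'],
    [None, 'x', 'o', None],
    [None, 'o', 'x', None],
    ['o', None, None, 'x'],
]
assert diagonal_has_win(board, ['x'] * 4)
assert diagonal_has_win(board, ['o'] * 4)
assert not diagonal_has_win(board, ['x', 'o', 'x', 'o'])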
Checks if the board is fully packed with figures, which in practice means checking whether the tags array is full.
def full_board(self) -> bool:
    counter = 0
    for column in self.tags:
        if None in column:
            counter += 1
    return counter == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _board_is_full(self):\n return (self.get_counts()[0] + self.get_counts()[1] == self._num_rows * self._num_cols)", "def is_full(board):\r\n return False", "def is_full(board):\n return False", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def is_full(self):\n return self.top == self.size - 1", "def is_board_full(board):\n for i in range(1, 10):\n if check_empty_space(board, i):\n return False\n return True", "def is_full(self):\n for i in range(self.width):\n if self.can_add_to(i) == True:\n return False\n return True", "def is_full(self):\r\n return self.num_checkers == self.width * self.height", "def full():\r\n\r\n count = 0\r\n for slot in board:\r\n if slot not in '012345678':\r\n count += 1\r\n return count == 9", "def board_is_full(self):\n\t\tfor i in range(len(self.board)):\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tif self.board[i][j] == '-':\n\t\t\t\t\treturn False\n\t\treturn True", "def is_full(self):\n full = True\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\":\n full = False\n return full", "def is_full(self):\n\n current_board = self.current_board\n remaining_rows = 0\n\n for row in current_board:\n if \"-\" in set(row):\n remaining_rows += 1\n\n if remaining_rows == 0:\n return True\n else:\n return False", "def is_complete(self):\n for i in range(9):\n if len(self.rows[i]) != 0 or len(self.columns[i]) != 0 or len(self.groups[i]) != 0:\n return False\n\n for row in self.board:\n for col in row:\n if col == self.empty_cell_flag:\n return False\n\n return True", "def _board_is_full(board):\n\n # looks for \"-\" in every position in the board\n # returns False if it finds one\n for row in board:\n if any(column for column in row if column == \"-\"):\n return False\n\n return True", "def isFull(self) -> bool:\n return self._elems == self._k", "def is_board_full(self):\n for position in self.positions:\n if self.is_position_availible(position):\n return False\n return True", "def is_full(self):\n return len(self.__occupied_slots__) >= self.__size__", "def is_full(self):\n return len(self.walls) == 4", "def check_if_board_full(self, board):\n for i in range(self.height // 80):\n for j in range(self.width // 80):\n if board[(j, i)] == 0:\n return False\n elif j == self.width // 80:\n break\n else:\n pass\n print(\"Board full! :(\")\n return True", "def is_full(self): #checks to see if stack is full by comparing it to the capacity\n if self.num_items == self.capacity:\n return True\n else:\n return False", "def check_full_board(self): #rows then columns\n for row in self.board:\n for column_of_row in row:\n if column_of_row == ' ':\n return False\n return True", "def is_board_full(board):\n return not any(0 in val for val in board)", "def is_full(self) -> bool:", "def checkFull(self, board):\n full = True\n for i in board:\n if i == ' ': full = False\n return full", "def full_board( self ):\n\n for x in self.__grid:\n if isinstance(x, int):\n return False\n else:\n continue\n\n return True", "def __is_board_full(self):\r\n for row in self.__board:\r\n if {self.PLAYER1, self.PLAYER2} & set(row) != 0:\r\n return False\r\n return True", "def check_for_empty(self):\n return ' ' in self.game_board", "def is_full(self):\n return all(map(lambda x: x != self.CELL_EMPTY, self.__values))", "def not_empty(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] > 0", "def isFull(self):\n return self.rear == self.size" ]
[ "0.6820874", "0.676673", "0.6736874", "0.6717051", "0.6591668", "0.65669096", "0.6556075", "0.6527779", "0.6506026", "0.6469064", "0.64647114", "0.6442245", "0.6417079", "0.6390676", "0.63876194", "0.637983", "0.6371609", "0.6350671", "0.6347817", "0.6293271", "0.62686855", "0.6224456", "0.62235546", "0.62179506", "0.6209308", "0.6203418", "0.6186902", "0.61837053", "0.6158378", "0.6107692" ]
0.74219203
0
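The board-full test in this record counts columns that still contain None; under the same None-for-empty assumption, an equivalent standalone check can be written with any() (names are illustrative):

def board_is_full(tags):
    # True when no cell of the grid is None, i.e. no empty space is left
    return not any(None in column for column in tags)

# usage
assert board_is_full([['x', 'o'], ['o', 'x']])
assert not board_is_full([['x', None], ['o', 'x']])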
Checks for empty spaces in the tags list. If an empty space is found, its coordinates are packed into a tuple and appended to a new list.
def check_for_moves(self) -> list:
    avail_moves = []
    for x in range(self.size):
        for y in range(self.size):
            if self.tags[x][y] is None:
                avail_moves.append((x, y))
    return avail_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def make_empty_lists(tags):\n all_lists = {}\n\n #these are the first ones listed in sub groups of tags\n main_tags = []\n\n #if no nested groups of tags, this will work:\n ## all_tags = tags.split()\n ## for tag in all_tags:\n ## all_lists[tag] = []\n\n all_tags = parse_tags(tags)\n for tag in all_tags:\n if isinstance(tag, list):\n main_tags.append(tag[0])\n shared_list = []\n for sub_tag in tag:\n all_lists[sub_tag] = shared_list\n else:\n main_tags.append(tag)\n all_lists[tag] = []\n\n #pick up unmatched tags that have a '+' in any tag:\n all_lists['good'] = []\n main_tags.append('good')\n #for everything else:\n all_lists['misc'] = []\n main_tags.append('misc')\n\n return all_lists, main_tags", "def clean_nodes_no_names(tag, data):\n\tif not isinstance(tag, tuple):\n\t\tfor each in data:\n\t\t\tif each['k'] != [] and each['v'] != []:\n\t\t\t\tif tag in each['k'] and 'name' not in each['k']:\n\t\t\t\t\teach['removed'] = 'true'\n\t\t\t\t\ttagValueData = dict(zip(each['k'], each['v']))\n\t\t\t\t\tif tagValueData.get('amenity') == 'atm':\n\t\t\t\t\t\teach['removed'] = 'false'\n\t\t\tyield each\n\telse:\n\t\tfor each in data:\n\t\t\tif each['k'] != [] and each['v'] != []:\n\t\t\t\tif tag[0] in each['k'] and tag[1] in each['v'] and 'name' not in each['k']:\n\t\t\t\t\teach['removed'] = 'true'\n\t\t\tyield each", "def validateTags(self, tags):\n\t\treturn tags.replace(', ',' ')", "def sanitise_tags(tags):\n\n # hack out all kinds of whitespace, then split on ,\n # if you run into more illegal characters (simplenote does not want to sync them)\n # add them to the regular expression above.\n illegals_removed = tags_illegal_chars.sub('', tags)\n if len(illegals_removed) == 0:\n # special case for empty string ''\n # split turns that into [''], which is not valid\n return []\n\n else:\n return illegals_removed.split(',')", "def autoreveal_empty_spaces(self, position):\n revealed = []\n zero_spaces = []\n check_stack = [position]\n checked = []\n\n while len(check_stack) > 0:\n pos = x, y = check_stack.pop()\n if self.get_num_mines_around_position(x, y) == 0:\n zero_spaces.append(pos)\n \n # Add spaces around\n for ay in range(y-1, y+2):\n for ax in range(x-1, x+2):\n if ay >= 0 and ax >= 0 and ay < len(self.mine_map) and ax < len(self.mine_map[ay]): # Don't check spaces that are outside of the array\n apos = ax, ay\n if apos not in checked:\n check_stack.append(apos)\n revealed.append(apos)\n checked.append(pos)\n \n self.revealed.extend(revealed)", "def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)", "def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def process_tags(tags=list):\n new_tag_list = list()\n for tag in tags:\n new_tag = tag.replace(\"<\", \" \")\n new_tag = 
new_tag.replace(\">\", \" \")\n new_tag = new_tag.split()\n # sort elements by string length (this to avoid 'c' being checked before 'c++', etc)\n new_tag.sort(key=len, reverse=True)\n new_tag_list.append(new_tag)\n return new_tag_list", "def get_empty_positions(self):\n\n empty_positions = []\n\n for i in range(self._dimension):\n for j in range(self._dimension):\n if self._board[i][j] == ' ':\n empty_positions.append((i, j))\n\n return empty_positions", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty", "def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not val:\n empty.append((j,i))\n return empty", "def normalized_pos_tags(self):\n pos_list = []\n for pos in self.pos_tags:\n pos_list.extend([i for i in re.split('[:;]', pos) if i != ''])\n return pos_list", "def mps_null_spaces(mpslist):\n AL, C, AR = mpslist\n d, chi, _ = AL.shape\n NLshp = (d, chi, (d-1)*chi)\n ALdag = fuse_left(AL).T.conj()\n NLm = null_space(ALdag)\n NL = NLm.reshape(NLshp)\n\n ARmat = fuse_right(AR)\n NRm_dag = null_space(ARmat)\n NRm = NRm_dag.conj()\n NR = NRm.reshape((d, chi, (d-1)*chi))\n NR = NR.transpose((0, 2, 1))\n return (NL, NR)", "def _filter_empty(lst):\n return [cell for cell in lst if cell is not Sudoku.EMPTY_CELL]", "def test_format_bad_tags(self):\n tags = self.c._format_tags(None)\n self.assertEqual(0, len(tags))", "def find_empty_space(board: list) -> tuple:\n board_length = len(board)\n for i in range(board_length):\n for j in range(board_length):\n if board[i][j] == 0:\n return (i,j)", "def _py3_safe(parsed_list):\n if len(parsed_list) < 2:\n return parsed_list\n else:\n new_list = [parsed_list[0]]\n nl_append = new_list.append\n for before, after in py23_zip(islice(parsed_list, 0, len(parsed_list)-1),\n islice(parsed_list, 1, None)):\n if isinstance(before, Number) and isinstance(after, Number):\n nl_append(\"\")\n nl_append(after)\n return tuple(new_list)", "def _check_sanity(self, tags: List[str], n_words: int):\n n_out = 0\n\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n n_out += 1\n\n return n_out == n_words", "def _check_sanity(self, tags: List[str], n_words: int):\n n_out = 0\n\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n n_out += 1\n\n return n_out == n_words", "def empty_cells(self) -> List[Cell]:\n return list(ob.pos[0] for ob in self.new_obs())", "def splitTags(user_input):\n \n elements = []\n if ',' in user_input:\n elements = user_input.split(',')\n elif ' ' in user_input:\n elements = user_input.split(' ')\n else:\n elements.append(user_input)\n\n tags = []\n for element in elements:\n element = element.strip(' \\t\\n\\r').lower()\n if(len(element) == 0): continue\n if element not in tags:\n tags.append(element)\n return tags", "def expand_empty_tags(tokens, keep_minimized=None):\n for token in tokens:\n if isinstance(token, Empty):\n if keep_minimized and token.name in keep_minimized:\n yield token\n else:\n token.__class__ = Start\n token.reserialize()\n yield Start(token.xml.replace('/', ''))\n yield End('</%s>' % 
token.name)\n else:\n yield token", "def next_empty(checker): # checker is dictionary\n min_len = 100 # arbitrary large assignment\n position = ' '\n values = []\n for element in checker:\n if len(checker[element]) < min_len:\n min_len = len(checker[element])\n position = element\n values = checker[element]\n return position, values", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def make_free_cell_list():\r\n for row in range(9):\r\n for col in range(9):\r\n if (application.ui.__getattribute__(f'cell{col+1}{row+1}')).text() == \"\":\r\n lst_free_cells.append(Point(row, col))", "def clean_recording_gaps(self, pos_xy: np.ndarray, pos_times: np.ndarray):\n (\n position_gap_inds_above_threshold\n ) = self.check_for_position_gaps_above_threshold(pos_times)\n cleaned_pos_xy = pos_xy[:]\n for ind in position_gap_inds_above_threshold:\n cleaned_pos_xy[ind - 5 : ind + 5] = np.nan\n return (cleaned_pos_xy, position_gap_inds_above_threshold)", "def _genPosTags(self, tagged):\n return [pos for (token, pos) in tagged]" ]
[ "0.56815004", "0.5611002", "0.54052174", "0.5345678", "0.53289914", "0.5290784", "0.5282728", "0.52795637", "0.52427185", "0.5172312", "0.5162299", "0.5155179", "0.5121523", "0.5118857", "0.5089582", "0.50675386", "0.5064925", "0.506162", "0.50472486", "0.50319785", "0.500313", "0.500313", "0.497172", "0.49627206", "0.4953047", "0.49469945", "0.4933877", "0.49032608", "0.48838368", "0.48749062" ]
0.56864506
0
Function mashes together the classes' functionality and performs the AI's move. It recursively calls the minimax algorithm and, after finding the best move, adds a tag to the tags list.
def bot_handle_move(self) -> None: best_value = -INFINITY # default best value for maximizing player (bot in this app is a maximizing player) available_moves = self.check_for_moves() # for more info check the minimax algorithm theory depth = int(1.4*self.size - self.win_condition) # (depth) decides of how deep into recursion the algorithm will best_move = None # get. 1.4 seems to be the best consensus between time of # execution and accuracy of moves for move in available_moves: self.tags[move[0]][move[1]] = 'o' move_value = self.minimax(depth, -INFINITY, INFINITY, False) self.tags[move[0]][move[1]] = None if move_value > best_value: best_value = move_value best_move = move self.tags[best_move[0]][best_move[1]] = 'o'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get 
the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def minimax(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n #Fetching legal moves for the active player at max level\n legal_moves = game.get_legal_moves()\n #Terminial condition - if there are no legal moves left will call the utility function\n if not legal_moves:\n return game.utility(self) #Returning utility function\n #Assigning default value to best move\n best_move = legal_moves[0]\n #Assigning default value to best score as -inf\n best_score = float('-inf')\n #for each future legal move of active player\n for move in legal_moves:\n #Fetching next state forecast moves\n next_state = game.forecast_move(move)\n #Calling min_value function for score - Return type of min_value function is score\n score = self.min_value(next_state, depth-1)\n if score > best_score:\n best_move = move\n best_score = score\n return best_move #Return best_move", "def run_ai():\n print(\"Othello AI\") # First line is the name of this AI\n arguments = input().split(\",\")\n \n color = int(arguments[0]) #Player color: 1 for dark (goes first), 2 for light. \n limit = int(arguments[1]) #Depth limit\n minimax = int(arguments[2]) #Minimax or alpha beta\n caching = int(arguments[3]) #Caching \n ordering = int(arguments[4]) #Node-ordering (for alpha-beta only)\n\n if (minimax == 1): eprint(\"Running MINIMAX\")\n else: eprint(\"Running ALPHA-BETA\")\n\n if (caching == 1): eprint(\"State Caching is ON\")\n else: eprint(\"State Caching is OFF\")\n\n if (ordering == 1): eprint(\"Node Ordering is ON\")\n else: eprint(\"Node Ordering is OFF\")\n\n if (limit == -1): eprint(\"Depth Limit is OFF\")\n else: eprint(\"Depth Limit is \", limit)\n\n if (minimax == 1 and ordering == 1): eprint(\"Node Ordering should have no impact on Minimax\")\n\n while True: # This is the main loop\n # Read in the current game status, for example:\n # \"SCORE 2 2\" or \"FINAL 33 31\" if the game is over.\n # The first number is the score for player 1 (dark), the second for player 2 (light)\n next_input = input()\n status, dark_score_s, light_score_s = next_input.strip().split()\n dark_score = int(dark_score_s)\n light_score = int(light_score_s)\n\n if status == \"FINAL\": # Game is over.\n print\n else:\n board = eval(input()) # Read in the input and turn it into a Python\n # object. The format is a list of rows. The\n # squares in each row are represented by\n # 0 : empty square\n # 1 : dark disk (player 1)\n # 2 : light disk (player 2)\n\n # Select the move and send it to the manager\n if (minimax == 1): #run this if the minimax flag is given\n movei, movej = select_move_minimax(board, color, limit, caching)\n else: #else run alphabeta\n movei, movej = select_move_alphabeta(board, color, limit, caching, ordering)\n \n print(\"{} {}\".format(movei, movej))", "def run_ai():\n print(\"Othello AI\") # First line is the name of this AI\n arguments = input().split(\",\")\n \n color = int(arguments[0]) #Player color: 1 for dark (goes first), 2 for light. 
\n limit = int(arguments[1]) #Depth limit\n minimax = int(arguments[2]) #Minimax or alpha beta\n caching = int(arguments[3]) #Caching \n ordering = int(arguments[4]) #Node-ordering (for alpha-beta only)\n\n if (minimax == 1): eprint(\"Running MINIMAX\")\n else: eprint(\"Running ALPHA-BETA\")\n\n if (caching == 1): eprint(\"State Caching is ON\")\n else: eprint(\"State Caching is OFF\")\n\n if (ordering == 1): eprint(\"Node Ordering is ON\")\n else: eprint(\"Node Ordering is OFF\")\n\n if (limit == -1): eprint(\"Depth Limit is OFF\")\n else: eprint(\"Depth Limit is \", limit)\n\n if (minimax == 1 and ordering == 1): eprint(\"Node Ordering should have no impact on Minimax\")\n\n while True: # This is the main loop\n # Read in the current game status, for example:\n # \"SCORE 2 2\" or \"FINAL 33 31\" if the game is over.\n # The first number is the score for player 1 (dark), the second for player 2 (light)\n next_input = input()\n status, dark_score_s, light_score_s = next_input.strip().split()\n dark_score = int(dark_score_s)\n light_score = int(light_score_s)\n\n if status == \"FINAL\": # Game is over.\n print\n else:\n board = eval(input()) # Read in the input and turn it into a Python\n # object. The format is a list of rows. The\n # squares in each row are represented by\n # 0 : empty square\n # 1 : dark disk (player 1)\n # 2 : light disk (player 2)\n\n # Select the move and send it to the manager\n if (minimax == 1): #run this if the minimax flag is given\n movei, movej = select_move_minimax(board, color, limit, caching)\n else: #else run alphabeta\n movei, movej = select_move_alphabeta(board, color, limit, caching, ordering)\n \n print(\"{} {}\".format(movei, movej))", "def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n #sort state.actions in increasing or decreasing based on max or min (alpha or beta)\r\n #use heuristics fn to get a value for each move (move is in format (x,y) where x and y are ints\r\n \r\n d = depthset[0] #this is the cutoff test depth value. 
if we exceed this value, stop\r\n cutoff_test=None\r\n sort_fn = [vitalpoint, eyeHeur]\r\n eval_fn = survivalheur \r\n #randnumheuristics \r\n player = state.to_move()\r\n prune = 0\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0]\r\n visited = {}\r\n heuristicInd = 0\r\n \r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #max wants decreasing\r\n #sorted(state.actions(), key = eval_sort, reverse = True)\r\n \r\n #sort by favorites first, returns a list of actions\r\n # for sorts in sort_fn:\r\n tempher = heuristicInd\r\n\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n ''''''\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) #+ vitscore.count(a)\r\n if v >= beta: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n \r\n return v\r\n\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n #sorted(state.actions(), key = eval_sort)\r\n #Shayne\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n for a in sortedactions: #state.actions():\r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n beta = min(beta, v)\r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn or (lambda state: state.utility(player))\r\n #by default, utility score is used\r\n \r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n #print state.actions()\r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i 
in range(1, len(visited)):\r\n if len(pruned) < i:\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove", "def run(self):\n\n # keep track of counter\n counter = 0\n\n while self.queue:\n\n # print depth of tree every 10000 steps\n if counter % 10000 == 0:\n print(len(self.queue[0]))\n\n # get first moves set from queue\n moves_set = self.get_moves_set()\n\n # move all moves from set\n self.try_moves(moves_set)\n\n # continue branch (add to queue) if layout is not in archive\n if self.not_in_archive():\n self.add_to_queue(moves_set)\n \n # check for win\n if self.won_game():\n\n # return winning set of moves\n return moves_set\n \n # reverse moves to original layout\n self.reverse_moves(moves_set)\n \n # add to counter\n counter += 1", "def minimax(self, board: str):\n \"\"\" Your Code Here \"\"\"\n\n # ----------------------------BEGIN CODE----------------------------\n # disclaimer: I did only minimax (as said on the homework description https://drive.google.com/file/d/1JXBi_5JB8fwTWX34j0ZwN5YwjoIexh-Z/view)\n # my minimax does NOT try to prolong the game. It always goes for the best move!\n\n # initializing default values\n moveToMake = validMoves(board)[0]\n bestValue = -100\n\n # this will find the best move to go to, since value only returns the best score\n # util's setMove creates a copy, so we can use that as the successor\n for validmove in validMoves(board):\n currentValue = self.value(\n setMove(board, validmove, whoseMove(board)), 0)\n if currentValue > bestValue:\n bestValue = currentValue\n moveToMake = validmove\n\n # currently the val and game_length tuple is arbitrary because i have not implemented depth!\n return (moveToMake)", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state 
!= root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def move(self, state):\n \n self.depth_limit=1\n self.best_utility=-2\n action=None\n while not self.is_time_up():\n self.terminal=True\n self.cache={}\n action=self.alpha_beta_search(state,0)\n if self.terminal==True:\n break\n self.depth_limit=self.depth_limit+1\n \n return action", "def minimax(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n # TODO: finish this function!\n # raise NotImplementedError\n\n legal_moves = game.get_legal_moves() # obtain legal moves available to the board\n best_move = (-1,-1) # initialisation of best move\n best_score = -math.inf # abstraction of infinity\n\n for m in legal_moves: # for each ACTION, create a new state for its outcome, RESULT\n new_state = game.forecast_move(m)\n score = self.min_value(new_state, depth - 1) # recursion to calculate the score of that state\n if score > best_score:\n best_move = m\n best_score = score\n return best_move", "def itarate_the_recursion(battle_queue) -> List:\n\n score = None\n name = 1\n children = []\n m = [name, score, battle_queue, children]\n thing = Stack()\n thing.add(m)\n first_player = battle_queue.peek().get_name()\n\n\n while not thing.is_empty():\n\n x = thing.remove()\n\n\n if x[2].is_over():\n if x[2].get_winner():\n winner_hp = x[2].get_winner().get_hp()\n x[1] = winner_hp if x[2].get_winner().get_name() \\\n == first_player else winner_hp * -1\n else:\n x[1] = 0\n\n elif x[1] is None and x[3] != []:\n j = []\n for i in x[3]:\n j += [i[1]]\n x[1] = max(j)\n\n\n elif not x[2].is_over():\n thing.add(x)\n\n moves = x[2].peek().get_available_actions()\n\n for i in moves:\n name += 1\n\n clone = x[2].copy()\n next_char = clone.peek()\n\n mover(i, next_char)\n\n if not clone.is_empty():\n clone.remove()\n\n new_tree = [name, None, clone, []]\n\n x[3].append(new_tree)\n thing.add(new_tree)\n return m", "def minimax(self, state, agent, parents_positions):\n if termialTest(state):\n return utility(state)\n return self.computeMinimaxScore(state, agent, parents_positions)", "def minimax(board):\n #This function will return the best move. \n #If Ai is playing as X, I can reduce the processing time by creating a random first move. 
\n if (board == initial_state()):\n coord1 = randint(0,2)\n coord2 = randint(0,2)\n return ((coord1,coord2))\n #first I determine which player's turn it is\n player_to_move = player(board)\n best_action = None\n #If I am X\n if(player_to_move == \"X\"):\n current_max = float('-inf')\n #for every possible action I have right now, I'll call my \"future\" Min_Value since I will asume what will happen if I take this move.\n for action in actions(board):\n #peak on the future if I take that move\n curr_score = Min_Value(result(board,action))\n #if my future is favorable, I will store it as my current best option.\n if curr_score>= current_max:\n current_max = curr_score\n best_action = action\n else:\n #If I am O, I do something similar. \n current_max = float('inf')\n #for every action I peak on the future for favorable results\n for action in actions(board):\n #this time, however, it would be X's turn so I need to start with Max_Value\n curr_score = Max_Value(result(board,action))\n #if my future is favorable, I store it\n if curr_score<= current_max:\n current_max = curr_score\n best_action = action\n #I return the best move.\n return best_action", "def player_loop(self):\n\n # Generate game tree object\n first_msg = self.receiver()\n # Initialize your minimax model\n model = self.initialize_model(initial_data=first_msg)\n\n while True:\n msg = self.receiver()\n\n # Create the root node of the game tree\n node = Node(message=msg, player=0)\n\n # Possible next moves: \"stay\", \"left\", \"right\", \"up\", \"down\"\n best_move = self.search_best_next_move(\n model=model, initial_tree_node=node)\n\n # Execute next action\n self.sender({\"action\": best_move, \"search_time\": None})", "def minimax(self, game, depth, maximizing_player=True):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n\n legal_moves = game.get_legal_moves()\n\n best_move = (-1, -1)\n if maximizing_player:\n score = float(\"-inf\")\n else:\n score = float(\"inf\")\n\n #At bottom of minimax tree\n if depth == 1:\n for each_move in legal_moves:\n new_game = game.forecast_move(each_move)\n if maximizing_player and self.score(new_game, self) > score:\n score = self.score(new_game, self)\n best_move = each_move\n elif not maximizing_player and self.score(new_game, self) < score:\n score = self.score(new_game, self)\n best_move = each_move\n\n return score, best_move\n\n #Not at bottom of minimax tree\n for each_move in legal_moves:\n #Return each_move that gets the highest score\n new_game = game.forecast_move(each_move)\n new_score , new_move = self.minimax(new_game, depth-1, not maximizing_player)\n if maximizing_player and new_score > score:\n score = new_score\n best_move = each_move\n elif not maximizing_player and new_score < score:\n score = new_score\n best_move = each_move\n\n return score, best_move", "def track(sprite, find_tags, pbad = 0.1) :\n\n enemies = []\n for t in find_tags:\n enemies.extend(get(t))\n\n distances = [distance(e.pos, sprite.pos) for e in enemies]\n\n enemy = enemies[distances.index(min(distances))]\n x, y = sprite.pos\n\n choices = [(x, y), (x, y-1), (x, y+1), (x+1, y), (x-1, y)]\n distances = [distance(p, enemy.pos) for p in choices]\n visibility = [visible(p) for p in choices]\n\n best = None\n min_dist = 999999\n for i in range(len(choices)):\n if is_wall(choices[i]) or not visibility[i]:\n continue\n\n #every now and then make a random \"bad\" move\n rnd = random.uniform(0, 1)\n if rnd <= pbad:\n best = choices[i]\n break\n elif distances[i] < min_dist:\n best = choices[i]\n 
min_dist = distances[i]\n if best is not None and best != (x,y):\n sprite.move((best[0] - x, best[1] - y))", "def makeMove(self, movable_statement):\n ### Student code goes here\n tile = movable_statement.terms[0].term.element\n initialX = movable_statement.terms[1].term.element\n initialY = movable_statement.terms[2].term.element\n goalX = movable_statement.terms[3].term.element\n goalY = movable_statement.terms[4].term.element\n r1 = parse_input(\"fact: (on \" + tile + \" \" + initialX + \" \" + initialY + \")\")\n self.kb.kb_retract(r1)\n r2 = parse_input(\"fact: (on empty \" + goalX + \" \" + goalY + \")\")\n self.kb.kb_retract(r2)\n stat1 = parse_input(\"fact: (on \" + tile + \" \" + goalX + \" \" + goalY + \")\")\n self.kb.kb_assert(stat1)\n stat2 = parse_input(\"fact: (on empty \" + initialX + \" \" + initialY + \")\")\n self.kb.kb_assert(stat2)\n\n\n #for facts in self.kb.facts:\n # print(facts.statement)\n\n #print(\"\\n\\n\")\n ##Need to handle adjacentTo\n '''ask = parse_input(\"fact: (adjacentTo empty ?tile)\")\n answer = self.kb.kb_ask(ask)\n empty_adj = []\n if answer:\n for ans in answer.list_of_bindings:\n adjTile = ans[0].bindings[0].constant.element\n if adjTile != tile:\n rt = parse_input(\"fact: (adjacentTo empty \" + adjTile + \")\")\n self.kb.kb_retract(rt)\n #print(\"RMOVINGGGG\")\n #print(rt)\n rt1 = parse_input(\"fact: (adjacentTo \" + adjTile + \" empty)\")\n self.kb.kb_retract(rt1)\n empty_adj.append(adjTile) #All of empty's adjacent tiles'''\n\n #for facts in self.kb.facts:\n # print(facts.statement)\n\n #print(\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\")\n '''ask1 = parse_input(\"fact: (adjacentTo \" + tile + \" ?tile)\")\n answer1 = self.kb.kb_ask(ask1)\n if answer1:\n for ans in answer1.list_of_bindings:\n adjTile = ans[0].bindings[0].constant.element\n if adjTile != \"empty\":\n stat = parse_input(\"fact: (adjacentTo empty \" + adjTile + \")\")\n self.kb.kb_assert(stat)\n radj1 = parse_input(\"fact: (adjacentTo \" + tile + \" \" + adjTile + \")\")\n self.kb.kb_retract(radj1)\n radj2 = parse_input(\"fact: (adjacentTo \" + adjTile + \" \" + tile + \")\")\n self.kb.kb_retract(radj2)\n for tiles in empty_adj:\n stat = parse_input(\"fact: (adjacentTo \" + tile + \" \" + tiles + \")\")\n self.kb.kb_assert(stat)'''", "def minimax_helper(self, game, depth, maximizing_player=True):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n \n # Get all the available moves at the current state\n legal_moves = game.get_legal_moves()\n\n # Handle a terminal event/exhaustive search completion\n # at least in relation to depth sought\n if (not legal_moves) or (depth == 0):\n if maximizing_player:\n return (self.score(game, game.active_player), (-1, -1))\n else:\n return (self.score(game, game.inactive_player), (-1, -1))\n\n if maximizing_player: # the current player\n this_score = float(\"-inf\")\n for move in legal_moves:\n next_ply_state = game.forecast_move(move)\n next_ply_score, next_ply_move = self.minimax_helper(next_ply_state,\n depth-1, False)\n\n # Identify the maximum score branch for the current player.\n if next_ply_score >= this_score:\n this_move = move\n this_score = next_ply_score\n\n else: # the current player's opponent\n this_score = float(\"inf\")\n for move in legal_moves:\n next_ply_state = game.forecast_move(move)\n next_ply_score, next_ply_move = self.minimax_helper(next_ply_state,\n depth-1, True) \n\n # Identify the minimum 
score branch for the opponent\n if next_ply_score <= this_score:\n this_move = move\n this_score = next_ply_score\n\n return this_score, this_move", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n 
break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # For this problem we will be reusing the majority of our work from question 2, but we will be\n # implementing alpha-beta pruning on top of our existing minimax infrastructure\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n\n def maximizer(curState, agentIndex, alpha, beta, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight > beta:\n return weight\n if alpha < weight:\n alpha = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, alpha, beta, depth):\n ghostActions = curState.getLegalActions(agentIndex)\n weight = 999999999 # Worst case starting value to be changed in the code\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last 
ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, alpha, beta, depth+1)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n endWeight = -999999999\n alpha = -999999999\n beta = 999999999\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n endWeight = minimizer(tempState, 1, alpha, beta, 0,)\n count[x] = endWeight\n if alpha < endWeight:\n alpha = endWeight\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def utility(state:State,maximizing_player):\n best_move_score = -1\n #######################[Goal]#########################\n is_current_player_stuck = is_stuck(state,state.player_type)\n other_player = RIVAL if state.player_type == PLAYER else PLAYER\n # Check if stuck\n if is_current_player_stuck:\n if state.player_type == PLAYER:\n state.players_score[state.player_type] -= state.penalty_score\n else:\n state.players_score[state.player_type] += state.penalty_score\n return state.players_score[state.player_type] - state.players_score[other_player] \n ######################################################\n # Else\n #--------------------------------------------------\n ################# Available Steps #################\n #--------------------------------------------------\n player_available_steps = availables(state.board, state.locations[PLAYER])\n h1 = 4-player_available_steps\n h4 = player_available_steps\n #--------------------------------------------------\n ################# Fruits Distance #################\n #--------------------------------------------------\n h2 = -1\n if state.fruits_ttl > 0 and len(state.fruits_dict) > 0:\n min_fruit_dist = float('inf')\n for fruit_loc in state.fruits_dict:\n curr_fruit_dist = Manhattan(state.locations[state.player_type], fruit_loc)\n # Check what is the closest fruit reachable\n if curr_fruit_dist < min_fruit_dist and curr_fruit_dist <= state.fruits_ttl:\n other_player_fruit_dist = Manhattan(state.locations[other_player], fruit_loc)\n if curr_fruit_dist < other_player_fruit_dist:\n min_fruit_dist = curr_fruit_dist\n max_dist = len(state.board)+len(state.board[0])\n h2 = (max_dist*10.0/min_fruit_dist)+1 if min_fruit_dist < float('inf') else -1\n #--------------------------------------------------\n ################# Reachable Squrs #################\n #--------------------------------------------------\n reachables_player = reachables(state.board,state.locations[PLAYER])\n reachables_rival = reachables(state.board,state.locations[RIVAL])\n h3 = reachables_player - reachables_rival # We want more for us\n #--------------------------------------------------\n ################# Combine it all. 
#################\n #--------------------------------------------------\n if not state.half_game():\n w = 0.8 if h2 > 0 else 1\n best_move_score = w*(h1-h3) + (1-w)*h2 \n else:\n w = 0.7 if h2 > 0 else 1\n best_move_score = w*(h4+h3) + (1-w)*h2 \n\n best_move_score += state.players_score[state.player_type]\n return best_move_score", "def minimax(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n \"\"\"\n From AIMA psuedocode:\n\n function MINIMAX-DECISION(state) returns an action\n return arg max a is in ACTIONS(s) MIN-VALUE(RESULT(state, a))\n \"\"\"\n\n best_move = (-1,-1)\n best_score = float(\"-inf\")\n actions = game.get_legal_moves()\n\n if not actions:\n return best_move\n else:\n best_move = actions[randint(0, len(actions) - 1)]\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n # return max(actions, key=lambda action: self._min_value(game.forecast_move(action), 1))\n for action in actions:\n score = self._min_value(game.forecast_move(action), 1)\n if score > best_score:\n best_score = score\n best_move = action\n\n except SearchTimeout:\n pass\n\n return best_move", "def parse(self, words, gold_tree=None, beam_size=10):\n if gold_tree:\n word_order = self.get_word_order(gold_tree)\n tags = self.tagger.tag(words)\n possible_configs = [self.initial_config(words)]\n while any(config['next_move'] != None for config in possible_configs):\n old_possible_configs = possible_configs\n possible_configs = []\n for config in old_possible_configs:\n config = self.move(config)\n candidates = self.valid_moves(config)\n if candidates:\n feat = self.features(words, tags, config)\n scores = self.predict(feat, candidates)\n if gold_tree:\n gold_move = self.gold_move(config, gold_tree, \\\n word_order)\n if config['is_gold'] and gold_move not in scores:\n possible_configs = self.update_and_reset_config( \\\n config, feat, gold_move)\n break\n # add new configs for the possible moves\n for curr_move, curr_score in scores.items():\n # create a copy of the config and append it to the list\n new_config = deepcopy(config)\n if curr_score > 0:\n new_config['score'] += log(curr_score)\n else:\n new_config['score'] += float(\"-inf\")\n new_config['next_move'] = curr_move\n if gold_tree and gold_move != curr_move:\n new_config['is_gold'] = False\n possible_configs.append(new_config)\n else:\n config['next_move'] = None\n possible_configs.append(config)\n # delete the configs with the lowest scores\n while len(possible_configs) > beam_size:\n worst_conf_ind, worst_conf = \\\n min(enumerate(possible_configs), \n key = lambda t: t[1]['score'])\n if gold_tree and worst_conf['is_gold'] == True:\n feat = self.features(words, tags, worst_conf)\n possible_configs = self.update_and_reset_config( \\\n worst_conf, feat, worst_conf['next_move'])\n else:\n del possible_configs[worst_conf_ind]\n # return best tree\n best_config = max(possible_configs, key = lambda t: t['score'])\n return tags, best_config['pred_tree']", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. 
\n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def run_ai():\n print(\"Minimax AI\") # First line is the name of this AI \n color = int(input()) # Then we read the color: 1 for dark (goes first), \n # 2 for light. \n\n while True: # This is the main loop \n # Read in the current game status, for example:\n # \"SCORE 2 2\" or \"FINAL 33 31\" if the game is over.\n # The first number is the score for player 1 (dark), the second for player 2 (light)\n next_input = input() \n status, dark_score_s, light_score_s = next_input.strip().split()\n dark_score = int(dark_score_s)\n light_score = int(light_score_s)\n\n\n if status == \"FINAL\": # Game is over. \n print \n else: \n board = eval(input()) # Read in the input and turn it into a Python\n # object. The format is a list of rows. 
The \n # squares in each row are represented by \n # 0 : empty square\n # 1 : dark disk (player 1)\n # 2 : light disk (player 2)\n \n # Select the move and send it to the manager\n movei, movej = select_move_minimax(board, color)\n # movei, movej = select_move_alphabeta(board, color)\n print(\"{} {}\".format(movei, movej))", "def action(self):\n\n self.start_timer()\n\n minimax_probability = self.norm.cdf(self.root.branching)\n use_minimax = boolean_from_probability(minimax_probability)\n if self.time_consumed > 53:\n # Time is starting to run low, use the faster option\n use_minimax=True\n\n if self.time_consumed < 59:\n if self.root.turn < 4:\n result = book_first_four_moves(self.root)\n elif use_minimax:\n result = minimax_paranoid_reduction(self.root)\n else:\n result = monte_carlo_tree_search(\n self.root,\n playout_amount=3,\n node_cutoff=4,\n outer_cutoff=4,\n num_iterations=1200,\n turn_time=0.75,\n exploration_constant=1.7,\n use_slow_culling=False,\n verbosity=0,\n use_prior=True,\n num_priors=4,\n use_fast_prune_eval=False,\n use_fast_rollout_eval=False,\n )\n else:\n result = greedy_choose(self.root)\n\n self.end_timer()\n\n return result", "def step(self):\n if self.model.schedule.steps < self.model.residential_steps:\n residential_move = True\n else:\n residential_move = False\n\n\n if residential_move:\n # only step the agents if the number considered is not exhausted\n if self.model.total_considered < self.model.residential_moves_per_step:\n # move residential\n U_res = self.get_res_satisfaction(self.pos)\n self.model.res_satisfaction.append(U_res)\n\n # print(\"U_res\",U_res)\n if U_res < self.T:\n\n # todo: implement different move schemes, for now only random\n # find all empty places\n # rank them\n # take one with boltzmann probability.\n self.evaluate_move(U_res, school=False)\n\n else:\n self.model.res_happy += 1\n\n self.model.total_considered += 1\n #print(\"considered\",self.model.total_considered)\n\n\n else:\n if self.model.total_considered < self.model.school_moves_per_step:\n # school moves\n # satisfaction in current school\n U = self.get_school_satisfaction(self.school, self.dist_to_school)\n self.model.satisfaction.append(U)\n\n # If unhappy, compared to threshold move:\n if U < self.T:\n #print('unhappy')\n self.evaluate_move(U, school=True)\n\n else:\n self.model.happy += 1\n if self.model.total_considered>0:\n self.model.percent_happy = np.ma(self.model.happy/self.model.total_considered)" ]
[ "0.55172825", "0.53830636", "0.537255", "0.53636533", "0.53437847", "0.53437847", "0.52932066", "0.5251569", "0.5240633", "0.5203933", "0.51793545", "0.5154717", "0.50856817", "0.5070792", "0.50352716", "0.50260264", "0.4998866", "0.49944788", "0.4988436", "0.4984612", "0.49742377", "0.4970498", "0.49661568", "0.49660122", "0.4960328", "0.492652", "0.4911554", "0.49102223", "0.4905687", "0.48831335" ]
0.62191063
0
Workqueue element site restriction check (same as workRestrictions)
def testPassesSiteRestriction(self): # test element ala MonteCarlo ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"], SiteBlacklist=["T1_US_FNAL"]) self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) # test element with input dataset ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) # test element with input and parent dataset ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []} ele['ParentFlag'] = True ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": []} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]} ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": ["T1_IT_CNAF", "T2_CH_CERN", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) # test element with input, parent and pileup dataset ele['PileupData'] = {"/MY/DATASET/NAME": []} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY")) ele['PileupData'] = {"/MY/DATASET/NAME": ["T2_US_Nebraska", "T1_IT_CNAF"]} self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T1_IT_CNAF", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testPassesSiteRestrictionLocationFlags(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n ele['NoPileupUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T2_US_Nebraska\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n # only the pileup flag enabled now\n ele['NoInputUpdate'] = False\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))", "def testPossibleSites(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset but no location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n self.assertEqual(possibleSites(ele), 
[])\n # test element with InputDataset and no match location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\"]}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and valid location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n\n # test element with InputDataset and ParentData with no location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and ParentData with no match location\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\"]}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and ParentData with valid location\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n\n # test element with InputDataset, PileupData and ParentData with no location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset, PileupData and ParentData with no match location\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset, PileupData and ParentData with valid location\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])", "def testPossibleSitesLocationFlags(self):\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and no location, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but pu flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [\"T1_IT_CNAF\"])\n # test element with InputDataset and one match, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and ParentData and no location, but both flags on\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but pileup flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [])\n\n # test element with InputDataset, PileupData and ParentData with no location, but pileup flag on\n 
ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertEqual(possibleSites(ele), [])", "def accept_policy(\n Qs, utility_function, e_parameter, server_utilities, current_server_id\n):\n condition = server_utilities[current_server_id - 1] <= utility_function(\n Qs, current_server_id, e_parameter\n )\n return condition", "def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers", "def rule_40_igw_available(session):\n def has_igw(session, side):\n conn_vpc = session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets(\n [session[\"config\"][side][\"res\"][\"subnet_id\"]])[0]\n\n for igw in conn_vpc.get_all_internet_gateways():\n for att in igw.attachments:\n if att.vpc_id == subnet.vpc_id:\n return True\n return False\n\n return has_igw(session, \"server\") and has_igw(session, \"client\")", "def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1", "def check_restrictions(restrictions, element, keys, verbose):\n params = OrderedDict(zip(keys, element))\n for restrict in restrictions:\n if not eval(replace_param_occurrences(restrict, params)):\n if verbose:\n print(\"skipping config\", get_instance_string(params), \"reason: config fails restriction\")\n return False\n return True", "def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''", "def __verify_queue_item(self, queue_item):\n\n browser = BrowserHelper.request(queue_item)\n return browser and len(browser.window_handles) >= 2", "def check(self):\n self.__check_request_limit()", "def test_add_website_error(self, enabled_websites_mock):\n self.subscription.plan.allowance.return_value = 1\n with self.assertRaises(SubscriptionWebsiteLimitReached):\n self.subscription.add_website('url')\n self.assertEqual(len(self.subscription.enabled_websites()), 1)", "def test_with_limited_localsite(self):\n form = MyConfigForm(integration=self.integration,\n request=self.request,\n limit_to_local_site=self.local_site_1)\n\n self.assertEqual(form.limited_to_local_site, self.local_site_1)\n self.assertNotIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group])\n self.assertEqual(\n form.fields['my_conditions'].choice_kwargs.get('local_site'),\n self.local_site_1)", "def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False", "def __should_payload_execute(self, queue_item):\n\n soup = queue_item.get_soup_response()\n\n ng_app_soup = soup.select(\"[ng-app]\")\n if not ng_app_soup:\n return False\n\n for non_bindable in ng_app_soup[0].select(\"[ng-non-bindable]\"):\n non_bindable.decompose()\n\n in_scope_html = str(ng_app_soup[0])\n\n if queue_item.payload[\"value\"] in in_scope_html:\n return 
True\n\n return False", "def MembershipCondition(self) -> IMembershipCondition:", "def _is_job_within_limits(self, job_limits: dict) -> bool:\n # Job limits not specified in the config will default to an available limit of 1.\n return all(\n self.limits.get(limit_name, 1) - self.limits_used[limit_name] - count >= 0\n for limit_name, count in job_limits.items()\n )", "def check_element(self, e):\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def test_listing_from_wall_when_blocked_some_users(self):", "def is_request_in_themed_site():\n # We need to give priority to theming/site-configuration over microsites\n return configuration_helpers.is_site_configuration_enabled()", "def is_request_in_microsite():\r\n return get_configuration()", "def test_published_story_must_be_visible_for_everyone_but_blocked(self):\n self.assertEqual(self.ps.is_visible_for(self.au), True)\n\n \"\"\" Published story must be visible for another.\"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u2), True)\n\n \"\"\" Publsihed story must be visible for owner. \"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def is_bounded(self):\n return True", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def __call__(self, restriction):\n return NodeList([i for i in self if i.satisfies(restriction)])", "def check_engine_limits(current_rqmt, task):\n current_rqmt['time'] = min(168, current_rqmt.get('time', 1))\n return current_rqmt", "def vcac_worklfow_request(self):\n logging.info(\"Inside ucsvm_worklfow_request method base class\")\n return None", "def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)", "def check_localSE_space(sitename, ub):\n\n # Select the correct mover\n (copycmd, setup) = getCopytool()\n\n tolog(\"Calling getSiteMover from check_localSE_space\")\n tolog(\"Copy command: %s\" % (copycmd))\n tolog(\"Setup: %s\" % (setup))\n sitemover = getSiteMover(copycmd, setup)\n tolog(\"Got site mover: %s\" % str(sitemover))\n tolog(\"Checking SE space...\")\n try:\n retval = int(sitemover.check_space(ub))\n if retval == 0:\n retval = 999995\n tolog(\"0 available space reported, returning %d\" % (retval))\n else:\n tolog(\"check_localSE_space will return %d\" % (retval))\n except:\n retval = 999999\n tolog(\"!!WARNING!!2999!! Exception (%s) in checking available space, returning %d\" % (get_exc_short(), retval))\n return retval", "def relevant_domains(self):\n pass" ]
[ "0.6806962", "0.5811562", "0.5728457", "0.53757226", "0.5239149", "0.52173674", "0.5165484", "0.51420397", "0.51234454", "0.5076183", "0.5031014", "0.50261384", "0.5019831", "0.5010309", "0.50060636", "0.49717343", "0.4966067", "0.49175805", "0.49115157", "0.49010026", "0.48990914", "0.48906416", "0.48679698", "0.48650366", "0.48634395", "0.4863414", "0.48623064", "0.48507473", "0.48477426", "0.48374036" ]
0.7161033
0
Workqueue element site restriction check (same as workRestrictions)
def testPassesSiteRestrictionLocationFlags(self): # test element ala MonteCarlo ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"], SiteBlacklist=["T1_US_FNAL"]) self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) # test element with input dataset ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []} ele['NoInputUpdate'] = True self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) # test element with input and parent dataset ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []} ele['ParentFlag'] = True ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": []} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]} ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": ["T1_IT_CNAF", "T2_CH_CERN", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) # test element with input, parent and pileup dataset ele['PileupData'] = {"/MY/DATASET/NAME": []} ele['NoPileupUpdate'] = True self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) ele['PileupData'] = {"/MY/DATASET/NAME": ["T2_US_Nebraska", "T1_IT_CNAF"]} self.assertFalse(ele.passesSiteRestriction("T2_US_Nebraska")) ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T1_IT_CNAF", "T2_DE_DESY"]} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY")) # only the pileup flag enabled now ele['NoInputUpdate'] = False ele['PileupData'] = {"/MY/DATASET/NAME": []} self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL")) self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN")) self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF")) self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testPassesSiteRestriction(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))", "def testPossibleSites(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset but no location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and no match location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\"]}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and valid location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n\n # test element with InputDataset 
and ParentData with no location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and ParentData with no match location\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\"]}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset and ParentData with valid location\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n\n # test element with InputDataset, PileupData and ParentData with no location\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset, PileupData and ParentData with no match location\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertEqual(possibleSites(ele), [])\n # test element with InputDataset, PileupData and ParentData with valid location\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])", "def testPossibleSitesLocationFlags(self):\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and no location, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but pu flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [\"T1_IT_CNAF\"])\n # test element with InputDataset and one match, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and ParentData and no location, but both flags on\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but pileup flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [])\n\n # test element with InputDataset, PileupData and ParentData with no location, but pileup flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but both flags on\n ele['NoInputUpdate'] = True\n 
self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertEqual(possibleSites(ele), [])", "def accept_policy(\n Qs, utility_function, e_parameter, server_utilities, current_server_id\n):\n condition = server_utilities[current_server_id - 1] <= utility_function(\n Qs, current_server_id, e_parameter\n )\n return condition", "def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers", "def rule_40_igw_available(session):\n def has_igw(session, side):\n conn_vpc = session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets(\n [session[\"config\"][side][\"res\"][\"subnet_id\"]])[0]\n\n for igw in conn_vpc.get_all_internet_gateways():\n for att in igw.attachments:\n if att.vpc_id == subnet.vpc_id:\n return True\n return False\n\n return has_igw(session, \"server\") and has_igw(session, \"client\")", "def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1", "def check_restrictions(restrictions, element, keys, verbose):\n params = OrderedDict(zip(keys, element))\n for restrict in restrictions:\n if not eval(replace_param_occurrences(restrict, params)):\n if verbose:\n print(\"skipping config\", get_instance_string(params), \"reason: config fails restriction\")\n return False\n return True", "def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''", "def __verify_queue_item(self, queue_item):\n\n browser = BrowserHelper.request(queue_item)\n return browser and len(browser.window_handles) >= 2", "def check(self):\n self.__check_request_limit()", "def test_add_website_error(self, enabled_websites_mock):\n self.subscription.plan.allowance.return_value = 1\n with self.assertRaises(SubscriptionWebsiteLimitReached):\n self.subscription.add_website('url')\n self.assertEqual(len(self.subscription.enabled_websites()), 1)", "def test_with_limited_localsite(self):\n form = MyConfigForm(integration=self.integration,\n request=self.request,\n limit_to_local_site=self.local_site_1)\n\n self.assertEqual(form.limited_to_local_site, self.local_site_1)\n self.assertNotIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group])\n self.assertEqual(\n form.fields['my_conditions'].choice_kwargs.get('local_site'),\n self.local_site_1)", "def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False", "def __should_payload_execute(self, queue_item):\n\n soup = queue_item.get_soup_response()\n\n ng_app_soup = soup.select(\"[ng-app]\")\n if not ng_app_soup:\n return False\n\n for non_bindable in ng_app_soup[0].select(\"[ng-non-bindable]\"):\n non_bindable.decompose()\n\n in_scope_html = str(ng_app_soup[0])\n\n if queue_item.payload[\"value\"] in in_scope_html:\n return True\n\n return False", "def MembershipCondition(self) -> IMembershipCondition:", "def _is_job_within_limits(self, job_limits: dict) -> bool:\n # Job limits not specified in the config will default to an available limit of 1.\n return all(\n self.limits.get(limit_name, 1) - self.limits_used[limit_name] - count >= 0\n for limit_name, count in job_limits.items()\n )", "def check_element(self, e):\n my_view = 
{}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def test_listing_from_wall_when_blocked_some_users(self):", "def is_request_in_themed_site():\n # We need to give priority to theming/site-configuration over microsites\n return configuration_helpers.is_site_configuration_enabled()", "def is_request_in_microsite():\r\n return get_configuration()", "def test_published_story_must_be_visible_for_everyone_but_blocked(self):\n self.assertEqual(self.ps.is_visible_for(self.au), True)\n\n \"\"\" Published story must be visible for another.\"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u2), True)\n\n \"\"\" Publsihed story must be visible for owner. \"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def is_bounded(self):\n return True", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def __call__(self, restriction):\n return NodeList([i for i in self if i.satisfies(restriction)])", "def check_engine_limits(current_rqmt, task):\n current_rqmt['time'] = min(168, current_rqmt.get('time', 1))\n return current_rqmt", "def vcac_worklfow_request(self):\n logging.info(\"Inside ucsvm_worklfow_request method base class\")\n return None", "def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)", "def check_localSE_space(sitename, ub):\n\n # Select the correct mover\n (copycmd, setup) = getCopytool()\n\n tolog(\"Calling getSiteMover from check_localSE_space\")\n tolog(\"Copy command: %s\" % (copycmd))\n tolog(\"Setup: %s\" % (setup))\n sitemover = getSiteMover(copycmd, setup)\n tolog(\"Got site mover: %s\" % str(sitemover))\n tolog(\"Checking SE space...\")\n try:\n retval = int(sitemover.check_space(ub))\n if retval == 0:\n retval = 999995\n tolog(\"0 available space reported, returning %d\" % (retval))\n else:\n tolog(\"check_localSE_space will return %d\" % (retval))\n except:\n retval = 999999\n tolog(\"!!WARNING!!2999!! Exception (%s) in checking available space, returning %d\" % (get_exc_short(), retval))\n return retval", "def relevant_domains(self):\n pass" ]
[ "0.71603197", "0.58114254", "0.5727928", "0.53752995", "0.5239392", "0.5217931", "0.5166661", "0.5141289", "0.51245993", "0.50754255", "0.5030956", "0.50263387", "0.5020246", "0.50115657", "0.5005242", "0.49711114", "0.49657822", "0.49160054", "0.49109706", "0.4902486", "0.48999903", "0.48903558", "0.48676392", "0.4865595", "0.48627347", "0.4862431", "0.48616472", "0.4849938", "0.48487473", "0.4838078" ]
0.68061155
1
Workqueue element data location check, using the input and PU data location flags
def testPossibleSitesLocationFlags(self): ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset and no location, but input flag on ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []} ele['NoInputUpdate'] = True self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset and one match, but input flag on ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_IT_CNAF", "T2_CH_CERN"]} self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset and one match, but pu flag on ele['NoInputUpdate'] = False ele['NoPileupUpdate'] = True self.assertEqual(possibleSites(ele), ["T1_IT_CNAF"]) # test element with InputDataset and one match, but both flags on ele['NoInputUpdate'] = True self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset and ParentData and no location, but both flags on ele['ParentFlag'] = True ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": []} self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset and ParentData and no location, but input flag on ele['NoPileupUpdate'] = False self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset and ParentData and no location, but pileup flag on ele['NoInputUpdate'] = False ele['NoPileupUpdate'] = True self.assertEqual(possibleSites(ele), []) # test element with InputDataset, PileupData and ParentData with no location, but pileup flag on ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_CH_CERN", "T2_DE_DESY"]} ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": ["T2_DE_DESY"]} ele['PileupData'] = {"/MY/DATASET/NAME": []} self.assertEqual(possibleSites(ele), ["T2_DE_DESY"]) # test element with InputDataset, PileupData and ParentData with no location, but both flags on ele['NoInputUpdate'] = True self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"]) # test element with InputDataset, PileupData and ParentData with no location, but input flag on ele['NoPileupUpdate'] = False self.assertEqual(possibleSites(ele), [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testPassesSiteRestrictionLocationFlags(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n ele['NoPileupUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T2_US_Nebraska\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n # only the pileup flag enabled now\n ele['NoInputUpdate'] = False\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def check_data_validity(X, y, query, task):\n # ADD IMPLEMENTATION HERE", "def check(self, data):# ->bool:\r\n return check(self.gd, data)", "def check_pt_data(data):\r\n # bflb_utils.printf(binascii.hexlify(data))\r\n if partition_magic_code != 
bflb_utils.bytearray_to_int(data[0:4]):\r\n bflb_utils.printf(\"partition bin magic check fail \", binascii.hexlify(data[0:4]))\r\n return False, 0, 0\r\n table_count = bflb_utils.bytearray_to_int(\r\n data[6:7]) + (bflb_utils.bytearray_to_int(data[7:8]) << 8)\r\n # bflb_utils.printf(\"table count: \", table_count)\r\n if table_count > 16:\r\n bflb_utils.printf(\"error, pt enter size > 16\")\r\n return False, 0, 0\r\n crcarray = bflb_utils.get_crc32_bytearray(data[:12])\r\n if data[12:16] != crcarray:\r\n bflb_utils.printf(\"pt table crc fail \", binascii.hexlify(crcarray))\r\n return False, 0, 0\r\n crcarray = bflb_utils.get_crc32_bytearray(data[16:16 + (36 * table_count)])\r\n if data[16 + (36 * table_count):16 + (36 * table_count) + 4] != crcarray:\r\n bflb_utils.printf(\"pt entries crc fail \", binascii.hexlify(crcarray))\r\n return False, 0, 0\r\n age = bflb_utils.bytearray_to_int(data[8:9]) + (bflb_utils.bytearray_to_int(data[9:10])<<8) +\\\r\n (bflb_utils.bytearray_to_int(data[10:11])<<16) + (bflb_utils.bytearray_to_int(data[11:12])<<24)\r\n return True, table_count, age", "def check_box(volume,point,is_queued_map,is_visited_map):\n list_not_visited=[]\n list_not_queued = []\n list_are_near = []\n\n if point[0]==1227 and point[1]==735 and point[2]==27:\n pass\n\n\n for x in xrange(-1, 2):\n\n # Edgecase for x\n if point[0] + x < 0 or point[0] + x > volume.shape[0] - 1:\n continue\n\n for y in xrange(-1, 2):\n\n # Edgecase for y\n if point[1] + y < 0 or point[1] + y > volume.shape[1] - 1:\n continue\n\n for z in xrange(-1, 2):\n\n # Edgecase for z\n if point[2] + z < 0 or point[2] + z > volume.shape[2] - 1:\n continue\n\n # Dont look at the middle point\n if x == 0 and y == 0 and z == 0:\n continue\n\n # TODO case if loop, all are queued but not visited\n if volume[point[0] + x, point[1] + y, point[2] + z] == 1:\n\n\n list_are_near.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n if is_queued_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_queued.extend([[point[0] + x, point[1] + y, point[2] + z]])\n if is_visited_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_visited.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n is_visited_map[point[0],point[1],point[2]]=1\n return list_not_queued,list_not_visited,is_visited_map,list_are_near", "def _check_data_point(cube, metadata):\n point_index = []\n\n for dim_length in cube.shape:\n point_index.append(int(random.random() * dim_length))\n\n point_index = tuple(point_index)\n\n try:\n point_cube = cube[point_index]\n _data_point = point_cube.data\n except Exception:\n msg = 'Unable to extract data point {} from file: {}'.format(\n point_index, metadata['basename'])\n raise FileValidationError(msg)\n else:\n return True", "def contained(name, data): # noqa: N805", "def loadBlockQueue(input_queue, county_fips, config, start_time):\n try:\n temp_time = time.localtime()\n county_counter = 0\n for c in county_fips:\n input_queue.put((c))\n county_counter += 1\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 6 OF 13 - COMPLETED LOADING INPUT \n QUEUE WITH COUNTY DATA\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message,temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return True, county_counter\n\n except:\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO LOADING QUEUE WITH\n COUNTY DATA\n \"\"\"\n my_message = ' '.join(my_message.split()) + '\\n' + traceback.format_exc()\n 
print(nbmf.logMessage(my_message,temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return False, None", "def check_path(data_pointer, log, msg):\n if not os.path.exists(data_pointer):\n log.debug(msg)\n return False\n else:\n return data_pointer", "def check_input_data(self, align=True):\n return self.check_data_list(self.inputs, align)", "def isUndefinedData(program: ghidra.program.model.listing.Program, addr: ghidra.program.model.address.Address) -> bool:\n ...", "def is_local(queue):\n _setup()\n return queue in [dest(0) for dest in cupsd.getDests()]", "async def _check_latch_data(self, key, data):\n process = False\n latching_entry = self.latch_map.get(key)\n if latching_entry[Constants.LATCH_STATE] == Constants.LATCH_ARMED:\n # Has the latching criteria been met\n if latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_EQ:\n if data == latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_GT:\n if data > latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_GTE:\n if data >= latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_LT:\n if data < latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_LTE:\n if data <= latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n if process:\n latching_entry[Constants.LATCHED_DATA] = data\n await self._process_latching(key, latching_entry)", "async def _check_latch_data(self, key, data):\n process = False\n latching_entry = self.latch_map.get(key)\n if latching_entry[Constants.LATCH_STATE] == Constants.LATCH_ARMED:\n # Has the latching criteria been met\n if latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_EQ:\n if data == latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_GT:\n if data > latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_GTE:\n if data >= latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_LT:\n if data < latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_LTE:\n if data <= latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n if process:\n latching_entry[Constants.LATCHED_DATA] = data\n await self._process_latching(key, latching_entry)", "def check(self,user_inp):\n self.z=0\n\n self.user_inp=user_inp\n for i in range(8):\n for j in range(8):\n if self.user_inp == self.lst[i][j]:\n \"\"\"If user input matches the element in the matrix then it breaks\"\"\"\n self.z=1\n break\n return self.z", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def __validate_node_data(self, data):\n\n # skipping check of 'grapheap_node_id' optimisation key\n if all(key in data for key in self.optimisation_keys[1:]):\n return True\n\n else:\n missing_keys = [\n x for x in self.optimisation_keys[1:] if x not in data]\n raise ValueError(\"Grapheap Error: \" + str(missing_keys) +\n \" optimisation keys 
missing in data\")", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def _check_bound(self, q):\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n inside_bound = np.all(self._end_effector_low <= xyz) and np.all(xyz <= self._end_effector_high)\n inside_buffer_bound = (np.all(self._end_effector_low + self._box_bound_buffer <= xyz) and \\\n np.all(xyz <= self._end_effector_high - self._box_bound_buffer))\n return inside_bound, inside_buffer_bound, mat, xyz", "def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True", "def check(self, input, node):\n assert False # Must be redefined", "def __getitem__(self, pos):\n row, column = pos\n if row <= self.n_rows-1 and column <= self.n_columns-1:\n return self.bits[row][column]\n else:\n return False", "def __contains__(self, data):\n return self._contains(data, self.root) # Start at the root", "def XPLMCanWriteDataRef(inDataRef):\n return bool", "def testPassesSiteRestriction(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n 
self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))", "def CheckForExistence(requested_data, available_data):\n if requested_data is not None:\n return 1 # The requested data exists.\n elif available_data:\n return -1 # The requested data does not exist.\n else:\n return 0 # No data exists at all.", "def check_task(self): \n return self.buffer[0]", "def assert_stored_sp_rel_pos(self):\n# ## Temporal\n# if self.sp_relative_pos is not None:\n# if self._constant_neighs:\n# if self.staticneighs:\n# assert(len(np.array(self.sp_relative_pos).shape) == 3)\n# else:\n# assert(len(np.array(self.sp_relative_pos).shape) == 4)\n# #################\n array_types = [list, np.ndarray]\n if self.sp_relative_pos is not None:\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if type(self.sp_relative_pos) in [float, int, np.int32, np.int64]:\n# ### Probably redundant\n# # it is needed or possible this situation?\n# pass\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if self.ks is None:\n# assert(self.staticneighs)\n# assert(len(self.sp_relative_pos) == len(self.iss))\n if self.staticneighs:\n assert(len(self.sp_relative_pos) == len(self.iss))\n ## Assert deep 3\n if len(self.iss):\n assert(type(self.sp_relative_pos[0]) in array_types)\n else:\n assert(self.ks is not None)\n assert(len(self.sp_relative_pos) == len(self.ks))\n if type(self.sp_relative_pos[0]) in array_types:\n if not self.staticneighs:\n assert(len(self.sp_relative_pos[0]) == len(self.iss))\n if len(self.sp_relative_pos[0]) > 0:\n assert(type(self.sp_relative_pos[0][0]) in array_types)", "def _check_for_incomplete_input(self):\n pass" ]
[ "0.5973574", "0.5559499", "0.5359043", "0.53462744", "0.5329235", "0.5259342", "0.5239512", "0.5232094", "0.50799614", "0.50530607", "0.50330347", "0.5014102", "0.5005369", "0.49977046", "0.49977046", "0.498443", "0.49674127", "0.49529317", "0.4934858", "0.4912697", "0.49066678", "0.48973137", "0.48662692", "0.48587972", "0.48576152", "0.48502252", "0.48276305", "0.48241422", "0.4811092", "0.48033455" ]
0.5818764
1
Test run get_most_volatile() with stock prices from a file.
def test_run(filename='prices.csv'): prices = pd.read_csv(filename, parse_dates=['date']) print("Most volatile stock: {}".format(get_most_volatile(prices)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_stock(db, openfile):\n pass", "def get_most_volatile(prices):\n \n prices = prices.set_index('date')\n\n stock_return_volatility = []\n\n for ticker in prices.ticker.unique():\n prices_for_ticker = prices[prices['ticker'] == ticker]['price']\n log_return = np.log(prices_for_ticker) - np.log(prices_for_ticker.shift(1))\n stock_return_volatility.append(log_return.std())\n\n volatility_series = pd.Series(stock_return_volatility, index=prices.ticker.unique())\n\n return volatility_series.idxmax()", "def get_most_volatile(prices):\n # TODO: Fill in this function.\n #I have tried to select the specific column and then apply the standard deviation to \n # check the volatility to a column to see how it works.\n \n \n price_modified=prices.groupby(prices['ticker'])\n # print(price_modified.price.rolling(2).std())", "def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)", "def get_stocks_from_file(self, limit=0):\n\n file_rows = []\n with open(self.file_name) as f:\n reader = csv.reader(f)\n for row in reader:\n file_rows.append(row)\n\n for stock_info in file_rows:\n ticker, name, industry, cap, exchange = stock_info[:5]\n\n if cap[-1] == 'B' or not self.only_billions:\n stock_data = StockData(ticker, name, industry, exchange)\n self.stock_list.append(stock_data)\n\n # Handle limiting\n if limit > 0:\n if len(self.stock_list) >= limit:\n break\n\n print(\"TOTAL SIZE:\", len(self.stock_list))", "def test_run():\n for symbol in ['AAPL', 'IBM']:\n print(\"Max close\")\n print(symbol, get_max_close(symbol))", "def get_most_expensive_cars(table):\n cur, con = database.connect_to_database()\n query = \"SELECT t.* FROM \" + table + \" t WHERE t.price = \\\n (select max(subt.price) from \" + table + \" subt);\"\n return pandas.read_sql_query(query, con)", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def download_all():\r\n f = open('stock_symbols.txt', 'r')\r\n fout = open('../data/stocks_read.txt', 'w')\r\n count_max = 500\r\n count = 0\r\n for stock_symbol in f:\r\n stock_symbol = stock_symbol.strip()\r\n try:\r\n stock_download(stock_symbol)\r\n fout.write(stock_symbol + '\\n')\r\n except:\r\n print(\"was not able to read file \", stock_symbol)\r\n count = count + 1\r\n if count >= count_max:\r\n break\r\n f.close()\r\n fout.close", "def test_low_stockprice_high_interest(self):\n stock_prices = np.array([[5, 4, 4, 2],\n [5, 3, 3, 3],\n [5, 4, 2, 2],\n [5, 3, 3, 1]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def test_get_result_top_files(self):\n pass", "def get_prices(f_name):\n import ujson as json\n with open(f_name, 'r') as fid:\n indv_points = fid.read().split('\\n')\n prev_time, store_data = 0, []\n for ele in indv_points:\n if len(ele) != 0:\n main_dict = json.decode(ele)\n\n # Check that the data is in ascending order of time\n assert (main_dict['timestamp'] > prev_time)\n prev_time = main_dict['timestamp']\n # Store the data\n store_data.append({'change': main_dict['ticker']['change'], 'time': main_dict['timestamp'],\n 'price': main_dict['ticker']['price'], 'volume':main_dict['ticker']['volume']})\n logging.info('Completed getting prices from: {}, entries: {}'.format(f_name, len(store_data)))\n 
return store_data", "def read_freq(bfile, plinkexe, freq_threshold=0.1, maxmem=1700, threads=1):\n high = 1 - freq_threshold\n low = freq_threshold\n if not os.path.isfile('%s.frq.gz' % bfile):\n nname = os.path.split(bfile)[-1]\n frq = ('%s --bfile %s --freq gz --keep-allele-order --out %s --memory '\n '%d --threads %d')\n line = frq % (plinkexe, bfile, nname, maxmem, threads)\n o, e = executeLine(line)\n frq = pd.read_table('%s.frq.gz' % nname, delim_whitespace=True)\n else:\n frq = pd.read_table('%s.frq.gz' % bfile, delim_whitespace=True)\n # filter MAFs greater than 1 - freq_threshold and smaller than freq_threshold\n return frq[(frq.MAF < high) & (frq.MAF > low)]", "def test_lowest_price_many_listings(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_MANY_RESULTS))\n self.assertEqual('0,03€', listings)", "def stock_market(no_profiles: int) -> tuple:\n all_companies = []\n Stocks = namedtuple(\"Stocks\", 'name symbol open high close company_weight')\n MkValue_ = random.uniform(1000, 50000, 100)\n wts_ = random.uniform(0, 1, 100)\n wts_ = wts_/sum(wts_)\n\n for _ in range(100):\n name = fake.company()\n open_ = round(MkValue_[_]*wts_[_],2)\n close = round(open_ * random.uniform(0.7, 1.15), 2)\n high = round(open_ * random.uniform(0.85, 1.15), 2)\n if high < open_:\n high = open_\n if high < close:\n high = close\n\n all_companies.append(\n Stocks(name=name, symbol=symbol(name), open=open_, high=round(high, 2), close=round(close, 2), company_weight=round(wts_[_], 4)))\n\n stock_index = round(\n sum(x.open * x.company_weight for x in all_companies), 4)\n highest_for_day = round(\n sum(x.high * x.company_weight for x in all_companies), 2)\n lowest_close_for_day = round(\n sum(x.close * x.company_weight for x in all_companies), 2)\n\n # print(f\"\\n------------------------------------Top 100 listed companies on Fake Stock Exchange------------------------------------\")\n # [print(x) for x in sorted(all_companies, key=lambda x:x.symbol)]\n # print(f\"\\n--------------Main details on {date.today()}--------------\")\n # print(f\"\\nStart of the day: {stock_index}\")\n # print(f\"Highest for the day: {highest_for_day}\")\n # print(f\"Lowest close for the day: {lowest_close_for_day}\")\n return sorted(all_companies, key=lambda x: x.symbol), stock_index, highest_for_day, lowest_close_for_day", "def get_most_and_least_expensive_high_review_product(df):\n try:\n df3 = merge_metadata(df)\n product_filter = df3['overall'] >= 4.0\n high_reviewed_products = df3[product_filter]\n # print high_reviewed_products[:10]\n # The data contained NaN so we use the nanmax/min funtions to get max/min\n most_exp = round(np.nanmax(high_reviewed_products['price'])[0], 2)\n least_exp = round(np.nanmin(high_reviewed_products['price'])[0], 2)\n\n most_exp_prod = df3.loc[df3['price'] == most_exp, 'asin'].iloc[0]\n least_exp_prod = df3.loc[df3['price'] == least_exp, 'asin'].iloc[0]\n write_text_tofile(\"Most Expensive Product: \" + str(most_exp_prod) + \", Price: \" + str(most_exp))\n write_text_tofile(\"Least Expensive Product: \" + str(least_exp_prod) + \", Price: \" + str(least_exp))\n return {most_exp_prod: most_exp, least_exp_prod: least_exp}\n except Exception as e:\n print \"Error getting most and least expensive high review product\"\n print str(e)\n pass", "def execQ3():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n tuna = frame[dFrame.Series_title_1 == \"Tuna - canned (supermarket only), 185g\"]\n cheapest = tuna.sort_values(by=\"Price\").head(1)\n 
return cheapest", "def sorted_fruit_quantity(f):\n # skip the header of the file\n move_cursor(f)\n # put all the quantities into a list\n # expected output: [5, 10, 3, 15]\n # read the file line by line\n output = []\n for line in f:\n line_list = line.split() # [\"Apple\",\"5\"]\n output.append(int(line_list[1]))\n # sort the list in descending order\n # expected output: [15, 10, 5, 3]\n output.sort(reverse=True)\n # only select the highest two quantities in the list and return them\n # expected output: [15, 10]\n # slicing\n # Hint: ending pos is the index of the first element that I don't want to include\n # in the final result\n return output[0:2]", "def _process_stocks(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'stock'))\n logger.info(\"building labels for stocks\")\n\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (stock_id, dbxref_id, organism_id, name, uniquename,\n description, type_id, is_obsolete) = line\n# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670\n\n stock_num = stock_id\n stock_id = 'FlyBase:'+uniquename\n self.idhash['stock'][stock_num] = stock_id\n stock_label = description\n\n organism_key = organism_id\n taxon = self.idhash['organism'][organism_key]\n\n # from what i can tell, the dbxrefs are just more FBst,\n # so no added information vs uniquename\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode \\\n and int(stock_num) not in self.test_keys['strain']:\n continue\n\n # tax_label = self.label_hash[taxon] # unused\n # add the tax in case it hasn't been already\n model.addClassToGraph(taxon)\n model.addIndividualToGraph(stock_id, stock_label, taxon)\n if is_obsolete == 't':\n model.addDeprecatedIndividual(stock_id)\n\n return", "def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def test_get_result_top_file(self):\n pass", "def test_wb(self):\n df = dep.read_wb(get_path('wb.txt'))\n self.assertAlmostEquals(df['precip'].max(), 162.04, 2)", "def test_max_daily_profit_output_correct(price_data):\n max_df = price_data\n res = c.calculate_max_profit(price_data)\n\n max_df = max_df.loc[(max_df['ticker'] == 'GOOGL')]\n max_df['profit'] = max_df['high'] - max_df['low']\n max_df = max_df.sort_values(by='profit', ascending=False).reset_index()\n max_profit = max_df.loc[0, 'profit']\n\n assert res.loc[(res['ticker'] == 'GOOGL'), 'profit'].item() == max_profit", "def writing_get_most_played(file_name):\n result = str(reports.get_most_played(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def test_large_import_recovered(self):\n self.create_sample_data_set_dir(\"DOS15908.DAT\", RECOV_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED,96,400)", "def test_lowest_price(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_NORMAL_LISTING))\n 
self.assertEqual('11,59€', listings)", "def read_stock_data(stock_name, stock_file_name):\n daily_stock = read_json_from_file(stock_file_name)\n monthly_averages.sort(key=operator.itemgetter(0),reverse=False) # sorts list in ascending order by formatted_date\n average_price_numerator = 0 # resets numerator value to 0 when previous month value has been calculated\n average_price_denominator = 0 # resets denominator value to 0 when previous month value has been calculated\n del monthly_averages[:] # monthly_averages list will be cleared to allow testing of different files\n comparison_month = \"\"\n for ele in daily_stock:\n current_month = ele.get(\"Date\")[0:7]\n if comparison_month == \"\": # determines whether the next dictionary is of the current month or previous\n comparison_month = current_month\n if current_month == comparison_month:\n average_price_numerator += (ele.get(\"Volume\") * ele.get(\"Close\"))\n average_price_denominator += (ele.get(\"Volume\"))\n else:\n monthly_average_price = average_price_numerator / average_price_denominator\n formatted_date = comparison_month.replace(\"-\",\"/\") # formats date to match format in tests\n monthly_averages.append((formatted_date, round(monthly_average_price,2)))\n comparison_month = current_month\n average_price_numerator = (ele.get(\"Volume\") * ele.get(\"Close\"))\n average_price_denominator = (ele.get(\"Volume\"))\n\n # final month calculation\n monthly_average_price = average_price_numerator / average_price_denominator\n formatted_date = comparison_month.replace(\"-\", \"/\")\n monthly_averages.append((formatted_date, round(monthly_average_price, 2)))\n\n return monthly_averages", "def process_crsp(filename, frequency='Q'):\n data = pd.read_feather(filename)\n data['share'] = data['shrout'] * data['cfacshr'] * 1e3\n data['price'] = data['prc'].abs() / data['cfacpr']\n data['cap'] = data.price * data.share / 1e6\n\n aggs = {'rel': 'prod', 'cap': 'last', 'siccd': 'last'} # manual change\n info_cols = [k for k, v in aggs.items() if v == 'last'] # columns for beginnging quantities\n required_cols = ['date', 'permno', 'ret'] + info_cols # for check\n\n if frequency == 'Q':\n time_delta = QuarterEnd\n elif frequency == 'M':\n time_delta = MonthEnd\n else:\n raise ValueError(f'Unrecognized frequency {frequency}')\n\n tota_rows = data.shape[0]\n print(f'Total rows: {tota_rows}')\n print(f'{data.date.min()} to {data.date.max()}')\n\n print('Check invalid returns')\n invalid = data[(data.ret == -66) | (data.ret == -77) | (data.ret == -88) | (data.ret == -99)]\n print(f'Invalid returns: {invalid.shape[0]}')\n\n print('Check missing')\n for col in required_cols:\n count = data[col].isna().sum()\n print(f' {col} missing: {count} ({count / tota_rows:.2%})')\n\n data = data[required_cols].dropna()\n print(f'Remove NAs: {data.shape[0]} ({data.shape[0] / tota_rows:.2%})')\n\n data = data[(data.siccd < 6000) | (data.siccd > 6999)] # remove financial\n # data = data[(data.siccd < 1500) | (data.siccd > 1799)] # remove construction\n # data = data[data.siccd < 9100] # remove construction\n print(f'Remove industries: {data.shape[0]} ({data.shape[0] / tota_rows:.2%})')\n\n dup = data[['permno', 'date']].duplicated().sum()\n print(f'Duplicated for key [permno, date]: {dup}')\n\n data.permno = data.permno.astype(int)\n data['date'] = pd.to_datetime(data['date'])\n data['time_idx'] = data.date + time_delta(0)\n\n # aggregate up to quarterly\n data['rel'] = 1 + data.ret\n data.sort_values(['permno', 'date'], inplace=True) # so that first cap below is 
correct\n data = data.groupby(['permno', 'time_idx'], as_index=False).agg(aggs)\n data['ret'] = data.rel - 1\n data.drop('rel', axis=1, inplace=True)\n\n # join with next quarter for forward looking returns\n data['time_idx_next'] = data.time_idx + time_delta(1)\n\n data = pd.merge(data, data[['permno', 'time_idx', 'ret']],\n left_on=['permno', 'time_idx_next'], right_on=['permno', 'time_idx'],\n suffixes=('', '_1'), how='inner')\n data.drop(['time_idx_next', 'time_idx_1'], axis=1, inplace=True)\n\n # get coarser industry classification\n data.siccd = data.siccd.astype('str').str.zfill(4)\n data['sic1'] = data.siccd.str[:1]\n data['sic2'] = data.siccd.str[:2]\n data.dropna(inplace=True)\n print(f'{frequency} records: {data.shape[0]}')\n return data", "def getFirst30HighLow(client, future):\n console().info(\"Getting The First 30 High/Low.\")\n\n reqId = client.startRequest()\n\n today = date.today()\n endTime = datetime(year=today.year, month=today.month, day=today.day, hour=10)\n endTime = endTime.strftime(\"%Y%m%d %H:%M:%S\")\n\n client.pushRequestData(reqId, {\"name\" : \"HIGH/LOW\"})\n client.reqHistoricalData(\n reqId, future.contract, endTime, \"1800 S\", \"30 mins\", \"TRADES\", 1, 1, False, []\n )\n return client.waitForRequest(reqId, purge=True)[\"historical\"]" ]
[ "0.5832539", "0.57833153", "0.56723505", "0.54061973", "0.53237814", "0.5318612", "0.5297799", "0.52888286", "0.5231948", "0.5163269", "0.5150431", "0.50361997", "0.49637398", "0.49448463", "0.4941156", "0.4928857", "0.48964813", "0.4896082", "0.4889873", "0.48582286", "0.4849325", "0.4835351", "0.48225036", "0.48158547", "0.48097265", "0.48053813", "0.48039705", "0.4791372", "0.4789125", "0.47684705" ]
0.7938999
1
True if expires is not equal to orig_expires.
def updated(self): return self.expires != self.orig_expires
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])\n assert (exp - lm).seconds == 3600", "def extended(self):\n if self.expires_at:\n return self.expires_at - self.issued_at > timedelta(days=30)\n return False", "def is_expired(self):\n return utcnow() >= self.expires", "def is_expired(self) -> bool:\n return now() > self.expires", "def expired(self):\n return int(time.time()) > self.expires_at", "def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)", "def is_expired(self):\n if not self.is_signed:\n return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def has_expired(self):\n if not self._initialized:\n return True\n\n expires_in = self.expires_in\n if expires_in > 0:\n return False\n else:\n return True", "def is_expired(self):\n return self.expiration_date <= self._now()", "def isStale(self):\n return self.m_expirationDate < datetime.datetime.now(tz=pytz.utc)", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False", "def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False", "def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True", "def isExpired(self):\n return True/False", "def is_outdated(self, timestamp):\n\n expiry_time = datetime.datetime.now() - self.cache_time\n return expiry_time > timestamp", "def _about_to_expire(self, secret: Secret) -> bool:\n return secret.is_expired(datetime.now(UTC) + self.expiry_margin)", "def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()", "def expired(self) -> bool:\n if not self.use_wts:\n return False\n\n return datetime.now() > self.expire", "def expired(self): # pragma: no cover\n return self._state in (_State.EXPIRING, _State.EXPIRED)", "def expired(self):\n\n return self.getNotAfter() <= rpki.sundial.now()", "def valid(self):\n return self.expiry > timezone.now()", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def is_expired(self):\n delta = datetime.datetime.now() - self.created_at\n\n return delta.total_seconds() > 15*60", "def _has_expired(self):\n try:\n expires = datetime.fromtimestamp(\n os.stat(self.lockfile).st_mtime\n )\n except OSError as e:\n if e in self.NOT_EXIST_ERRORS:\n return False\n raise\n return datetime.now() > expires", "def _has_expired(self):\r\n expired = False\r\n if hasattr(self, 'Expiration'):\r\n now = datetime.datetime.utcnow()\r\n expiration = 
datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')\r\n expired = (now >= expiration)\r\n else:\r\n raise ValueError(\"ERROR: Request for expired property, but no Expiration in HIT!\")\r\n return expired", "def remove_if_expired(self, key, now):\n with self.GLOB_LOCK:\n inst = self._request_sessions.get(key, None)\n if inst is not None and (inst.last_access + self.TIMEOUT < now):\n self._request_sessions.pop(key, None)\n return True\n\n return False", "def is_access_expired(self) -> bool:\n entitlement_contract = self.cfg.entitlements.get(self.name, {})\n # TODO(No expiry per resource in MVP yet)\n expire_str = entitlement_contract.get('expires')\n if not expire_str:\n return False\n expiry = datetime.strptime(expire_str, '%Y-%m-%dT%H:%M:%S.%fZ')\n if expiry >= datetime.utcnow():\n return False\n return True" ]
[ "0.7073698", "0.69195384", "0.6914716", "0.69129467", "0.67846453", "0.6679914", "0.660565", "0.65767676", "0.6557804", "0.6405459", "0.638651", "0.63783026", "0.6330744", "0.6310361", "0.6259558", "0.6224024", "0.6209609", "0.6207446", "0.61312985", "0.61132365", "0.609304", "0.60679907", "0.60646784", "0.6063764", "0.60470676", "0.60157335", "0.60042673", "0.6001517", "0.59923506", "0.59741974" ]
0.76405764
0
Called to move/remove one item. Returns True if the item was purged, False if it was moved to self.new_expiry.
def remove_one(self):
    item = self.expiry.pop(0)
    if item.updated:
        self.new_expiry.append(item)
        return
    del self.index[item.target]
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self,item):\r\n raise AbstractError\r\n return False", "def drop(self, item: Item) -> bool:\n if item in self.bag:\n self.__bag.remove(item)\n self.room._add_item(item)\n return True\n return False", "def is_expired(self, key, now=None, remove=False):\n with self._lock:\n if now is None:\n now = time.time()\n # pylint: disable=unused-variable\n expire, _value = self._values[key]\n if expire is None:\n return False\n expired = expire < now\n if expired and remove:\n self.__delitem__(key)\n return expired", "def pickup(self, item: Item) -> bool:\n if len(self.bag) >= 5:\n return False\n\n if self.__room._take(item):\n self.__bag.append(item)\n return True\n\n raise Exception(f\"{item} was not found in {self.room}\")", "def _expire(self):\n with self._lock:\n self._items.popleft()", "def _expire(self):\n with self._lock:\n self._items.popleft()", "def _remove_expired(self):\n with self.__lock:\n is_changed = False\n for k in list(self._d.keys()):\n if self._d[k].is_expired():\n log.debug(\"removing expired item: {}\".format(self._d[k]))\n del self[k]\n is_changed = True\n\n if (is_changed is True) and (self.is_persistent):\n # save changed cache file\n self.save()", "def keep_item(self, content_item):\n return self._content_item_comparison_weak(\n content_item, self.touch_content_item\n )", "def delete(self):\n the_tuple = self.deque.delete(self.tube, self.task_id)\n\n self.update_from_tuple(the_tuple)\n\n return bool(self.state == 3)", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def purging() -> bool:\r\n return _purge", "def popitem(self):\n pass", "def _expire_item(self, key):\n (timeout, callback) = self._timeouts[key]\n now = time.time()\n if timeout <= now:\n item = dict.pop(self, key)\n del self._timeouts[key]\n if callback:\n try:\n callback(key, item)\n except TypeError:\n try:\n callback(key)\n except TypeError:\n callback()\n return None\n else:\n return timeout - now", "def inventory_remove(self, item):\n if (item in self.ItemList):\n self.ItemList.remove(item)\n return 0\n # Item not found.\n return 1", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def delete(self, key: str):\n if self._json_to_dict():\n curr_time = datetime.now().timestamp()\n try:\n if(self.db_data[key][1]['expiration_time'] == -1):\n self.db_data.pop(key)\n return True\n elif (curr_time > self.db_data[key][1]['expiration_time']):\n print(\"Object Life Expired\")\n return False\n self.db_data.pop(key)\n if not self._dict_to_json():\n return False\n except KeyError:\n print(\"Object with\", key, \"does not exist.\")\n return False\n return True\n else:\n print(\"Unable to delete Object ID =\", id)\n return False", "def deleteLast(self) -> bool:\n if self.isEmpty():\n return False\n\n self.rear = (self.rear - 1 + self.capacity) % self.capacity\n return True", "def popitem(self): # real signature unknown; restored from __doc__\n pass", "def delete(self, item):\n is_found, active, node = self._find(item)\n if is_found and active:\n idx = node.items.index(item)\n node.active[idx] = False\n return True\n else:\n return False", "def perform_action(self):\r\n if self.__remove_ailment == False:\r\n return False\r\n\r\n self.__keyboad.press(self.__flask_key)\r\n self.__keyboad.release(self.__flask_key)\r\n self.__last_used = dt.datetime.now()\r\n return True", "def pop(self):\n return super().remove_item_from_front()", 
"def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def active_item(self, remove=True):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n if not self.stack.empty():\n pass\n elif not self.backlog.empty():\n # feed the stack the top priority item from the queue\n self.stack.push(self.backlog.get())\n else: # both the stack & queue are empty\n raise queue.Empty\n\n assert not self.stack.empty(), \"BUG: empty stack\"\n\n if remove:\n return self.stack.pop()\n\n return self.stack.peek()", "def remove(self, item):\n item_found = False\n\n try:\n # Traverse through the array to look for the 'item'\n for i in range(len(self)):\n if self.the_array[i] == item:\n # Move every item after the 'item' found to left in order\n # to remove the 'item'\n for j in range(i, self.count - 1):\n self.the_array[j] = self.the_array[j + 1]\n self.count -= 1\n item_found = True\n\n if (self.capacity // 2 >= self.BASE_SIZE) and (self.count < self.capacity / 8):\n self._resize(self.capacity // 2)\n break\n\n if not item_found:\n raise ValueError\n\n except ValueError:\n print(\"Item not found in list.\")\n\n return item_found", "def pop(self, timeout=None):\n item = super(ExclusiveQueue, self).pop(timeout)\n try:\n self.remove(item)\n except ValueError:\n pass\n return item", "def dequeue(self, obj):\n try:\n item = self.obj_item.pop(obj)\n self.user_queue[item.user].remove(item)\n r = True\n if self.user_skip[item.user] == 0 and not self.user_queue[item.user]:\n self._purge_user(item.user)\n self._normalise()\n except:\n r = False\n return r", "def deque_timeout_put(self, item, timeout):\n try:\n self.q.put(item, timeout=timeout)\n return True\n except queue.Full:\n try:\n _ = self.q.get(block=False)\n dropped = False\n except queue.Empty:\n dropped = True\n # TODO - could crash due to a race condition, could be solved with a lock\n self.q.put(item, block=False)\n return dropped", "def poplar(self, item_to_be_popped):\n if self.check_inventory(item_to_be_popped): # Basic check to see if it's in the list\n als_lament = item_to_be_popped# ;P\n for an_item in self.bag_of_holding: # here we are extracting an the index of the object in the list\n if an_item.name == item_to_be_popped:\n index = self.bag_of_holding.index(an_item)\n to_be_returned = self.bag_of_holding[index]\n # and here is where the majic happens and the item is removed from the list.\n self.bag_of_holding.remove(self.bag_of_holding[index])\n else:\n # for testing porpoises if the item is not in dah bag, remove later.\n print(\" {} was not found in bag of holding.\".format(item_to_be_popped))\n return None\n return to_be_returned", "def release(self, item, quantity):\n logger.info('ReleaseDiscard item release initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n transaction.context = config.get_config().context\n quantity = Decimal(quantity)\n inventory_list = self.Inventory.search([('location', '=', self.inventory.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.inventory, 
to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.inventory, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n transaction.cursor.commit()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def remove_item_in_storage(life, item_uid):\n\tif 'stored_in' in items.get_item_from_uid(item_uid):\n\t\titems.remove_item_from_any_storage(item_uid)\n\telse:\n\t\tprint 'incorrect: item not stored'\n\t\n\t#for _container in [items.get_item_from_uid(_container) for _container in life['inventory']]:\n\t#\tif not 'max_capacity' in _container:\n\t#\t\tcontinue\n\t#\n\t#\tif id in _container['storing']:\n\t#\t\t_container['storing'].remove(id)\n\t#\t\t_container['capacity'] -= get_inventory_item(life,id)['size']\n\t#\t\tlogging.debug('Removed item #%s from %s' % (id,_container['name']))\n\t#\t\t\n\t#\t\tupdate_container_capacity(_container['uid'])\n\t#\t\treturn _container\n\t\n\treturn False" ]
[ "0.6366809", "0.63568", "0.60818267", "0.6010963", "0.5990624", "0.5990624", "0.5898597", "0.58695376", "0.58585554", "0.5826926", "0.58192736", "0.58165", "0.57956797", "0.5775258", "0.574607", "0.56877345", "0.5676406", "0.5663184", "0.56496775", "0.563791", "0.5600733", "0.5576112", "0.55712277", "0.5571193", "0.5566177", "0.5554325", "0.5549153", "0.55463994", "0.5533877", "0.5498771" ]
0.696654
0
Purge expired or oldest entries from the cache. Entries are purged when they are older than the TTL, or when the total number of entries exceeds MAX_ASSOCS.
def purge(self):
    if not self.index:
        return
    now = time()
    while self.expiry[0].orig_expires <= now or len(self.index) > MAX_ASSOCS:
        self.remove_one()
        if not self.expiry:
            if not self.index:
                return
            self.rotate_lists()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _purge_expired_items():\n now = time.time()\n keys = data_table.keys()\n\n for key in keys:\n num_unique_vals = len(data_table[key])\n\n # We iterate through in reverse, because otherwise deleting an \n # entry will cause frivolous edge cases.\n iteration_scheme = range(num_unique_vals)\n iteration_scheme.reverse()\n\n # value format: (value variable, expiration time)\n for value in iteration_scheme:\n expiration_time = data_table[key][value][1]\n temp_value = data_table[key][value][0] # For logging purposese only.\n if now > expiration_time:\n # The entry is expired.\n del data_table[key][value]\n if len(data_table[key]) == 0:\n del data_table[key]\n if (verbose):\n logstring = str(\"Entry purged: \" + str(key) + \": \" + str(temp_value) + \"\\n\")\n _log_with_timestamp(logstring)\n\n return", "def purgeExpiredRecords(self):\n if hasattr(self, \"_test_time\"):\n now = self._test_time\n else:\n now = time.time()\n\n for indexType in self._cache:\n for key, (cachedTime, _ignore_record) in self._cache[indexType].items():\n if now - self._expireSeconds > cachedTime:\n del self._cache[indexType][key]", "def cache_clean(self):\n\t\tnow = time.time()\n\t\tkeys_for_removal = collections.deque()\n\t\tfor key, (_, expiration) in self.__cache.items():\n\t\t\tif expiration < now:\n\t\t\t\tkeys_for_removal.append(key)\n\t\tfor key in keys_for_removal:\n\t\t\tdel self.__cache[key]", "def _purge():\r\n _cache.clear()", "def clean(self):\n cutoff = int(time.time()) - int(self.__ttl)\n logging.info(\"Cleaning cache with cutoff time %d\" % cutoff)\n\n start_key = None\n while True:\n # Split in small transactions to avoid blocking other processes.\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n with txn.cursor() as cursor:\n if start_key is not None:\n if not cursor.set_range(self._encode(start_key)):\n break\n start_key = self._clean_some(txn, cursor, cutoff)\n if start_key is None:\n break", "def __cleanup(self, ttl_in_sec):\n ttl_in_ms = ttl_in_sec * 1000\n while True:\n logging.debug(\"cleanup action...\")\n current_ts = self.__current_timestamp_in_ms()\n self.lock.acquire()\n for key, value in self.orderedDict.items():\n if value[1] > current_ts - ttl_in_ms:\n break\n else:\n self.orderedDict.pop(key, None)\n self.lock.release()\n time.sleep(ttl_in_sec)", "def purge_expired (aging_hash, interval=aging_hash_interval):\n\n expired = []\n for k, v in aging_hash.items():\n set_time = v[0]\n if (time.time() - set_time) > aging_hash_interval:\n expired.append(k)\n for ex_k in expired:\n del aging_hash[ex_k]", "def purgeExpiredRequests( self ):\n cmd = \"DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()\"\n return self._update( cmd )", "def _prune_cache(self):\n default_expiry = datetime.datetime.utcnow() - datetime.timedelta(minutes=self.cache_resources_for)\n for resource_id, resource in self.local_resource_status.items():\n if 'cache_until' in resource:\n if datetime.datetime.utcnow() > resource['cache_until']:\n self._delete_cache(resource_id)\n elif resource['last_accessed'] < default_expiry:\n self._delete_cache(resource_id)", "def purge_cache(self):\n\n self.local_store.purge_cache()", "def _remove_expired(self):\n with self.__lock:\n is_changed = False\n for k in list(self._d.keys()):\n if self._d[k].is_expired():\n log.debug(\"removing expired item: {}\".format(self._d[k]))\n del self[k]\n is_changed = True\n\n if (is_changed is True) and (self.is_persistent):\n # save changed cache file\n self.save()", "def clean_cache(self):\n timer = 
Timer()\n entries = []\n for file_in_cache in self.find_archives():\n cache_metadata = self.read_metadata(file_in_cache)\n last_accessed = cache_metadata.get('last-accessed', 0)\n entries.append((last_accessed, file_in_cache))\n to_remove = sorted(entries)[:-self.cache_limit]\n if to_remove:\n for last_used, file_in_cache in to_remove:\n logger.debug(\"Removing archive from cache: %s\", file_in_cache)\n metadata_file = self.get_metadata_file(file_in_cache)\n self.context.execute('rm', '-f', file_in_cache, metadata_file)\n logger.verbose(\"Took %s to remove %s from cache.\",\n timer, pluralize(len(to_remove), \"archive\"))\n else:\n logger.verbose(\"Wasted %s checking whether cache needs to be cleaned (it doesn't).\", timer)", "async def afterHoursAutoPurge(self, ctx: Context):", "def cache_expiration(self):\n\n\t\t# Iterate through servers\n\t\tfor serv in self.servers:\n\t\t\tserv.cache.hash_table.clear() # Erase the cache\n\t\t\tserv.cache.cur_size = 0 # Resets the number of items in the cache to 0", "def purgeExpiredProxies( self ):\n cmd = \"DELETE FROM `ProxyDB_Proxies` WHERE ExpirationTime < UTC_TIMESTAMP() and PersistentFlag = 'False'\"\n return self._update( cmd )", "def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)", "def clean_local_cache(self):\n to_expire = []\n now = int(time())\n\n try:\n for k, (_, _, grace) in six.iteritems(self._local_cache):\n if now > grace:\n to_expire.append(k)\n except RuntimeError:\n # It's possible for the dictionary to be mutated in another thread\n # while iterating, but this case is rare, so instead of making a\n # copy and iterating that, it's more efficient to just let it fail\n # gracefully. 
It'll just get re-run later.\n return\n\n for k in to_expire:\n try:\n del self._local_cache[k]\n except KeyError:\n # This could only exist in a race condition\n # where another thread has already deleted this key,\n # but we'll guard ourselves against it Justin Case.\n pass", "def _purge_old(self):\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._samples_max_age),\n self._samples_max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._samples_max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()", "def free_cache(self, tags=[]):\n unmemoize([ \"/entries/recent\", \"/entries/home\", \"/entries/archive\"])\n unmemoize([\"/entries/tag/%s\" % tag for tag in tags])", "def purgeLogs( self ):\n cmd = \"DELETE FROM `ProxyDB_Log` WHERE TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > 15552000\"\n return self._update( cmd )", "def flush():\n for k in cache._thecache.keys():\n del cache._thecache[k]", "def clear_expired(self):\n raise NotImplementedError", "def _purge_stale_checkpoints(self):\n if len(self._checkpoint_files) > self.max_checkpoints:\n purge_files = self._checkpoint_files[: -self.max_checkpoints]\n self._checkpoint_files = self._checkpoint_files[-self.max_checkpoints:]\n for chk in purge_files:\n silent_try(chk.purge_values)", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def do_expire(self):\n # Deep copy to avoid RuntimeError: dictionary changed size during iteration\n _timeouts = deepcopy(self.timeouts)\n for key, value in _timeouts.items():\n if value - self.clock.now() < timedelta(0):\n del self.timeouts[key]\n # removing the expired key\n if key in self.redis:\n self.redis.pop(key, None)", "def purge() -> None:\r\n _purge_func(False)", "def rem(self, key):\n if self.dexists('ttl', key):\n self.dpop('ttl', key)\n return super(MyCache, self).rem(key)", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def purge(self):\n self.remaining = 0", "def purge(self):\n pass" ]
[ "0.7540263", "0.7274658", "0.7212392", "0.71395636", "0.6828041", "0.6794092", "0.6781577", "0.6743768", "0.6684985", "0.66247", "0.65925604", "0.6568118", "0.647569", "0.64700687", "0.64508235", "0.64348704", "0.64143634", "0.6377659", "0.62273985", "0.62217367", "0.622053", "0.62079906", "0.6199987", "0.61595064", "0.6143058", "0.61336535", "0.61312723", "0.60948366", "0.6085197", "0.6083554" ]
0.77597445
0
Get common letters of two words
def get_common_letters(word1: str, word2: str) -> str:
    common = ''
    for x, y in zip(word1, word2):
        if x == y:
            common += x
    return common
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shared_prefix(word1: str, word2: str) -> str:\n shared_prefix = \"\"\n for char1, char2 in zip(word1, word2):\n if char1 == char2:\n shared_prefix += char1\n else:\n break\n return shared_prefix", "def shared_words(text1, text2):\r\n\r\n list1 = tokenize(text1.strip(' '))\r\n list2 = tokenize(text2.strip(' '))\r\n\r\n list3 = set(list1) & set(list2)\r\n list3.remove(' ');\r\n\r\n return list3", "def common_words(first, second):\n\n # Split the strings into lists of words\n first_words = first.split(',')\n second_words = second.split(',')\n\n duplicate_words = []\n\n # Check if there are duplicate words in the lists\n for item in first_words:\n if item in second_words:\n duplicate_words.append(item) # Create a list of the duplicate words\n\n result = ','.join(sorted(duplicate_words))\n\n if len(duplicate_words) == 0:\n print \"There are no common words in the two strings.\"\n\n return result", "def commonCharacterCount(s1, s2):\n return sum(min(s1.count(x),s2.count(x)) for x in set(s1))", "def get_alphanumeric_intersection(set1, set2):\n set1_clean = [x.upper() for x in set1]\n set2_clean = [x.upper() for x in set2]\n set1_clean = set([re.sub('[\\W_]', '', x) for x in set1_clean])\n set2_clean = set([re.sub('[\\W_]', '', x) for x in set2_clean])\n return set.intersection(set1_clean, set2_clean)", "def get_word_distance(word1: str, word2: str) -> bool:\n\n letters_different = 0\n for x, y in zip(word1, word2):\n if x != y:\n letters_different += 1\n if letters_different > 1:\n return False\n\n return True", "def common_words(filename):\n\n \"\"\"This function assumes that 'words' are strings of alphabetical characters, i.e. this function ignores punctuation\"\"\"\n\n return common_words_min(filename, 0)", "def word_difference(word1,word2):\n assert len(word1) == len(word2)\n\n count = 0 \n for c1, c2 in zip(word1, word2):\n if c1 != c2:\n count +=1\n return count", "def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score", "def difference(word1: str, word2: str):\n\n count = 0\n for c1, c2 in zip(word1, word2):\n if c1 != c2:\n count += 1\n return count", "def __intersect(a, b):\n a = [elem.lower() for elem in a]\n b = [elem.lower() for elem in b]\n return list(set(a) & set(b))", "def __intersect(self, a, b):\n a = [elem.lower() for elem in a]\n b = [elem.lower() for elem in b]\n return list(set(a) & set(b))", "def get_common_words_count(arr1, arr2):\n return len(list(set(arr1).intersection(arr2)))", "def search4letters(phrase:str, letters:str) -> set:\n return set(letters).intersection(set(phrase))", "def one_away(w1, w2):\n\n if abs(len(w1) - len(w2) > 1):\n return False\n\n # i = 0\n # w1_d = {}\n # w2_d = {}\n\n # for i in w1:\n # w1_d[i] = w1.count(i)\n\n # for j in w2:\n # w2_d[j] = w2.count(j)\n\n # unmatched = set(w1_d.items())^set(w2_d.items())\n \n # if len(unmatched) > 2:\n # return False\n # return True\n \n if len(w2) > len(w1):\n w1, w2 = w2, w1\n\n # Keep track of number of wrong letters\n diff = 0\n\n # Loop over w1 with i and over w2 with j\n i = j = 0\n\n # while j < len(w2):\n\n # if w1[i] != w2[j]:\n\n # # We found a wrong letter\n # wrong += 1\n # # We'll move to the next char in the longer string.\n # i += 1\n # if wrong > 1:\n # return False\n\n # # If same length, move the next char in shorter.\n # # Otherwise, don't move in shorter string --- this\n # # will cover the case of a added letter.\n # if 
len(w1) == len(w2):\n # j += 1\n\n # else:\n # # Both letters match; move to next letter in both\n # i += 1\n # j += 1\n\n # return True\n\n # iterate over 1 word - shorter of the two, so there is no index out of range error\n # as i, j increments\n while j < len(w2):\n # if letter are different, add to diff variable\n if w1[i] != w2[j]:\n diff += 1\n # as soon as diff is more than 1, than it's fast fail\n if diff > 1:\n return False\n # two scenarios: if same length for both words, both go on check next \n # word\n if len(w1) == len(w2):\n i += 1\n j += 1\n \n else: #if one word is longer than the other, go on to next letter in \n # longer word, and see if it matches previous letter in shorter word\n # because this is a case where extra letter is added in the middle of long\n # word, but the rest should be the same as the shorter\n i += 1\n else:\n i += 1\n j += 1\n return True", "def containing(letter, text):\n return([word for word in text if word.count(letter) >= 1])", "def compare_words(word1, word2):\n word1 = word1.lower()\n word2 = word2.lower()\n seg_scores = []\n if len(word1) >= len(word2):\n for i in range(0, len(word1) - len(word2) + 1):\n seg_scores.append(find_difference(word1[i:i+len(word2)], word2))\n else:\n for i in range(0, len(word2) - len(word1) + 1):\n seg_scores.append(find_difference(word2[i:i+len(word1)], word1))\n return round(min(seg_scores) + abs(len(word1) - len(word2))/float(len(max([word1, word2]))),2)", "def search4letters(phrase:str, letters:str='aeyuio') -> set:\n letters_to_be_checked = set(letters)\n return letters_to_be_checked.intersection(set(phrase))", "def similar_strings(s1, s2):\n w1 = set(re.split(r'\\W+', s1))\n w2 = set(re.split(r'\\W+', s2))\n threshold = len(w1) // 2 + 1\n return len(w1 & w2) >= threshold", "def num_mismatched_letters(self, word1:str, word2:str)-> int:\n assert(len(word1) == len(word2))\n count = 0\n # -----------------------------------------\n # TODO: You need to write this method.\n\n\n # -----------------------------------------\n return count", "def word_match(w):\n matches = [word for word in common_words if len(word) == len(w)]\n \n # If the letter in w is upper then it is a decoded letter so that same letter must be in the same index in all matches\n # If the letter is lowercase then it is encrypted and can be mapped to any letter that is not already mapped to an encoded letter\n for i in range(len(w)):\n if (w[i]).isupper() == True:\n matches = [word for word in matches if word[i] == w[i]]\n else:\n matches = [word for word in matches if word[i] not in decoded_dict.values()]\n # Making a copy of the current matches so that I can iterate over them which removing items if the mapping isn't one to one\n matches_copy = [word for word in matches] \n map_dict = {}\n # I iterate through all the words in the matches list and then through all the letters in each match.\n # If it is the first time the letter appears in a word then the match is removed if that encoded letter is being sent to a letter that already has another encoded letter mapped to it.\n # If the letter has appeared in the word before then the word is removed if that encoded letter is not being mapped to the same letter as it was previously\n for match in matches_copy:\n map_dict.clear()\n for i in range(len(match)):\n if w[i] not in map_dict:\n if match[i] not in map_dict.values():\n map_dict[w[i]] = match[i]\n else:\n matches.remove(match)\n break\n else:\n if map_dict[w[i]] == match[i]:\n continue \n else: \n matches.remove(match)\n break \n 
return(matches)", "def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))", "def commonWords(self):\n #utilize similar code used in stats.py\n exclude = set(('!', '.', '?'))\n freq = Stats()\n fullText = []\n #Parse email\n for x in range(self.getSCount()):\n #Simplify email into string of words separated by single space\n sString = self[x].lower()\n sString = ''.join(char for char in sString if char not in exclude)\n sString = sString.split()\n fullText = fullText + sString\n\n #Call findFreqDic() to find frequencies of words\n freqDict = freq.findFreqDic(fullText)\n\n #Analyze 10 words\n numTopic = 10\n \n #Find most and least common calling topNSort and bottomNSort\n mostCommon = freq.topNSort(freqDict, numTopic)\n leastCommon = freq.bottomNSort(freqDict, numTopic)\n \n most = list(mostCommon.keys())\n least = list(leastCommon.keys())\n \n return most, least", "def test_strings_common_symbols():\n\n common_result = strings_ops.strings_common_symbols(\"hi\", \"hello\")\n assert common_result == \"h\"", "def common(s1, s2):\n cl = commonlen(s1, s2)\n return s2[:cl]", "def word_overlap2(sentence_a, sentence_b):\n a_set = set(word for word in sentence_a) - config.stop_list\n b_set = set(word for word in sentence_b) - config.stop_list\n score = len(a_set&b_set)/float(len(a_set|b_set))# len(s1&s2)/max(len(s1),len(s2))\n\n return score", "def search4letters(phrase: str, letters: str = 'aeiou') -> set:\n return set(letters).intersection(set(phrase))", "def meet(self, word, sense1=0, sense2=0):\n s1 = self._synset(self.text, sense1)\n s2 = self._synset(word, sense2)\n\n common = s1.lowest_common_hypernyms(s2)\n\n result = list()\n for c in common:\n result.append(c.name()[:5])\n\n return result if result else []", "def intersect(list1, list2):\n result = []\n \n for word in list1:\n if word in list2:\n result.append(word)\n return result", "def compare_words(self, word1, word2):\n return Counter(word1) == Counter(word2)" ]
[ "0.7009229", "0.6888572", "0.68849295", "0.6847381", "0.68469286", "0.6775948", "0.67034924", "0.6648536", "0.6633855", "0.6592176", "0.6583248", "0.6566062", "0.6548944", "0.6540705", "0.6511979", "0.6449228", "0.644169", "0.64145076", "0.6395596", "0.637349", "0.63393426", "0.6295036", "0.6291746", "0.6275316", "0.6261457", "0.6242721", "0.62282354", "0.6215778", "0.6208248", "0.6204232" ]
0.88774985
0
Tests the filtering for a plan by its metal level to only match silver level plans.
def test_filtering_plans_by_metal_level_matches_only_silver(self):
    silver_plan_inputs = [
        {
            'plan_id': '05276NA2900195',
            'state': 'MI',
            'metal_level': 'Silver',
            'rate': '283.39',
            'rate_area': '1'
        },
        {
            'plan_id': '05276NA2900195',
            'state': 'MI',
            'metal_level': 'silver',
            'rate': '283.39',
            'rate_area': '1'
        }
    ]
    non_silver_plan_inputs = [
        {
            'plan_id': '68493CI1477769',
            'state': 'SC',
            'metal_level': 'Bronze',
            'rate': '214.57',
            'rate_area': '21'
        },
        {
            'plan_id': '09812TP4606635',
            'state': 'NV',
            'metal_level': 'Platinum',
            'rate': '331.363599',
            'rate_area': '1'
        },
        {
            'plan_id': '11698OD6718414',
            'state': 'SC',
            'metal_level': 'Gold',
            'rate': '269.54',
            'rate_area': '8'
        },
        {
            'plan_id': '70547DK6596753',
            'state': 'FL',
            'metal_level': 'Catastrophic',
            'rate': '241.1',
            'rate_area': '57'
        }
    ]

    for silver_plan in silver_plan_inputs:
        result = filter_plan_metal_level(silver_plan, DESIRED_METAL_LEVEL)
        self.assertEqual(True, result)

    for non_silver_plan in non_silver_plan_inputs:
        result = filter_plan_metal_level(
            non_silver_plan, DESIRED_METAL_LEVEL
        )
        self.assertEqual(False, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_instrument_inventory_filtering():\n filt = 'GR150R'\n data = mm.instrument_inventory('niriss',\n add_filters={'filter': filt},\n return_data=True)\n\n filters = [row['filter'] for row in data['data']]\n\n assert all([i == filt for i in filters])", "def _check_filters(self, level):\n if(self.filters == Filters.NoFilter):\n return True\n else:\n return (self.filters & level.filters == 0)", "def pollinated_filter(tile):\n current_sprite = tile.contains_sprite\n if current_sprite and current_sprite.type == \"plant\" and current_sprite.is_pollinated:\n return False\n else:\n return True", "def filter_cap(stock):\n return stock['Class'] == 'Small'", "def test_vs_filtering():\n vs = virtualscreening(n_cpu=-1)\n\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf'))\n vs.apply_filter('ro5', soft_fail=1)\n assert_equal(len(list(vs.fetch())), 49)\n\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf'))\n vs.apply_filter('ro3', soft_fail=2)\n assert_equal(len(list(vs.fetch())), 9)", "def test_get_rate_plan_by_product(self):\n pass", "def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r", "def test_get_rate_plan_by_product_and_rate_plan(self):\n pass", "def filter_out_reduced_healing(raw_heals):\n max_heal = max(raw_heals)\n threshold = 0.75 * max_heal\n\n selector = raw_heals > threshold\n\n return raw_heals[selector]", "def filterLevelSlot(self, level, shown):\r\n\r\n if shown:\r\n self.model.removeFilter(level)\r\n else:\r\n self.model.addFilter(level)", "def filter_criteria_met(current_gre, current_gpa, current_toefl):\r\n\r\n if int(current_gre) < global_constants['MINIMUM_GRE']:\r\n return False\r\n if float(current_gpa) < global_constants['MINIMUM_GPA']:\r\n return False\r\n if int(current_toefl) < global_constants['MINIMUM_TOEFL']:\r\n return False\r\n return True", "def test_stealable(self):\r\n prod = Product(name='Test Product',\r\n weight=100, price=1,\r\n flammability=0.5)\r\n self.assertEqual(prod.stealability(), \"Not so stealable...\")", "def test_6_walls(self):\n grid_S = MAPPGridState.create_from_string(\n [\"#.#0###\",\n \"#.#.###\",\n \".......\",\n \"###.#.#\",\n \"###.#1#\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"#.#1###\",\n \"#.#.###\",\n \".......\",\n \"###.#.#\",\n \"###0#.#\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(10,sum(a.cost for a in plan))", "def test_filter(self):\n self.client.ensure_path(\"/services/db/1.1.1.1\")\n self.client.ensure_path(\"/services/db/2.2.2.2\")\n self.client.ensure_path(\"/services/db/3.3.3.3\")\n self.client.ensure_path(\"/services/db/4.4.4.4\")\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\", \"weight\": \"20\"}))\n self.client.set(\"/services/db/2.2.2.2\",\n json.dumps({\"enabled\": \"1\", \"weight\": \"20\"}))\n self.client.set(\"/services/db/3.3.3.3\",\n json.dumps({\"enabled\": \"1\", \"weight\": \"10\"}))\n self.client.set(\"/services/db/4.4.4.4\",\n json.dumps({\"enabled\": \"1\", \"weight\": \"30\"}))\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf,\n filter_handler=create_filter(\"enabled=1,weight>15\"))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"2.2.2.2\": {\"enabled\": \"1\", \"weight\": \"20\"},\n \"4.4.4.4\": {\"enabled\": \"1\", \"weight\": \"30\"}})", "def test_filters(snaptype):\n filename = DIR / 
snaptype.filename\n snap = plonk.load_snap(filename)\n\n xwidth, ywidth, zwidth = 10 * AU, 10 * AU, 10 * AU\n height = 10 * AU\n radius = 100 * AU\n radius_min = 10 * AU\n radius_max = 20 * AU\n\n filters.annulus(\n snap=snap, radius_min=radius_min, radius_max=radius_max, height=height\n )\n filters.box(snap=snap, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth)\n filters.cylinder(snap=snap, radius=radius, height=height)\n filters.shell(snap=snap, radius_min=radius_min, radius_max=radius_max)\n filters.sphere(snap=snap, radius=radius)\n\n snap.close_file()", "def test_brainvision_data_filters():\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_highpass_path,\n montage=montage, eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n\n assert_equal(raw.info['highpass'], 0.1)\n assert_equal(raw.info['lowpass'], 250.)", "def _is_filter_match(self, arb_id):\n if not self.sw_filters:\n # Filtering done on HW or driver level or no filtering\n return True\n for can_filter in self.sw_filters:\n if not (arb_id ^ can_filter['can_id']) & can_filter['can_mask']:\n return True\n return False", "def passes_cutoff(self, filter_code):\r\n try:\r\n filterset_dict = {\"all_positions\":[True],\r\n \"all_variants\":[self.is_variant == True],\r\n \"actionable_variants\":[self.is_variant == True, \r\n self.in_blacklist == \"WHITE\", \r\n \"exon\" in self.loc, # and \"exonic_nc\" not in self.loc, \r\n \"syn\" not in self.func, \r\n \"ref\" not in self.func, \r\n self.ir_version == \"14\" or int(self.FAO)>50,\r\n int(self.FRO)+int(self.FAO)>500, \r\n self.FR == \".\"],\r\n \r\n \r\n \"indels\":[self.is_variant == True, self.type == \"del\" or self.type == \"in\" , \"exon\" in self.loc]\r\n }\r\n return all(filterset_dict[filter_code])\r\n \r\n except:\r\n return False", "def test_by_statement_mininimal_data(minimal_mockdata, qfilter):\n res = qfilter.filter(minimal_mockdata, st='st1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, st='xxx2')\n assert not res == 0", "def testUsingFilterTool(self):\n pass", "def test_get_small_and_light_eligibility_by_seller_sku(self):\n pass", "def filter_listings(listing):\n\n MIN_PRICE = program_features.MIN_PRICE\n MAX_PRICE = program_features.MAX_PRICE\n\n MAX_DISTANCE = program_features.MAX_DISTANCE\n\n MIN_NUM_ROOMS = program_features.MIN_NUM_ROOMS\n MAX_NUM_ROOMS = program_features.MAX_NUM_ROOMS\n\n if (float(listing.price[1:4]) < MIN_PRICE) or (float(listing.price[1:4]) > MAX_PRICE):\n print (\"Listing price = {}\".format(listing.price[1:4], ))\n return False\n elif (float(listing.distance) > MAX_DISTANCE):\n print (\"Listing distance = {}\".format(float(listing.distance)))\n return False\n elif (int(listing.rooms) < MIN_NUM_ROOMS) or (int(listing.rooms) > MAX_NUM_ROOMS) :\n print (\"Listing rooms = {}\".format(int(listing.rooms)))\n return False\n else:\n return True", "def test_stealability(self):\n prod = Product('Test Product', price=100, weight=1)\n self.assertEqual(prod.stealability(), \"Very stealable!\")", "def prepare_filter_params(context, plan_name=None, **kw):\n from debra.models import Influencer\n from debra import logical_categories\n from django.core.cache import get_cache\n cache = get_cache('memcached')\n params = None #cache.get('filter_params')\n if not params:\n # influencers = Influencer.objects.filter(\n # show_on_search=True).exclude(blacklisted=True)\n # influencers = influencers.filter(\n # 
score_popularity_overall__isnull=False)\n # influencers = influencers.distinct()\n popularity = [\n {\n \"title\": \"Small\",\n },\n {\n \"title\": \"Medium\",\n },\n {\n \"title\": \"Large\",\n }\n ]\n engagement = [\n {\n \"title\": \"0-20\",\n },\n {\n \"title\": \"21-40\",\n },\n {\n \"title\": \"41-60\",\n },\n {\n \"title\": \"61-80\",\n },\n {\n \"title\": \"81+\",\n },\n ]\n\n price_ranges = [\n {\n \"title\": \"Cheap\",\n \"text\": \"Primarily In-expensive\"\n },\n # {\n # \"title\": \"Mid-level\",\n # },\n { \n \"title\": \"Expensive\",\n \"text\": \"Primarily High-end\"\n }\n ]\n\n genders = [\n {\n \"title\": \"Female\",\n },\n {\n \"title\": \"Male\",\n },\n ]\n\n social = [\n {\n \"value\": \"Facebook\",\n \"icon\": \"icon-social_facebook\"\n },\n {\n \"value\": \"Pinterest\",\n \"icon\": \"icon-social_pinterest2\"\n },\n {\n \"value\": \"Twitter\",\n \"icon\": \"icon-social_twitter\"\n },\n {\n \"value\": \"Instagram\",\n \"icon\": \"icon-social_instagram2\"\n },\n {\n \"value\": \"Youtube\",\n \"icon\": \"icon-social_youtube\"\n },\n ]\n\n age_groups = [\n {\n \"value\": \"0_19\",\n \"icon\": \"0 - 19\"\n },\n {\n \"value\": \"20_24\",\n \"icon\": \"20 - 24\"\n },\n {\n \"value\": \"25_29\",\n \"icon\": \"25 - 29\"\n },\n {\n \"value\": \"30_34\",\n \"icon\": \"30 - 34\"\n },\n {\n \"value\": \"35_39\",\n \"icon\": \"35 - 39\"\n },\n {\n \"value\": \"40\",\n \"icon\": \"40+\",\n }\n ]\n\n activity = [{\"value\": \"Blog\", \"icon\": \"icon icon-letter_quotes2\"}] + social\n\n categories = []\n\n brands = []\n\n locations = redis_cache.get('toplocs') or []\n # locations = Influencer.get_locations_list(num_results=200)\n # locations = Influencer.get_locations_list(num_results=None)\n\n tags = kw.get('tags', [])\n\n source = [{\"title\": \"Signup\", \"value\": \"blogger_signup\"}]\n\n params = {\n 'show_filters': True,\n 'popularity': list(popularity),\n 'engagement': list(engagement),\n 'categories': list(categories),\n 'brands': list(brands),\n 'priceranges': list(price_ranges),\n 'locations': list(locations),\n 'genders': list(genders),\n 'social': list(social),\n 'activity': list(activity),\n 'tags': list(tags),\n 'source': list(source),\n 'age_groups': list(age_groups),\n 'enabled_filters': [\n \"popularity\", \"engagement\", \"categories\", \"brands\",\n \"priceranges\", \"location\", \"genders\", \"socials\", \"activity\",\n \"tags\", \"likes\", \"shares\", \"comments\", \"source\", \"avgAge\",\n \"customCategories\", \"customOccupation\", \"customSex\", \"customEthnicity\",\n \"customTags\", \"customLanguage\", \"customAgeRange\",]\n }\n cache.set('filter_params', params)\n\n for loc in params.get('locations', []):\n loc['value'] = loc['title']\n\n if True: #settings.DEBUG:\n params['categories'] = [{\"title\": \"Fashion\", \"category\": \"fashion\"},\n {\"title\": \"Food\", \"category\": \"food\"},\n {\"title\": \"Kids\", \"category\": \"kids\"},\n {\"title\": \"Beauty\", \"category\": \"beauty\"},\n {\"title\": \"Travel\", \"category\": \"travel\"}]\n else:\n params['categories'] = []\n \n return params", "def test_7_medium(self):\n grid_S = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..12......34.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..34......21.\",\n \"...###..###..\",\n \"...######....\",\n 
\"........#....\",\n \"........#....\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(36,sum(a.cost for a in plan))", "def cost_filter_press(blk):\n t0 = blk.flowsheet().time.first()\n # Add cost variable and constraint\n blk.capital_cost = pyo.Var(\n initialize=1,\n units=blk.config.flowsheet_costing_block.base_currency,\n bounds=(0, None),\n doc=\"Capital cost of unit operation\",\n )\n\n Q = pyo.units.convert(\n blk.unit_model.properties_in[t0].flow_vol,\n to_units=pyo.units.gal / pyo.units.hr,\n )\n\n # Get parameter dict from database\n parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters(\n blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype\n )\n\n # Get costing parameter sub-block for this technology\n A, B = blk.unit_model._get_tech_parameters(\n blk,\n parameter_dict,\n blk.unit_model.config.process_subtype,\n [\"capital_a_parameter\", \"capital_b_parameter\"],\n )\n\n # Determine if a costing factor is required\n factor = parameter_dict[\"capital_cost\"][\"cost_factor\"]\n\n expr = pyo.units.convert(\n A * Q + B, to_units=blk.config.flowsheet_costing_block.base_currency\n )\n\n blk.capital_cost_constraint = pyo.Constraint(expr=blk.capital_cost == expr)\n\n # Register flows\n blk.config.flowsheet_costing_block.cost_flow(\n blk.unit_model.electricity[t0], \"electricity\"\n )", "def plans():", "def test_allow_filtering(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t \"\n \"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)\"))\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {v}\".format(v=i), [i, i, 'a', 3.0])\n\n rows = list(session.execute(\"SELECT * FROM t_by_v2 WHERE v2 = 'a'\"))\n assert len(rows) == 1000, \"Expected 1000 rows but got {}\".format(len(rows))\n\n assert_invalid(session, \"SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'\")\n assert_invalid(session, \"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1\")\n\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING\".format(i),\n [i, i, 'a', 3.0]\n )\n assert_one(\n session,\n \"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING\".format(i),\n ['a', i, i, 3.0]\n )", "def set_silver(face):\n \n ambient = [ 0.19225, 0.19225, 0.19225, 1.0 ]\n diffuse = [ 0.50754, 0.50754, 0.50754, 1.0 ]\n specular = [ 0.508273, 0.508273, 0.508273, 1.0 ]\n shininess = 10\n glMaterialfv(face, GL_AMBIENT, ambient);\n glMaterialfv(face, GL_DIFFUSE, diffuse);\n glMaterialfv(face, GL_SPECULAR, specular);\n glMaterialf(face, GL_SHININESS, shininess);", "def test_list_flavors_detailed_filter_by_min_ram(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by RAM in ascending order\n flavors.sort(key=lambda k: int(k.ram))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest RAM size\n filter_criteria = lambda x: int(x.ram) >= int(flavors[1].ram)\n expected_flavors = filter(filter_criteria, flavors)\n\n response = 
self.flavors_client.list_flavors_with_detail(\n min_ram=flavors[1].ram)\n actual_flavors = response.entity\n actual_flavors.sort(key=lambda k: k.id)\n expected_flavors.sort(key=lambda k: k.id)\n self.assertEqual(actual_flavors, expected_flavors)" ]
[ "0.537246", "0.53307503", "0.529262", "0.51645184", "0.51426303", "0.50996315", "0.5073268", "0.5032784", "0.50168383", "0.49994883", "0.485873", "0.48476052", "0.48407164", "0.4840311", "0.48165542", "0.48065767", "0.47913322", "0.47656873", "0.47651768", "0.47596028", "0.47466302", "0.47440743", "0.473864", "0.47285238", "0.47133106", "0.46969554", "0.46966305", "0.46915746", "0.46789744", "0.4678774" ]
0.84512395
0
Test that the zipcode data is cleaned properly and contains only unique rate areas.
def test_clean_zipcode_data_is_unique(self):
    input = {
        '11111': [('NY', '5')],
        '22222': [('WI', '2')],
        '33333': [('WI', '2'), ('NY', '5')],
        '44444': [('WI', '2'), ('WI', '2')],
        '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],
        '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],
        '77777': [
            ('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5'), ('CA', '7')
        ]
    }
    expected = {
        '11111': [('NY', '5')],
        '22222': [('WI', '2')],
        '33333': [('WI', '2'), ('NY', '5')],
        '44444': [('WI', '2')],
        '55555': [('WI', '2'), ('NY', '5')],
        '66666': [('WI', '2'), ('NY', '5')],
        '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]
    }

    cleaned_rate_areas = clean_zipcode_rate_areas(input)

    # Compare each set of rate areas for every zipcode; sort the values to
    # make sure we're comparing the data correctly.
    for zipcode, rate_areas in cleaned_rate_areas.items():
        self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_clean_plan_rates_sorts_and_makes_data_unique(self):\n\n input = {\n ('IN', '1'): [\n '304.5',\n '422.28',\n '386.79',\n '382.7',\n '332.21',\n '422.28',\n '382.7'\n ],\n ('SD', '2'): [\n '279.4',\n '250.14',\n '270.13',\n '274.56',\n '247.67',\n '279.4',\n '270.13'\n ],\n ('FL', '63'): [\n '398.14',\n '330.9',\n '324.61',\n '398.14',\n '345.91',\n '214.32',\n '330.9'\n ],\n ('FL', '54'): [\n '428.03',\n '294.87',\n '339.6',\n '409.72',\n '294.44'\n ]\n }\n\n expected = {\n ('IN', '1'): [\n '304.5',\n '332.21',\n '382.7',\n '386.79',\n '422.28'\n ],\n ('SD', '2'): [\n '247.67',\n '250.14',\n '270.13',\n '274.56',\n '279.4'\n ],\n ('FL', '63'): [\n '214.32',\n '324.61',\n '330.9',\n '345.91',\n '398.14'\n ],\n ('FL', '54'): [\n '294.44',\n '294.87',\n '339.6',\n '409.72',\n '428.03'\n ]\n }\n\n cleaned_plan_data = clean_plan_rates(input)\n self.assertEqual(expected, cleaned_plan_data)", "def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)", "def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n 
slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_zip_state_requirements(self):\n form_data = self.form_data(clear=['billing_state'])\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(billing_state='')\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(clear=['billing_zip'])\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(clear=['billing_state', 'billing_zip'])\n form_data['country'] = 'CAN'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())", "def test_addr_zip_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_zip(input_val)\n self.assertEqual(output_val, self.line.addr_zip)", "def test_geography_area(self):\n # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';\n z = Zipcode.objects.annotate(area=Area(\"poly\")).get(code=\"77002\")\n # Round to the nearest thousand as possible values (depending on\n # the database and geolib) include 5439084, 5439100, 5439101.\n rounded_value = z.area.sq_m\n rounded_value -= z.area.sq_m % 1000\n self.assertEqual(rounded_value, 5439000)", "def test_postal_code(self):\n self.assertIsInstance(self.address.postal_code, str)\n self.assertEqual(self.address.postal_code, \"75000\")", "def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_correct_data_under_boundaries(self):\n load_to_datastore(self.places_sofia, self.metadata_sofia)\n\n area_name = self.metadata_sofia.area_name\n\n # ensure only the boundary for the correct single area is loaded\n redis_boundaries_keys = r.keys(\"*%s*\" % cities_boundaries_template_key) # returns a list\n self.assertEqual(set([cities_boundaries_template_key + area_name]), set(redis_boundaries_keys))\n\n CommonAssertions.check_correct_boundaries_for_area(tester=self, metadata=self.metadata_sofia)", "def get_clean_data(path = 'ucr_offenses_known_monthly_1960_2016_dta/', \n identifier_variables = ['fips_state_county_code', 'state', 'date', 'year', 'zip_code', 'month'], \n crime_category = ['act_aggravated_assault', 'act_simple_assault', 'act_murder', 'act_robbery_total', \n 'act_manslaughter', 'act_theft_total', 'act_mtr_vhc_theft_total', 'act_burglary_total', 'act_rape_total'], \n start_year = 1980, end_year = 2009, selected_area = 'all'):\n all_df = []\n for i in get_filenames(start_year, end_year):\n file = path + i\n print(file)\n each_df = pd.read_stata(file)\n each_df = each_df[identifier_variables + crime_category]\n each_df = each_df[each_df['fips_state_county_code'] == '06001']\n each_df['zipcode'] = each_df['zip_code'].apply(lambda x: str(x)[0:5])\n #split Alameda into West and East Alameda according to zip code\n if selected_area == 'east':\n each_df = each_df[(each_df['zipcode'] == '94550') | (each_df['zipcode'] == '94566') | \n (each_df['zipcode'] == '94586') | (each_df['zipcode'] == '94568') | \n (each_df['zipcode'] == '94588') | (each_df['zipcode'] == '94551')]\n elif selected_area == 'west':\n each_df = each_df[(each_df['zipcode'] != '94550') & (each_df['zipcode'] != '94566') & \n (each_df['zipcode'] 
!= '94586') & (each_df['zipcode'] != '94568') & \n (each_df['zipcode'] != '94588') & (each_df['zipcode'] != '94551') &\n (each_df['zipcode'] != '0') & (each_df['zipcode'] != '0.0') & \n (each_df['zipcode'] != 'not r') & (each_df['zipcode'] != 'missi')]\n each_df.loc[:, 'YearMonth'] = [int(re.sub('-', '', date)[0:6]) for date in each_df.loc[:, 'date']]\n #sum up amount of crimes taken place in each category for each month\n each_df = each_df.groupby(['YearMonth'])[crime_category].sum()\n each_df['crime_sum'] = each_df.sum(axis = 1)\n each_df = each_df['crime_sum'].reset_index()\n all_df.append(each_df)\n df = pd.concat(all_df).fillna(0)\n df = df.sort_values('YearMonth').reset_index()\n #split variable 'YearMonth\" into two variables 'year' and \"month' for Poission regression\n del df['index']\n df['year'] = df['YearMonth'].apply(lambda x: str(x)[:4])\n df['month'] = df['YearMonth'].apply(lambda x: str(x)[4:])\n if selected_area == 'east':\n df.to_csv('east_alameda_crime.csv')\n elif selected_area == 'west':\n df.to_csv('west_alameda_crime.csv')\n else:\n df.to_csv('all_alameda_crime.csv')\n return(df)", "def test_normalize_missing_city_state_and_postal_code(self) -> None:\n try:\n address_missing_required_fields()\n except ValidationError as err:\n assert err.request_id is None\n assert err.source is ErrorSource.SHIPENGINE.value\n assert err.error_type is ErrorType.VALIDATION.value\n assert err.error_code is ErrorCode.FIELD_VALUE_REQUIRED.value\n assert (\n err.message\n == \"Invalid address. Either the postal code or the city/locality and state/province must be specified.\" # noqa\n )", "def valid_zip_sum(line):\n zipcode = line.o_zip_code\n if not sum(int(x) for x in zipcode) <= 20:\n rule = 'Zipcode sum'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_get_polygons_ignore_invalid(self):\n path = os.path.join(BASE_DIR, \"tests\", \"fixtures\", \"kenya.zip\")\n zip_file = zipfile.ZipFile(path)\n shapefile = get_shapefile(zip_file)\n\n with TemporaryDirectory() as temp_dir:\n tpath = temp_dir\n # Extract all files to Temporary Directory\n zip_file.extractall(tpath)\n # concatenate Shapefile path\n shp_path = os.path.join(tpath, shapefile)\n # Make the shapefile a DataSource\n data_source = DataSource(shp_path)\n layer = data_source[0]\n # Get geoms for all Polygons in Datasource\n geom_object_list = layer.get_geoms()\n polygons = get_polygons(geom_object_list)\n\n # check that we get the expected number of Polygons\n self.assertEqual(379, len(polygons))\n\n for item in polygons:\n self.assertTrue(isinstance(item, Polygon))", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def test_mappings():\n\n data = {\n 'succeeded': [],\n 'failed': [],\n 'partial': []\n }\n\n zipcodes = get_postcodes()\n LOGGER.info('analyzing %d zip codes', len(zipcodes))\n\n session = requests.Session()\n for code in zipcodes:\n try:\n response = session.get('http://localhost:10847/zipcode/' + str(code))\n response.raise_for_status()\n\n payload = response.json()['data']\n if 'economic_region' not in payload:\n data['partial'].append(code)\n else:\n data['succeeded'].append(code)\n except requests.HTTPError:\n 
LOGGER.exception('unable to validate zip code')\n data['failed'].append(code)\n\n with open('./results.json', 'w') as f:\n json.dump(data, f)", "def test_empty_string_returned_if_too_few_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n cleaned_plan_data_input = {('WI', '9'): ['324.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_addr_zip_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_zip(val))", "def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')", "def test_make_compatible_taxa_summaries_sample_id_map_incomplete_map(self):\r\n self.assertRaises(ValueError, _make_compatible_taxa_summaries,\r\n self.taxa_summary3, self.taxa_summary4, self.sample_id_map3)", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def test_normalize_alpha_postal_code(self) -> None:\n address = valid_canadian_address()\n normalized = normalize_an_address(address)\n\n valid_address_assertions(\n test_method=self.TEST_METHOD,\n locale=\"international\",\n original_address=address,\n returned_address=normalized,\n expected_residential_indicator=False,\n )", "def test_validate_pincode(self):\n schema = vol.Schema(valid_pin)\n\n for value in ('', '123-456-78', 'a23-45-678', '12345678', 1234):\n with self.assertRaises(vol.MultipleInvalid):\n schema(value)\n\n for value in ('123-45-678', '234-56-789'):\n self.assertTrue(schema(value))", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def clean_postal_code(self):\n return self.cleaned_data['postal_code'].strip()", "def test_get_postal_code(self, raw, expected):\n\n field_mapper = FieldMapper(Row([raw], ['Zip']))\n actual = field_mapper.get_postal_code()\n self.assertEqual(expected, actual)" ]
[ "0.64460033", "0.62175375", "0.61588347", "0.60202265", "0.60172653", "0.5865516", "0.58646524", "0.5859416", "0.57990336", "0.57793456", "0.5748033", "0.57366174", "0.57348245", "0.5724401", "0.57015634", "0.55580825", "0.5512242", "0.5489846", "0.54823476", "0.54728675", "0.54445046", "0.5420462", "0.54135746", "0.5379753", "0.53662", "0.5366191", "0.53397393", "0.53370976", "0.5322655", "0.5299524" ]
0.85351896
0
Tests that the plan rate data is cleaned properly and is returned with sorted unique values for each rate area.
def test_clean_plan_rates_sorts_and_makes_data_unique(self): input = { ('IN', '1'): [ '304.5', '422.28', '386.79', '382.7', '332.21', '422.28', '382.7' ], ('SD', '2'): [ '279.4', '250.14', '270.13', '274.56', '247.67', '279.4', '270.13' ], ('FL', '63'): [ '398.14', '330.9', '324.61', '398.14', '345.91', '214.32', '330.9' ], ('FL', '54'): [ '428.03', '294.87', '339.6', '409.72', '294.44' ] } expected = { ('IN', '1'): [ '304.5', '332.21', '382.7', '386.79', '422.28' ], ('SD', '2'): [ '247.67', '250.14', '270.13', '274.56', '279.4' ], ('FL', '63'): [ '214.32', '324.61', '330.9', '345.91', '398.14' ], ('FL', '54'): [ '294.44', '294.87', '339.6', '409.72', '428.03' ] } cleaned_plan_data = clean_plan_rates(input) self.assertEqual(expected, cleaned_plan_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))", "def test_get_all_rate_plans(self):\n pass", "def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_get_rate_plan_by_product_and_rate_plan(self):\n pass", "def test_get_rate_plan_by_product(self):\n pass", "def area_report(\n file=None\n):\n for entry in file:\n elems = entry.strip().split(' ')\n elems = prune(elems)\n if len(elems) >= 3:\n if str(elems[0]) == \"Total\" and str(elems[1]) == \"cell\" and str(elems[2]) == \"area:\":\n area = float(elems[3])\n\n if str(elems[0]) == \"Total\" and str(elems[1]) == \"area:\":\n if str(elems[2]) != \"undefined\":\n if area < float(elems[2]):\n area = float(elems[2])\n \n area /= 1000000.0\n return area", "def test_remove_taxation_strategy_from_rate_plan(self):\n pass", "def clean_cases(data):\n newdata=[]\n #Add up Bucks Data\n bucks=defaultdict(list)\n for i in data:\n if i['areaName'] in ['Chiltern','Aylesbury Vale','South Bucks','Wycombe']:\n bucks[i['date']].append(i)\n else:\n newdata.append(i)\n log.debug(bucks)\n for _date,_all in bucks.items():\n item={'areaName': 'Buckinghamshire','areaCode':'E06000060','specimenDate':_date}\n item['newCasesBySpecimenDate']=sum([x['newCasesBySpecimenDate'] for x in _all])\n item['cumCasesBySpecimenDate']=sum([x['cumCasesBySpecimenDate'] for x in _all])\n newdata.append(item)\n\n return newdata", "def sort_by_area():\n # Create a list where index --> neuron and value --> area\n matched = [areas_from_channels[int(c)] for c in channels]\n # Find the indices (aka neurons) where they have a score < 2\n bad_indices = [i for i, score in enumerate(quality) if score[0] < 2]\n # Create a dictionary to sort neurons according to areas\n d = {}\n for index, area in enumerate(matched): # Iterate index and value together\n # Discard bad recordings\n if index not in bad_indices:\n # If the area is already a key then append this neuron index\n if area in d.keys():\n d[area].append(index)\n # Else create a new key for a single element list\n else:\n d[area] = [index]\n return d", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = [90]\n errors=[90]\n single_set=[]\n test_tuple=[]\n\n for (networth,age,pred) in zip(net_worths,ages,predictions):\n error=abs(networth-pred)\n 
errors.append(error)\n single_set=(age,networth,error)\n test_tuple.append(single_set)\n #print predictions\n\n print (\"****test tuple***\")\n #for x in test_tuple:\n # print x\n #errors.sort()\n #print errors\n #print len(errors)\n print(\"########\")\n\n #cleaned_data=list(zip(ages,net_worths,errors))\n #cleaned_data.sort(errors)\n #cleaned_data.sort(key=lambda tup: tup[2])\n from operator import itemgetter\n sorted_data=sorted(test_tuple,key = itemgetter(2))\n\n print \"Printing sorted set\"\n #print sorted_data\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(80)\n #for x in sorted_data:\n # print x\n #print sorted_data\n\n #print len(sorted_data)\n\n ### your code goes here\n\n \n return sorted_data", "def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_create_rate_plan(self):\n pass", "def get_remove_list(db_path, file, include_data, airline, can_limit, zs_limit, processed_direc):\n\n z_score_path = '%s%s_%s_Zdata_%s.csv'%(processed_direc, file,airline,include_data) \n #df_score = pd.read_csv(raw_file_drop, index_col=\"Date\")\n df_score = pd.read_csv(z_score_path, index_col = \"Day_of_Year\")\n df_score.index = pd.to_datetime(df_score.index)\n airport_list = df_score.columns.tolist()\n \n df = atn_analysis.raw_query(db_path,file,airline)\n\n df = df[df['Origin_Airport_Code'].isin(airport_list)] # Filtering to make sure airports are equal in both directions\n df = df[df['Destination_Airport_Code'].isin(airport_list)]\n by_origin_count = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].count()\n by_origin = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].sum()\n by_origin.Can_Status = by_origin.Can_Status / by_origin_count.Can_Status\n #print(by_origin)\n df_score[\"idx\"] = df_score.index\n df_score = pd.melt(df_score, id_vars='idx', value_vars=airport_list)\n df_score = df_score.sort_values(['idx', 'variable'], ascending=[True, True])\n df_score.columns = [\"Date\", \"Airports\", \"Z_Score\"]\n df_score.set_index('Date')\n df_score[\"Cancellations\"] = by_origin.Can_Status\n\n ### Creating the or conditions. First is the percentage of delayed flights and the second is the z-score\n df_score[\"Z_score_9901\"] = np.where((df_score['Cancellations'] > can_limit) | (df_score['Z_Score'] > zs_limit), 1, 0)\n #print(df_score)\n\n ### Creating pivot table for easy manipulation. 
This creates the date as the index with the properties corresponding to\n ### it and finally repeats this trend for all airports being considered.\n df_pivot = df_score.pivot_table('Z_score_9901', ['Date'], 'Airports')\n #print(df_pivot)\n\n s = np.asarray(np.where(df_pivot == 1, ['{}'.format(x) for x in df_pivot.columns], '')).tolist()\n\n\n s_nested = []\n for k in s:\n p = list(filter(None,k))\n \n #p = filter(None,k)\n s_nested.append(p)\n #s_nested.extend(p)\n\n\n return s_nested", "def test_retire_rate_plan(self):\n pass", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n temp=[]\n ### your code goes here\n for x in xrange(len(predictions)):\n cleaned_data.append((ages[x],net_worths[x],abs(net_worths[x]-predictions[x])))\n \n cleaned_data.sort(key= lambda tup : tup[2], reverse= False)\n cleaned_data=cleaned_data[:81]\n print(len(cleaned_data))\n return cleaned_data", "def test_update_rate_plan(self):\n pass", "def test_heat_rate(pudl_out_eia):\n print(\"\\nCalculating heat rates by generation unit...\")\n hr_by_unit = pudl_out_eia.hr_by_unit()\n print(f\" heat_rate_by_unit: {len(hr_by_unit)} records found\")\n\n key_cols = ['report_date', 'plant_id_eia', 'unit_id_pudl']\n if not single_records(hr_by_unit, key_cols=key_cols):\n raise AssertionError(\"Found non-unique unit heat rates!\")\n\n print(\"Re-calculating heat rates for individual generators...\")\n hr_by_gen = pudl_out_eia.hr_by_gen()\n print(f\" heat_rate_by_gen: {len(hr_by_gen)} records found\")\n\n if not single_records(hr_by_gen):\n raise AssertionError(\"Found non-unique generator heat rates!\")", "def test1():\n print( 'testing state data processing...')\n fname = \"HPI_PO_state.txt\"\n data = indexTools.read_state_house_price_data( \"data/\" + fname )\n\n answer = dict()\n answer[\"HPI_PO_state.txt 1993 1\"] = [('UT', 117.69), ('OR', 116.94)]\n answer[\"HPI_PO_state.txt 1993 3\"] = [('UT', 128.49), ('CO', 125.16)]\n answer[\"HPI_PO_state.txt 1993 None\"] = [('UT', 125.77499999999999), ('CO', 122.3775)]\n answer[\"HPI_PO_state.txt 1997 1\"] = [('OR', 162.61), ('MT', 162.09)]\n answer[\"HPI_PO_state.txt 1997 3\"] = [('OR', 166.34), ('CO', 162.8)]\n answer[\"HPI_PO_state.txt 1997 None\"] = [('OR', 164.875), ('MT', 162.20499999999998)]\n answer[\"HPI_PO_state.txt 2010 1\"] = [('MT', 298.92), ('WY', 281.91)]\n answer[\"HPI_PO_state.txt 2010 3\"] = [('MT', 293.55), ('WY', 281.33)]\n answer[\"HPI_PO_state.txt 2010 None\"] = [('MT', 292.9875), ('WY', 281.6325)]\n\n for year in [ 1993, 1997, 2010]:\n for qtr in [ 1, 3, None]:\n\n if qtr != None:\n results = periodRanking.quarter_data( data, year, qtr )\n else:\n results = periodRanking.annual_data( indexTools.annualize( data), year )\n key = fname + \" \" + str(year) + \" \" + str(qtr) \n #print( key )\n #if key in answer:\n print( fname, year, qtr, \":\", ( results[1:3] == answer[ key] ))\n #else:\n # print( fname, year, qtr, \":\", \"incorrect\", results[1:3] )\n return", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def areasComparison(self):\n # if self.nFuselage > 0:\n # logger.debug(\"Fuselage initial area:\\n \"+str(self.fs_m_pointsInitArea[0]))\n # logger.debug(\"Fuselage final A:\\n \"+str(self.fs_m_pointsA[0]))\n # logger.debug(\"Fuselage final Iy:\\n \"+str(self.fs_m_pointsIy[0]))\n # logger.debug(\"Fuselage final Iz:\\n \"+str(self.fs_m_pointsIz[0]))\n # logger.debug(\"Fuselage final J:\\n \"+str(self.fs_m_pointsJ[0]))\n # logger.debug(\"Fuselage nodes names\"+str(self.fs_m_pointsName[0]))\n 
# for i in range(self.nWings):\n # logger.debug(\"Wing initial area:\\n \"+str(self.ws_me_pointsInitArea[i]))\n # logger.debug(\"Wing final A:\\n \"+str(self.ws_me_pointsA[i]))\n # logger.debug(\"Wing final Iy:\\n \"+str(self.ws_me_pointsIy[i]))\n # logger.debug(\"Wing final Iz:\\n \"+str(self.ws_me_pointsIz[i]))\n # logger.debug(\"Wing final J:\\n \"+str(self.ws_me_pointsJ[i]))\n # logger.debug(\"Wing nodes names\"+str(self.ws_me_pointsName[i]))\n N = len(self.aircraftNodesPoints)\n for i in range(N):\n logger.debug(\"Aircraft nodes:\\n\"+str(self.aircraftNodesPoints[i]))\n logger.debug(\"Aircraft nodes names:\\n\"+str(self.aircraftNodesNames[i]))\n logger.debug(\"Aircraft A:\\n\"+str(self.aircraftNodesA[i]))\n logger.debug(\"Aircraft Iy:\\n\"+str(self.aircraftNodesIy[i]))\n logger.debug(\"Aircraft Iz:\\n\"+str(self.aircraftNodesIz[i]))\n logger.debug(\"Aircraft J:\\n\"+str(self.aircraftNodesJ[i]))\n sys.exit()", "def test_preliminary(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n if self.deals_count >= 0:\n self.assertEqual(self.deals_count, self.model._sqlite_connection.execute('select count(*) from deals').fetchone()[0])\n print('deals count passed')\n if self.report_type == 'open.ru':\n if self.open_ru_report_type == 'stock':\n pt = self.model.get_paper_type('stock')\n self.assertEqual(set([pt['id']]), set(map(lambda a: a[0], self.model._sqlite_connection.execute('select distinct type from papers').fetchall())))\n print('stock or fut passed')\n elif self.open_ru_report_type == 'future':\n pt = self.model.get_paper_type('future')\n self.assertEqual(set([pt['id']]), set(map(lambda a: a[0], self.model._sqlite_connection.execute('select distinct type from papers').fetchall())))\n print('stock or fut passed')", "def test_rarefy_to_list(self):\r\n maker = RarefactionMaker(self.otu_table_fp, 0, 1, 1, 1)\r\n res = maker.rarefy_to_list(include_full=True)\r\n self.assertItemsEqual(res[-1][2].SampleIds, self.otu_table.SampleIds)\r\n self.assertItemsEqual(\r\n res[-1][2].ObservationIds,\r\n self.otu_table.ObservationIds)\r\n self.assertEqual(res[-1][2], self.otu_table)\r\n\r\n sample_value_sum = []\r\n for val in res[1][2].iterSampleData():\r\n sample_value_sum.append(val.sum())\r\n assert_almost_equal(sample_value_sum, [1.0, 1.0])", "def test_overall_report_ad_revenue():\n assert (overall_data['overall_report']['data'][5][0] == 'Ad revenue')\n for num in overall_data['overall_report']['data'][5][1:]:\n assert (num == 600)", "def cleanse_priest_list(priests_list):", "def test_overall_report_banner_revenue_per_user():\n assert (overall_data['banner_report']['data'][2][0] == 'Revenue per user')\n for num in overall_data['banner_report']['data'][2][1:]:\n assert (num == 6.6667)", "def compare_rates_with_excel_data(self):\n is_compared = True\n self.grid_row_data.clear()\n self.grid_row_data.update({\"Rate1\": \"\", \"Rate2\": \"\", \"Rate3\": \"\", \"Begin Date\": \"\", \"Rating Method\": \"\"})\n self.buy_page_excel_data_dictionary[\"Rating Method\"] = self.buy_page_excel_data_dictionary[\"RatingMethod\"]\n self.buy_page_excel_data_dictionary[\"Begin Date\"] = self.buy_page_excel_data_dictionary[\"EffectiveDate\"]\n rates_grid_row_data = self.get_vendor_profile_page_grid_row_details(self.rates_grid_div_id, self.grid_row_data)\n for key in rates_grid_row_data:\n if rates_grid_row_data[key] != self.buy_page_excel_data_dictionary[key]:\n is_compared = False\n break\n return is_compared", "def 
test_get_available_taxation_strategies_for_rate_plan(self):\n pass", "def test_output_matches_structure_of_input(self):\n\n input_header = ['zipcode', 'rate']\n input_slcsp_zipcodes = ['11111', '22222', '33333', '44444', '55555']\n input_zipcode_data = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '55555': [('FL', '63')]\n }\n input_plan_data = {\n ('NY', '5'): [\n '304.5',\n '422.28',\n '386.79',\n '382.7',\n '332.21',\n '422.28',\n '382.7'\n ],\n ('WI', '2'): [\n '279.4',\n '250.14',\n '270.13',\n '274.56',\n '247.67',\n '279.4',\n '270.13'\n ],\n ('FL', '63'): [\n '398.14'\n ]\n }\n\n expected_output = OrderedDict({\n '11111': '332.21', # Rate found\n '22222': '250.14', # Rate found\n '33333': '', # Rate not found - too many rate areas\n '44444': '', # Rate not found - zipcode wasn't found\n '55555': '' # Rate not found - too few rates\n })\n\n # Check that the header row default is set properly.\n self.assertEqual(input_header, SLCSP_OUTPUT_FIELD_NAMES)\n\n # Clean the data to prepare it for calculating the second lowest cost\n # silver plan for a given zipcode.\n cleaned_zipcode_data = clean_zipcode_rate_areas(input_zipcode_data)\n cleaned_plan_data = clean_plan_rates(input_plan_data)\n\n # Prepare the data for final output.\n prepared_slcsp_output = prepare_slcsp_output(\n input_slcsp_zipcodes,\n cleaned_zipcode_data,\n cleaned_plan_data\n )\n\n # Check that the expected output matches what was produced with the\n # prepared output.\n self.assertEqual(expected_output, prepared_slcsp_output)", "def get_regional_breakdown():\n doc = reg_bdown_coll.find_one({}, {\"_id\": False})\n if doc:\n breakdown = {\n key: sorted(doc[key], key=lambda x: x['count'], reverse=True)\n for key in doc\n }\n for key in doc:\n areas_breakdown = doc[key]\n for ab in areas_breakdown:\n ab['count'] = format_number(ab['count'])\n else:\n breakdown = {\"err\": \"No data\"}\n return breakdown", "def test_overall_report_banner_revenue():\n assert (overall_data['banner_report']['data'][1][0] == 'Revenue')\n for num in overall_data['banner_report']['data'][1][1:]:\n assert (num == 600)" ]
[ "0.6119881", "0.56413275", "0.5277127", "0.5233631", "0.52200216", "0.5073727", "0.506989", "0.4968094", "0.49477315", "0.49471563", "0.4938029", "0.49331796", "0.49196985", "0.49146998", "0.48825213", "0.48451757", "0.48303533", "0.48172814", "0.4804989", "0.47885492", "0.4784963", "0.47759938", "0.4774437", "0.47642106", "0.47483048", "0.4746657", "0.4734526", "0.4719259", "0.47158885", "0.47031385" ]
0.7310434
0
Tests that when the conditions are right, a zipcode is properly mapped to a rate.
def test_zipcode_is_successfully_mapped(self): zipcode = '11111' cleaned_zipcode_data_input = {'11111': [('NY', '5')]} cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']} expected = '294.87' slcsp_rate = retrieve_slcsp_for_zipcode( zipcode, cleaned_zipcode_data_input, cleaned_plan_data_input ) self.assertEqual(expected, slcsp_rate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))", "def test_company_EU_GR_vies_tax(self):\n self.assertEqual(self.policy.get_tax_rate(\"123456\", \"GR\"), (24, False))", "def test_output_matches_structure_of_input(self):\n\n input_header = ['zipcode', 'rate']\n input_slcsp_zipcodes = ['11111', '22222', '33333', '44444', '55555']\n input_zipcode_data = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '55555': [('FL', '63')]\n }\n input_plan_data = {\n ('NY', '5'): [\n '304.5',\n '422.28',\n '386.79',\n '382.7',\n '332.21',\n '422.28',\n '382.7'\n ],\n ('WI', '2'): [\n '279.4',\n '250.14',\n '270.13',\n '274.56',\n '247.67',\n '279.4',\n '270.13'\n ],\n ('FL', '63'): [\n '398.14'\n ]\n }\n\n expected_output = OrderedDict({\n '11111': '332.21', # Rate found\n '22222': '250.14', # Rate found\n '33333': '', # Rate not found - too many rate areas\n '44444': '', # Rate not found - zipcode wasn't found\n '55555': '' # Rate not found - too few rates\n })\n\n # Check that the header row default is set properly.\n self.assertEqual(input_header, SLCSP_OUTPUT_FIELD_NAMES)\n\n # Clean the data to prepare it for calculating the second lowest cost\n # silver plan for a given zipcode.\n cleaned_zipcode_data = clean_zipcode_rate_areas(input_zipcode_data)\n cleaned_plan_data = clean_plan_rates(input_plan_data)\n\n # Prepare the data for final output.\n prepared_slcsp_output = prepare_slcsp_output(\n input_slcsp_zipcodes,\n cleaned_zipcode_data,\n cleaned_plan_data\n )\n\n # Check that the expected output matches what was produced with the\n # prepared output.\n self.assertEqual(expected_output, prepared_slcsp_output)", "def test_rate_always_formatted_to_two_decimal_places(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_inputs = [\n 
{('NY', '5'): ['294.24', '294']},\n {('NY', '5'): ['294.24', '294.7']},\n {('NY', '5'): ['294.24', '294.3452']},\n {('NY', '5'): ['294.24', '294.24']}\n ]\n\n # NOTE: Formatting a decimal.Decimal value will result in rounding.\n expected_results = ['294.00', '294.70', '294.35', '294.24']\n\n for i, cleaned_plan_data_input in enumerate(cleaned_plan_data_inputs):\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected_results[i], slcsp_rate)", "def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_rate_cost_type_valid(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\n \"unit\": \"USD\",\n \"value\": 0.22,\n \"usage\": {\"usage_start\": None, \"usage_end\": None},\n \"cost_type\": \"Infrastructure\",\n }\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\n \"unit\": \"USD\",\n \"value\": 0.22,\n \"usage\": {\"usage_start\": None, \"usage_end\": None},\n \"cost_type\": \"Supplementary\",\n }\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def compare_zipcodes(s1, s2):\n\n # check if the zipcode are identical (return 1 or 0)\n sim = (s1 == s2).astype(float)\n\n # check the first 2 numbers of the distinct comparisons\n sim[(sim == 0) & (s1.str[0:2] == s2.str[0:2])] = 0.5\n\n return sim", "def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_check_cost():", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def test_validity():\n\n data = request.json\n promo_code = Promo_code.query.filter_by(code=data['code']).first()\n if promo_code is not None:\n origin = Promo_code.query.filter_by(event=data['origin']).first()\n destination = Promo_code.query.filter_by(event=data['destination']).first()\n\n try:\n origin_distance = geolocator.geocode(data['origin'])\n origin_distance_codes = (origin_distance.latitude, origin_distance.longitude)\n\n destination_distance = geolocator.geocode(data['destination'])\n destination_distance_codes = 
(destination_distance.latitude, destination_distance.longitude)\n\n event = geolocator.geocode(promo_code.event)\n event_codes = (event.latitude, event.longitude)\n\n event_origin_distance = geopy.distance.vincenty(origin_distance_codes, event_codes).km\n event_destination_distance = geopy.distance.vincenty(destination_distance_codes, event_codes).km\n\n if origin or destination is not None or \\\n event_origin_distance < promo_code.radius or \\\n event_destination_distance < promo_code.radius:\n return jsonify({'promo_code details': dict(id=promo_code.id,\n code=promo_code.code,\n event=promo_code.event,\n expiry_data=promo_code.expiry_date,\n status=promo_code.status,\n price=promo_code.price),\n 'polyline':data['destination'] + data['origin']}), 200\n return jsonify({'status':'fail', 'message':'Promo code is not valid'}),400\n except:\n return jsonify({\"Error with the location entered\"})\n\n return jsonify({'status': 'fail',\n 'message': 'code doesnot exist'}), 404", "def test_rates_error_on_specifying_tiered_and_tag_rates(self):\n tag_values_kwargs = [{\"value\": 0.2}]\n tiered_rate = [{\"value\": 1.3, \"unit\": \"USD\"}]\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=tag_values_kwargs)\n self.basic_model[\"rates\"][0][\"tiered_rates\"] = tiered_rate\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n self.assertFalse(serializer.is_valid(raise_exception=True))\n result_err_msg = serializer.errors[\"rates\"][0][\"non_field_errors\"][0]\n expected_err_msg = \"Set either 'tiered_rates' or 'tag_rates' but not both\"\n self.assertEqual(result_err_msg, expected_err_msg)", "def test_cambridge_rent_price_per_sqft():\n dataframe = get_final_zillow_dataframe()\n cambridge = get_city_state_row(dataframe, 'cambridge', 'massachusetts')\n assert round(cambridge.iloc[0].get('ZRIFAH'), 1) == 2.9", "def test_retire_rate_plan(self):\n pass", "def test_empty_string_returned_if_too_few_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n cleaned_plan_data_input = {('WI', '9'): ['324.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_tiered_rate_with_gaps(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\"unit\": \"USD\", \"value\": 0.22, \"usage\": {\"usage_start\": None, \"usage_end\": 7.0}},\n {\"unit\": \"USD\", \"value\": 0.26, \"usage_start\": 10.0, \"usage_end\": None},\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_currency_response():\n \n \n assert('{ \"from\" : \"1 United States Dollar\", \"to\" : \"1 United States Dollar\", \"success\" : true, \"error\" : \"\" }' == currency_response(\"USD\", \"USD\", \"1\"))\n assert('{ \"from\" : \"1 United States Dollar\", \"to\" : \"0.838095 Euros\", \"success\" : true, \"error\" : \"\" }' == currency_response(\"USD\", \"EUR\", \"1\"))\n assert('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"Source currency code is invalid.\" }' == currency_response(\"A\", \"USD\", \"1\"))", "def test_company_EU_GR_vies_zero(self, 
mock_check):\n mock_check.return_value = {\"valid\": True}\n self.assertEqual(self.policy.get_tax_rate(\"EL090145420\", \"GR\"), (None, True))", "def are_within_limits(rates):\n for srv in rates:\n for state in rates[srv]:\n rate = rates[srv][state]\n if rate < 0 or rate > 5:\n print(f\"Rate {rate} out of bounds: Server {srv}, State {state}\")\n return False\n return True", "def test_addr_zip_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_zip(input_val)\n self.assertEqual(output_val, self.line.addr_zip)", "def test_get_postal_code(self, raw, expected):\n\n field_mapper = FieldMapper(Row([raw], ['Zip']))\n actual = field_mapper.get_postal_code()\n self.assertEqual(expected, actual)", "def test_country_code(self):\n\t\tcountry_name = 'United States'\n#\t\tpopulation = int(float(pop_dict['Value']))\n\t\tcode = get_country_code(country_name)\t\t\n\t\t#Assert methods verifies result received matches expected one\n\t\tself.assertEqual(code, 'usa')", "def test_mappings():\n\n data = {\n 'succeeded': [],\n 'failed': [],\n 'partial': []\n }\n\n zipcodes = get_postcodes()\n LOGGER.info('analyzing %d zip codes', len(zipcodes))\n\n session = requests.Session()\n for code in zipcodes:\n try:\n response = session.get('http://localhost:10847/zipcode/' + str(code))\n response.raise_for_status()\n\n payload = response.json()['data']\n if 'economic_region' not in payload:\n data['partial'].append(code)\n else:\n data['succeeded'].append(code)\n except requests.HTTPError:\n LOGGER.exception('unable to validate zip code')\n data['failed'].append(code)\n\n with open('./results.json', 'w') as f:\n json.dump(data, f)", "def test_error_on_rate_type(self):\n self.ocp_data[\"rates\"][0].pop(\"tiered_rates\")\n self.ocp_data[\"rates\"][0][\"bad_rates\"] = []\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_tiered_rate_with_overlaps(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\"unit\": \"USD\", \"value\": 0.22, \"usage\": {\"usage_start\": None, \"usage_end\": 10.0}},\n {\"unit\": \"USD\", \"value\": 0.26, \"usage\": {\"usage_start\": 5.0, \"usage_end\": 20.0}},\n {\"unit\": \"USD\", \"value\": 0.26, \"usage\": {\"usage_start\": 20.0, \"usage_end\": None}},\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def test_empty_string_returned_if_no_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_zip_state_requirements(self):\n form_data = self.form_data(clear=['billing_state'])\n form = 
DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(billing_state='')\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(clear=['billing_zip'])\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(clear=['billing_state', 'billing_zip'])\n form_data['country'] = 'CAN'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())" ]
[ "0.66684675", "0.60333157", "0.5978363", "0.58802783", "0.5841937", "0.5741159", "0.5704098", "0.565133", "0.55974364", "0.5566235", "0.54648787", "0.5458494", "0.5457171", "0.5455862", "0.5448905", "0.544727", "0.5438281", "0.5421464", "0.5415892", "0.5403968", "0.5398028", "0.53959376", "0.5392614", "0.537715", "0.5376972", "0.53664374", "0.53564554", "0.5354235", "0.5285426", "0.52769107" ]
0.69216657
0
Tests that if no matching rate is found for a zipcode, an empty string is returned instead per the exercise instructions.
def test_no_rate_found_is_empty_string(self): zipcode = '11111' cleaned_zipcode_data_input = {'22222': [('NH', '12')]} cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']} expected = '' slcsp_rate = retrieve_slcsp_for_zipcode( zipcode, cleaned_zipcode_data_input, cleaned_plan_data_input ) self.assertEqual(expected, slcsp_rate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_string_returned_if_no_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_empty_string_returned_if_too_few_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n cleaned_plan_data_input = {('WI', '9'): ['324.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def zip_code(self, value):\n regex = config.get('validators', 'zip_code')\n zipcode = re.search(regex,\n value)\n if not zipcode:\n raise ZipCodeError(\"ZipCodeError: 'zip_code' must be 5 non-float digits\")\n else:\n self._zip_code = value", "def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def 
zip_code(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\treturn element.element_value", "def get_closest_station_by_zipcode(zipcode):\n\n station_lookup_method_by_zipcode = lookup_usaf_station_by_zipcode(zipcode)\n try:\n station, warnings, lat, lon = _get_closest_station_by_zcta_ranked(zipcode)\n\n isd_metadata = get_isd_file_metadata(str(station))\n if len(isd_metadata) == 0:\n logging.warning(\"Zipcode %s mapped to station %s, but no ISD metadata was found.\" % (zipcode, station))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedUSAFIDError as e:\n logging.warning(\"Closest station %s is not a recognized station. Using backup-method station %s for zipcode %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode,\n zipcode))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedZCTAError as e:\n logging.warning(\"Unrecognized ZCTA %s\" % e)\n return None\n\n if str(station) != station_lookup_method_by_zipcode:\n logging.debug(\"Previously would have selected station %s instead of %s for zip code %s\" % (\n station_lookup_method_by_zipcode,\n str(station),\n zipcode))\n\n if warnings:\n logging.warning(\"Station %s is %d meters over maximum %d meters (%d meters) (zip code %s is at lat/lon %f, %f)\" % (\n str(station),\n int(warnings[0].data['distance_meters'] - warnings[0].data['max_distance_meters']),\n int(warnings[0].data['max_distance_meters']),\n int(warnings[0].data['distance_meters']),\n zipcode,\n lat,\n lon,\n ))\n logging.warning(\"Closest station %s is too far. Using backup-method station %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode))\n return station_lookup_method_by_zipcode\n\n return str(station)", "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def lookup_usaf_station_by_zipcode(zipcode):\n\n usaf = zipcode_usaf.get(zipcode, None)\n return usaf", "def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def find_zip_code(x):\n i = 0\n j = 4\n for i in range(1,len(x)-6):\n string = x[i-1:i+6]\n cond = (string[1:-1].isnumeric(), not string[0].isnumeric(), not string[-1].isnumeric())\n if all(cond):\n return x[i:i+5]", "def compare_zipcodes(s1, s2):\n\n # check if the zipcode are identical (return 1 or 0)\n sim = (s1 == s2).astype(float)\n\n # check the first 2 numbers of the distinct comparisons\n sim[(sim == 0) & (s1.str[0:2] == s2.str[0:2])] = 0.5\n\n return sim", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def get_city_by_code(post_code):\n post_code = 
post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def get_city(zip_code):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.ZIP_KEY\r\n\r\n # API endpoint\r\n url = f'https://www.zipcodeapi.com/rest/{api_key}/info.json/{zip_code}/degrees'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n if 'error_code' in data or 'error_msg' in data:\r\n return {\r\n 'success': False,\r\n 'query': zip_code\r\n }\r\n\r\n else:\r\n return {\r\n 'success': True,\r\n 'query': data['zip_code'],\r\n 'city': data['city'],\r\n 'state': data['state'],\r\n 'lat': data['lat'],\r\n 'lon': data['lng']\r\n }", "def replace_zip_code(zip_code):\r\n if len(zip_code)>5:\r\n return zip_code[0:5]\r\n else:\r\n return zip_code", "def searchZipcode(zipcode, jurisdictions):\n try:\n if len(str(zipcode)) != 5:\n return jurisdictions.none()\n\n zipcode = 
Zipcode.objects.get(code=zipcode)\n j = jurisdictions.filter(geometry__intersects=zipcode.geometry)\n return j\n except Exception as e:\n print(e)\n return jurisdictions.none()", "def clean_incident_zip(zipcode):\n zipcode = str(zipcode).replace('.0', '')[:5]\n try:\n zipcode = int(zipcode)\n except:\n return None\n # Pad it on the left with '0's\n zipcode = '{:05}'.format(zipcode)\n return zipcode", "def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))", "def get_zipsearch(zipcode=u''):\n from x84.bbs import getterminal, LineEditor, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow(u' -'),\n term.reverse_yellow(u':'),\n u' ')))\n return LineEditor(width=min(30, term.width - 5), content=zipcode).read()", "def inputZip() -> int:\n while True:\n try:\n return int(input(\"Enter your zipcode for concerts near you: \"))\n except ValueError:\n print(\"Input only accepts numbers.\")", "def find_one(cls, zipcode ):\n qry = cls.session.query(cls).filter(cls.ZIPCODE.ilike(f'{zipcode}'))\n zc = qry.one()\n return zc", "def get_zip_code(string):\n zip_code = \"\"\n\n #for each character in string\n for ch in string:\n #if the character is a number, add it to the \"zip_code\" string\n if ch.isdigit():\n zip_code += ch\n\n return zip_code", "def match_city(self, city, dpt_code, zip_code = None):\n city = format_str_city_insee(city)\n dpt_code = dpt_code.rjust(2, '0')\n if zip_code:\n zip_code.rjust(5, '0')\n # Based on zip code and city name\n ls_matching = []\n found_indicator = False\n if zip_code:\n if zip_code in self.dict_corr_zip_insee:\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_match')\n # If no exact zip, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_in_match(es)')\n # Based on dpt code and city name\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_match')\n # If no exact dpt, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in 
self.dict_corr_dpt_insee[dpt_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_in_match(es)')\n # No match\n return (None, 'no_match')", "def get(self, zipcode):\n response = hereService.getWeatherByZipcode(zipcode)\n return response" ]
[ "0.6593722", "0.65769804", "0.6516567", "0.64187825", "0.640926", "0.6383783", "0.6159052", "0.60808915", "0.5918545", "0.5846181", "0.5840774", "0.57913196", "0.57867366", "0.57632065", "0.57513416", "0.57177305", "0.5681527", "0.5549766", "0.5544274", "0.552554", "0.5523862", "0.55089045", "0.5489339", "0.54785824", "0.5470438", "0.5431667", "0.54068196", "0.5405432", "0.5397113", "0.53829753" ]
0.7299004
0
Tests that a rate is not returned when a zipcode is given in a format that is not 5 digits.
def test_only_five_digit_zipcodes_match(self): incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def'] non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True] cleaned_zipcode_data_input = {'11111': [('NY', '5')]} cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']} expected = '' for incorrect_zipcode in incorrect_zipcodes: slcsp_rate = retrieve_slcsp_for_zipcode( incorrect_zipcode, cleaned_zipcode_data_input, cleaned_plan_data_input ) self.assertEqual(expected, slcsp_rate) for non_string_zipcode in non_string_zipcodes: slcsp_rate = retrieve_slcsp_for_zipcode( non_string_zipcode, cleaned_zipcode_data_input, cleaned_plan_data_input ) self.assertEqual(expected, slcsp_rate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def zip_code(self, value):\n regex = config.get('validators', 'zip_code')\n zipcode = re.search(regex,\n value)\n if not zipcode:\n raise ZipCodeError(\"ZipCodeError: 'zip_code' must be 5 non-float digits\")\n else:\n self._zip_code = value", "def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def test_empty_string_returned_if_too_few_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n cleaned_plan_data_input = {('WI', '9'): ['324.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n 
)\n\n self.assertEqual(expected, slcsp_rate)", "def clean_incident_zip(zipcode):\n zipcode = str(zipcode).replace('.0', '')[:5]\n try:\n zipcode = int(zipcode)\n except:\n return None\n # Pad it on the left with '0's\n zipcode = '{:05}'.format(zipcode)\n return zipcode", "def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def find_zip_code(x):\n i = 0\n j = 4\n for i in range(1,len(x)-6):\n string = x[i-1:i+6]\n cond = (string[1:-1].isnumeric(), not string[0].isnumeric(), not string[-1].isnumeric())\n if all(cond):\n return x[i:i+5]", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def valid_zip_sum(line):\n zipcode = line.o_zip_code\n if not sum(int(x) for x in zipcode) <= 20:\n rule = 'Zipcode sum'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def replace_zip_code(zip_code):\r\n if len(zip_code)>5:\r\n return zip_code[0:5]\r\n else:\r\n return zip_code", "def test_rate_always_formatted_to_two_decimal_places(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_inputs = [\n {('NY', '5'): ['294.24', '294']},\n {('NY', '5'): ['294.24', '294.7']},\n {('NY', '5'): ['294.24', '294.3452']},\n {('NY', '5'): ['294.24', '294.24']}\n ]\n\n # NOTE: Formatting a decimal.Decimal value will result in rounding.\n expected_results = ['294.00', '294.70', '294.35', '294.24']\n\n for i, cleaned_plan_data_input in enumerate(cleaned_plan_data_inputs):\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected_results[i], slcsp_rate)", "def test_empty_string_returned_if_no_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def compare_zipcodes(s1, s2):\n\n # check if the zipcode are identical (return 1 or 0)\n sim = (s1 == s2).astype(float)\n\n # check the first 2 numbers of the distinct comparisons\n sim[(sim == 0) & (s1.str[0:2] == s2.str[0:2])] = 0.5\n\n return sim", "def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def 
checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)", "def test_bad_airport(self):\n result = self.client.get(\"/search?origin=foo&destination=DFW%2C+Dallas+TX&date=2018-05-21\")\n self.assertNotIn('<meter', result.data)\n self.assertIn('enter a valid airport', result.data)", "def is_not_used(code):\n return 0 <= code <= 999", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def test_postal_code(self):\n self.assertIsInstance(self.address.postal_code, str)\n self.assertEqual(self.address.postal_code, \"75000\")", "def valid_zip(x):\r\n m_zip = [83,10286,10276,10268,10256,10249,10159,10150,10116,10113,10108,10101,10008,10282,10281,10280,10279,10278,10275,\r\n 10271,10270,10199,10178,10177,10176,10175,10174,10173,10172,10171,10170,10169,10168,10167,10166,10165,\r\n 10162,10161,10158,10155,10154,10153,10152,10151,10128,10123,10122,10121,10120,10119,10118,10115,10112,\r\n 10111,10110,10107,10106,10105,10104,10103,10099,10098,10095,10090,10069,10060,10055,10048,10047,10045,10044,\r\n 10041,10040,10039,10038,10037,10036,10035,10034,10033,10032,10031,10030,10029,10028,10027,10026,10025,\r\n 10024,10023,10022,10021,10020,10019,10018,10017,10016,10015,10014,10013,10012,10011,10010,10009,10007,\r\n 10006,10005,10004,10003,10002,10001,10065,10075,10080,\r\n 10285,10203,10178,10017,10178,10168,10167,10177,# supplementary\r\n 10175,10166,10171,10176,10174,10165,10170,10173,10169,10172,10019, 10105, 10097, 10104, 10107, 10103, 10106,\r\n 10022, 10055, 10155, 10152, 10153, 10151, 10154, 10001, 10120, 10119, 10118, 10123, 10122, 10121,\r\n 10005, 10081, 10286, 10260, 10271, 10259, 10043, 10270, 10265, 10203,10036, 10096, 10196, 10110\r\n ]\r\n brooklyn_zip = [11256,11252,11249,11243,11242,11241,11239,11238,11237,11236,11235,11234,11233,11232,11231,\r\n 11230,11229,11228,11226,11225,11224,11223,11222,11221,11220,11219,11218,11217,11216,11215,\r\n 11214,11213,11212,11211,11210,11209,11208,11207,11206,11205,11204,11203,11201]\r\n queens_zip = [11451,11436,11435,11434,11433,11432,11429,11428,11427,11426,\r\n 11423,11422,11421,11420,11419,11418,11417,11416,11415,11414,11413,11412,11411,11385,11379,\r\n 11378,11377,11375,11374,11373,11372,11369,11368,11367,11366,11365,11364,11363,\r\n 11362,11361,11360,11359,11358,11357,11356,11355,11354,11351,11109,11106,11105,11104,11103,\r\n 11102,11101,11004]\r\n if x in m_zip + brooklyn_zip + queens_zip:\r\n return 1\r\n else:\r\n return 0", "def validate_rating(self, key, value):\n assert value is None or value <= 10 and value >= 0\n return value", "def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None" ]
[ "0.69980687", "0.6726815", "0.6431161", "0.6381496", "0.60074365", "0.59789", "0.5864694", "0.5857582", "0.58574224", "0.57995015", "0.56933093", "0.5647003", "0.55855864", "0.5554181", "0.55517066", "0.554926", "0.5535344", "0.5522636", "0.5497078", "0.5466229", "0.54006547", "0.53977036", "0.53942317", "0.53901947", "0.5383247", "0.5309831", "0.5269886", "0.5267611", "0.5245358", "0.52374244" ]
0.72417444
0
Tests that an empty string is returned if no plan areas exist for a given zipcode.
def test_empty_string_returned_if_no_plan_areas_exist(self):

        zipcode = '11111'
        cleaned_zipcode_data_input = {'11111': []}
        cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}

        expected = ''

        slcsp_rate = retrieve_slcsp_for_zipcode(
            zipcode,
            cleaned_zipcode_data_input,
            cleaned_plan_data_input
        )

        self.assertEqual(expected, slcsp_rate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_empty_string_returned_if_no_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_empty_string_returned_if_too_few_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n cleaned_plan_data_input = {('WI', '9'): ['324.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def test_incomplete_polygons():\n assert not query_row(db_conf, 'osm_landusages', 30004)\n assert not query_row(db_conf, 'osm_landusages', 30006)", "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county 
= zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def test_for_empty_list(self):\n emptylist = []\n self.assertEqual(self.place.amenity_ids, emptylist)", "def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_portalBlank(self):\n streets = (\"6:00 Portal\", \"\")\n for front, cross in (streets, reversed(streets)):\n location = parseLocation(\n \"Burning Man Department\",\n \"Fire Conclave Convergence\",\n \"6:00 Portal\",\n front, cross,\n \"133 x 80\"\n )\n self.assertEquals(\n location,\n Location(\n name=\"Fire Conclave Convergence\",\n address=RodGarettAddress(\n concentric=None, radialHour=6, radialMinute=0,\n description=(\n \"6:00 Portal, Burning Man Department 133x80\"\n ),\n ),\n )\n )", "def find_zip_codes(self, zip_code):\n zip_code = str(zip_code).strip()\n cursor = self.households.find({\"addresses.zip_code\":zip_code})\n results = [Household.from_dict(dct) for dct in cursor]\n\n cursor = self.businesses.find({\"address.zip_code\":zip_code})\n results += [Business.from_dict(dct) for dct in cursor]\n\n return results", "def main(postalcode):\n places = postalcodes_mexico.places(postalcode)\n click.echo(places)\n return 0", "def test_areaid(self):\n self.assertTrue(\n int(self.ospf.parse_state(\n pattern='areaid',\n cmd_key='sh_ospf_ints')) == 0, 'OSPF Interface: area ID not found')", "def valid_zip_sum(line):\n zipcode = line.o_zip_code\n if not sum(int(x) for x in zipcode) <= 20:\n rule = 'Zipcode sum'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_can_lookup_postcode(self):\n postcode_to_lookup = \"SW1A 1AA\"\n os_places_key = self.app.config.get(\"OS_PLACES_API_KEY\")\n addresses = AddressLookup(key=os_places_key).by_postcode(postcode_to_lookup)\n self.assertGreater(len(addresses), 0)\n result_postcode = addresses[0].get(\"DPA\", {}).get(\"POSTCODE\")\n self.assertEqual(result_postcode, postcode_to_lookup)", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results 
in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def validateAirport(self, code):\n print(code)\n if code in self.travel_db.airports:\n return True\n else:\n return False", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def test_geography_area(self):\n # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';\n z = Zipcode.objects.annotate(area=Area(\"poly\")).get(code=\"77002\")\n # Round to the nearest thousand as possible values (depending on\n # the database and geolib) include 5439084, 5439100, 5439101.\n rounded_value = z.area.sq_m\n rounded_value -= z.area.sq_m % 1000\n self.assertEqual(rounded_value, 5439000)", "def isEmptyLandmarkset(self):\n return self.subsetpointcloud is None", "def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))", "def test_0_return(self):\n plan = astar(self.mapp_1_s,\n lambda s : s == self.mapp_1_g,\n MAPPDistanceSum(self.mapp_1_g))\n self.assertIsNotNone(plan, \"Have you forgotten the return statement?\")", "def 
test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_get_contracts_addresses_empty():\n addresses = ContractHandler.get_contracts_addresses(_NETWORK, address_file=None)\n assert addresses is None", "def find_in_county(cls, county_code, ma_region, pdp_region, name='*'):\n flter = or_(cls.COUNTY_CODE == county_code,\n cls.MA_REGION_CODE == ma_region,\n cls.PDP_REGION_CODE == pdp_region\n )\n if not name == '*':\n look_for = f\"{name.lower()}%\"\n flter = and_(flter, cls.PLAN_NAME.ilike(look_for))\n\n qry = cls.session.query(Plans.PLAN_NAME).filter(flter).distinct(cls.PLAN_NAME).all()\n results = [r.PLAN_NAME for r in qry]\n return results", "def test_generalized_banana_polygon_is_valid():\n park = query_row(db_conf, 'osm_landusages', 7101)\n # geometry is not valid\n assert not park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen0', 7101)\n # but simplified geometies are valid\n assert park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen1', 7101)\n assert park['geometry'].is_valid, park" ]
[ "0.70404387", "0.60167426", "0.5886546", "0.57227695", "0.5487004", "0.51963043", "0.51898575", "0.518254", "0.51716274", "0.51694894", "0.51153564", "0.5086501", "0.50733477", "0.50481135", "0.50416213", "0.501723", "0.49452177", "0.49212265", "0.49157584", "0.4908836", "0.48739028", "0.48685893", "0.48619026", "0.48304692", "0.48248684", "0.48019364", "0.4788168", "0.47555834", "0.4748873", "0.47473192" ]
0.7164315
0
Tests that an empty string is returned if more than one plan area exists for a given zipcode.
def test_empty_string_returned_if_too_many_plan_areas_exist(self):

        zipcode = '11111'
        cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}
        cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}

        expected = ''

        slcsp_rate = retrieve_slcsp_for_zipcode(
            zipcode,
            cleaned_zipcode_data_input,
            cleaned_plan_data_input
        )

        self.assertEqual(expected, slcsp_rate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))", "def test_empty_string_returned_if_too_few_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n cleaned_plan_data_input = {('WI', '9'): ['324.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def test_empty_string_returned_if_no_plans_are_found(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9')]}\n cleaned_plan_data_input = {('WI', '9'): []}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n 
slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def find_zip_codes(self, zip_code):\n zip_code = str(zip_code).strip()\n cursor = self.households.find({\"addresses.zip_code\":zip_code})\n results = [Household.from_dict(dct) for dct in cursor]\n\n cursor = self.businesses.find({\"address.zip_code\":zip_code})\n results += [Business.from_dict(dct) for dct in cursor]\n\n return results", "def match_city(self, city, dpt_code, zip_code = None):\n city = format_str_city_insee(city)\n dpt_code = dpt_code.rjust(2, '0')\n if zip_code:\n zip_code.rjust(5, '0')\n # Based on zip code and city name\n ls_matching = []\n found_indicator = False\n if zip_code:\n if zip_code in self.dict_corr_zip_insee:\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_match')\n # If no exact zip, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_in_match(es)')\n # Based on dpt code and city name\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_match')\n # If no exact dpt, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_in_match(es)')\n # No match\n return (None, 'no_match')", "def valid_zip_sum(line):\n zipcode = line.o_zip_code\n if not sum(int(x) for x in zipcode) <= 20:\n rule = 'Zipcode sum'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def test_portalBlank(self):\n streets = (\"6:00 Portal\", \"\")\n for front, cross in 
(streets, reversed(streets)):\n location = parseLocation(\n \"Burning Man Department\",\n \"Fire Conclave Convergence\",\n \"6:00 Portal\",\n front, cross,\n \"133 x 80\"\n )\n self.assertEquals(\n location,\n Location(\n name=\"Fire Conclave Convergence\",\n address=RodGarettAddress(\n concentric=None, radialHour=6, radialMinute=0,\n description=(\n \"6:00 Portal, Burning Man Department 133x80\"\n ),\n ),\n )\n )", "def validateAirport(self, code):\n print(code)\n if code in self.travel_db.airports:\n return True\n else:\n return False", "def test_incomplete_polygons():\n assert not query_row(db_conf, 'osm_landusages', 30004)\n assert not query_row(db_conf, 'osm_landusages', 30006)", "def searchZipcode(zipcode, jurisdictions):\n try:\n if len(str(zipcode)) != 5:\n return jurisdictions.none()\n\n zipcode = Zipcode.objects.get(code=zipcode)\n j = jurisdictions.filter(geometry__intersects=zipcode.geometry)\n return j\n except Exception as e:\n print(e)\n return jurisdictions.none()", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': 
POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def test_areaid(self):\n self.assertTrue(\n int(self.ospf.parse_state(\n pattern='areaid',\n cmd_key='sh_ospf_ints')) == 0, 'OSPF Interface: area ID not found')", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def validate_area(area: str) -> bool:\n area_pattern_is_correct = re.fullmatch(\n '^{}$'.format(AREA_REGEX),\n str(area)\n )\n\n if area_pattern_is_correct:\n return True\n\n raise exceptions.InvalidAreaValueError(\n 'Area should be 1 or 2 alphabetic characters'\n )", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def _build_area(db, place):\n location = get_main_location(db, place)\n street = location.get(PlaceType.STREET)\n city = location.get(PlaceType.CITY)\n # Build a title description string that will work for Eniro\n area_descr = \"\"\n if street:\n area_descr += street.strip() \n if city:\n area_descr += ', ' + city \n return _strip_leading_comma(area_descr)", "def purchase_number_in_same_area_code(phone_number):\r\n unpurchased_numbers = search_by_area_code(phone_number)\r\n for i in range(4):\r\n first_number = unpurchased_numbers[i]\r\n try:\r\n pn = _purchase(first_number)\r\n return pn\r\n except:\r\n continue", "def main(postalcode):\n places = postalcodes_mexico.places(postalcode)\n click.echo(places)\n return 0", "def test_for_empty_list(self):\n emptylist = []\n self.assertEqual(self.place.amenity_ids, emptylist)", "def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)", "def find_in_county(cls, county_code, ma_region, pdp_region, name='*'):\n flter = or_(cls.COUNTY_CODE == county_code,\n cls.MA_REGION_CODE == ma_region,\n cls.PDP_REGION_CODE == pdp_region\n )\n if not name == '*':\n look_for = f\"{name.lower()}%\"\n flter = and_(flter, cls.PLAN_NAME.ilike(look_for))\n\n qry = cls.session.query(Plans.PLAN_NAME).filter(flter).distinct(cls.PLAN_NAME).all()\n results = [r.PLAN_NAME for r in qry]\n return results" ]
[ "0.6895567", "0.58014107", "0.5796688", "0.5711699", "0.5480371", "0.5380354", "0.53440535", "0.53259677", "0.52647734", "0.5237509", "0.52233595", "0.5210759", "0.51686174", "0.5094478", "0.5078834", "0.507732", "0.50009465", "0.4994607", "0.49879083", "0.49855915", "0.4953211", "0.49434215", "0.4929742", "0.49284565", "0.48997727", "0.4897833", "0.48822924", "0.48816097", "0.48782095", "0.4875935" ]
0.7240199
0
Tests that if a rate can be returned, it is always formatted to two decimal places.
def test_rate_always_formatted_to_two_decimal_places(self):

        zipcode = '11111'
        cleaned_zipcode_data_input = {'11111': [('NY', '5')]}
        cleaned_plan_data_inputs = [
            {('NY', '5'): ['294.24', '294']},
            {('NY', '5'): ['294.24', '294.7']},
            {('NY', '5'): ['294.24', '294.3452']},
            {('NY', '5'): ['294.24', '294.24']}
        ]

        # NOTE: Formatting a decimal.Decimal value will result in rounding.
        expected_results = ['294.00', '294.70', '294.35', '294.24']

        for i, cleaned_plan_data_input in enumerate(cleaned_plan_data_inputs):
            slcsp_rate = retrieve_slcsp_for_zipcode(
                zipcode,
                cleaned_zipcode_data_input,
                cleaned_plan_data_input
            )

            self.assertEqual(expected_results[i], slcsp_rate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_rating(instance, args):\r\n raw_rating = instance.rating(args)\r\n # Do string conversion here\r\n if not raw_rating:\r\n str_rating = 'N/A'\r\n else:\r\n str_rating = \"{0:.2f}\".format(raw_rating)\r\n return str_rating", "def _exchange_amount(amount, rate):\n return '%.2f' % round(float(amount) * float(rate), 2)", "def format_result(wf, converted_rate, decimal_places=4):\n fmt_val = locale.format('%%.%if' % decimal_places, converted_rate, True, True)\n\n # User divisor\n divisor = wf.settings.get(SETTINGS_DEFAULT_NUMBER_DIVISOR, '.')\n\n try:\n locale_divisor = locale.localeconv().get('decimal_point')\n except:\n # Numero de casas decimais pra pegar o divisor\n locale_divisor = fmt_val[-decimal_places]\n\n # when there are no decimal places, i don't format the number\n return fmt_val if decimal_places == 0 else fmt_number(fmt_val, divisor, locale_divisor)", "def isRate(self):\n return _libsbml.Rule_isRate(self)", "def test_rate_to_representation(self):\n rates = {\n \"tiered_rates\": self.ocp_data[\"rates\"][0],\n \"tag_rates\": {\n \"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR},\n \"tag_rates\": format_tag_rate(tag_values=[{\"value\": 1}]),\n },\n }\n for key, rate in rates.items():\n with tenant_context(self.tenant):\n serializer = RateSerializer(data=rate)\n RateSerializer._convert_to_decimal(rate)\n serializer.to_representation(rate)\n rate_info = rate.get(key)\n if isinstance(rate_info, dict):\n values = rate_info.get(\"tag_values\")\n else:\n values = rate_info\n for value in values:\n self.assertIsInstance(value[\"value\"], Decimal)", "def test_calculate_tst_rate_coefficient(self):\n self.assertEqual(\"%0.7f\" % self.kineticsjob.reaction.calculate_tst_rate_coefficient(self.TminValue),\n str(46608.5904933))\n self.assertEqual(\"%0.5f\" % self.kineticsjob.reaction.calculate_tst_rate_coefficient(self.Tmaxvalue),\n str(498796.64535))", "def two_digits_after_point(value):\n if isinstance(value, str):\n result = format(float(value), '.2f')\n return result\n elif isinstance(value, float):\n result = format(value, '.2f')\n return float(result)\n elif isinstance(value, int):\n value = float(value)\n print(value)\n result = format(value, '.2f')\n return float(result)\n else:\n raise Exception(\"type \" + str(type(value)) + \" not implemented yet\")", "def success_rate(self):\n success_rate_text = self.emulator.get_screen_text(ui_element=self.ui['ENHANCE_POTENTIAL_RATE'])\n success_rate = success_rate_text.replace(\"%\", \"\").replace(\" \", \"\")\n return float(success_rate)", "def test_default_w_decimals(self):\n self.assertEqual(currency(188.00), \"$188.00\")", "def test_show_rating(self):\n self.assertTrue(isinstance(self.show.rating, float))", "def normalize_interest_rate(value):\n if '%' in value:\n value = value.replace('%', '')\n\n try : \n return Decimal(value)/100\n except: InvalidOperation\n\n return None", "def getRate(self, context):\n try:\n return VTypeHelper.toDouble(context.getDevice(\"rate\").read())\n except:\n return 60.0", "def get_precision(self):\n ...", "def check_for_float(check):", "def test_rates_error_on_specifying_tiered_and_tag_rates(self):\n tag_values_kwargs = [{\"value\": 0.2}]\n tiered_rate = [{\"value\": 1.3, \"unit\": \"USD\"}]\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=tag_values_kwargs)\n self.basic_model[\"rates\"][0][\"tiered_rates\"] = tiered_rate\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n with 
self.assertRaises(serializers.ValidationError):\n self.assertFalse(serializer.is_valid(raise_exception=True))\n result_err_msg = serializer.errors[\"rates\"][0][\"non_field_errors\"][0]\n expected_err_msg = \"Set either 'tiered_rates' or 'tag_rates' but not both\"\n self.assertEqual(result_err_msg, expected_err_msg)", "def fidelity(order: Order) -> Decimal:\n if order.customer.fidelity >= 1000:\n return order.total() * Decimal('0.05')\n return Decimal(0)", "def test_no_decimals_01(self):\n self.assertEqual(currency(188.01, False), \"$188.01\")", "def sensible_format_data(self, value):\n if abs(value) > 1e4 or abs(value)<1e-3:\n s = '%1.4e' % value\n return self._formatSciNotation(s)\n else:\n return '%4.3f' % value", "def round_of_rating(number):\n return round(number * 2) / 2", "def test_error_on_rate_type(self):\n self.ocp_data[\"rates\"][0].pop(\"tiered_rates\")\n self.ocp_data[\"rates\"][0][\"bad_rates\"] = []\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_format_throughput_for_two_inputs_which_will_result_in_a_fraction(self):\n actual = format_throughput(20, 7)\n self.assertEqual(actual, \"7/20 (35%)\")", "def _p2(x, prec=2):\n if isinstance(x, int):\n return \"%d\" % x\n elif isinstance(x, float):\n s = (\"%%.%df\" % prec % x).rstrip('0').rstrip('.').lstrip()\n if s == '':\n s = '0'\n return s\n else:\n return \"%s\" % x", "def rate_str(r):\n try:\n value = float(r)\n except Exception as e:\n raise argparse.ArgumentTypeError('%s' % e)\n\n # FIXME this is serious hard-coded kludge for now until we can make this more robust\n if value != 500.0:\n raise argparse.ArgumentTypeError('rate, r, in sa/sec must be 500.0 for now')\n\n return value", "def convertRate(row):\n if pd.isnull(row):\n return 1.0\n elif ':' in str(row):\n rows = row.split(':')\n return 1.0 - float(rows[1]) / float(rows[0])\n else:\n return float(row)", "def _price_precision(self) -> int:\n return selectors.get_exchange(self.exchange).vars['precisions'][self.symbol]['price_precision']", "def test_company_EU_GR_vies_tax(self):\n self.assertEqual(self.policy.get_tax_rate(\"123456\", \"GR\"), (24, False))", "def format_price(self, price):\n precision = self._price_limits[3] or 8\n tick_size = self._price_limits[2] or 0.00000001\n\n adjusted_price = truncate(round(price / tick_size) * tick_size, precision)\n formatted_price = \"{:0.0{}f}\".format(adjusted_price, precision)\n\n # remove tailing 0s and dot\n if '.' in formatted_price:\n formatted_price = formatted_price.rstrip('0').rstrip('.')\n\n return formatted_price", "def convertRate(row):\n if row == 'null':\n return 1.0\n elif ':' in row:\n rows = row.split(':')\n return 1.0 - float(rows[1])/float(rows[0])\n else:\n return float(row)", "def show_precision(self):\r\n return round(f1_score(self.actual, self.predicted),2)", "def get_sample_rate(rate_string):\n if rate_string.endswith(\"%\"):\n rate = float(rate_string[:-1])/100\n elif '/' in rate_string:\n x, y = rate_string.split('/')\n rate = Decimal(x) / (Decimal(y) * Decimal('1.0'))\n else:\n rate = float(rate_string)\n if rate < 0 or rate > 1:\n raise ValueError('rate %r (=%.3f) must be 1%% <= rate <= 100%% ' % (rate_string, rate))\n return int(rate * 1000)" ]
[ "0.635572", "0.62793225", "0.61580366", "0.59902906", "0.59895074", "0.59418416", "0.59058857", "0.5860149", "0.58415556", "0.57754457", "0.57288945", "0.57193255", "0.5702747", "0.5680467", "0.56775796", "0.56678134", "0.5606319", "0.56051075", "0.5598586", "0.5582338", "0.5581807", "0.55577916", "0.5538385", "0.5535924", "0.5526758", "0.5525523", "0.55077344", "0.5505049", "0.5474301", "0.54645777" ]
0.6863517
0
list all hangouts, supply keywords to filter by title
def hangouts(bot, event, *args):
    text_search = " ".join(args)

    lines = []
    for convid, convdata in bot.conversations.get(filter="text:" + text_search).items():
        lines.append("<b>{}</b>: <em>`{}`</em>".format(convdata["title"], convid))

    lines.append(_('<b>Total: {}</b>').format(len(lines)))

    if text_search:
        lines.insert(0, _('<b>List of hangouts with keyword:</b> "<pre>{}</pre>"').format(text_search))

    yield from bot.coro_send_message(event.conv, "<br />".join(lines))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hangouts(bot, event, *args):\n\n text_search = \" \".join(args)\n line = \"<b>List of hangouts with keyword:</b> \\\"{}\\\"<br />\".format(text_search)\n\n for conv in bot.list_conversations():\n conv_name = get_conv_name(conv)\n if text_search.lower() in conv_name.lower(): # For blank keywords, returns True\n line += \"<b>{}</b>: <i>{}</i><br />\".format(conv_name, conv.id_)\n\n bot.send_message_parsed(event.conv, line)", "async def hangmanlist(self, ctx):\n path = join('.', 'data', 'hangman')\n if not isdir(path):\n raise GamesError(\"No hangman folder detected in data folder.\")\n categories = [f[:-4] for f in listdir(path) if f.endswith('.txt')]\n\n embed = discord.Embed(title=\"Hangman Categories\",\n description='\\n'.join(categories), \n color=discord.Colour.blue())\n await ctx.send(embed=embed)", "def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)", "def track_keywords(k_list):\n keywords = k_list\n stream = streaming.stream(\n on_tweet=print_tweet, on_notification=print_notice, track=keywords)", "def ls(query):\n\n if (query != None):\n query = query.lower()\n\n # Search in commands return list of all matches\n matches = [x for x in commands if ( query in x['alias'].lower() or query in x['command'].lower() or query in x['description'].lower())]\n else:\n matches = commands\n\n grouped = groupCommands(matches)\n\n for group in grouped:\n if(len(group) > 0):\n echoGroup(group[0]['group'])\n for match in group:\n echoCommand(match, commands.index(match))\n click.echo(\" \")", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def filter_jobs(jobs, keyword):\n for job in jobs:\n if keyword == \"all\":\n yield job\n elif job[\"name\"].find(keyword) != -1:\n yield job", "async def list(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole list\")", "async def list(self, ctx):\n\n cursor = await db.execute(\"Select MessageID, TimeEnding, Members, ChannelID from Giveaway \"\n \"where 
GuildID = ? and Ended = ?\", (ctx.guild.id, False))\n result = await cursor.fetchall()\n\n for i, tup in enumerate(result):\n try:\n msg = await ctx.guild.get_channel(tup[3]).fetch_message(tup[0])\n tup = list(tup)\n tup[0] = msg\n result[i] = tup\n except:\n result.remove(tup)\n await db.execute(\"Delete from Giveaway where MessageID = ?\", (tup[0],))\n await db.commit()\n\n if not result:\n return await send_embed(ctx, \"No active giveaways on this server.\", negative=True)\n\n embeds = []\n fields = []\n\n for i, tup in enumerate(result, start=1):\n fields.append((str(tup[0].id),\n f\"Prize: {tup[0].embeds[0].author.name}\\n\"\n f\"{tup[2]} possible winners\\n\"\n f\"Ends at {datetime.utcfromtimestamp(tup[1]).strftime('%Y-%m-%d %H:%M:%S')}\"))\n\n if i % 10 == 0 or i == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n title=\"Active Giveaways\"\n )\n\n for field in fields:\n embed.add_field(name=field[0], value=field[1], inline=False)\n\n embeds.append(embed)\n fields = []\n\n await self.bot.paginate(ctx, embeds)", "def FoodList(sc, event):\n count = 5\n after_command = event['text'][9:].lower()\n if re.match('^[1-9] ', after_command) or re.match('^[1-9]$', after_command):\n count = int(after_command[0])\n elif after_command and not re.match('^ ', after_command):\n sc.api_call('chat.postMessage', as_user='true', channel=event['channel'],\n text='Command needs to be !foodlist, !foodlist QUERY or '\n '!foodlistX QUERY where X is a number from 1-9')\n return\n text = 'The last %s things consumed:' % count\n search_term = event['text'][10:].lower().strip()\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n if search_term:\n cursor.execute(FoodListQuery(search_term, count, 'date'),\n (\"%\" + search_term + \"%\"))\n else:\n cursor.execute(FoodListQuery(search_term, count, 'date'))\n foodlist = cursor.fetchall()\n db.close()\n sc.api_call('chat.postMessage', as_user='true', channel=event['channel'],\n text=text)\n for item in reversed(foodlist):\n sc.api_call('chat.postMessage', as_user='true', channel=event['channel'],\n text='On %s, %s had: %s' % item)", "def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()", "def run_bot(bot):\n output = {key: None for key in subreddits}\n print(\"*\"*80)\n print(\" \"*10 + \"Running COVID-19 keyword mention scan for \" + str(datetime.date.today()))\n print(\"*\"*80+\"\\n\")\n print(\"-\"*80)\n for subreddit in subreddits:\n print(\"Scanning r/\" + subreddit + \"\\n\")\n count = 0\n current = bot.subreddit(subreddit)\n cutoff_time = datetime.date.today() - datetime.timedelta(1)\n cutoff_time = float(cutoff_time.strftime(\"%s\"))\n for submission in current.new():\n if submission.created_utc > cutoff_time:\n current_title = submission.title.lower()\n keyword_check = (\"coronavirus\" in current_title or\n \"covid\" in current_title or\n \"pandemic\" in current_title or\n \"quarantine\" in current_title)\n if keyword_check:\n count += 1\n print(submission.title + \"\\n\")\n output[subreddit] = count\n print(\"Total mentions of COVID-19 related keywords in r/\" + subreddit + \":\", count)\n print(\"-\"*80)\n write_output(output)", "def printHashtagsAndMentions(searchText=None, filterTerms=False, tweetLimit=0):\n tweets = db.Tweet.select()\n if searchText is not None:\n tweets = tweets.filter(db.Tweet.q.message.contains(searchText))\n tweets = tweets.limit(tweetLimit)\n\n hashtags, mentions, plain = 
getHashtagsAndMentions(tweets)\n\n if searchText and filterTerms:\n hashtags = Counter(\n {k: v for k, v in hashtags.items() if searchText.lower() in k.lower()}\n )\n mentions = Counter(\n {k: v for k, v in mentions.items() if searchText.lower() in k.lower()}\n )\n plain = Counter(\n {k: v for k, v in plain.items() if searchText.lower() in k.lower()}\n )\n\n # Unique word count for each area.\n hashtagWC = len(hashtags)\n mentionWC = len(mentions)\n plainWC = len(plain)\n\n print(\"Summary\")\n print(\"==============\")\n # Count items in the sliced selection since .count() does not work with\n # a limit.\n count = len(list(tweets)) if tweetLimit else tweets.count()\n print(\"{0:7,d} tweets\".format(count))\n print(\"{0:7,d} unique words\".format(hashtagWC + mentionWC + plainWC))\n print(\"{0:7,d} unique hashtags\".format(hashtagWC))\n print(\"{0:7,d} unique mentions\".format(mentionWC))\n print(\"{0:7,d} unique plain words\".format(plainWC))\n print()\n\n print(\"Hashtags\")\n print(\"========\")\n printCounterByCount(hashtags)\n print()\n\n print(\"Mentions\")\n print(\"========\")\n printCounterByCount(mentions)\n\n \"\"\"\n # Removal of stopwords and handling of URIs is needed to make this\n # useful.\n print 'Plain'\n print '========'\n printCounterByCount(plain)\n \"\"\"", "def shortsearch(term,location):\n results = search(term,location)['listings']\n result = []\n for business in results:\n result.append([business['id'],business['name'],\"Yellow Pages\"])\n return result", "async def banlist(self, ctx, *, username=None):\n bans = await ctx.guild.bans()\n list_of_matched_users = []\n for ban in bans:\n if username is None or username.lower() in ban.user.name.lower():\n list_of_matched_users.append(ban)\n\n entries = []\n for ban in list_of_matched_users:\n entries.append((f\"{ban.user.name}#{ban.user.discriminator}\", f\"<@!{ban.user.id}>: {ban.reason}\"))\n text_pages = paginator.FieldPages(ctx, entries=entries)\n await text_pages.paginate()", "def list_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def main(appinfo, args):\n parser = optparse.OptionParser(\n usage='%prog list [OPTS] [--] [SEARCH..]',\n )\n parser.add_option(\n '-v', '--verbose',\n help='show more information',\n action='count',\n )\n parser.add_option(\n '--tag',\n help='only list tickets having this tag',\n action='append',\n )\n parser.add_option(\n '--order',\n help='sort listing according to criteria',\n )\n parser.add_option(\n '--hide',\n metavar='FIELD',\n help='hide field from listing',\n )\n parser.add_option(\n '--show',\n metavar='FIELD',\n help='show field in listing',\n )\n (options, args) = parser.parse_args(args)\n\n if args:\n raise NotImplementedError(\n 'TODO Full text search not supported yet.')\n\n def list_tickets():\n for (mode, type_, object, basename) in storage.git_ls_tree(\n path='',\n children=True,\n ):\n yield basename\n\n for ticket in list_tickets():\n number = storage.get(os.path.join(ticket, 'number'))\n if number is not None:\n number = number.rstrip()\n ident = '#%s' % number\n else:\n ident = ticket[:7]\n description = storage.get(os.path.join(ticket, 'description')).rstrip()\n tags = set(storage.ls(os.path.join(ticket, 'tags')))\n if options.tag:\n must = frozenset(options.tag)\n if not tags & must:\n continue\n tags = tagsort.human_friendly_tagsort(tags)\n if options.verbose:\n raise NotImplementedError\n if options.order:\n raise NotImplementedError\n if options.show:\n raise NotImplementedError\n if options.hide:\n raise 
NotImplementedError\n (title, description) = util.extract_title(description)\n print '%(ident)s\\t%(title)s' % dict(\n ident=ident,\n title=title,\n )\n if tags:\n print textwrap.fill(\n ' '.join(tags),\n initial_indent=' ',\n subsequent_indent=' ',\n break_long_words=False,\n )", "async def list(self, ctx):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n message = []\n message.append(\"```\\n\")\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n if len(self.twitch_streams) > 0:\n for stream in self.twitch_streams:\n message.append(stream[\"NAME\"] + \"\\n\")\n else:\n message.append(\"No streams found!\")\n message.append(\"```\")\n output = ''.join(message)\n await self.bot.say(output)\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "def all_jokes(category):\n\n jokes = []\n\n if category is not None:\n c = Category.find_by(\"name\", category)\n\n if c:\n jokes = c.jokes()\n else:\n jokes = Joke.all()\n\n table = PrettyTable([\"Joke\", \"Read at\"])\n\n for joke in jokes:\n table.add_row([joke.value, joke.created_at])\n\n click.echo(table)", "def challenge_list(request, keywords, page_num, num_per_page):\n\t\n\tif page_num == None:\n\t\tpage_num = 1\n\telse:\n\t\ttry:\n\t\t\tpage_num = int(page_num)\n\t\texcept ValueError:\n\t\t\tpage_num = 1\n\t\t\n\tif num_per_page == None:\n\t\tnum_per_page = 20\n\telse:\n\t\ttry:\n\t\t\tnum_per_page = int(num_per_page)\n\t\texcept ValueError:\n\t\t\tnum_per_page = 20\n\t\n\tchallenges_list = None\n\t\n\tif(keywords != None):\n\t\tchallenges_list = Challenge.objects.filter(name__contains = keywords)\n\telse:\n\t\tchallenges_list = Challenge.objects.all()\n\t\t\n\tchallenges_list = challenges_list[(page_num-1) * num_per_page : (page_num) * num_per_page]\n\t\n\tcontext = RequestContext(request, {\"challenges_list\": challenges_list})\n\treturn render_to_response(\"encourage/challenge_list.html\", context)", "def search_command():\n listing.delete(0, END)\n for row in backend.search(title_text.get(), \n author_text.get(), \n year_text.get(), \n isbn_text.get()):\n listing.insert(END, row)", "def do_list(self, args):\n if args.option == 'config':\n print(list_config())\n if args.option == 'queries':\n for k,v in list_queries().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'jobs':\n update_jobs(CLI_GLOBALS.ENGAGEMENT)\n for k,v in list_jobs().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'results':\n for i in list_results():\n print(i)\n if args.option == 'key':\n for k,v in list_key().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'engagement':\n print(list_engagement())", "async def list(self, ctx):\r\n try:\r\n if ctx.message.server.id not in self.adkillr:\r\n await self.bot.say(\"There are no filters set for this server.\")\r\n else:\r\n await self.bot.say(\"The current filters are\\n{}.\".format(\", \".join(self.adkillr[ctx.message.server.id]['filters'])))\r\n except KeyError:\r\n await self.bot.say(\"There are no filters set for this server.\")", "def main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Print the unique terms\n across Tweet messages in the db. 
Leave\n arguments unset to show all data.\"\"\"\n )\n parser.add_argument(\n \"-s\",\n \"--search\",\n metavar=\"TEXT\",\n help=\"\"\"Filter the Tweet records to those which contain the input\n TEXT anywhere in their message text, ignoring case. Enclose the\n argument in single quotes to escape a hashtag or to include\n spaces.\"\"\",\n )\n parser.add_argument(\n \"-f\",\n \"--filter\",\n action=\"store_true\",\n help=\"\"\"If flag is supplied, filter the unique terms in the *output*\n list to only those which contain the input term (requires TEXT to\n be set). This will tend to provide much shorter lists, but is\n useful for identifying hashtags or handles which are similar\n because they share a common string. When using --filter, it is\n recommended to keep TEXT input short and general (excluding\n @ or # sign) in order to provide the broadest range of related\n results.\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n type=int,\n default=0,\n help=\"\"\"Max count of tweets to select, selected from tweets order\n by most recent post time first. The terms will be derived from\n this sample of tweets. Omit argument or set to 0 to use all tweets\n in the db.\"\"\",\n )\n\n args = parser.parse_args()\n\n printHashtagsAndMentions(\n searchText=args.search, filterTerms=args.filter, tweetLimit=args.limit\n )", "def findall(ctx):\n _check_for_commands(ctx.obj[\"keep_path\"])\n keep = ctx.obj[\"keep\"]\n results = {}\n for kw, command_ids in keep[\"keyword2Ids\"].items():\n results[kw] = []\n for command_id in command_ids:\n command = keep[\"id2Command\"][str(command_id)]\n explanation = keep[\"id2Explanation\"][str(command_id)]\n results[kw].append({ \n \"id\": command_id,\n \"command\": command,\n \"explanation\": explanation\n })\n _show_results(results)", "def read_all(search):\n if search == \"\":\n print(\"No searching for items\")\n # Create the list of trays from our data\n return [TRAYS[key] for key in sorted(TRAYS.keys())] # return this sorted by tray name i.e. A, B, C ,... or 1, 2, 3, ...\n else:\n search_words = search.split() # get a list of individual words\n\n # TODO: probably want something which removes any basic word ('a', 'the', ...) from search_words\n # since these might give loads of unwanted matches in our search\n\n # get a list of tuples: tray names paired with the number of word matches. Then sort these tuples by the number of matches. 
\n # We can then use the order of the names\n # lol Haskell says hello:\n search_ordered_pairs = sorted([(name, num_word_matches(search_words, TRAYS[name][\"info\"].split())) for name in TRAYS.keys()], key=lambda x:x[1], reverse=True)\n print(search_ordered_pairs)\n return [TRAYS[pair[0]] for pair in search_ordered_pairs] # trays are ordered appropriately", "def search_shopping_list_by_title_keyword(title):\n all_shopping_lists = db.session.query(ShoppingList).filter(ShoppingList.title.like(('%'+title+'%'))).all()\n return create_shopping_list_output(all_shopping_lists)", "def ls(filter=None):", "def api_search(title: str) -> Dict[str,List[AnimeThemeAnime]]:\n if not title:\n return None # an empty anime title\n \n r = session.get(URL.format(title))\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 429:\n raise AnimeThemesTimeout('Got 429 error from animethemes.moe, please wait 30s to get the rest of entries.')\n else:\n r.raise_for_status()", "def twitch(self, irc, msg, args, things):\n #kawaiirice, fxomoonglade, hashe, liquidtlo, liquidhuk, liquidhaypro, spanishiwa\n \n searchurl='http://api.justin.tv/api/stream/list.json'\n headers = utils.web.defaultHeaders\n usernames=['ballisticautistic','nycc','zekklp','luminousinverse','riotgames']\n if things:\n channels=usernames+\" \"+things\n else:\n channels=usernames\n islive=[]\n out=[]\n\n\n opts = {}\n opts['channel']=','.join(channels)\n fd = utils.web.getUrlFd('%s?%s' % (searchurl,\n urllib.urlencode(opts)),\n headers)\n json = simplejson.load(fd)\n fd.close()\n\n if not json:\n # Most likely no streams are live\n pass\n else:\n for c in json:\n channelurl=c['channel']['channel_url'].encode('utf-8')\n if 'live' in c['name']:\n if c.get('title'):\n title=c['title'].encode('utf-8')\n else:\n title='(no title)'\n out.append('%s %s' % (channelurl, title))\n if out:\n irc.reply(' || '.join(out))\n else:\n irc.reply('No current live streams.')" ]
[ "0.73923945", "0.569828", "0.5560075", "0.5533528", "0.55277205", "0.5525617", "0.54996574", "0.5444355", "0.5396228", "0.5325632", "0.5320193", "0.5297989", "0.52948684", "0.52716166", "0.525805", "0.5255838", "0.5218868", "0.5202857", "0.5202308", "0.51878214", "0.5180707", "0.51640785", "0.51541007", "0.51343596", "0.5128738", "0.50907314", "0.50845444", "0.5071641", "0.5066437", "0.50660866" ]
0.7244985
1
reload config and memory, useful if manually edited on running bot
def reload(bot, event, *args): yield from bot.coro_send_message(event.conv, "<b>reloading config.json</b>") bot.config.load() yield from bot.coro_send_message(event.conv, "<b>reloading memory.json</b>") bot.memory.load()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reload(bot, event, *args):\n bot.config.load()\n bot.memory.load()", "def reload_config(self):\n pass", "def rehash(self):\n logging.info(\"Rehashing started\")\n modules = self.cmd_plugins.get_modules()\n CommandBot.pause(self)\n PlugBot.stop(self)\n\n logging.info(\"Reloading config file\")\n self.botconfig = self.load_config(self.config_file)\n for module in modules:\n reload(module)\n CommandBot.reset(self)\n\n PlugBot.start(self)\n CommandBot.resume(self)\n self.join_rooms()", "def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])", "def reload(self):\n self.load_config()\n # Seems we need to explicitly refresh this\n if self.main_instance:\n self.main_instance.config = self.config", "def reload(self):\n with open(self._config) as f:\n self.data = json.load(f)", "def reload(self):\n self.read(self._cfg_path)", "def refresh_configuration(self):\n pass", "def reload_configurations(self) -> None:\n ...", "def _refreshconfig(self):\n self.config = ConfigGenerator(os.path.join(self.rundir, const.CONFIG_FILE))", "def handle_adminreloadconfig(bot, event):\n try:\n bot.cfg.reload()\n getmainconfig().reload()\n except Exception, ex: handle_exception()\n event.done()", "def reload_config():\n old_env = os.environ.copy()\n\n yield\n\n os.environ = old_env\n importlib.reload(config)", "def reload_config(self):\n if self.faucet is not None:\n self.faucet.reload_config(None)", "def reload(self, cfg):\n self.init_cfg_data(cfg=cfg)", "def refresh_config(self):\n with open(config_name, 'rb') as f:\n self.CONFIG = simplejson.load(f)\n\n return self", "def resetConfiguration(self):\n exec(config.loadConfiguration(\"console.cfg\").read())", "def reload(self):", "def reload(self):", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def reload_config(self):\n for channel in self._channel_frames:\n self._channel_frames[channel].reload_config()", "def reload(self):\n\n pass", "def reset_config():\n return _set_config(_gen_config())", "def update(self):\n self.save_config_file()", "def comando_reload(self):\r\n\tif args.opcao == 'gne':\r\n configs = self.reload_gne_framework(args.file, args.loja, args.serie, args.nnf)\r\n return configs\r\n else:\r\n configs = self.reload_daruma_framework(args.file)\r\n return configs", "def conf_update(self):\n pass", "def refresh_config(self):\n self._user_config = UserConfig(None)", "async def reset_config(self):\n self.config = {\"enable_auto_gen\": False, \"enable_world_barrier\": False}\n await shared.event_handler.call_async(\"world:reset_config\")\n self.gamerule_handler = mcpython.common.world.GameRule.GameRuleHandler(self)", "def reload_eoxserver_config():\n global _cached_config, _last_access_time\n _, eoxs_path, _ = imp.find_module(\"eoxserver\")\n paths = [\n join(eoxs_path, \"conf\", \"default.conf\"),\n join(prefix, \"eoxserver/conf/default.conf\"),\n get_instance_config_path()\n ]\n\n logger.info(\n \"%soading the EOxServer configuration. 
Using paths: %s.\"\n % (\"Rel\" if _cached_config else \"L\", \", \".join(paths))\n )\n\n with config_lock:\n _cached_config = RawConfigParser()\n _cached_config.read(paths)\n _last_access_time = time()", "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def reload_config(self):\n for page in self._notebook.pages():\n if hasattr(page, \"_netframe\"):\n page._netframe.reload_config()" ]
[ "0.82988477", "0.7730746", "0.7237101", "0.7000755", "0.6990611", "0.69861", "0.69167477", "0.69043493", "0.68545073", "0.68489516", "0.67825955", "0.6778113", "0.67457616", "0.6589509", "0.65724033", "0.6516148", "0.6433054", "0.6433054", "0.6330398", "0.62764126", "0.62304264", "0.6201959", "0.61998385", "0.6199058", "0.6192222", "0.61872184", "0.6186426", "0.61818737", "0.6167271", "0.61430085" ]
0.7807844
1
Initializes a Map with the given number of buckets.
def new(num_buckets=256): aMap = [] #creating empty list aMap for i in range(0, num_buckets): aMap.append([]) #append num_buckets into aMap return aMap
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new(num_buckets=256):\n\t#sets aMap variable to an empty list\n\t#then fills that list with the specified number of other empty lists ('buckets') \n\t#returns the new aMap\n\taMap = []\n\tfor i in range(0, num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def new(num_buckets=256):\n aMap=[]", "def new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]", "def __init__(self):\n self.buckets = 1009\n self.table = [[] for _ in range(self.buckets)]", "def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]", "def __init__(self, values=1000000):\n self.size = int(sqrt(values))\n self.buckets = [None] * self.size", "def __init__(self):\n self.hashmap = [[] for _ in range(self._cap)]", "def init_buckets(len2freq):\n source = Counter(len2freq)\n\n if not len(source):\n raise ValueError('Empty length-to-frequency map')\n\n if not all(map(lambda x: isinstance(x, int), source.keys())):\n raise ValueError('Keys of length-to-frequency must be integers')\n\n if not all(map(lambda x: isinstance(x, int), source.values())):\n raise ValueError('Values of length-to-frequency must be integers')\n\n denominator = 8\n lengths = sorted(source.keys())\n\n buckets = []\n for lng in lengths:\n b = int(np.ceil(lng / denominator)) * denominator + 1\n if not len(buckets) or buckets[-1][0] != b:\n buckets.append((b, {}))\n buckets[-1][1][lng] = source[lng]\n\n return buckets", "def __init__(self,n):\n\t\tself._dict={}\n\t\tfor i in range(n):\n\t\t\tself._dict[i]=[]", "def __init__(self):\n self.MAPSIZE = 10000\n self.map = [ None for _ in range(self.MAPSIZE) ]", "def __init__(self, init_size=8):\n self.size = 0\n self.buckets = [LinkedList() for i in range(init_size)]", "def __init__(self):\n # HashMap内部元素个数\n self.elements_count: int = 0\n # HashMap内部bucket数组的长度\n self.capacity: int = 16384\n # HashMap内部的数组, 用dummyHead的好处是Python没有显示指出引用修改,还是固定bucket数组,只修改数组各元素的next指针更好,不会出现UB\n # 缺点是初始化好慢啊,容易超时\n self.bucket: List[ListNode] = [ListNode(key=-1, value=0)] * self.capacity", "def __init__(self):\n self.hashmap = [[[],[]] for _ in range(self.N)]", "def __init__(self):\n self.size = 10000\n self.hashmap = [None] * self.size", "def __init__(self):\n self.buckets = [-1] * 10\n self.length = len(self.buckets)", "def __init__(self, buckets = 200):\n self.data = [None] * buckets\n self.slot = [None] * buckets\n self.size = buckets", "def Dictionary_create(nMarkers, markerSize):\n pass", "def __init__(self):\n self.buckets = collections.defaultdict(list)", "def __init___0(self, map):\n super(LongObjectHashMap, self).__init__()\n self.__init__()\n putAll(map)", "def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def __init__(self, width, growth_factor, num_finite_buckets):\n\n if num_finite_buckets < 0:\n raise ValueError('num_finite_buckets must be >= 0 (was %d)' %\n num_finite_buckets)\n\n self.width = width\n self.growth_factor = growth_factor\n self.num_finite_buckets 
= num_finite_buckets\n self.total_buckets = num_finite_buckets + 2\n self.underflow_bucket = 0\n self.overflow_bucket = self.total_buckets - 1\n\n self._lower_bounds = list(self._generate_lower_bounds())", "def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000", "def __init__(self, init_size=8):\n # Create a new list (used as fixed-size array) of empty linked lists\n self.buckets = [LinkedList() for _ in range(init_size)]", "def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []", "def __init__(self):\n self.bucket_of_keys = {}\n self.buckets = LinkedList()", "def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []" ]
[ "0.8146247", "0.8119073", "0.7875877", "0.7073408", "0.67659897", "0.6762607", "0.6628239", "0.66029876", "0.6587286", "0.6574333", "0.65716136", "0.6556335", "0.64881253", "0.6487887", "0.64756536", "0.64513785", "0.64458674", "0.637114", "0.6361271", "0.6350683", "0.6333639", "0.6312283", "0.62940425", "0.62812096", "0.6278981", "0.62742656", "0.6257498", "0.6248196", "0.62456775", "0.61892295" ]
0.82026
0
Returns the index, key, and value of a slot found in a bucket. Returns -1, key, and default (None if not set) when not found.
def get_slot(aMap, key, default=None): bucket = get_bucket(aMap, key) for i, kv in enumerate(bucket): k, v = kv if key == k: return i, k, v return -1, key, default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_slot(aMap, key, default=None):\n\t#now that we know which bucket the key could be in\n\t#we iterate through all the elements of that bucket until it finds the key\n\t\n\tbucket = get_bucket(aMap, key)\n\t\n\tfor i, kv in enumerate(bucket):\n\t#enumerate returns a tuple containing the count (starting at 0) \n\t#and values obtained from iterating over the sequence\n\t\tk, v = kv\n\t\t#unpacks the elements in the bucket into 'key' and 'value'\n\t\tif key == k:\n\t\t\treturn i, k, v \n\t#if the slot does not contain the key, then it returns \"none\"\n\treturn -1, key, default", "def get_slot(aMap,key,default=None):\n\tbucket=get_bucket(aMap,key)\n\t\n\tfor i,kv in enumerate(bucket):\n\t\tk,v=kv\n\t\tif key==k:\n\t\t\treturn i,k,v\n\t\t\n\treturn -1,key,default", "def _bucket_getitem(self, j, k):\n bucket = self._table[j]\n if bucket is None: # no match found\n raise KeyError(\"Key Error: \" + repr(k))\n return bucket[k]", "def bucket_indexof(table, key):", "def lookup(self, key, default=None):\n hash_key = hash(key) % self.length\n bucket = self.array[hash_key]\n if not bucket:\n return default\n for key_val_pair in bucket:\n k, v = key_val_pair\n if k == key:\n return v", "def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None", "def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None", "def get(self, key):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n return v\n return -1", "def get(aMap, key, default=None):\n\t#assigns variables to the same values we received from the get_slot function\n\t#index of the slot, the key and the value it found.\n\ti, k, v = get_slot(aMap, key, default=default)\n\t#but all we care about is the value, so that's all we return\n\t#this is _basically_ the same thing as get_slot, but since most people\n\t#only care about the value from this kind of function, that's all we return\n\treturn v", "def get(self, key):\n hash_key = self._hash_function(key) % self.capacity # returns hashed keys corresponding bucket index\n bucket = self._buckets[hash_key] # get bucket for that index\n\n current = bucket.head # set bucket.head to variable as not to override linked list\n\n while current is not None: # iterate through linked list until value is found, or returns None\n if current.key == key:\n return current.value\n current = current.next", "def get(self, key):\n index = key % self.size\n\n cur = self.bucket[index]\n while cur:\n if cur.key == key:\n return cur.val\n cur = cur.next\n return -1", "def get(self, key):\n # Find bucket where given key belongs\n # Check if key-value entry exists in bucket\n # If found, return value associated with given key\n # Otherwise, raise error to tell user get failed\n # Hint: raise KeyError('Key not found: {}'.format(key))\n key_bucket = self._bucket_index(key)\n\n for key_value_tuple in self.buckets[key_bucket].items():\n if key_value_tuple[0] is key:\n return key_value_tuple[1]\n\n raise KeyError('Key not found: {}'.format(key))", "def get(self, key):\r\n index = self.hash(key)\r\n l = self.bucket[index]\r\n while l.next:\r\n if l.next.key == key:\r\n return l.next.val\r\n l = l.next\r\n return -1", "def get_item(self, key):\n search_slot = self.count_hash(key, len(self.slots))\n\n if self.slots[search_slot] == key:\n data = self.data[search_slot]\n elif isinstance(self.slots[search_slot], tuple):\n index_tuple = 
(self.slots[search_slot].index(key))\n data = (self.data[search_slot][index_tuple])\n else:\n data = None\n\n return data", "def __getitem__(self, item):\n bucket = self._buckets[self._index(item)]\n for node in bucket.linked_list:\n bucket_object_key, bucket_object_value = node.value\n assert isinstance(bucket_object_key, BucketObject)\n assert isinstance(bucket_object_value, BucketObject)\n if bucket_object_key.load_value() == item:\n key_list_node, value_list_node = (\n self._object_to_list_node[bucket_object_key],\n self._object_to_list_node[bucket_object_value],\n )\n # update in-memory and disk linked list\n self._in_memory_objects.remove_and_append(key_list_node)\n self._in_memory_objects.remove_and_append(value_list_node)\n self._disk_objects.remove_and_append(key_list_node)\n self._disk_objects.remove_and_append(value_list_node)\n # balance memory usage\n self._balance()\n return bucket_object_value.load_value()\n raise KeyError(\"Key `{}` is not exists\".format(item))", "def __getitem__(self, key):\n\n bucket_key = self.key_for_bucket(key)\n return self.buckets[bucket_key][key]", "def get(aMap,key,default=None):\n\ti,k,v=get_slot(aMap,key,default=default)", "def get(self, key):\n # TODO: Check if the given key exists and return its associated value\n hash_key = self._bucket_index(key) # Gets the index of the key\n\n if self.buckets[hash_key].is_empty() is False: # If the hask_key exists\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n return key_value_pair[1] # Return the value\n raise KeyError(\"Key doesn't exist\") # If key doesn't exist, return None", "def get(self, key):\n index = int((keyIndex(key) & (self.BUCKET_SIZE - 1)))\n inner = self.keys[index]\n if inner == None:\n return None\n i = 0\n while len(inner):\n innerKey = inner[i]\n if innerKey == self.EMPTY_KEY:\n return None\n elif innerKey == key:\n return self.values[index][i]\n i += 1\n return None", "def get(self, key):\n if key < self.length:\n return self.buckets[key]\n return -1", "def find(self, value):\n bucketNum = self.__hash(value)\n originalBucketNum = bucketNum\n if self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] == value:\n return self.__buckets[bucketNum]\n else:\n bucketNum = self.__rehash(bucketNum)\n while self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] != value and \\\n bucketNum != originalBucketNum:\n bucketNum = self.__rehash(bucketNum)\n if self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] == value:\n return self.__buckets[bucketNum]\n else:\n return None", "def get(self, key: int) -> int:\n hashKey = key % 1000\n if self.bucket[hashKey]:\n node = self.bucket[hashKey]\n while node:\n if node.pair[0] == key:\n return node.pair[1]\n node = node.next\n return -1", "def get(aMap, key, default=None):\n\ti, k, v = get_slot(aMap, key, default)\n\treturn v", "def get(aMap, key, default=None):\n\ti, k, v = get_slot(aMap, key, default=default)\n\treturn v", "def get(self, element):\n bucket_index = self._bucket_index(element)\n return self.buckets[bucket_index].find(lambda value: value == element)", "def _get_bucket_key(self, download_meta):\n if 'bucket' in download_meta:\n resolved_bucket = download_meta['bucket']\n if resolved_bucket != self.bucket:\n log.error(f'Bucket mismatch found with blobs, overriding metadata and using bucket {self.bucket}')\n resolved_bucket = self.bucket\n return resolved_bucket, download_meta['key']\n else:\n return self.bucket, 
download_meta['blob_id']", "def _findBucket(self, node):\n for bucket in buckets:\n if bucket.inRange(node):\n return bucket\n #if bucket.low <= node and node <= bucket.high:\n # return bucket\n return None", "def get(self, key):\n if type(key) != str:\n raise TypeError(\"This is not the string you're looking for!\")\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n try:\n return self.bucket_list[number % self.bucket_number].search(stored_key).stored_value\n except AttributeError:\n return None", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def get_bucket_location(Bucket=None):\n pass" ]
[ "0.79758656", "0.773949", "0.6553314", "0.64808375", "0.6404719", "0.62866116", "0.62866116", "0.62585574", "0.6220723", "0.6191381", "0.6135561", "0.61248016", "0.61166006", "0.6086522", "0.5985429", "0.5919926", "0.5919475", "0.5899041", "0.5883153", "0.58819985", "0.58608925", "0.5834038", "0.58337355", "0.5833646", "0.58215004", "0.5764631", "0.574589", "0.57394856", "0.573557", "0.5720742" ]
0.79655206
1
this keyword is used to grep keywords in a file, then return matching lines.\n filePath is the full path of the file.\n searchKeyWord is the keyword filter.\n isPattern is the flag indicate if searchKeyword is a normal search string or regular expression pattern.\n isCaseSensitive is the flag indicate if searchKeyword is case sensitive.\n timeout default to 0, mean not timeout wait.\n retry_interval is default to 20, mean every 20s will check the log once.\n Fromline default to 0, which mean from which line to search the content.\n The return value is a list for all the matched lines content.\n
def grep_local_file(filePath, searchKeyWord, isPattern=True, isCaseSensitive=True, Fromline=0, timeout=0, retry_interval=0): returnMatchLines = [] current = time.time() timout_value = float(timeout) maxtime = current + timout_value while (current <= maxtime): fileObj = open(filePath, "r") allLines = fileObj.readlines() fileObj.close() allLines=allLines[int(Fromline):] if isPattern == False: for line in allLines: if isCaseSensitive and line.find(searchKeyWord) != -1: returnMatchLines.append(line) if not isCaseSensitive and line.lower().find(searchKeyWord.lower()) != -1: returnMatchLines.append(line) else: if isCaseSensitive == False: pattern = re.compile(searchKeyWord, re.I) else: pattern = re.compile(searchKeyWord) for line in allLines: match = pattern.search(line) if match: returnMatchLines.append(line) if len(returnMatchLines) < 1: if timout_value == 0: break current = time.time() time.sleep(float(retry_interval)) else: break return returnMatchLines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines", "def __grep(findwhat, filename, ignorecase, regexp):\n\t\tresult = []\n\t\ttry:\n\t\t\tencoding = \"utf8\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\texcept FileNotFoundError:\n\t\t\treturn result\n\t\texcept UnicodeDecodeError:\n\t\t\tencoding = \"latin-1\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\t\t\n\t\tif __search(findwhat, content, ignorecase, regexp):\n\t\t\tlines = open(filename,\"r\", encoding=encoding).readlines()\n\t\t\tlineNumber = 1\n\t\t\tfor line in lines:\n\t\t\t\tif __search(findwhat, line, ignorecase, regexp):\n\t\t\t\t\tresult.append((filename, lineNumber, line.strip()))\n\t\t\t\tlineNumber += 1\n\t\treturn result", "def find_all_entries(\n yaml_file: str, pattern: Pattern, pattern_keyword: str) -> List[str]:\n matches = []\n with open(yaml_file, 'r') as data:\n lines = data.readlines()\n\n for idx, line in enumerate(lines):\n match = re.search(pattern, line)\n if match is not None:\n matches.append(match.group(pattern_keyword))\n return matches", "def text_file_search_all(cls, file, search_for, encoding='utf-8', search_using_regex=False,\n\t\t\t\t\t\t\t search_from_front_to_back=False, count=0):\n\n\t\tif search_for == '':\n\t\t\treturn None\n\t\ttry:\n\t\t\tresult = []\n\t\t\twith open(file, mode='r', encoding=encoding) as file_reader:\n\t\t\t\tfor line in file_reader if search_from_front_to_back else cls.reverse_file_reader(file_reader,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t save_memory=True):\n\t\t\t\t\tif not search_using_regex:\n\t\t\t\t\t\tif not line.find(search_for) == -1:\n\t\t\t\t\t\t\tresult.append(search_for)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tsearch_for_re = re.compile(search_for)\n\t\t\t\t\t\tre_result = re.search(search_for_re, line)\n\t\t\t\t\t\tif re_result:\n\t\t\t\t\t\t\tresult.append(re_result.group())\n\t\t\t\t\tif 0 < count == len(result):\n\t\t\t\t\t\tbreak\n\t\t\t\tif not result:\n\t\t\t\t\treturn None\n\t\t\t\telse:\n\t\t\t\t\treturn result\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn None", "def get_keyword_files(\n self,\n filename_keywords=\"keywords\",\n read_size=1024*1024,\n offset=50):\n import ahocorasick\n\n A = ahocorasick.Automaton()\n with open(filename_keywords, \"r\") as f:\n while True:\n word = f.readline()\n if not word:\n break\n A.add_word(word.strip(), word.strip())\n\n A.make_automaton()\n\n for file in self.filelist:\n with open(file[\"filename\"], \"r\") as f:\n matches = list()\n buff = f.read(read_size)\n for match in A.iter(buff):\n pos_cur = match[0]\n pos_start = max(match[0]-offset, 0)\n pos_end = min(match[0]+offset, read_size)\n offset_start = buff[\n pos_start:pos_cur-len(match[1])+1\n ].find(\"\\n\")\n offset_end = buff[pos_cur+1:pos_end].rfind(\"\\n\")\n\n if offset_start >= 
offset:\n offset_start = 0\n if offset_end <= 0:\n offset_end = offset\n offset_end = offset - offset_end\n\n matched_text = buff[\n pos_start+offset_start:pos_cur-len(match[1])+1\n ] + \\\n bcolors.FAIL + \\\n buff[pos_cur-len(match[1])+1:pos_cur+1] + \\\n bcolors.ENDC + \\\n buff[pos_cur+1:pos_end-offset_end]\n\n matches.append((matched_text.replace(\"\\n\", \" \"), match[1]))\n if len(matches) > 0:\n yield (file, matches)", "def lookup_keywords(filename):\n keywords = []\n start_of_table = r'\\*+\\s+'\n start_of_kw_table = r'\\*+\\s+Keyword'\n in_kw_table = False\n f = open(filename, \"r\")\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue # skip comments and blanks\n if re.match(start_of_kw_table, line):\n in_kw_table = True # table started\n continue\n if re.match(start_of_table, line) and not re.match(start_of_kw_table, line):\n in_kw_table = False # table ended\n continue\n if line.startswith(' '):\n continue # skip content rows\n if in_kw_table:\n keywords.append(line)\n f.close()\n return keywords", "def fsearch(self,pattern,msg=None, killon=None ):\n import re\n current=0\n cpat=re.compile(pattern)\n\n for num,line in enumerate(self.f):\n if killon:\n kill = re.search(killon,line)\n if kill:\n # the kill phrase was found first, so die. \n return False\n current=re.search(cpat,line)\n if current:\n if msg:\n print msg\n break\n if not current:\n# print 'ERROR: Requested pattern ('+pattern+') not found in file: <'+self.f.name+ '>. Check file for correct structure. Exiting...'\n return False\n\n return line", "def find_files(self, where_clause, keywords=[]):\n files_by_hash = {}\n old_factory = self.connection.row_factory\n self.connection.row_factory = sqlite3.Row\n if keywords:\n kw_clause = 'keywords._keyword in (%s) and ' % ','.join(\n ['\"%s\"'%kw for kw in keywords])\n else:\n kw_clause = ''\n query = \"\"\"select distinct files.* from files left join\n (\n keyword_x_file inner join keywords\n on keyword_x_file._keyword_id=keywords._keyword_id\n )\n on files._file_id=keyword_x_file._file_id\n where \"\"\" + kw_clause + where_clause\n rows = self.connection.execute(query).fetchall()\n self.connection.row_factory = old_factory\n return rows", "def search_text_in_log_file(self, text) :\n try:\n with open(self.file_path_name, 'r') as searchfile:\n for line in searchfile:\n if text in line:\n return True \n return False \n except: \n print 'The log : ' + self.file_path_name + 'cannot be opened'", "def grep(filename, pattern, verbose=False):\n with open(filename, \"r\") as file:\n for line in file:\n if re.search(pattern, line):\n if verbose:\n return line\n else:\n return True", "def search(self, user_pattern):\n regex_pattern = re.compile(user_pattern)\n matching_logs = Logs()\n\n for line in self:\n if re.match(regex_pattern, line):\n matching_logs.append(line)\n\n return matching_logs", "def search_file_all(pattern, filename):\n if not os.path.exists(filename):\n raise Exception(\"Can't open file for reading! 
\" + filename)\n\n matches = []\n fh = open(filename, \"r\")\n for line in fh:\n allmatch = re.findall(pattern, line)\n if allmatch:\n matches += allmatch\n\n fh.close()\n return matches", "def grep(pattern, filename):\n rx = re.compile(pattern)\n with open(filename, 'r') as file:\n for line in file:\n if rx.search(line):\n yield line", "def _parse_relevant_lines(cls, conf_file_path):\n # Make a dictionary with the keys of find_words corresponding with\n # empty array as a place holder.\n relevant_lines = dict([(word, []) for word in cls.FIND_WORDS])\n # Now locate the relevant lines in this file and keep the found\n # pattern matches.\n with open(conf_file_path, 'r') as config:\n for line in config:\n # Strip whitespaces\n line = line.strip(\" \\t\")\n # Skip comment lines..\n if line.startswith('#'):\n continue\n for word, pattern in cls.FIND_WORDS.items():\n if \"{} \".format(word) not in line:\n continue\n matches = pattern.findall(line)\n if matches:\n # We only need the first capturing group.\n matches = [match[0].strip(\" \\t\") for match in matches]\n # We will only need the matched strings later on.\n relevant_lines[word] += matches\n return relevant_lines", "def search(self,path,key_words):\t#key_words must be tuple\n\t\ttry:\n\t\t\tall=os.walk(path,False)\t#os.walk() is a generator , the return is a tuple which is (dirpath,dirnames,filenames)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tfor item in all:\n\t\t\t\tfilepath=item[0]\n\t\t\t\tfor filename in item[2]:\n\t\t\t\t\tfor key_word in key_words:\t#find all key_word\n\t\t\t\t\t\tif key_word in filename.lower():\t#ignore case of word , and only search filename\n\t\t\t\t\t\t\tself.result.append(os.path.join(filepath,filename))", "def search(self, values: dict):\n self.results.clear()\n self.matches, self.records = 0, 0\n # Extensions to be ignored.\n if values[\"-EXT-\"].endswith(\";\"):\n values[\"-EXT-\"] = values[\"-EXT-\"][:-1]\n if values[\"-DIR-\"].endswith(\";\"):\n values[\"-DIR-\"] = values[\"-DIR-\"][:-1]\n ignore_extensions = tuple(values[\"-EXT-\"].split(\";\")) \\\n if values[\"-EXT-\"] else ()\n # Folders to be ignored.\n ignore_folders = tuple(\"/\" + folder + \"/\"\n for folder in values[\"-DIR-\"].split(\";\")\n if values[\"-DIR-\"])\n \n # Check whether to ignore or search dot files/folders\n if values[\"-DOT-\"]:\n ignore_folders = (\"/.\",) + ignore_folders\n \n if values[\"CONTAINS\"]:\n function = self.contains\n elif values[\"STARTSWITH\"]:\n function = self.startswith\n else:\n function = self.endswith\n \n search_term = values[\"TERM\"].lower()\n for path, files in self.file_index:\n if any(ignored_folder in path + \"/\"\n for ignored_folder in ignore_folders):\n continue\n for file in files:\n if file.endswith(ignore_extensions) or \\\n values[\"-DOT-\"] and file.startswith(\".\"):\n continue\n self.records += 1\n if function(file.lower(), search_term):\n result = os.path.join(path, file)\n self.results.append(result)\n self.matches += 1\n \n with open(\"search_results.txt\", \"w\") as f:\n f.writelines(self.results)", "def text_file_search(cls, file: str, search_for: str, encoding: str = 'utf-8', search_using_regex: bool = False,\n\t\t\t\t\t\t search_from_front_to_back: bool = False):\n\n\t\tresult = cls.text_file_search_all(file, search_for, encoding, search_using_regex,\n\t\t\t\t\t\t\t\t\t\t search_from_front_to_back, count=1)\n\n\t\treturn result[0] if result else None", "def search(path, f):\n\n started = False\n\n for count, line in enumerate(f):\n number = count + 1\n if search_line(line):\n if 
not started:\n print config.term.highlight(relpath(path), 'GREEN')\n if config.filenames:\n break\n started = True\n if len(line) <= config.output_limit:\n print '%d:%s' % (number,\n config.term.highlight(line.rstrip('\\n\\r'),\n ('BLACK', 'BG_YELLOW'),\n config.search))\n else:\n print '%d:LINE IS TOO LONG (>%d)' % (number, config.output_limit)\n if started:\n print", "def search(self, text, **kwargs):\n results = []\n path_pattern = self._get_base_path_pattern()\n if not 'limit' in kwargs and self._default_limit is not None:\n kwargs['limit'] = self._default_limit\n query = self._get_query_parser().parse(unicode(text))\n with self._get_searcher() as searcher:\n for hit in searcher.search(query, **kwargs):\n filename = re.sub(path_pattern, '', hit['path'], 1) \\\n if path_pattern is not None else hit['path']\n results.append({\n 'title': hit['title'],\n 'file': filename,\n 'highlights': hit.highlights('content')\n })\n return results", "def search(self, filtr):\n return [note for note in self.notes if note.match(filtr)]", "def grep(self, needle):\n result = []\n for line in self.contents:\n if needle in line:\n result.append(line)\n if result:\n return result\n return False", "def grep(pattern, *files_or_paths):\n matches = []\n\n for fop in files_or_paths:\n with fileobj(fop) as fo:\n matches.extend((line for line in fo if re.match(pattern, line)))\n\n return matches", "def search_by_pattern(self, pattern, key=lambda data: data['meta']):\n result = []\n for node, data in self.traverse():\n if re.search(pattern, key(data), flags=re.VERBOSE):\n result.append([node, data])\n return result", "def keywords_pattern():\n with open(\"keywords.txt\", 'r') as f:\n lines = [line.strip() for line in f if line.strip()]\n return set(lines)", "def get_keywords(prefix, file_path, blacklist):\n file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path\n file_path = os.path.splitext(file_path)[0] # Only keep the part without extension\n file_path = str(file_path).lower()\n for bad_keyword in blacklist:\n file_path = file_path.replace(bad_keyword, ' ')\n file_path = re.sub(r'\\s+', ' ', file_path) # Replace multiple spaces to single one\n keywords = file_path.split(' ')\n keywords = [k for k in keywords if k]\n\n return keywords", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def regexSearch(regexStr, folderPath):\n if not os.path.isdir(folderPath):\n return 'Input a directory path'\n\n userRegex = re.compile(regex)\n\n for filename in os.listdir(folderPath):\n\n if filename.endswith('.txt'):\n\n with open(filename) as file:\n\n for line in file:\n mo = userRegex.search(line)\n \n if mo:\n print(line, end='')", "def read_results(results_file, threshold=0.0):\n patterns = []\n with open(results_file, 'r') as input_file:\n patterns_reader = csv.reader(input_file)\n next(input_file)\n for line in patterns_reader:\n #if float(line[6]) >= threshold:\n patterns.append(line[0])\n print(\"Read {:d} patterns.\".format(len(patterns)))\n return patterns", "def countLines(file_path, keyword):\n count = 0\n\n try:\n with open(file_path, 'r') as f:\n lines = f.readlines()\n if keyword in lines:\n count += 1\n # Catch permission errors.\n except OSError as e:\n print(\"{}. 
Could not open file {}.\").format(e, file_path)\n return count", "def grep_me(pattern, fname):\n for line in stream_reader(fname):\n if re.search(pattern, line, re.I):\n print('{}:{}:{}'.format(filename(), filelineno(), line), end='')" ]
[ "0.620735", "0.61173135", "0.6065361", "0.6045502", "0.5981804", "0.5717122", "0.57109797", "0.5688", "0.56368005", "0.5602805", "0.5584422", "0.55832374", "0.5575134", "0.5572834", "0.5553349", "0.5479438", "0.5465954", "0.54646385", "0.541022", "0.536871", "0.53384507", "0.53348756", "0.53188694", "0.53118384", "0.5276257", "0.52233225", "0.52217025", "0.52178913", "0.5208161", "0.5184885" ]
0.76741695
0
Modify the old contents to new contents.\n oldFile is the original file fullpath.\n newFile is the output file fullpath.\n case_insensitive is True or false.\n
def Modify_lines_matching_pattern(oldFile, newFile, modifydic, case_insensitive): fd = open(oldFile, "r") lines = fd.readlines() fd.close() for key in modifydic.keys(): if case_insensitive == True: pattern = re.compile(key, re.I) else: pattern = re.compile(key) for i in range(len(lines)): if pattern.search(lines[i]): lines[i] = re.sub(pattern, modifydic[key], lines[i]) fd = open(newFile, "w") fd.writelines(lines) fd.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replaceLine(oldFile, string1, string2, newString, newFile = \"TempFile\", mvFile = True):\n with open(oldFile, \"r\") as oldfile, open(newFile, \"w\") as newfile:\n oldfile_read = oldfile.readlines()\n for line in oldfile_read:\n line_number = oldfile_read.index(line)\n if string1 in line and string2 in line:\n oldfile_read[line_number] = replaceString(oldfile_read[line_number],newString)\n newfile.writelines(oldfile_read[line_number])\n else:\n newfile.writelines(oldfile_read[line_number])\n\n if mvFile == True:\n shutil.move(newFile, oldFile)", "def copy_file(fromf,tof, fromapp, toapp):\n f2w=open(tof,\"w\")\n with open(fromf) as f:\n for line in f:\n newline=line.replace(fromapp,toapp)\n f2w.write(newline.replace(fromapp.upper(),toapp.upper()))\n f2w.close()", "def set_old_new_file(self, old_file, new_file):\n Gumtree.gumtree.setOldAndNewFile(old_file, new_file)", "def RenameFile(self, oldname: str, newname: str) -> None:\n ...", "def replace(file,original_text,replacement_text):\n with open(file, \"rt\") as fin:\n with open(str(file+\"temp\"), \"wt\") as fout:\n for line in fin:\n fout.write(line.replace(original_text,replacement_text))\n os.rename(str(file+\"temp\"),file)\n return", "def case_sensitive_replace(string, old, new):\n def repl(match):\n current = match.group()\n result = ''\n all_upper=True\n for i,c in enumerate(current):\n if i >= len(new):\n break\n if c.isupper():\n result += new[i].upper()\n else:\n result += new[i].lower()\n all_upper=False\n #append any remaining characters from new\n if all_upper:\n result += new[i+1:].upper()\n else:\n result += new[i+1:].lower()\n return result\n\n regex = re.compile(re.escape(old), re.I)\n return regex.sub(repl, string)", "def replace_in_file(path, old, new):\n with open(path) as fp:\n content = fp.read()\n\n lpf.ensure_removed(path)\n with open(path, 'w') as fp:\n fp.write(content.replace(old, new))", "def projectFileRenamed(self, oldfn, newfn):\n editor = self.getOpenEditor(oldfn)\n if editor:\n editor.fileRenamed(newfn)", "def marker_replace_template(in_file, out_file, old, new):\n replace_file = open(in_file, \"r\")\n template_content = replace_file.read()\n result_content = template_content.replace(old, new)\n replace_file = open(out_file, \"w\")\n replace_file.write(result_content)\n replace_file.close()", "def uppercase(self):\n\n file = open(self.filename, 'r')\n new_file = open(self.temp_filename, 'w')\n for line in file:\n for keyword in self.KEYWORDS:\n if keyword in line:\n line = line.replace(keyword, keyword.upper())\n new_file.write(line)\n file.close()\n new_file.close()\n self.overwrite_file()", "def rename(old, new):", "def rename(old, new):", "def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)", "def replace_file(new_content, current_location):\r\n\tif should_replace(new_content, current_location):\r\n\t\tabs_path = os.path.abspath(current_location)\r\n\t\tcurrent_dir, filename = os.path.split(abs_path)\r\n\t\ttmp_filename = 
'{0}.{1}'.format(filename, time.time())\r\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\r\n\r\n\t\ttry:\r\n\t\t\twith open(tmp_path, 'w') as tmp:\r\n\t\t\t\ttmp.write(new_content.getvalue())\r\n\t\t\tos.rename(tmp_path, abs_path)\t\r\n\t\texcept IOError:\r\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\r\n\t\t\treturn False\r\n\t\treturn True\r\n\treturn False", "def main():\n arg = sys.argv[1:]\n arg_len = len(arg)\n if arg_len != 3:\n print(\"\\033[31;1m需要3个参数,你只给{0}个({1} old_string new_string file.txt)\\033[0m\".format(arg_len, sys.argv[0]))\n exit()\n else:\n if os.path.exists(sys.argv[-1]):\n f1 = open(sys.argv[-1], 'r+')\n f2 = open(\"{0}.temp\".format(sys.argv[3]), 'w')\n for line in f1:\n line = line.replace(sys.argv[1], sys.argv[2])\n f2.write(line)\n f2.flush()\n f1.close()\n f2.close()\n\n f1_size = os.path.getsize(sys.argv[-1])\n f2_size = os.path.getsize(\"{0}.temp\".format(sys.argv[-1]))\n if f1_size == f2_size:\n os.remove(sys.argv[-1])\n os.renames(\"{0}.temp\".format(sys.argv[-1]), sys.argv[-1])\n else:\n print(\"\\033[31;1m文件替换出错!\\033[0m\")\n exit()\n else:\n print(\"\\033[31;1m[{0}]\\033[0m文件不存在.\".format(sys.argv[-1]))", "def repl_file(self, dir, file, dirkey, filekey, txtkey):\n startloc = os.path.join(self.loc, dir, file)\n newdir = self.dictreplace(dir, dirkey)\n newfile = self.dictreplace(file, filekey)\n enddir = os.path.join(self.loc, newdir)\n endloc = os.path.join(enddir, newfile)\n if not os.path.exists(enddir):\n os.makedirs(enddir)\n if startloc != endloc:\n print(\"Reading \" + startloc)\n print(\"Writing \" + endloc)\n self.replace_all_vals(startloc, endloc, txtkey)", "def write_to_file(original_path, new_path):\n print(f\"[INFO]: Transform data from binary to text file {new_path}\")\n with open(new_path, mode='wt', encoding='utf-8') as new_file:\n with open(original_path, mode='rb') as original_file:\n for line in original_file:\n new_file.write(line.decode())", "def replace_file(new_content, current_location):\n\tif should_replace(new_content, current_location):\n\t\tabs_path = os.path.abspath(current_location)\n\t\tcurrent_dir, filename = os.path.split(abs_path)\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\n\n\t\ttry:\n\t\t\twith open(tmp_path, 'w') as tmp:\n\t\t\t\ttmp.write(new_content.getvalue())\n\t\t\tos.rename(tmp_path, abs_path)\t\n\t\texcept IOError:\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\n\t\t\treturn False\n\t\treturn True\n\treturn False", "def Modify_lines(oldFile, newFile, modifydic):\r\n fd = open(oldFile, \"r\")\r\n lines = fd.readlines()\r\n fd.close()\r\n for key in modifydic.keys():\r\n for i in range(len(lines)):\r\n if lines[i].find(key) != -1:\r\n lines[i]=lines[i].replace(key, modifydic[key])\r\n fd = open(newFile, \"w\")\r\n fd.writelines(lines)\r\n fd.close()", "def replace2(oldlst,newlst):\n with open(oldlst, 'r') as f4:\n with open(newlst,'w') as f5:\n for line in f4:\n f5.write(line)", "def Add_File(self,txn,filename,newcontents):\n opid = self.new_opid()\n fullname = os.path.join(self.home,filename)\n #if not self.tx.dir_exists(os.path.dirname(fullname)):\n # raise OSError(errno.ENOENT,\"No directory: %r\"%os.path.dirname(fullname))\n xaction = ReplaceAll_Operation(fullname,newcontents,opid)\n self._add_operation(txn,xaction)", "def modify_input_file(filepath, updated_file_list):\n lines = 0 # current input line number\n file_changed = False # the file has changed\n\n # find and 
change matching lines\n pattern = re.compile(\"[Cc]opyright\")\n with open(filepath, mode='r', encoding='utf-8', newline='') as file_in:\n for line in file_in:\n lines += 1\n if pattern.search(line) and __old_date in line:\n line = line.replace(__old_date, __new_date)\n file_changed = True\n updated_file_list.append(line)\n return file_changed", "def update_file(this_file, new_lines):\r\n file_format = get_file_format(this_file)\r\n return new_write_file(this_file, new_lines, file_format=file_format)", "def changeInput(keyWord, newEntry, foamFile=None, foamCase='.', verbose=True):\n newEntry = str(newEntry)\n foamFiles = getFoamFiles(foamCase)\n if foamFile==None:\n for fFile in foamFiles:\n entry = readInput(fFile, keyWord, foamCase) \n if entry!=0:\n foamFile = fFile\n break\n else:\n entry = readInput(foamFile, keyWord, foamCase)\n \n if isinstance(entry, str):\n if entry==newEntry:\n if verbose:\n print(\"For keyWord \" + keyWord + \" the entry \" + entry + \" does not change. No changes made\")\n else:\n foamFilePath = findFoamFile(foamFile, foamCase)\n with open(foamFilePath, 'r') as f:\n fileData = f.read().split('\\n')\n for i in range(len(fileData)):\n if (keyWord in fileData[i] and entry in fileData[i]):\n fileData[i]=fileData[i].replace(entry, newEntry)\n\n with open(foamFilePath, 'w') as f:\n f.write('\\n'.join(fileData))\n if verbose:\n print(\"keyWord \" + keyWord + \" changed entry from \" + entry + \" to \" + newEntry + \" in file \" + foamFile)\n f.close()\n return 0\n else:\n print(\"The keyWord: \" + keyWord + \" could not be found\")\n return 0", "def RewriteFile(start, end, original_dir, original_filename, snippet,\n outdir=None):\n original_path = GetPath(os.path.join(original_dir, original_filename))\n original = file(original_path, 'r')\n original_content = original.read()\n original.close()\n if outdir:\n outpath = os.path.join(outdir, original_filename)\n else:\n outpath = original_path\n out = file(outpath, 'w')\n rx = re.compile(r'%s\\n.*?%s\\n' % (re.escape(start), re.escape(end)),\n re.DOTALL)\n new_content = re.sub(rx, '%s\\n%s%s\\n' % (start, snippet, end),\n original_content)\n out.write(new_content)\n out.close()\n print 'Output ' + os.path.normpath(outpath)", "def reverse(old, new):\r\n infile = open(old, \"r\")\r\n xs = infile.readlines() # Turn lines of old file into a list\r\n infile.close()\r\n\r\n xs.reverse() # Reverse this list\r\n outfile = open(new, \"w\")\r\n for i in xs:\r\n outfile.write(i) # Write a line-at-a-time at reversed list\r\n outfile.close()", "def replace(file, current_line, new_line):\n with fileinput.input(file, inplace=True) as f:\n for line in f:\n if current_line in line:\n line = new_line\n sys.stdout.write(line)", "def trim_vcf(from_file, to_file, new_id):\n try:\n f_input = open(from_file)\n except UnicodeDecodeError:\n f_input = gzip.open(from_file, 'rt')\n finally:\n with open(to_file, 'w') as f_output:\n for line in f_input:\n if line.startswith('#'):\n result = process_header(line, (new_id,))\n else:\n result = process_body(line)\n\n if result is not None:\n f_output.write(result)\n\n if not f_input.closed:\n f_input.close()\n if not f_output.closed:\n f_output.close()", "def replace(self, old, new):\n self.log('replace({0}, {1})'.format(old, new))\n if old is False:\n return False\n if isinstance(old, str):\n old = old.split('\\n')\n if not isinstance(old, list):\n raise TypeError(\"Parameter 'old' not a 'string' or 'list', is {0}\".format(type(old)))\n if not isinstance(new, str):\n raise TypeError(\"Parameter 'new' 
not a 'string', is {0}\".format(type(new)))\n local_changes = False\n for this in old:\n if this in self.contents:\n while this in self.contents:\n index = self.contents.index(this)\n self.changed = local_changes = True\n self.contents.remove(this)\n self.contents.insert(index, new)\n self.log('Replaced \"{0}\" with \"{1}\" at line {2}'.format(this, new, index))\n else:\n self.log('\"{0}\" not in {1}'.format(this, self.filename))\n return local_changes", "def _change_file(file):\n\n with fileinput.FileInput(file, inplace=True, backup='.bak') as f:\n for index, line in enumerate(f):\n if index == 13:\n print(line.replace(line, line[15:]), end='')\n else:\n print(line.replace(line, line), end='')" ]
[ "0.6301766", "0.6221288", "0.61109865", "0.6051536", "0.6004594", "0.59841347", "0.59138376", "0.58587974", "0.58379644", "0.5784545", "0.57423884", "0.57423884", "0.57224125", "0.5713668", "0.5694037", "0.5662745", "0.56497025", "0.5639031", "0.56102854", "0.5609075", "0.55987155", "0.5579439", "0.55633557", "0.5560076", "0.55360055", "0.55300605", "0.55258816", "0.5522576", "0.55023474", "0.5445814" ]
0.66408944
0
Override this method to respond to the mouse not being over the element. Called AFTER mouse_out if that method gets called.
def mouse_not_over(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_out(self):\n pass", "def mouse_over(self):\n pass", "def on_unhover(self) -> None:", "def mouse_out(self, event):\r\n self['background'] = self.defaultBackground", "def MouseOverItem(self,item):\r\n pass", "def OnMouse(self,event):\r\n if event.Moving():\r\n (mouseItem,mouseHitFlag) = self.gList.HitTest(event.GetPosition())\r\n if mouseItem != self.mouseItem:\r\n self.mouseItem = mouseItem\r\n self.MouseOverItem(mouseItem)\r\n elif event.Leaving() and self.mouseItem != None:\r\n self.mouseItem = None\r\n self.MouseOverItem(None)\r\n event.Skip()", "def on_unhovered(self):\n if not self.is_selected:\n self.colour = self.normal_colour\n self.is_hovered = False\n self.redraw()", "def mouse_enter(self):\n pass", "def on_hover(self) -> None:", "def OnMouseOut( self, event ):\n self.whichChoice = 0\n event.context.triggerRedraw(1)", "def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n QApplication.instance().restoreOverrideCursor()", "def onMouseOver(self,mouseEvent):\n\t\tself.canvas.nodeOver(self)", "def _hover(self, event):\n if self.ignore(event):\n return\n\n if self._active_handle is not None or not self._selection_completed:\n # Do nothing if button is pressed and a handle is active, which may\n # occur with drag_from_anywhere=True.\n # Do nothing if selection is not completed, which occurs when\n # a selector has been cleared\n return\n\n _, e_dist = self._edge_handles.closest(event.x, event.y)\n self._set_cursor(e_dist <= self.grab_range)", "def OnMouse(self, event):\n\n self.Refresh()\n event.Skip()", "def mouseOver(self, mousePos):\n self._mouseOver = False\n if self._rect.collidepoint(mousePos):\n self._mouseOver = True\n return self._mouseOver", "def on_mouse_leave (self, event):\n\n\t\tif not self.clicked:\n\n\t\t\tself.cursor_position = [-1,-1]\n\t\t\tself.redraw_canvas()\n\t\t\tself.hide_tip()#self.timer1 = gobject.timeout_add(2000, self.hide_tip)", "def hovered(self, *args, **kwargs): # real signature unknown\n pass", "def ev_MOUSEUP(self, event):", "def handle_attributes_mouseover(self):\n pass", "def mouseReleaseEvent (self, event):\n if self.itemMoved:\n self.parentWidget.DataChanged.emit()\n self.itemMoved = False; \n super(DiagramItem, self).mouseReleaseEvent(event)", "def mouseOut(self, event):\n if self.editMode and self.lastChanged:\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n return\n self.correctPending()\n self.lastPosition = None", "def mouse_over(self):\n self.scroll_to()\n ActionChains(self.driver).move_to_element(self._element).perform()", "def handle_equipment_mouseover(self):\n if self.skill_tree_displaying:\n return\n mouse_pos = pg.mouse.get_pos()\n slot_moused_over = ''\n for slot in self.equipment_tiles:\n if self.equipment_tiles[slot].collidepoint(mouse_pos):\n slot_moused_over = slot\n break\n\n if slot_moused_over:\n self.tooltip_focus = self.equipment_tiles[slot_moused_over]\n if self.player_dict['equipment'][slot_moused_over]: # i.e. 
if there is an item equipped in the slot\n equipment_dict = self.player_dict['equipment'][slot_moused_over].to_dict()\n else:\n equipment_dict = None\n player_panel_renderer.draw_equipment_details(equipment_dict, slot_moused_over)", "def mouse_in(self, event):\r\n self['background'] = '#E5F3FF'", "def hoverLeaveEvent(self, event):\n if self._hoverSpot:\n if self._hoverSpot.hoverLeaveEvent(event):\n self.update()\n \n self._hoverSpot = None\n \n super(XNode, self).hoverLeaveEvent(event)", "def mouseOver(self, event):\n if self.editMode:\n self.setEditCursor(event)\n return\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if self.lastPosition == (x, y):\n return # I've already drawn this\n if not (0 <= x < self.rows and 0 <= y < self.cols):\n return # not on the grid\n self.lastPosition = (x, y)\n self.paintBackground(x, y, self.checkFree(x, y))", "def mouse_out(self):\n self.scroll_to()\n size = self._element.size\n\n try:\n # Fails on left edge of viewport\n ActionChains(self.driver).move_to_element_with_offset(\n self._element,\n -10,\n -10,\n ).click().perform()\n except MoveTargetOutOfBoundsException:\n try:\n ActionChains(self.driver).move_to_element_with_offset(\n self._element,\n size[\"width\"] + 10,\n 10,\n ).click().perform()\n except MoveTargetOutOfBoundsException:\n ActionChains(self.driver).move_to_element_with_offset(\n self._element,\n 10,\n size[\"height\"] + 10,\n ).click().perform()", "def hoverLeaveEvent(self, moveEvent):\n self.setCursor(Qt.ArrowCursor)\n super().hoverLeaveEvent(moveEvent)", "def on_mouse_over(self, event):\n # This method was suggested by none other than Robin Dunn\n # http://www.blog.pythonlibrary.org/2010/04/04/wxpython-grid-tips-and-tricks/\n # https://alldunn.com/robin/\n x, y = self.CalcUnscrolledPosition(event.GetX(), event.GetY())\n coords = self.XYToCell(x, y)\n try:\n if model.RECHNUNGSTYP != \"INST\":\n msg = \"{} {}: {}\".format(model.get_dct_cells()[(coords[0], coords[1])].value[0].bezeichnung,\n model.get_dct_cells()[(coords[0], coords[1])].jahr,\n model.get_dct_cells()[(coords[0], coords[1])].posten.geldbetrag)\n event.GetEventObject().SetToolTip(msg)\n else:\n pass\n except KeyError: # for empty cells\n pass\n except AttributeError: # for cells without oberkategorie\n pas", "def mouseReleaseEvent(self, event):\n # super(PlotWidget, self).mouseReleaseEvent(event)\n event.accept()" ]
[ "0.82283014", "0.7914597", "0.7469889", "0.7266409", "0.69720304", "0.67417073", "0.6612279", "0.65831256", "0.6578622", "0.6568934", "0.6539237", "0.65102863", "0.6490017", "0.63950545", "0.6386635", "0.633578", "0.6321943", "0.6312482", "0.6304269", "0.62664276", "0.62366825", "0.6225487", "0.6198754", "0.61876273", "0.61800367", "0.61768466", "0.6146119", "0.6131945", "0.611814", "0.6116782" ]
0.8337129
0
Override this method to respond to the mouse entering the element.
def mouse_enter(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_over(self):\n pass", "def enterEvent(self, ev):\n self.setFocus(Qt.MouseFocusReason)\n self.__pointerLeftWidget = False\n self.setCursor(self.defaultCursor)\n QGraphicsView.enterEvent(self, ev)", "def hoverEnterEvent(self, event: QGraphicsSceneHoverEvent):\n self.setCursor(Qt.ArrowCursor)", "def hoverEnterEvent(self, event: QGraphicsSceneHoverEvent):\n self.setCursor(Qt.ArrowCursor)", "def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):\n QApplication.instance().setOverrideCursor(Qt.OpenHandCursor)", "def on_enter(self):\n raise NotImplemented(\"on_enter method should be implemented.\")", "def mouse_in(event):\r\n\r\n if str(event.type) == 'Enter':\r\n about_content.config(cursor=\"hand2\")\r\n else:\r\n about_content.config(cursor=\"arrow\")", "def mouse_in(self, event):\r\n self['background'] = '#E5F3FF'", "def dragEnterEvent(self, event):", "def dragEnterEvent(self, e):\n # TODO: Do it properly.\n # TODO: Redraw widget while dragging.\n e.accept()", "def mousePressEvent(self, mouse_event):\r\n return", "def hoverEnterEvent(self, event):\n if not self.isSelected():\n for hs in self.hs:\n hs.setOpacity(1)\n super(DiagramItem, self).hoverEnterEvent(event)", "def ev_MOUSEUP(self, event):", "def hoverEnterEvent( self, event ):\n # process the parent event\n super(XNode, self).hoverEnterEvent(event)\n \n # hover over a hotspot\n hotspot = self.hotspotAt(event.pos())\n if not hotspot:\n hotspot = self.dropzoneAt(event.pos())\n \n old_spot = self._hoverSpot\n \n if hotspot and hotspot != old_spot:\n # update the new hotspot\n self._hoverSpot = hotspot\n \n if old_spot:\n old_spot.hoverLeaveEvent(event)\n \n if hotspot.hoverEnterEvent(event):\n self.update()\n \n elif old_spot and not hotspot:\n self._hoverSpot = None\n \n if old_spot.hoverLeaveEvent(event):\n self.update()", "def dragEnterEvent(self, dee):\n dee.accept(hasattr(Globals.dragObject, 'trackFrame'))", "def onMouseOver(self,mouseEvent):\n\t\tself.canvas.nodeOver(self)", "def handle_mouse_press(self, event):", "def mouse_not_over(self):\n pass", "def _hover(self, event):\n if self.ignore(event):\n return\n\n if self._active_handle is not None or not self._selection_completed:\n # Do nothing if button is pressed and a handle is active, which may\n # occur with drag_from_anywhere=True.\n # Do nothing if selection is not completed, which occurs when\n # a selector has been cleared\n return\n\n _, e_dist = self._edge_handles.closest(event.x, event.y)\n self._set_cursor(e_dist <= self.grab_range)", "def MouseOverItem(self,item):\r\n pass", "def on_hover(self) -> None:", "def OnMouse(self,event):\r\n if event.Moving():\r\n (mouseItem,mouseHitFlag) = self.gList.HitTest(event.GetPosition())\r\n if mouseItem != self.mouseItem:\r\n self.mouseItem = mouseItem\r\n self.MouseOverItem(mouseItem)\r\n elif event.Leaving() and self.mouseItem != None:\r\n self.mouseItem = None\r\n self.MouseOverItem(None)\r\n event.Skip()", "def handle_mouse(self, x, y):\n pass", "def enterEvent(self, event):\n if self.responsive:\n self.in_focus = True\n self.set_background(self.backgrounds[\"inFocus\"])\n event.accept()", "def mouse_over(self):\n self.scroll_to()\n ActionChains(self.driver).move_to_element(self._element).perform()", "def OnMouseIn( self, event ):\n self.whichChoice = 1\n event.context.triggerRedraw(1)", "def append_cursor_enter_callback(self):", "def mouse_left_up(self):\n pass", "def enterEvent (self, event):\n if not self.isEnabled():\n event.accept()\n else:\n self.setStyleSheet( \"\"\"\n QWidget {\n background: #D7DCE0;\n 
margin-left: 10px;\n padding-top: 6px;\n } \"\"\")\n event.accept()", "def entered(self, mover):\n pass" ]
[ "0.72074497", "0.7113512", "0.7032834", "0.7032834", "0.69916975", "0.69322693", "0.6845052", "0.6836298", "0.6764046", "0.6755759", "0.6748612", "0.65055966", "0.649724", "0.64849204", "0.6443039", "0.64336354", "0.64145774", "0.64126825", "0.6396133", "0.6368497", "0.63338494", "0.62957007", "0.62530184", "0.62306106", "0.61934674", "0.6190441", "0.61601174", "0.61447984", "0.6132296", "0.6123619" ]
0.85851264
0
Override this method to respond to the mouse leaving the element.
def mouse_out(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_leave (self, event):\n\n\t\tif not self.clicked:\n\n\t\t\tself.cursor_position = [-1,-1]\n\t\t\tself.redraw_canvas()\n\t\t\tself.hide_tip()#self.timer1 = gobject.timeout_add(2000, self.hide_tip)", "def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n QApplication.instance().restoreOverrideCursor()", "def leaveEvent(self, ev):\n if (self.panning):\n # don't immediately change pointer if we're panning\n self.__pointerLeftWidget = True\n else:\n self.setCursor(Qt.ArrowCursor)\n QGraphicsView.leaveEvent(self, ev)\n self.currentKbKey = None", "def hoverLeaveEvent(self, event):\n if self._hoverSpot:\n if self._hoverSpot.hoverLeaveEvent(event):\n self.update()\n \n self._hoverSpot = None\n \n super(XNode, self).hoverLeaveEvent(event)", "def mouse_out(self, event):\r\n self['background'] = self.defaultBackground", "def hoverLeaveEvent(self, moveEvent):\n self.setCursor(Qt.ArrowCursor)\n super().hoverLeaveEvent(moveEvent)", "def OnLeaveWindow(self, event):\r\n \r\n if self._hover_button:\r\n self._hover_button.cur_state = AUI_BUTTON_STATE_NORMAL\r\n self._hover_button = None\r\n self.Refresh()\r\n self.Update()", "def OnLeaveWindow(self, event):\r\n\r\n if self._hover_button:\r\n self.RefreshButton(self._hover_button)\r\n self._hover_button = None", "def ev_windowleave(self, event: WindowEvent) -> None:", "def OnLeaveWindow(self, event):\r\n\r\n self.RefreshOverflowState()\r\n self.SetHoverItem(None)\r\n self.SetPressedItem(None)\r\n\r\n self._tip_item = None\r\n self.StopPreviewTimer()", "def on_mouse_leave(self, event):\n global controller\n if self == controller:\n self.set_help_text(None)\n if self.task:\n self.task.stop()\n self.task = None\n controller = None", "def leaveEvent(self, event):\n self.destroy()", "def leave(event):\n if tooltip.event is not None:\n widget.after_cancel(tooltip.event)\n tooltip.event = None\n tooltip.hidetip()", "def mouseOut(self, event):\n if self.editMode and self.lastChanged:\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n return\n self.correctPending()\n self.lastPosition = None", "def on_unhover(self) -> None:", "def leaveEvent(self, event):\n if self.responsive:\n self.in_focus = False\n self.set_background(self.default_background)\n event.accept()", "def ev_windowleave(self, event: tcod.event.WindowEvent) -> T | None:", "def mouse_not_over(self):\n pass", "def mouseReleased():\n if not game_controller.game_over:\n if game_controller.falling_disk and \\\n game_controller.falling_disk.y_vel == 0:\n game_controller.handle_mouseReleased()", "def leaveEvent (self, event):\n if not self.isEnabled():\n event.accept()\n else:\n # background: #EAEAEA;\n self.setStyleSheet( \"\"\"\n QWidget {\n margin-left: 10px;\n padding-top: 6px;\n } \"\"\")\n event.accept()", "def mouseReleaseEvent (self, event):\n if self.itemMoved:\n self.parentWidget.DataChanged.emit()\n self.itemMoved = False; \n super(DiagramItem, self).mouseReleaseEvent(event)", "def leave_notify_event(self, widget, event):\n self.logger.debug(\"leaving widget...\")\n return self.make_callback('leave')", "def OnMouseOut( self, event ):\n self.whichChoice = 0\n event.context.triggerRedraw(1)", "def _mouse_leave(self, event):\n\n #Task 1.2 (Tower placement): Delete the preview\n #Hint: Relevant canvas items are tagged with: 'path', 'range', 'shadow'\n # See tk.Canvas.delete (delete all with tag)\n self._view.delete(\"shadow\", \"range\", \"path\")", "def mouse_over(self):\n pass", "def onleaveanimation(self, event):\n print('onleaveanimation; event: %s, %s->%s' % 
(event.event, event.src, event.dst))", "def mouseReleaseEvent(self, event):\n if event.button() is not QtCore.Qt.MouseButton.LeftButton:\n return False\n if self.mousenode is not None:\n self.remove_mousenode(event)\n return QtGui.QGraphicsScene.mouseReleaseEvent(self, event)", "def frameLeave(self):\n try:\n self.contentFrame.currFrame.leave()\n except AttributeError:\n pass", "def mouseReleaseEvent(self, event: QMouseEvent):\n self._moving = False\n self.rectChanged.emit(self._rect)\n super().mouseReleaseEvent(event)", "def dnd_leave(self, source, event):\n\n if self._canvas_cursor:\n self._canvas['cursor'] = self._canvas_cursor\n self._canvas['relief'] = tk.SUNKEN" ]
[ "0.7270982", "0.71157235", "0.704455", "0.7016063", "0.67757094", "0.6765137", "0.67564774", "0.6679935", "0.6604109", "0.6557472", "0.6523289", "0.6511427", "0.6496346", "0.641088", "0.64010245", "0.62832344", "0.62736565", "0.6220964", "0.62058276", "0.6200299", "0.6186985", "0.6171938", "0.61702174", "0.6137098", "0.6128624", "0.60890985", "0.60832435", "0.6040201", "0.60169226", "0.59969455" ]
0.7645027
0
Override this method to respond to the left mouse button being held down over the element.
def mouse_left_down(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)", "def mouse_left_up(self):\n pass", "def mouse_right_down(self):\n pass", "def OnLeftDown(self, event): # ANDY some PAN ideas from http://code.google.com/p/pyslip/\n if event.ShiftDown():\n event.Skip()\n return\n click_posn = event.GetPosition()\n self.SetCursor(wx.Cursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def LeftClick(self):\n self._PressLeftButton()\n self._ReleaseAllButtons()", "def leftButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.LEFT_BUTTON)", "def onMouseLeftDown(self, event):\n # [NOTE] No need to call self.choice(). It is enough to call\n # event.Skip() and the machine will be called self.OnButtonClick()\n event.Skip()", "def mouse_right_up(self):\n pass", "def OnLeftDown(self, event):\r\n \r\n self.CaptureMouse()\r\n self._click_pt = wx.Point(-1, -1)\r\n self._is_dragging = False\r\n self._click_tab = None\r\n self._pressed_button = None\r\n \r\n wnd = self.TabHitTest(event.GetX(), event.GetY())\r\n \r\n if wnd is not None:\r\n new_selection = self.GetIdxFromWindow(wnd)\r\n\r\n # AuiNotebooks always want to receive this event\r\n # even if the tab is already active, because they may\r\n # have multiple tab controls\r\n if new_selection != self.GetActivePage() or isinstance(self.GetParent(), AuiNotebook):\r\n \r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGING, self.GetId())\r\n e.SetSelection(new_selection)\r\n e.SetOldSelection(self.GetActivePage())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)\r\n \r\n self._click_pt.x = event.GetX()\r\n self._click_pt.y = event.GetY()\r\n self._click_tab = wnd\r\n \r\n if self._hover_button:\r\n self._pressed_button = self._hover_button\r\n self._pressed_button.cur_state = AUI_BUTTON_STATE_PRESSED\r\n self._on_button = True\r\n self.Refresh()\r\n self.Update()", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def left_mouse_down_handler(self, event):\r\n\r\n self.is_left_mouse_down = True\r\n if not self.is_game_over:\r\n self.update_reset_button()\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)", "def leftDown(self):", "def getMouseLeftDown(self):\n if self.mouseData.leftNewlyActive:\n self.mouseData.leftNewlyActive = False\n return True\n else:\n return False", "def onLeftDown(self, event):\n\n self.is_box_select = False # assume not box selection\n\n click_posn = event.GetPositionTuple()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n else:\n self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def _left_button_press_event(self, obj, event):\n #print('area_picker - left_button_press_event')\n self.OnLeftButtonDown()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n self.picker_points.append((pixel_x, pixel_y))", 
"def on_left_mouse_click(self, event: Event) -> None:\n\t\tself.mouse_state.set_click(event.x, event.y)", "def handle_mouse_press(self, event):", "def handle_event(self, event):\n if event.type != MOUSEMOTION:\n return\n self.model.slider.left = event.pos[0]", "def on_mouse_press(self, x, y, button):\n\n pass", "def ev_MOUSEDOWN(self, event):", "def OnLeftDClick(self, event):\r\n \r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_DCLICK, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_DCLICK, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def handle_left_shift_click(self, event):\n #placeholder to prevent handle_left_click being called\n return", "def left_mouse_up_handler(self, event):\r\n\r\n self.is_left_mouse_down = False\r\n\r\n if self.reset_button.contains_event(event.pos):\r\n self.start_new_game()\r\n elif self.is_right_mouse_down:\r\n self.shortcut_click(event)\r\n else:\r\n tile = self.board.get_event_tile(event.pos)\r\n if tile is not None and not self.is_game_over:\r\n self.update_reset_button()\r\n if self.is_new_game:\r\n self.first_move(tile)\r\n tile_reveal_result = self.board.left_click_up(tile)\r\n self.process_tile_reveal(tile_reveal_result)\r\n if not self.is_game_over:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)", "def ev_MOUSEUP(self, event):", "def OnLeftUp(self, event):\r\n\r\n self._on_button = False\r\n \r\n if self._is_dragging:\r\n\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n \r\n self._is_dragging = False\r\n if self._drag_image:\r\n self._drag_image.EndDrag()\r\n del self._drag_image\r\n self._drag_image = None\r\n self.GetParent().Refresh()\r\n\r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_END_DRAG, self.GetId())\r\n evt.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt.SetOldSelection(evt.GetSelection())\r\n evt.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n\r\n return\r\n\r\n if self.HasCapture():\r\n self.ReleaseMouse()\r\n \r\n if self._pressed_button:\r\n \r\n # make sure we're still clicking the button\r\n button = self.ButtonHitTest(event.GetX(), event.GetY())\r\n \r\n if button is None:\r\n return\r\n\r\n if button != self._pressed_button:\r\n self._pressed_button = None\r\n return\r\n \r\n self.Refresh()\r\n self.Update()\r\n\r\n if self._pressed_button.cur_state & AUI_BUTTON_STATE_DISABLED == 0:\r\n \r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BUTTON, self.GetId())\r\n evt.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt.SetInt(self._pressed_button.id)\r\n evt.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n \r\n self._pressed_button = None\r\n \r\n self._click_pt = wx.Point(-1, -1)\r\n self._is_dragging = False\r\n self._click_tab = None", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def mouse_press_event(self, x: int, y: int, button: int):\n pass" ]
[ "0.7982013", "0.7962645", "0.77094394", "0.7422216", "0.7344164", "0.73293656", "0.7312629", "0.72654736", "0.72546726", "0.72282684", "0.72147775", "0.71788275", "0.717061", "0.7139647", "0.70763767", "0.7069452", "0.7060475", "0.70085615", "0.6988899", "0.6951568", "0.6910686", "0.6901199", "0.6896636", "0.6882542", "0.6852906", "0.6832133", "0.6801524", "0.67904204", "0.67697996", "0.67697585" ]
0.8459492
0
Override this method to respond to the right mouse button being held down over the element.
def mouse_right_down(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_right_up(self):\n pass", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def onRightDown(self, event):\n\n pass", "def rightButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.RIGHT_BUTTON)", "def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()", "def OnRightDown(self, event):\r\n \r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_RIGHT_DOWN, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_RIGHT_DOWN, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def getMouseRightDown(self):\n if self.mouseData.rightNewlyActive:\n self.mouseData.rightNewlyActive = False\n return True\n else:\n return False", "def mouse_left_down(self):\n pass", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def right_mouse_up_handler(self, event):\r\n\r\n self.is_right_mouse_down = False\r\n\r\n if self.is_left_mouse_down:\r\n self.shortcut_click(event)\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_game_over and tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)", "def OnRightUp(self, event):\r\n\r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_RIGHT_UP, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_RIGHT_UP, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def mouse_left_up(self):\n pass", "def OnRightDown(self, event):\r\n \r\n cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())\r\n\r\n if self._gripper_sizer_item:\r\n gripper_rect = self._gripper_sizer_item.GetRect()\r\n if gripper_rect.Contains(event.GetPosition()):\r\n return\r\n \r\n if self._overflow_sizer_item:\r\n \r\n dropdown_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)\r\n if dropdown_size > 0 and event.m_x > cli_rect.width - dropdown_size and \\\r\n event.m_y >= 0 and event.m_y < cli_rect.height and self._art:\r\n return\r\n \r\n self._action_pos = wx.Point(*event.GetPosition())\r\n self._action_item = self.FindToolForPosition(*event.GetPosition())\r\n\r\n if self._action_item:\r\n if self._action_item.state & AUI_BUTTON_STATE_DISABLED:\r\n \r\n self._action_pos = wx.Point(-1, -1)\r\n self._action_item = None\r\n return", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)", "def rightDown(self):", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def _PressRightButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_RIGHT})\n time.sleep(self.send_delay)", "def __mouse_release(self, event, right_click=False):\n global choose_rectangle\n if 
right_click:\n return\n if choose_rectangle:\n self.__finish_rectangle(event)", "def right_mouse_down_handler(self, event):\r\n\r\n self.is_right_mouse_down = True\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_new_game and not self.is_game_over and tile is not None:\r\n if not self.is_left_mouse_down:\r\n change_in_unflagged_mines = tile.toggle_flag()\r\n self.mine_counter.update(change_in_unflagged_mines)\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)", "def _(event):\n system_line.cursor_right()", "def on_right_key(self, event) -> None:\r\n\r\n self.move_view(1, 0)", "def _right_click(self, event):\n if self.disabled is False:\n self.menu.tk_popup(event.x_root, event.y_root)", "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def OnTabRightDown(self, event):\r\n \r\n tabs = event.GetEventObject()\r\n if not tabs.GetEnabled(event.GetSelection()):\r\n return\r\n\r\n # patch event through to owner\r\n wnd = tabs.GetWindowFromIdx(event.GetSelection())\r\n\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_RIGHT_DOWN, self.GetId())\r\n e.SetSelection(self._tabs.GetIdxFromWindow(wnd))\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def right_click(self):\n self.scroll_to()\n ActionChains(self.driver).context_click(self._element).perform()", "def ev_MOUSEDOWN(self, event):" ]
[ "0.7990627", "0.7949314", "0.7513452", "0.73088235", "0.7308333", "0.7301941", "0.7238975", "0.7186552", "0.7162225", "0.7041994", "0.69716364", "0.69352114", "0.69093263", "0.6895724", "0.6866687", "0.6828164", "0.6804783", "0.6775382", "0.6756245", "0.67409694", "0.67354745", "0.6690472", "0.6614017", "0.6608215", "0.65755373", "0.6550818", "0.65186626", "0.64833724", "0.6456499", "0.64525557" ]
0.85940593
0
Override this method to respond to the right mouse button being released on the element.
def mouse_right_up(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_right_down(self):\n pass", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def onRightDown(self, event):\n\n pass", "def rightButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.RIGHT_BUTTON)", "def __mouse_release(self, event, right_click=False):\n global choose_rectangle\n if right_click:\n return\n if choose_rectangle:\n self.__finish_rectangle(event)", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def OnRightUp(self, event):\r\n\r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_RIGHT_UP, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_RIGHT_UP, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def OnRightDown(self, event):\r\n \r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_RIGHT_DOWN, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_RIGHT_DOWN, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()", "def emitReleaseEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mouseReleaseEvent signal\n self.mouseRelease.emit(self, clickLocation, button, currentKbKey, items)", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def getMouseRightDown(self):\n if self.mouseData.rightNewlyActive:\n self.mouseData.rightNewlyActive = False\n return True\n else:\n return False", "def on_mouse_release(self, x, y, button, modifiers):\n \n menu: Menu = self.get_menu_for_display()\n\n menu_click_x, menu_click_y = self.get_menu_click(menu, x, y)\n\n if button == arcade.MOUSE_BUTTON_LEFT:\n if menu:\n menu.button_list.check_mouse_release_for_buttons(\n menu_click_x,\n menu_click_y,\n )", "def button_release(self, event: Any) -> None:\n if event.button == 1:\n self.left_button_down = False\n if event.button == 2:\n self.middle_button_down = False\n if event.button == 3:\n self.right_button_down = False", "def mouseReleaseEvent(self, event):\n if event.button() is not QtCore.Qt.MouseButton.LeftButton:\n return False\n if self.mousenode is not None:\n self.remove_mousenode(event)\n return QtGui.QGraphicsScene.mouseReleaseEvent(self, event)", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def button_release_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where the button was released\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # prepare button mask as in button_press_event()\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-release', button, data_x, data_y)", "def 
ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def OnMouseUp(self, evt):\n self.ReleaseMouse()", "def mouseReleaseEvent(self, event):\n super(QIntSpinner3DS, self).mousePressEvent(event)\n super(QIntSpinner3DS, self).mouseReleaseEvent(event)\n self.unsetCursor()", "def _PressRightButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_RIGHT})\n time.sleep(self.send_delay)", "def right_mouse_up_handler(self, event):\r\n\r\n self.is_right_mouse_down = False\r\n\r\n if self.is_left_mouse_down:\r\n self.shortcut_click(event)\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_game_over and tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def mouseReleaseEventEnabled(self, ev):\n\n self._btns.remove(ev.button())", "def mouseReleased(self, _evt, _id):\n if not self.is_enabled: return False\n \n self.mouse_icon.mouseReleased(_evt, _id)\n return False", "def mouseReleaseEvent(self, event):\n # super(PlotWidget, self).mouseReleaseEvent(event)\n event.accept()", "def OnRightUp(self, event):\r\n \r\n hit_item = self.FindToolForPosition(*event.GetPosition())\r\n\r\n if self._action_item and hit_item == self._action_item:\r\n \r\n e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, self._action_item.id)\r\n e.SetEventObject(self)\r\n e.SetToolId(self._action_item.id)\r\n e.SetClickPoint(self._action_pos)\r\n self.ProcessEvent(e)\r\n self.DoIdleUpdate()\r\n \r\n else:\r\n \r\n # right-clicked on the invalid area of the toolbar\r\n e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, -1)\r\n e.SetEventObject(self)\r\n e.SetToolId(-1)\r\n e.SetClickPoint(self._action_pos)\r\n self.ProcessEvent(e)\r\n self.DoIdleUpdate()\r\n \r\n # reset member variables\r\n self._action_pos = wx.Point(-1, -1)\r\n self._action_item = None" ]
[ "0.7984992", "0.7669759", "0.7560166", "0.745702", "0.73336405", "0.7120719", "0.7056546", "0.7043819", "0.70383954", "0.68456197", "0.6824234", "0.68107015", "0.67931944", "0.6708472", "0.6638119", "0.6634257", "0.6535301", "0.6518771", "0.6508248", "0.6502479", "0.6477549", "0.6465325", "0.6463102", "0.6451639", "0.644247", "0.64340085", "0.6420154", "0.64182055", "0.6368486", "0.63583755" ]
0.7677217
1
Override this method to respond to the middle mouse button being held down over the element.
def mouse_middle_down(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_middle_up(self):\n pass", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def mouse_right_up(self):\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def ev_MOUSEUP(self, event):", "def mouse_right_down(self):\n pass", "def mouse_left_up(self):\n pass", "def handle_mouse_press(self, event):", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def mouseReleaseEvent(self, event):\n button = event.button()\n\n # select an item on which we clicked\n item = self.itemAt(event.x(), event.y())\n if item:\n self.setCurrentItem(item)\n if button == 1:\n print \"SIMPLE LEFT CLICK\"", "def OnMiddleDown(self, event):\r\n \r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_MIDDLE_DOWN, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_MIDDLE_DOWN, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def ev_MOUSEDOWN(self, event):", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def on_mouse_press(self, x, y, button):\n\n pass", "def OnMiddleUp(self, event):\r\n\r\n x, y = event.GetX(), event.GetY()\r\n wnd = self.TabHitTest(x, y)\r\n\r\n if wnd:\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_TAB_MIDDLE_UP, self.GetId())\r\n e.SetEventObject(self)\r\n e.SetSelection(self.GetIdxFromWindow(wnd))\r\n self.GetEventHandler().ProcessEvent(e)\r\n elif not self.ButtonHitTest(x, y):\r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BG_MIDDLE_UP, self.GetId())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def ev_mousebuttondown(self, event):\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)", "def rightButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.RIGHT_BUTTON)", "def mouse_left_down(self):\n pass", "def onMiddleDown(self, event):\n\n pass", "def leftButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.LEFT_BUTTON)", "def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos", "def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def button_press_cb(self, source, event):\n\n if event.button == MOUSE_BUTTON_RIGHT:\n pass\n return True\n elif event.button == MOUSE_BUTTON_MIDDLE:\n self.emit('begin-move')\n return True", "def OnMiddleDown(self, event):\r\n \r\n cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())\r\n\r\n if self._gripper_sizer_item:\r\n \r\n gripper_rect = self._gripper_sizer_item.GetRect()\r\n if gripper_rect.Contains(event.GetPosition()):\r\n return\r\n \r\n if self._overflow_sizer_item:\r\n \r\n dropdown_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)\r\n if dropdown_size > 0 and event.m_x > cli_rect.width - dropdown_size and \\\r\n event.m_y >= 0 and event.m_y < cli_rect.height and self._art: \r\n 
return\r\n \r\n self._action_pos = wx.Point(*event.GetPosition())\r\n self._action_item = self.FindToolForPosition(*event.GetPosition())\r\n\r\n if self._action_item:\r\n if self._action_item.state & AUI_BUTTON_STATE_DISABLED:\r\n \r\n self._action_pos = wx.Point(-1, -1)\r\n self._action_item = None\r\n return", "def _on_key_release(self, event):" ]
[ "0.7794178", "0.73346615", "0.7278942", "0.7235611", "0.71748966", "0.71505445", "0.7085906", "0.70397234", "0.7038093", "0.69910294", "0.69589084", "0.69151455", "0.69036335", "0.68575525", "0.6837659", "0.6815109", "0.68076855", "0.67690843", "0.6741163", "0.67272973", "0.67102", "0.669898", "0.6693153", "0.6680007", "0.6671771", "0.6617052", "0.65825695", "0.6563974", "0.65409344", "0.6474942" ]
0.8144935
0
Override this method to respond to the mouse wheel spinning when the mouse is being held down over the element.
def mouse_wheel_down(self):
    if not self.scroll_element is None:
        self.scroll_element.mouse_wheel_down()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_wheel(self, e): # pragma: no cover\n super(TraceView, self).on_mouse_wheel(e)\n if e.modifiers == ('Alt',):\n start, end = self._interval\n delay = e.delta * (end - start) * .1\n self.shift(-delay)", "def on_mouse_press(self, event):\n self.on_mouse_wheel(event)", "def mouse_wheel_up(self):\n if not self.scroll_element is None:\n self.scroll_element.mouse_wheel_up()", "def ev_mousewheel(self, event: MouseWheel) -> None:", "def wheelEvent(self, ev):\n if ev.type() == QtCore.QEvent.Wheel:\n ev.ignore()", "def mouse_right_up(self):\n pass", "def mouse_right_down(self):\n pass", "def mouse_left_up(self):\n pass", "def ev_mousewheel(self, event: tcod.event.MouseWheel) -> T | None:", "def mouseReleaseEvent(self, event):\n super(QIntSpinner3DS, self).mousePressEvent(event)\n super(QIntSpinner3DS, self).mouseReleaseEvent(event)\n self.unsetCursor()", "def ev_MOUSEUP(self, event):", "def handle_mouse_press(self, event):", "def ev_MOUSEDOWN(self, event):", "def mouse_middle_down(self):\n pass", "def mouse_wheel(self, event):\n\n if event.num == 5 or event.delta == -120:\n event.widget.yview_scroll(1, UNITS)\n self.tablerowheader.yview_scroll(1, UNITS)\n if event.num == 4 or event.delta == 120:\n if self.canvasy(0) < 0:\n return\n event.widget.yview_scroll(-1, UNITS)\n self.tablerowheader.yview_scroll(-1, UNITS)\n self.redrawVisible()\n return", "def on_mouse_release(self, x, y, button):\n pass", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def spinAround(self):", "def wheelEvent(self, ev):\n\n # Check if we're in auto Zoom mode\n if self.__zooming:\n # we're zooming\n if (ev.angleDelta().y() > 0):\n self.zoom(ev.pos(), 1)\n else:\n self.zoom(ev.pos(), -1)\n\n else:\n # not zooming - pass wheel event on\n self.mouseWheel.emit(self, ev)", "def wheel(self):\n if self.__enabled and self.__indicator.isVisible():\n self.__stopScrolling()\n return True\n \n return False", "def mouse_left_down(self):\n pass", "def wheel(ticks):\n m = PyMouse()\n m.scroll(ticks)", "def mouse_wheelEvent(self, e):\n if self.image is not None:\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n if modifiers == QtCore.Qt.ControlModifier:\n wheel_counter = e.angleDelta()\n if wheel_counter.y() / 120 == -1:\n if self.width_result_image == 1000:\n pass\n else:\n self.width_result_image -= 100\n\n if wheel_counter.y() / 120 == 1:\n if self.width_result_image == 4000:\n pass\n else:\n self.width_result_image += 100\n self.show_to_window()", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def mouse_out(self):\n pass", "def OnMouse(self, event):\n\n self.Refresh()\n event.Skip()", "def mouse_middle_up(self):\n pass", "def mousePressEvent(self, ev):\n super(PlotObject, self).mousePressEvent(ev)\n self._downpos = self.mousePos" ]
[ "0.71962875", "0.71476215", "0.7129475", "0.69585156", "0.6720184", "0.6612791", "0.6487933", "0.642528", "0.6412528", "0.64030284", "0.6369889", "0.6330504", "0.6318762", "0.6288254", "0.62793976", "0.6234082", "0.62089163", "0.6207074", "0.6181349", "0.61734873", "0.6170471", "0.61514664", "0.6148164", "0.6143475", "0.61367214", "0.61156565", "0.60843134", "0.60785127", "0.60673976", "0.6051141" ]
0.7533816
0
Returns the pointer to the GUI object that is under the screen coordinates passed in via the coordinates parameter. current_best should be left as None unless this method is calling itself recursively.
def handle_input(self, coordinates, current_best = None):
    if self.disable:
        return current_best
    if self.is_coords_in_bounds(coordinates):
        if current_best is None or self.z <= current_best.z:
            current_best = self
    else:
        if self._currently_hovered:
            self.mouse_out()
            self._currently_hovered = False
        self.mouse_not_over()
    for child in self.children:
        current_best = child.handle_input(coordinates, current_best)
    return current_best
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_gui(self):\n return self._current_gui", "def getlocalbestcoordinate(self):\n return self.localbest.coordinate", "def get_below(self):\n current_index = ALL_WINDOWS.index(self)\n if current_index == 0:\n return BASE_SCREEN\n\n return ALL_WINDOWS[current_index - 1]", "def _get_current_object(self):\n loc = object.__getattribute__(self, '_Proxy__local')\n if not hasattr(loc, '__release_local__'):\n return loc(*self.__args, **self.__kwargs)\n try:\n return getattr(loc, self.__name__)\n except AttributeError:\n raise RuntimeError('no object bound to {0.__name__}'.format(self))", "def _get_current_object(self):\n if not hasattr(self.__local, '__release_local__'):\n return self.__local()\n try:\n return getattr(self.__local, self.__name__)\n except AttributeError:\n raise RuntimeError('no object bound to %s' % self.__name__)", "def _get_current_object(self):\n if not hasattr(self.__local, '__release_local__'):\n return self.__local()\n try:\n return getattr(self.__local, self.__name__)\n except AttributeError:\n raise RuntimeError('no object bound to %s' % self.__name__)", "def get_current(self):\n return self.current", "def current_swing_mode(self):\n return None", "def find_best(self, metrics_eval_func=None):\n eval_func = metrics_eval_func or self.metrics_eval_func\n if not self.grid_points:\n raise RuntimeError(\"GridSearchResults are empty, cannot find a best point\")\n best = self.grid_points[0]\n for point in self.grid_points:\n if eval_func(point.metrics, best.metrics):\n best = point\n return best", "def getMayaMainPtr():\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n return shiboken.wrapInstance(long(ptr), QtGui.QWidget)", "def current(self) -> Union['_Imagine', None]:\n return self.top[-1] if self.top else None", "def get_active(self):\n selections = self.view.selectedIndexes()\n if len(selections) == 0:\n return None\n\n return selections[0].internalPointer().obj", "def current_window(self):\n pass", "def get_current(self) -> typing.Any:\n\n return self.current_obj", "def get_current_focus():\n\n grab = Gtk.grab_get_current()\n if grab:\n return grab\n\n for win in Gtk.Window.list_toplevels():\n if win.get_property('has-toplevel-focus'):\n return win.get_focus()\n return None", "def get_under_pointer(cls):\n\n\t\tif cls._interface:\n\t\t\twindow = cls._interface.get_under_pointer()\n\t\t\tif window:\n\t\t\t\treturn cls(window)\n\t\t\telse:\n\t\t\t\treturn None\n\t\telse:\n\t\t\traise NotImplementedError('Unsupported platform')", "def get_current(self, event=None):\n childes = self.nb.winfo_children() # return the list objects of child widgets of notebook[tab widget]\n return childes[self.nb.index('current')].winfo_children()[0]", "def get_current_location(self):\n # switches to focused element or <body> if no element focused\n active_el = self._driver.switch_to.active_element\n # possible improvement: more advanced than active_el.location\n x = active_el.location[\"x\"]\n y = active_el.location[\"y\"]\n loc = {\"x\": x, \"y\": y}\n\n return loc", "def get_current_window_hwnd():\n\n return win32gui.GetForegroundWindow()", "def getCurrentSelection():\n node = cmds.ls(sl=True)\n if node:\n node = node[0]\n if cmds.nodeType(node) == 'alembicHolder':\n shape = node\n return shape\n else:\n relatives = cmds.listRelatives(node, shapes=True, f=1)\n if relatives:\n for i in relatives:\n if cmds.nodeType(i) == \"alembicHolder\":\n shape = i\n return shape\n return None", "def activeWindow(self):\n raise RuntimeError('Not implemented')\n \n return None # __IGNORE_WARNING_M831__", "def 
get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]", "def _get_current_plot_item(self):\n return self.io_model.img_dict_keys[self.data_opt - 1]", "def current_control(self):\n return self.layout.current_control", "def getCurrentNodeEditorWidget(self):\n activeSubWindow = self.mdiArea.activeSubWindow()\n if activeSubWindow:\n return activeSubWindow.widget()\n return None", "def getCurrentNodeEditorWidget(self):\n activeSubWindow = self.mdiArea.activeSubWindow()\n if activeSubWindow:\n return activeSubWindow.widget()\n return None", "def get_current_measurement(self):\n idx = self.measurementsListWidget.currentRow()\n key = list(self.mgr.obj.measurements)[idx]\n return self.mgr.obj.measurements[key]", "def get_active(cls):\n\n\t\tif cls._interface:\n\t\t\twindow = cls._interface.get_active()\n\t\t\tif window:\n\t\t\t\treturn cls(window)\n\t\t\telse:\n\t\t\t\treturn None\n\t\telse:\n\t\t\traise NotImplementedError('Unsupported platform')", "def getCurrent(self):\n return self.__current", "def get_above(self):\n current_index = ALL_WINDOWS.index(self)\n if current_index == len(ALL_WINDOWS) - 1:\n return None\n\n return ALL_WINDOWS[current_index + 1]" ]
[ "0.6390522", "0.58366036", "0.56894904", "0.54773855", "0.5454908", "0.5454908", "0.5435594", "0.53845", "0.53640234", "0.5357284", "0.53187394", "0.53144544", "0.53096515", "0.5299734", "0.52627397", "0.5261838", "0.52561283", "0.52342606", "0.52289695", "0.52089494", "0.52035946", "0.5201391", "0.52011603", "0.51900166", "0.51895213", "0.51895213", "0.5165874", "0.5163562", "0.51553", "0.51439756" ]
0.6283108
1
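The handle_input document in the row above is a recursive hit-test over a tree of GUI elements: every enabled element whose bounds contain the point competes, and the one with the lowest z value wins, with children checked after their parent. The following self-contained sketch re-implements just that selection logic for illustration; the Element class, its field names, and the demo tree are assumptions and not part of the original framework, and the hover bookkeeping (mouse_out / mouse_not_over / _currently_hovered) is omitted.

class Element:
    def __init__(self, x, y, width, height, z=0, disable=False):
        self.x, self.y, self.width, self.height = x, y, width, height
        self.z = z
        self.disable = disable
        self.children = []

    def is_coords_in_bounds(self, coords):
        cx, cy = coords
        return (self.x <= cx < self.x + self.width
                and self.y <= cy < self.y + self.height)

    def handle_input(self, coordinates, current_best=None):
        # Same selection rule as the dataset row: an enabled element containing
        # the point replaces current_best when its z is lower or equal.
        if self.disable:
            return current_best
        if self.is_coords_in_bounds(coordinates):
            if current_best is None or self.z <= current_best.z:
                current_best = self
        for child in self.children:
            current_best = child.handle_input(coordinates, current_best)
        return current_best

root = Element(0, 0, 200, 200, z=10)
button = Element(20, 20, 60, 30, z=5)
root.children.append(button)
print(root.handle_input((30, 30)) is button)   # True: the child has the lower z, so it is on top
print(root.handle_input((150, 150)) is root)   # True: only the root contains this point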
Must be called at the start of the execute method. Make sure that image has been set before calling this. A None image will draw a generic button.
def gui_init(self):
    GUI_element.gui_init(self)
    self.hover_sound = False
    if not self.image is None:
        self.generic_button = False
        self.width = self.image.width if self.width == 0 else self.width
        self.height = self.image.height if self.height == 0 else self.height
    else:
        # Set up a generic button
        self.generic_button = True
        self.image = self.game.core.media.gfx['gui_button_generic_background']
        self.draw_strategy = "gui_button"
        # fixed height
        self.height = 30
        # Create the text
        self.generic_button_text_object = Text(self.game.core.media.fonts["generic_buttons"], self.x, self.y + (self.height / 2), TEXT_ALIGN_CENTER, self.generic_button_text)
        self.generic_button_text_object.z = self.z - 1
        self.generic_button_text_object.colour = (1.0,1.0,1.0)
        # Set up the width, if we have a larger than normal width then we want to centre the text.
        if self.width < self.generic_button_text_object.text_width + 20:
            self.width = self.generic_button_text_object.text_width + 20
        self.generic_button_text_object.x += (self.width / 2)
    self.sequence_count = self.image.num_of_frames
    self.draw_strategy_call_parent = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_image(self):\n button = self.buttons.checkedButton()\n if button is None:\n return\n\n button.click()", "def boutton(self,img1,x,y):\r\n self.button.append(self.creat_image(img1,x,y))", "def showBtnImg(*args, **kwargs):\n\targs[0].get_image().show()", "def setup_button_run(self):\n run_icon = tk.PhotoImage(file = self.run_icon)\n self.button_run = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = run_icon,\n command = self.run_world)\n self.button_run.image = run_icon\n self.button_run.grid(row = 0, column = 2, sticky = tk.W)", "def change_button_img_to_null(self, null_img=None):\n null_img = self.null_img\n self.button1.configure(image=null_img)\n self.button2.configure(image=null_img)\n self.button3.configure(image=null_img)\n\n self.button4.configure(image=null_img)\n self.button5.configure(image=null_img)\n self.button6.configure(image=null_img)\n\n self.button7.configure(image=null_img)\n self.button8.configure(image=null_img)\n self.button9.configure(image=null_img)", "def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_img, self.msg_img_rect)", "def draw_button(self):\r\n self.surface.fill(self.button_color, self.rect)\r\n self.surface.blit(self.msg_image, self.msg_image_rect)", "def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])", "def testDrawDoesNotCrash(self):\n my_button = buttonsprite.ButtonSprite()\n my_button.rect.size = (64, 24)\n my_button._createImage()\n my_button.setMode('inactive')\n my_button._draw()\n my_button.setMode('highlighted')\n my_button._draw()\n my_button.setMode('pressed')\n my_button._draw()\n my_button.setMode('normal')\n my_button._draw()", "def bone(self):\n root = tkinter.Toplevel()\n button = ttk.Button(root)\n photo = tkinter.PhotoImage(file='C:/Users/shepheam/RobotTeamProject/assets/images/dog_treats.gif')\n button.image = photo\n button.grid()\n button['command'] = lambda: print('Good boy!')", "def generate_buttons(self):\n raise Exception('Implement me!')", "def __init__(self, gui, imgs, x, y, callback=None, label=None,\r\n label_pos='left', shortcut=None):\r\n if not (type(imgs) is list or type(imgs) is tuple):\r\n imgs = [imgs]\r\n shapes = []\r\n for i in imgs:\r\n if not os.path.isfile(i):\r\n i = gui.icon_path + i\r\n tex = pi3d.Texture(i, blend=True, mipmap=False)\r\n shape = pi3d.Sprite(camera=gui.camera, w=tex.ix, h=tex.iy, z=2.0)\r\n shape.set_draw_details(gui.shader, [tex])\r\n shapes.append(shape)\r\n super(Button, self).__init__(gui, shapes, x, y, callback=callback,\r\n label=label, label_pos=label_pos, shortcut=shortcut)", "def render(self):\n self.delete()\n self.__create_background(self._imfname)\n # XXX must be last after successor implementation, but works without this line\n #self.c.event_generate(\"<Configure>\")\n #self.c.update_idletasks()", "def on_draw_over_image(self):", "def paintButtons(self):\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK)\n buttonOK = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"ok\"], self.showTooltip, self.removeTooltip)\n buttonOK.topleft = [770, 30]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeConfiguration)\n self.window.add_child(buttonOK)\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL)\n buttonCancel = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"cancel\"], self.showTooltip, self.removeTooltip)\n buttonCancel.topleft = [890, 
30]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeConfiguration)\n self.window.add_child(buttonCancel)", "def paint_project_button(self, running):\r\n if running:\r\n self.btn_start.setIcon(QIcon(\r\n os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"red_btn.png\"))))\r\n else:\r\n self.btn_start.setIcon(QIcon(\r\n os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"green_btn.png\"))))\r\n self.btn_start.setIconSize(QSize(\r\n self.btn_start.width(), self.btn_start.height()))", "def draw_buttons(self): \n self.button_frame = Frame(self)\n\n # -- getting images\n prev_image = PhotoImage(file=self.directory + '/images/previous.png')\n prev_image = prev_image.subsample(10, 10) \n\n next_image = PhotoImage(file=self.directory + '/images/next.png')\n next_image = next_image.subsample(10, 10) \n \n # -- adding image to label\n prev_label = ttk.Label(self.button_frame, image = prev_image)\n next_label = ttk.Label(self.button_frame, image = next_image)\n\n prev_label.image = prev_image\n next_label.image = next_image\n\n # -- adding a twitter hide button\n self.twitter_hide = ttk.Button(self.button_frame, text='hide twitter')\n \n # -- adding the buttons to the frame \n prev_label.pack(side=RIGHT, padx=75) \n self.twitter_hide.pack(side=RIGHT, padx=200) \n next_label.pack(side=LEFT, padx=75)\n\n # -- adding bindings and commands\n prev_label.bind('<Button-1>', self.prev_article)\n next_label.bind('<Button-1>', self.next_article) \n self.twitter_hide.config(command=self.hide_twitter) \n\n # -- adding frame to canvas\n self.button_frame.pack(side=BOTTOM, fill=X)", "def create_next_button(self, img_next, but_pos):\n tk.Button(self.top, height=50, width=50, image=img_next, \n command=lambda: self.retrieve_input()).grid(row=but_pos[0], \n column=but_pos[1])", "def blit_me(self):\n self.start_button.blit_me()\n self.title.blit_me()\n self.screen.blit(self.unicorn_img, self.rect)", "def help_main():\n global help_window, my_iterator, iterable, canvas, forward_button, picture_lst, image\n my_iterator = iter(picture_lst)\n pill_image = Image.open(image_base)\n image = ImageTk.PhotoImage(pill_image)\n\n canvas = Canvas(help_window, width=700 + 15, height=490 + 15)\n canvas.create_image(10, 10, anchor=NW, image=image)\n\n canvas.place(x=170, y=10)\n\n forward_button = ttk.Button(help_window, text=\" Вперед \", command=forward)\n forward_button.place(x=910, y=250)\n help_window.mainloop()", "def __init__(self, posX, posY, normal, hovered, pressed, command) :\n self.posX = posX\n self.posY = posY\n\n self.imgNormal = pygame.image.load(normal).convert_alpha()\n self.posButton = self.imgNormal.get_rect()\n self.imgPressed = pygame.image.load(pressed).convert_alpha()\n self.imgHovered = pygame.image.load(hovered).convert_alpha()\n\n self.images = (self.imgNormal, self.imgPressed, self.imgHovered)\n\n self.posButton.x = self.posX\n self.posButton.y = self.posY\n\n self.blitImage = self.imgNormal\n\n self.command = command\n\n self.buttonSize = self.imgNormal.get_size()", "def on_image(self, image):", "def setUpGUI(self):\n WHITE = '#ffffff'\n # Set up the GUI so that we can paint the fractal image on the screen\n canvas = Canvas(self.window, width=self.width, height=self.height, bg=WHITE)\n canvas.pack()\n canvas.create_image((self.width/2, self.height/2), image=self.img, state=\"normal\")", "def change_start_button(event):\n img_start_button_mouse_over = PhotoImage(\n 
file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\start_new_game_raised_active.png\")\n lbl_start_game.config(image=img_start_button_mouse_over)\n lbl_start_game.image = img_start_button_mouse_over\n lbl_start_game.grid(row=8, column=1, columnspan=8, pady=6)", "def normal_run(self):\n super().events_buttons(back=True)\n self.events_delete_btns()\n self.draw()", "def on_run_button(self, event):\n text = _(u\"Run button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.run_command()", "def initGui(self):\n\n icon_path = ':/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'PacSafe'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def initGui(self):\n\n icon_path = ':/plugins/Hybriddekning/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Hybriddekning'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def run_frame(self, ti, img):\n pass", "def initGui(self):\n\n icon_path = ':/plugins/Integracion/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u''),\n callback=self.run,\n parent=self.iface.mainWindow())" ]
[ "0.70161515", "0.70027477", "0.6839604", "0.677396", "0.67521006", "0.6693708", "0.6679425", "0.66226333", "0.63661647", "0.6363211", "0.6360723", "0.63584304", "0.6267832", "0.624993", "0.6242955", "0.62371415", "0.62187743", "0.62062895", "0.6200216", "0.61864775", "0.61708206", "0.6153948", "0.6153325", "0.61517", "0.6142982", "0.6132538", "0.6130772", "0.6095819", "0.6095528", "0.6094579" ]
0.70079434
1
Called when the slider is dragged. Designed to be overridden to add custom behaviour.
def slider_dragged(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_slider(self, instance, value):\n self.slider.bind(value=self.update_proxy)\n self.bind(pos=self.hack_position)\n self.slider.bind(pos=self.hack_position)", "def mouseDragged(self, point, delta):\n pass", "def dragEnterEvent(self, event):", "def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos = event.y\n self.xpos = event.x", "def mouseMoveEvent (self, event):\n self.itemMoved = True\n super(DiagramItem, self).mouseMoveEvent(event)", "def mouseMoveEvent(self, e):\n if self.mousePressed:\n Globals.dragObject = QTextDrag('PKSampler: dragging a track', self)\n Globals.dragObject.trackFrame = self\n Globals.dragObject.dragCopy()", "def handle_event(self, event):\n if event.type != MOUSEMOTION:\n return\n self.model.slider.left = event.pos[0]", "def drag(self,x,y):\n self.x=x\n self.y=y", "def mouseReleaseEvent (self, event):\n if self.itemMoved:\n self.parentWidget.DataChanged.emit()\n self.itemMoved = False; \n super(DiagramItem, self).mouseReleaseEvent(event)", "def drag(self, event):\n if event.button:\n try:\n x_loc, y_loc = self.appWindow.spec_cv.mouse(event)\n print(x_loc, y_loc)\n trackNo, updated_track =\\\n self.model.updateTrackDrag(x_loc, y_loc,\\\n self.locked_track, self.x_high)\n self.appWindow.spec_cv.updateTrack(trackNo, updated_track)\n self.appWindow.spec_cv.redrawTracks()\n except TypeError:\n pass", "def on_dragg(self, event):\n if str(event.lastevent.button) == \"MouseButton.LEFT\":\n mX = event.xdata\n mY = event.ydata\n if mX and mY:\n if self.current_point is not None:\n self.x[self.current_point] = mX\n self.y[self.current_point] = mY\n self.redraw()", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def OnMouse(self, event):\n if not event.Dragging():\n self._dragPos = None\n if self.HasCapture():\n self.ReleaseMouse()\n return\n else:\n if not self.HasCapture():\n self.CaptureMouse()\n\n if not self._dragPos:\n self._dragPos = event.GetPosition()\n else:\n pos = event.GetPosition()\n displacement = self._dragPos - pos\n self.SetPosition(self.GetPosition() - displacement)", "def startDrag(self):\n data = QtCore.QMimeData()\n data.versionId = self.id\n data.controller = self.scene().controller\n drag = QtGui.QDrag(self.scene().views()[0])\n drag.setMimeData(data)\n drag.setPixmap(CurrentTheme.VERSION_DRAG_PIXMAP)\n drag.start()", "def drag(self, x, y, btn):\n if self._doZoom:\n return self._zoom.drag(x, y, btn)\n else:\n return super(ZoomAndSelect, self).drag(x, y, btn)", "def on_progress_slider_value_changed(self):\n if self.progress_slider_pressed is True and self.playlist.current is not None:\n self.playlist.current.move2position_bytes(self.view.progress_bar.value())", "def _on_press(self, event):\n if tk.DISABLED in self.state():\n return\n\n region = self.identify_region(event.x, event.y)\n\n if self._drag_cols and region == 'heading':\n self._start_drag_col(event)\n elif self._drag_rows and region == 'cell':\n self._start_drag_row(event)", "def mousePressEvent(self, event):\n self.dragging = True\n self.moved = False\n self.parent.setCursor(QtCore.Qt.ClosedHandCursor)", "def dropEvent(self, de):\n # dragging a track\n if hasattr(Globals.dragObject, \"trackFrame\"):\n de.accept()\n trackFrame = Globals.dragObject.trackFrame\n oldParent = trackFrame.parentWidget()\n if oldParent:\n args = (trackFrame, self, oldParent.parentWidget())\n else:\n args = (trackFrame, self, None)\n self.emit(PYSIGNAL('dropped'), (args))\n # not yet used\n 
#Animation.animate(trackFrame, self, doneFunc=self.slotAnimationDone)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def drop(self, event):\n self.config(cursor='arrow')", "def _on_motion(self, event):\n if not self._visual_drag.winfo_ismapped():\n return\n\n if self._drag_cols and self._dragged_col is not None:\n self._drag_col(event)\n elif self._drag_rows and self._dragged_row is not None:\n self._drag_row(event)", "def _spin_changed(self, event):\n val = event.GetValue()\n if val < self.minval:\n self.minval = val\n elif val > self.orig_min:\n self.minval = self.orig_min\n if val > self.maxval:\n self.maxval = val\n elif val < self.orig_max:\n self.maxval = self.orig_max\n self.slider.SetValue(100*(val-self.minval)/(self.maxval-self.minval))\n if self.handler:\n self.handler(event)\n event.Skip()", "def mouseMoveEvent(self, mouse_event):\r\n pos = self.mapToScene(mouse_event.pos())\r\n # Limiting playhead movement\r\n if pos.x() >= SLIDER_START and pos.x() <= self.slider_width:\r\n self._playhead.setX(pos.x() - SLIDER_START)", "def drag_motion(self, widget, context, x, y, t):\n \n if self.mouse_click_point:\n self.dy = y - self.mouse_click_point\n else:\n self.mouse_click_point = y", "def splitterMoved(self, p_int, p_int_1): # real signature unknown; restored from __doc__\n pass", "def move_slider(self, mouse_x):\n\n for slider in self._menu_items:\n if slider['menu_type'] == 'Slider':\n if slider['grabbed'] == True:\n if mouse_x > slider['xpos'] and mouse_x < (slider['xpos']\n +slider['width']):\n slider['rect'].move_ip(mouse_x-slider['rect'].centerx, 0)", "def MoveToSlide(self, event):\n pass", "def OnLeftUp(self, event): # ANDY PAN\n if event.ShiftDown():\n event.Skip()\n return\n self.last_drag_x = self.last_drag_y = None\n self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))\n # turn off drag\n self.was_dragging = False\n # force PAINT event to remove selection box (if required)\n # self.Update()\n event.Skip()" ]
[ "0.71016735", "0.6670154", "0.65497184", "0.6326585", "0.6050225", "0.6031739", "0.6006089", "0.6001614", "0.5967166", "0.5907364", "0.5884901", "0.5763239", "0.57620794", "0.576003", "0.57411623", "0.56494236", "0.56295526", "0.56161594", "0.56027746", "0.56000704", "0.56000704", "0.559759", "0.5549773", "0.5545485", "0.5511523", "0.550844", "0.5505616", "0.5496143", "0.5480517", "0.54733586" ]
0.882381
0
Function that ensures that the anime in the user's chosen listbox isn't in the all-anime listbox.
def update_libox_all_anime():
    ## print("update_libox_all_anime is runnig")
    a = libox_all_anime.get(0, END)
    ## print("the contents or a :", a)
    b = []
    for x in a:
        ## print("this is x: ", x)
        b.append(x)
        ## print(x)
    for x in updated_your_anime():
        if x in b:
            b.remove(x)
    c = sorted(b)
    ## print(b)
    libox_all_anime.delete(0, END)
    for x in c:
        libox_all_anime.insert(END, x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_list(*args):\n\n search_term = search_var.get()\n all_anime = load(open(Save_file_dir.joinpath(\"anime_save.p\"), \"rb\"))\n\n all_anime_list = []\n for key, value in all_anime.items():\n all_anime_list.append(key)\n\n libox_all_anime.delete(0, END)\n\n for item in all_anime_list:\n if search_term.lower() in item.lower():\n libox_all_anime.insert(END, item)", "def is_in_list(self, atypical_event):\n\n bool_is_in_list = False\n if atypical_event in self.set_atypical_events:\n bool_is_in_list = True\n\n return bool_is_in_list", "def valid_scene_choices(self):\n newbies = [ob.id for ob in self.newbies]\n claimlist = [ob.id for ob in self.claimlist if ob.id not in newbies]\n choices = self.valid_choices\n if newbies:\n choices = choices.exclude(id__in=newbies)\n if claimlist:\n choices = choices.exclude(id__in=claimlist)\n return list(choices)", "def canUnlockAll(boxes):\n res = []\n res.append(0)\n if boxes[0] == []:\n return False\n for i in range(len(boxes)):\n for j in boxes[i]:\n if j not in res and j < len(boxes) and j != i:\n res.append(j)\n if len(res) != len(boxes):\n return False\n return True", "def checkAffiliation(self,event=None):\r\n if self.affiliation.getVal() not in self.affilList:\r\n self.affilList.append(self.affiliation.getVal())\r\n self.affilList.sort()\r\n self.affiliation.updateVals(self.affilList)", "def canUnlockAll(boxes):\n if boxes[0] == [] or not isinstance(boxes, list):\n return False\n\n opens = [0]\n for idx, box in enumerate(boxes):\n for key in box:\n if key not in opens and key in range(0, len(boxes)) and key != idx:\n opens.append(key)\n return len(opens) == len(boxes)", "def test_multiple_values_invalid(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"J\", \"G\", \"foo\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\" selected>John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\" selected>George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def isUnlisted(self):\n return self.f1 is 'u'", "def check_for_entries():\n if Entry.select():\n return True\n\n else:\n clear()\n input('** Woops! 
Looks like there are no entries to lookup **\\n'\n 'Press enter to return to main menu.')", "def check_completeness(ISM):\n for item in ISM:\n if item not in ['A', 'T', 'C', 'G', '-']:\n return False\n return True", "def validateSelection(self, exportItems):\n\n invalidItems = []\n # Look for selected items which arent of the correct type\n for item in exportItems:\n if not item.sequence() and not item.trackItem():\n invalidItems.append(item)\n\n return len(invalidItems) < len(exportItems)", "def check_deletable(self):\n item = self.albums_artists.currentItem()\n self.delete_button.setEnabled(False)\n if item in self.new_artists:\n self.delete_button.setEnabled(True)", "def passengers(not_checked_in, checked_in):\n while not_checked_in:\n current_passenger = not_checked_in.pop() # remove last item on the list\n\n # Simulate checking a passenger inself.\n print(\"Checking in passenger: \" + current_passenger)\n checked_in.append(current_passenger) # Add to the check in list", "def check_for_list(check):", "def isspeech(phone):\n return phone not in OTHERS", "def not_use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i excluded\" % frame_selected)\n item.setBackground(self.background_excluded)\n item.setForeground(QtGui.QColor(255, 255, 255))\n self.index_included[index_selected] = False\n self.frame_selector.setPhoto(self.frame_index)", "def test_check_inputs_hospital_prefs_all_nonempty(game):\n\n hospital = game.hospitals[0]\n hospital.prefs = []\n\n with pytest.warns(PlayerExcludedWarning) as record:\n game._check_inputs_player_prefs_nonempty(\"hospitals\", \"residents\")\n\n assert len(record) == 1\n assert hospital.name in str(record[0].message)\n\n if game.clean:\n assert hospital not in game.hospitals", "def handle_invalid_inputs(question,my_list):\n\n final_answer = None\n while final_answer not in my_list:\n final_answer = input(question).lower()\n\n return final_answer", "def canUnlockAll(boxes):\n\n BoxesNumber = len(boxes)\n keys = [0]\n for key in keys:\n UnitBox = boxes[key]\n for NewKey in UnitBox:\n if NewKey not in keys and NewKey < BoxesNumber:\n keys.append(NewKey)\n\n if BoxesNumber == len(keys):\n return True\n else:\n return False", "def sjekkTallISekvens(listeTall, listeSekvens):\n for tall in listeSekvens:\n if tall not in listeTall:\n return False\n return True", "def check_for_unguarded_rooms(museum):\r\n\tempty_rooms = []\r\n\r\n\tfor row_idx in range(len(museum)):\t\r\n\t\t\r\n\t\tfor item_idx in range(len(museum[row_idx])): #Go back and fix this to be enumerate instead\r\n\t\t\t\r\n\t\t\tif museum[row_idx][item_idx] == \"0\":\r\n\t\t\t\tempty_rooms.append([row_idx, item_idx])\r\n\r\n\t# for row_idx, row_value in enumerate(museum):\r\n\t# \tfor item_idx, item_value in enumerate(row):\r\n\t# \t\tif item_value == \" \":\r\n\t# \t\t\tprint(item)\r\n\t# \t\t\tempty_rooms.append([row_idx, item_idx]) # need index\r\n\r\n\tif not empty_rooms:\r\n\t\tprint(\"true\")\r\n\t\r\n\telse:\r\n\t\tprint(\"false\")\r\n\r\n\t\tfor room in empty_rooms:\r\n\t\t\tprint(str(room[0]) + \" \" + str(room[1]))", "def verify_anime(animes: List[AnimeThemeAnime], alid: int, alsite: AnimeListSite) -> Optional[AnimeThemeAnime]:\n for anime in animes:\n for resource in anime['resources']:\n if resource['site']==alsite and resource['external_id']==alid:\n return anime\n return None", "def find_album(self):\n item = 
self.clementine_albums.currentItem()\n if not item:\n self.focus_albums()\n item = self.clementine_albums.currentItem()\n if item.text(0) in self.albums_map[self.c_artist]:\n ok = qtw.QMessageBox.question(self, self.appname, 'Album already has a '\n 'match - do you want to reassign?',\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.Yes)\n if ok == qtw.QMessageBox.No:\n return\n self.albums_map[self.c_artist].pop(item.text(0))\n # select albums for self.a_artist and remove the ones that are already matched\n albums = dmla.list_albums_by_artist('', self.a_artist, 'Titel')\n album_list = []\n for album in albums:\n test = album.id\n found = False\n for a_item in self.albums_map[self.c_artist].values():\n if a_item[1] == test:\n found = True\n break\n if not found:\n album_list.append((build_album_name(album), album.id))\n if album_list:\n albums = [x[0] for x in album_list]\n selected, ok = qtw.QInputDialog.getItem(self, self.appname, 'Select Album',\n albums, editable=False)\n if ok:\n a_item = self.albums_albums.findItems(\n str(album_list[albums.index(selected)][1]),\n core.Qt.MatchFixedString, 2)[0]\n c_year = str(item.data(0, core.Qt.UserRole))\n if c_year:\n a_year = a_item.text(1)\n if c_year != a_year:\n ask = f\"Clementine year ({c_year}) differs from Albums year ({a_year})\"\n ok = qtw.QMessageBox.question(self, self.appname, f\"{ask}, replace?\",\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.Yes)\n if ok == qtw.QMessageBox.Yes:\n a_item.setText(1, c_year)\n\n self.albums_to_update[self.c_artist].append(\n (a_item.text(0), a_item.text(1), int(a_item.text(2)), False, []))\n self.update_item(a_item, item)\n return\n self.add_album()", "def option2(movies):\r\n playerName = input ('Please select an actor:\\n')\r\n flag = 0\r\n for k in movies.keys():\r\n #if the player is in the dictionary's keys\r\n if playerName in movies[k]:\r\n flag = 1\r\n if flag == 0:\r\n print(\"Error\")\r\n return\r\n otherPlayers = []\r\n otherPlayers = set(otherPlayers)\r\n for x in movies.keys():\r\n #the condition verify if the actor is one of the values\r\n if playerName in movies[x]:\r\n otherPlayers = set(otherPlayers | movies[x])\r\n otherPlayers.remove(playerName)\r\n #sorting according the 'abc' serial\r\n otherPlayers = sorted(otherPlayers)\r\n #if they are no actors in the group\r\n if not otherPlayers:\r\n print(\"There are no actors in this group\\n\")\r\n return \r\n print(', '.join(otherPlayers)) \r\n return", "def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()", "def canUnlockAll(boxes):\n for key in range(1, len(boxes) - 1):\n res = False\n for index in range(len(boxes)):\n res = key in boxes[index] and key != index\n if res:\n break\n if res is False:\n return res\n return True", "def canUnlockAll(boxes):\n if type(boxes) is not list or type(boxes[0]) is not list\\\n or len(boxes) == 0 or len(boxes[0]) == 0\\\n or type(boxes[0][0]) is not int:\n return False\n if (len(boxes[0]) == 1 and boxes[0][0] == 0):\n return True\n keys = [0]\n for i in range(len(boxes[0])):\n if boxes[0][i] < len(boxes):\n keys.append(boxes[0][i])\n break\n for i in keys:\n for d in range(len(boxes[i])):\n if not lookup(keys, boxes[i][d]) and boxes[i][d] < len(boxes):\n keys.append(boxes[i][d])\n return check(boxes, keys)", "def test_filter_not_available_plugins(plugin_dialog_constructor):\n item = plugin_dialog_constructor.available_list.item(0)\n widget = plugin_dialog_constructor.available_list.itemWidget(item)\n if 
widget:\n assert not widget.action_button.isEnabled()\n assert widget.warning_tooltip.isVisible()\n\n item = plugin_dialog_constructor.available_list.item(1)\n widget = plugin_dialog_constructor.available_list.itemWidget(item)\n assert widget.action_button.isEnabled()\n assert not widget.warning_tooltip.isVisible()", "def test_render_none(self):\n self.check_html(\n self.widget(choices=((\"\", \"Unknown\"),) + self.beatles),\n \"beatles\",\n None,\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"\">Unknown</option>\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def check_answer(chaine):\n l_chaine = list(chaine)\n if len(l_chaine) ==1:\n l_chaine.append(1)\n if l_chaine[0] in ['N', 'S', 'E', 'W', 'Q'] and len(l_chaine)<3:\n return l_chaine\n else:\n return None" ]
[ "0.5358605", "0.5259478", "0.5149276", "0.51280844", "0.51028097", "0.50867", "0.50787604", "0.5066245", "0.5039338", "0.5033899", "0.50224185", "0.5019272", "0.49440777", "0.49417982", "0.4912668", "0.49124354", "0.49081933", "0.4905151", "0.49043363", "0.48986632", "0.48858342", "0.48835546", "0.48676625", "0.48649767", "0.4846802", "0.481255", "0.47943947", "0.47845766", "0.47823116", "0.47717193" ]
0.58440846
0
Set up GenericRelations for a given actionable model. Needed because actstream's generic relationship setup functionality is brittle and unreliable.
def actstream_register_model(model):
    for field in ('actor', 'target', 'action_object'):
        generic.GenericRelation(Action,
            content_type_field='%s_content_type' % field,
            object_id_field='%s_object_id' % field,
            related_name='actions_with_%s_%s_as_%s' % (
                model._meta.app_label, model._meta.module_name, field),
        ).contribute_to_class(model, '%s_actions' % field)
        setattr(Action, 'actions_with_%s_%s_as_%s' % (
            model._meta.app_label, model._meta.module_name, field), None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_relation_types(self):\n pass", "def test_add_relation_type(self):\n pass", "def test_change_relation_types(self):\n pass", "def init_model(connection):\n db = connection\n\n for obj in common.__dict__.itervalues():\n if type(obj) == type and issubclass(obj, common.Model) and hasattr(obj, '__tablename__'):\n tablename = getattr(obj, '__tablename__')\n obj._object_store = Domain(db, tablename)\n collection_to_class[obj._object_store] = obj", "def setup_models(self):\n pass", "def create_intermediary_table_model(model):\n name = model.__name__ + 'Relation'\n \n class Meta:\n db_table = '%s_relation' % model._meta.db_table\n unique_together = (('tag', 'content_type', 'object_id'),)\n\n def obj_unicode(self):\n return u'%s [%s]' % (self.content_type.get_object_for_this_type(pk=self.object_id), self.tag)\n \n # Set up a dictionary to simulate declarations within a class \n attrs = {\n '__module__': model.__module__,\n 'Meta': Meta,\n 'tag': models.ForeignKey(model, verbose_name=_('tag'), related_name='items'),\n 'content_type': models.ForeignKey(ContentType, verbose_name=_('content type')),\n 'object_id': models.PositiveIntegerField(_('object id'), db_index=True),\n 'content_object': generic.GenericForeignKey('content_type', 'object_id'),\n '__unicode__': obj_unicode,\n }\n\n return type(name, (models.Model,), attrs)", "def create_models( self ):", "def _add_relations(self):\n relations = {\n 'manila:shared-db': 'mysql:shared-db',\n 'manila:amqp': 'rabbitmq-server:amqp',\n 'manila:identity-service': 'keystone:identity-service',\n 'manila:manila-plugin': '{{ metadata.package }}:manila-plugin',\n 'keystone:shared-db': 'mysql:shared-db',\n }\n super(ManilaPluginCharmDeployment, self)._add_relations(relations)", "def _add_relations(self):\n relations = {\n 'keystone:shared-db': 'percona-cluster:shared-db',\n 'swift-proxy:identity-service': 'keystone:identity-service',\n 'swift-storage:swift-storage': 'swift-proxy:swift-storage',\n 'glance:identity-service': 'keystone:identity-service',\n 'glance:shared-db': 'percona-cluster:shared-db',\n 'glance:object-store': 'swift-proxy:object-store'\n }\n super(SwiftProxyBasicDeployment, self)._add_relations(relations)", "def _add_relations(self):\n relations = {'keystone:shared-db': 'percona-cluster:shared-db',\n 'cinder:shared-db': 'percona-cluster:shared-db',\n 'cinder:amqp': 'rabbitmq-server:amqp',\n 'cinder:identity-service': 'keystone:identity-service'}\n super(KeystoneBasicDeployment, self)._add_relations(relations)", "def setUp(self):\n self.model = ModelBase(\n '__TestModel__' + self.mixin.__name__,\n (self.mixin,),\n {'__module__': self.mixin.__module__}\n )\n\n with connection.schema_editor() as schema_editor:\n schema_editor.create_model(self.model)", "def test_change_relation_type(self):\n pass", "def _do_relation(self):\n if self.chunks:\n ch = self.chunks[-1]\n for relation, role in ch.relations:\n if role == \"SBJ\" or role == \"OBJ\":\n self.relations[role][relation] = ch\n if ch.type in (\"VP\",):\n self.relations[ch.type][ch.relation] = ch", "def import_device_relations(self, obj, ci):\n _replace_relations(\n obj, ci, 'child', 'venture',\n self.venture_content_type, cdb.CI_RELATION_TYPES.CONTAINS,\n )\n _replace_relations(\n obj, ci, 'child', 'venture_role',\n self.venture_role_content_type, cdb.CI_RELATION_TYPES.HASROLE\n )\n _replace_relations(\n obj, ci, 'child', 'parent',\n self.device_content_type, cdb.CI_RELATION_TYPES.CONTAINS\n )", "def call( # type: ignore[override]\n self,\n instance: Model,\n step: 
builder.BuildStep,\n context: declarations.PostGenerationContext\n ) -> None:\n related_manager = getattr(instance, self.descriptor_name)\n # Get the right field names from the intermediary m2m table.\n source_field = related_manager.through._meta.get_field(\n related_manager.source_field_name\n )\n if isinstance(instance, source_field.related_model):\n # The source_field points to the instance's model.\n source = related_manager.source_field_name\n target = related_manager.target_field_name\n else:\n source = related_manager.target_field_name\n target = related_manager.source_field_name\n\n # Add the relation.\n for related_object in super().call(instance, step, context):\n related_manager.through.objects.create(\n **{source: instance, target: related_object}\n )", "def setup(cls):\n super().setup()\n cls.search_behaviour = cast(\n GenericSearchBehaviour, cls._skill.skill_context.behaviours.search\n )\n cls.tx_behaviour = cast(\n GenericTransactionBehaviour, cls._skill.skill_context.behaviours.transaction\n )\n cls.strategy = cast(GenericStrategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger", "def add_related_factories(self) -> None:\n for rel in get_model_relations(self.model, forward=False):\n if rel.many_to_many:\n continue\n # These are all reverse relations, meaning rel.model == self.model.\n factory_name = self._get_factory_name_for_model(rel.related_model)\n accessor_name = rel.get_accessor_name()\n if not hasattr(self.factory, rel.name):\n related_factory = RelatedFactory(\n factory=factory_name,\n factory_related_name=rel.field.name,\n accessor_name=accessor_name,\n related_model=rel.related_model\n )\n setattr(self.factory, rel.name, related_factory)", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def _add_relations(self):\n relations = {\n 'neutron-openvswitch:amqp': 'rabbitmq-server:amqp',\n 'neutron-openvswitch:neutron-plugin':\n 'nova-compute:neutron-plugin',\n 'neutron-openvswitch:neutron-plugin-api':\n 'neutron-api:neutron-plugin-api',\n # Satisfy workload stat:\n 'neutron-api:identity-service': 'keystone:identity-service',\n 'neutron-api:shared-db': 'percona-cluster:shared-db',\n 'neutron-api:amqp': 'rabbitmq-server:amqp',\n 'nova-compute:amqp': 'rabbitmq-server:amqp',\n 'nova-compute:image-service': 'glance:image-service',\n 'glance:identity-service': 'keystone:identity-service',\n 'glance:shared-db': 'percona-cluster:shared-db',\n 'glance:amqp': 'rabbitmq-server:amqp',\n 'keystone:shared-db': 'percona-cluster:shared-db',\n 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db',\n 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',\n 'nova-cloud-controller:identity-service': 'keystone:'\n 'identity-service',\n 'nova-cloud-controller:cloud-compute': 'nova-compute:'\n 'cloud-compute',\n 'nova-cloud-controller:image-service': 'glance:image-service',\n }\n if self._get_openstack_release() >= self.bionic_train:\n relations.update({\n 'placement:shared-db': 'percona-cluster:shared-db',\n 'placement:amqp': 'rabbitmq-server:amqp',\n 'placement:placement': 'nova-cloud-controller:placement',\n 'placement:identity-service': 'keystone:identity-service',\n })\n super(NeutronOVSBasicDeployment, self)._add_relations(relations)", "def _connectModel(self):\n pass", "def relationships(self):", "def test_get_relationship_templates(self):\n pass", "def construct(\n *,\n artifacts: types.ObjectArtifacts,\n model_schema: types.Schema,\n schemas: types.Schemas,\n) -> None:\n if 
artifacts.relationship.secondary is None:\n _foreign_key.set_(\n ref_model_name=artifacts.relationship.model_name,\n logical_name=artifacts.logical_name,\n model_schema=model_schema,\n schemas=schemas,\n fk_column=artifacts.fk_column,\n )\n else:\n table = _association_table.construct(\n parent_schema=model_schema,\n child_schema=artifacts.spec,\n schemas=schemas,\n tablename=artifacts.relationship.secondary,\n )\n facades.models.set_association(\n table=table, name=artifacts.relationship.secondary\n )", "def set_property_setters_for_actions_and_links(self, meta):\n\t\tfor doctype, fieldname, field_map in (\n\t\t\t(\"DocType Link\", \"links\", doctype_link_properties),\n\t\t\t(\"DocType Action\", \"actions\", doctype_action_properties),\n\t\t\t(\"DocType State\", \"states\", doctype_state_properties),\n\t\t):\n\t\t\thas_custom = False\n\t\t\titems = []\n\t\t\tfor i, d in enumerate(self.get(fieldname) or []):\n\t\t\t\td.idx = i\n\t\t\t\tif frappe.db.exists(doctype, d.name) and not d.custom:\n\t\t\t\t\t# check property and apply property setter\n\t\t\t\t\toriginal = frappe.get_doc(doctype, d.name)\n\t\t\t\t\tfor prop, prop_type in field_map.items():\n\t\t\t\t\t\tif d.get(prop) != original.get(prop):\n\t\t\t\t\t\t\tself.make_property_setter(prop, d.get(prop), prop_type, apply_on=doctype, row_name=d.name)\n\t\t\t\t\titems.append(d.name)\n\t\t\t\telse:\n\t\t\t\t\t# custom - just insert/update\n\t\t\t\t\td.parent = self.doc_type\n\t\t\t\t\td.custom = 1\n\t\t\t\t\td.save(ignore_permissions=True)\n\t\t\t\t\thas_custom = True\n\t\t\t\t\titems.append(d.name)\n\n\t\t\tself.update_order_property_setter(has_custom, fieldname)\n\t\t\tself.clear_removed_items(doctype, items)", "def relate(self, qs):\n model_map = {}\n item_map = {}\n for item in qs:\n object_id = getattr(item, self._object_id_field)\n content_type = getattr(item, self._content_type_field)\n model_map.setdefault(content_type, {}) \\\n [object_id] = item.id\n item_map[item.id] = item\n for ct, items_ in model_map.items():\n for o in ct.model_class().objects.select_related() \\\n .filter(id__in=items_.keys()).all():\n setattr(item_map[items_[o.id]],self._content_object_field, o)\n return qs", "def apply_relational_map(self, entity):\n for property_name in entity.__relational_map__:\n guide = entity.__relational_map__[property_name]\n \"\"\" :type: tori.db.mapper.RelatingGuide \"\"\"\n\n # In the reverse mapping, the lazy loading is not possible but so the proxy object is still used.\n if guide.inverted_by:\n collection = self.collection(guide.target_class)\n\n if guide.association in [AssociationType.ONE_TO_ONE, AssociationType.MANY_TO_ONE]:\n target = collection._api.find_one({guide.inverted_by: entity.id})\n\n entity.__setattr__(property_name, ProxyFactory.make(self, target['_id'], guide))\n elif guide.association == AssociationType.ONE_TO_MANY:\n proxy_list = [\n ProxyFactory.make(self, target['_id'], guide)\n for target in collection._api.find({guide.inverted_by: entity.id})\n ]\n\n entity.__setattr__(property_name, proxy_list)\n elif guide.association == AssociationType.MANY_TO_MANY:\n entity.__setattr__(property_name, ProxyCollection(self, entity, guide))\n else:\n raise IntegrityConstraintError('Unknown type of entity association (reverse mapping)')\n\n return # Done the application\n\n # In the direct mapping, the lazy loading is applied wherever applicable.\n if guide.association in [AssociationType.ONE_TO_ONE, AssociationType.MANY_TO_ONE]:\n entity.__setattr__(\n property_name,\n ProxyFactory.make(\n self,\n 
entity.__getattribute__(property_name),\n guide\n )\n )\n elif guide.association == AssociationType.ONE_TO_MANY:\n proxy_list = [\n ProxyFactory.make(self, object_id, guide)\n for object_id in entity.__getattribute__(property_name)\n ]\n\n entity.__setattr__(property_name, proxy_list)\n elif guide.association == AssociationType.MANY_TO_MANY:\n entity.__setattr__(property_name, ProxyCollection(self, entity, guide))\n else:\n raise IntegrityConstraintError('Unknown type of entity association')", "def set_relation(\n self, other, reltype=None, set_reverse=True\n ): ## TODO: logic to find and set siblings?\n ##TODO: test coverage\n reltype = reltype.upper()\n reltype_reverse = {\"CHILD\": \"PARENT\", \"PARENT\": \"CHILD\", \"SIBLING\": \"SIBLING\"}[\n reltype\n ]\n if isinstance(other, CalendarObjectResource):\n if other.id:\n uid = other.id\n else:\n uid = other.icalendar_component[\"uid\"]\n else:\n uid = other\n if set_reverse:\n other = self.parent.object_by_uid(uid)\n if set_reverse:\n other.set_relation(other=self, reltype=reltype_reverse, set_reverse=False)\n\n existing_relation = self.icalendar_component.get(\"related-to\", None)\n existing_relations = (\n existing_relation\n if isinstance(existing_relation, list)\n else [existing_relation]\n )\n for rel in existing_relations:\n if rel == uid:\n return\n\n self.icalendar_component.add(\n \"related-to\", uid, parameters={\"RELTYPE\": reltype}, encode=True\n )\n\n self.save()", "def setup(self, context, entity, path, reduced_path, adapter, **kwargs):\n\n pass", "def test_find_relation_types(self):\n pass", "def _get_and_create_relation_objects(self, root_node):\n relations = []\n\n for relation in root_node.iterdescendants(\"TLINK\"):\n lid = relation.get(\"lid\")\n\n # Get relation type as a string\n relation_type = relation.get(\"relType\")\n\n # Get relation_type_id\n relation_type_id = RelationType.get_id(relation_type)\n\n if not relation.get(\"timeID\") and not relation.get(\"relatedToTime\"):\n # This is event-event\n source_eiid = relation.get(\"eventInstanceID\")\n target_eiid = relation.get(\"relatedToEventInstance\")\n\n # Find source event\n source_obj = self.find_event_by_eiid(self.events, source_eiid)\n # Find target event\n target_obj = self.find_event_by_eiid(self.events, target_eiid)\n\n else:\n # This must be event-timex or timex-event or timex-timex\n target_tid = relation.get(\"relatedToTime\")\n target_eiid = relation.get(\"relatedToEventInstance\")\n\n source_tid = relation.get(\"timeID\")\n source_eiid = relation.get(\"eventInstanceID\")\n\n\n if source_tid and target_eiid:\n # timex-event\n source_obj = self.find_timex_by_tid(source_tid)\n target_obj = self.find_event_by_eiid(self.events, target_eiid)\n elif source_eiid and target_tid:\n # event-timex\n source_obj = self.find_event_by_eiid(self.events, source_eiid)\n target_obj = self.find_timex_by_tid(target_tid)\n elif source_tid and target_tid:\n # timex-timex\n source_obj = self.find_timex_by_tid(source_tid)\n target_obj = self.find_timex_by_tid(target_tid)\n\n relation_obj = Relation(lid, self.text_obj, source_obj, target_obj, relation_type_id)\n\n # So we don't run into problems with helper.output\n if relation_obj.is_timex_timex(): relation_obj.predicted_class = relation_type_id\n\n # There are sometimes duplicates which we do not want to have\n if relation_obj not in relations:\n relations.append(relation_obj)\n\n return relations" ]
[ "0.5983358", "0.5731366", "0.5586986", "0.5551285", "0.5508221", "0.54468006", "0.53996897", "0.53723776", "0.5369516", "0.53591746", "0.53363955", "0.53126204", "0.52928793", "0.5236608", "0.52262175", "0.5187518", "0.5169628", "0.5126868", "0.5075861", "0.5060441", "0.50516176", "0.5045429", "0.5036198", "0.50210387", "0.501029", "0.5005995", "0.49944648", "0.4968824", "0.49516234", "0.49254033" ]
0.6793783
0
Return the name of the Subreddit.
def getSubredditName(self):
    return self.nameOfSubreddit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sub_name(self):\n return self.sub_name", "def get_subreddit(self):\r\n from pylons import g\r\n from r2.models import Subreddit, Sub, NotFound, Default\r\n try:\r\n if not self.hostname or self.hostname.startswith(g.domain):\r\n if self.path.startswith('/r/'):\r\n return Subreddit._by_name(self.path.split('/')[2])\r\n elif self.path.startswith('/categories/'):\r\n return Sub\r\n else:\r\n return Default\r\n elif self.hostname:\r\n return Subreddit._by_domain(self.hostname)\r\n except NotFound:\r\n pass\r\n return None", "def subresource_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"subresource_name\")", "def _kind(self) -> str: # noqa: ANN001\n return self._reddit.config.kinds[\"subreddit\"]", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def subresource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource_name\")", "def setSubreddit(self, subredditName):\n\t\tself.nameOfSubreddit = subredditName\n\t\tself.subreddit = self.praw.subreddit(nameOfSubreddit)", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def subnetwork_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnetwork_name\")", "def get_name(self) -> str:\n\n return self.name_", "def get_name(self) -> str:\n return self._name", "def get_name(self) -> str:\n return self._name", "def get_name(self) -> str:\r\n return self.name", "def __str__(self):\n return \"/r/{subreddit} Post at {datetime}: {title}\".format(\n subreddit=self.subreddit,\n datetime=self.submit_at,\n title=self.title,\n )", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def _get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name" ]
[ "0.7218869", "0.68938684", "0.65898365", "0.64519304", "0.6427484", "0.6427484", "0.63539076", "0.61308515", "0.61308515", "0.61308515", "0.6120958", "0.60964257", "0.6078476", "0.6078476", "0.6071652", "0.6050746", "0.60496366", "0.60496366", "0.60496366", "0.6042231", "0.6041917", "0.6041917", "0.6041917", "0.6041917", "0.6041917", "0.6041917", "0.6041917", "0.6041917", "0.6041917", "0.6041917" ]
0.8894763
0
Changes the subreddit name. Takes the name of the subreddit you want to change to.
def setSubreddit(self, subredditName):
    self.nameOfSubreddit = subredditName
    self.subreddit = self.praw.subreddit(subredditName)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_name(self, new_name):\r\n self.__name = new_name", "def update_name(self, new_name):\r\n self.__name = new_name", "def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath", "def new_name(self,new_name):\n self.name = new_name", "def name(self, new_name):\n self.rename(new_name)", "def set_name(self, newname=\"\"):\n self.name = newname", "def updateTitle(rubricterm, event):\n rubricterm.updateTitle()", "def updateName(self,name):\n self.name = name", "def getSubredditName(self):\n\t\treturn self.nameOfSubreddit", "def rename(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def rename(self, new_name):\n\n self.__enforce_connected()\n current_url = self.url\n self._set_field(\"name\",new_name)\n self.set_json(self._http_client.update(current_url, self.get_json()))", "def change_name(change_account):\n change_data(change_account, changed_data='name')", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def set_name(self, name):\n\t\tself.name_ = name", "def set_name(self, name):\n self.name = name\n self.labels.change_name(name)", "def changeName(self, userId, newName):\n\t\turi = \"{}/users/{}\".format(tt_base_uri, userId)\n\t\turi_args = {\"name\":newName}\n\t\tr = requests.put(uri, json=uri_args, cookies={\"PLAY_SESSION\":self.play_session, \"__uvt\":\"\"})\n\t\tprint(\"change name: status code:\", r.status_code)", "def change_subreddit(self, new_sr_id):\r\n if self.sr_id != new_sr_id:\r\n self.sr_id = new_sr_id\r\n self._date = datetime.now(g.tz)\r\n self.url = self.make_permalink_slow()\r\n self._commit()\r\n\r\n # Comments must be in the same subreddit as the link that\r\n # the comments belong to. This is needed so that if a\r\n # comment is made on a draft link then when the link moves\r\n # to a public subreddit the comments also move and others\r\n # will be able to see and reply to the comment.\r\n for comment in Comment._query(Comment.c.link_id == self._id, data=True):\r\n comment.sr_id = new_sr_id\r\n comment._commit()", "def rename(self, name: str):\n self.doc['name'] = name", "async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "async def set_name(self, name: str):\n json = {**self.get_data(), \"name\": name}\n return await self.easee.put(f\"/api/sites/{self.id}\", json=json)", "def set_name(self, name):\n self.settings[\"name\"] = name", "def set_name(self, name):\n self._name = name", "def setName(self, name):\n self.name = str(name)" ]
[ "0.6665063", "0.6665063", "0.6544932", "0.6535344", "0.638324", "0.63765067", "0.63743895", "0.636293", "0.6334866", "0.612783", "0.612274", "0.612274", "0.6089802", "0.6071336", "0.60371286", "0.6034261", "0.6018913", "0.6016731", "0.6003661", "0.59978575", "0.591629", "0.5897233", "0.5897233", "0.5897233", "0.5897233", "0.5897233", "0.58765215", "0.5859544", "0.58454114", "0.5842444" ]
0.77809405
0
Returns a list of integers containing the scores of the posts.
def getScore(self):
    self.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]
    return self.scoreList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scores(self) -> List[float]:\n if not self.prediction:\n return []\n return [sentence.score for sentence in self.prediction.sentences]", "def get_scores(self):\n return self.score", "def scores_(self):\n return self.predictor.scores_", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def childScores(self):\n return [x.score for x in self.children]", "def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]", "def get_score(self):\n return tuple(self.score)", "def get_student_scores(student_information):\n return [\n student_information[1]\n , student_information[2]\n , student_information[3]\n , student_information[4]\n , student_information[5]\n ]", "def get_vote_score(self):\n q = PostVote.objects.filter(post=self).aggregate(Sum('score'))\n return q['score__sum'] if q['score__sum'] else 0", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def _section_scores(self, chapter_index, section_index):\r\n # This is CSS selector means:\r\n # Get the scores for the chapter at `chapter_index` and the section at `section_index`\r\n # Example text of the retrieved elements: \"0/1\"\r\n score_css = \"div.chapters>section:nth-of-type({0}) div.sections>div:nth-of-type({1}) div.scores>ol>li\".format(\r\n chapter_index, section_index\r\n )\r\n\r\n text_scores = self.q(css=score_css).text\r\n\r\n # Convert text scores to tuples of (points, max_points)\r\n return [tuple(map(int, score.split('/'))) for score in text_scores]", "def getScore(data):\n return score", "def get_scores(self):\n return pd.DataFrame(self._scores)", "def get_score(self, student_answers):\r\n pass", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def get_score_list(self) -> List[int]:\n\n result, values = [], []\n for i in range(len(self._cards)):\n for value in self._cards[i]:\n values.append(value[0])\n if sum(values) > 21:\n for num in range(len(values)):\n if values[num] == 11:\n values[num] = 1\n result.append(sum(values))\n values = []\n return result", "def alt_score(objects):\n scores = {}\n for tweet in objects:\n data = tweet._json\n raw_time = datetime.strptime(\n data['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y'\n )\n age = ((datetime.utcnow() - raw_time).seconds / 60) + 1\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n e2f = ((weight * rt + fave) / (fol / 2)) * 1000\n e2a = enagement / age\n score = e2f + e2a\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]:\n embed = twitter.get_oembed(id=item[1], align='center')\n embeds.append(embed['html'])\n return embeds", "def scores_statistics(self, scores):\r\n aux_scores = np.array(scores)\r\n return np.mean(aux_scores), np.median(aux_scores), np.std(aux_scores)", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score", "def getSubmissionScore(submission):\r\n return submission.score", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = 
self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def get_animelist_scores(soup):\n if soup.find_all('tbody', class_='list-item'):\n animelist_scores = []\n for element in soup.find_all('tbody', class_='list-item'):\n # Making sure that the element exists before appending\n if element.find(class_='data score'):\n if element.find(class_='data score').text:\n animelist_score = element.find(class_='data score').text.strip()\n animelist_scores.append(animelist_score)\n return animelist_scores", "def score_tweets(objects):\n scores = {}\n for tweet in objects:\n data = tweet._json\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n score = ((weight * rt + fave) / (fol / 2)) * 1000\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]: #sorted returns tuple\n embed = twitter.get_oembed(id=item[1],align='center')\n embeds.append(embed['html'])\n return embeds", "def as_list_of_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_list = [\n ['E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)',\n 'I: ' + str(self.i_score) + '(' + str(self.i_pct) + '%)'],\n ['N: ' + str(self.n_score) + '(' + str(self.n_pct) + '%)',\n 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%)'],\n ['F: ' + str(self.f_score) + '(' + str(self.f_pct) + '%)',\n 'T: ' + str(self.t_score) + '(' + str(self.t_pct) + '%)'],\n ['J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)',\n 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)']\n ]\n return score_list", "def _get_scores(target, predicted):\n recall = scoring(target, predicted, metric=\"recall\")\n precision = scoring(target, predicted, metric=\"precision\")\n accuracy = scoring(target, predicted, metric=\"accuracy\")\n f_score = scoring(target, predicted, metric=\"f1\")\n\n return [recall, precision, accuracy, f_score]", "def getScores(self,query):\n pass", "def get_scores(self):\n precision = self.right / self.count\n APs = self.right_labels / self.count\n mAP = np.mean(APs)\n distance = self.distance / self.count\n\n return {'precision': precision,\n 'APs': APs,\n 'mAP': mAP,\n 'distance': distance\n }", "def to_list_of_lists(self):\n score = [\n [\"E\", self.e_score],\n [\"I\", self.i_score],\n [\"N\", self.n_score],\n [\"S\", self.s_score],\n [\"F\", self.f_score],\n [\"T\", self.t_score],\n [\"J\", self.j_score],\n [\"P\", self.p_score],\n ]\n return score", "def scrape_scores(main_soup):\n ul = main_soup.find('ul', {'class': 'quality-mark-bar-list'})\n\n labels = []\n for li in ul.find_all('li'):\n label = str(li.text).strip()\n if label is not '' and '.' not in label:\n labels.append(label)\n\n scores = []\n for div in ul.find_all('div'):\n scores.append(div['data-tooltip-text'])\n\n return labels, scores" ]
[ "0.71532154", "0.7128279", "0.69161886", "0.6607638", "0.6482073", "0.64766103", "0.6364357", "0.634786", "0.6333597", "0.63325065", "0.62894195", "0.6277977", "0.6263328", "0.6218913", "0.62156534", "0.6215061", "0.6183405", "0.61831903", "0.6175631", "0.6165583", "0.61381984", "0.61239105", "0.60453093", "0.60031545", "0.59925085", "0.59912467", "0.5987303", "0.597861", "0.59664196", "0.5964778" ]
0.75239307
0
Buffer some data to stream to a node's input. Whenever the node is available to read a value from the matching input, this will send the next value.
def buffer_input(self, node, direction, values):
    self.num_buffered_inputs += len(values)
    self.buffered_input.setdefault(node, {}).setdefault(direction, []).extend(values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onRecv(self, data):\n self.stream += data\n while self.handleStream(): pass", "def process(self, data):\n if self.__head:\n self.__head.send(Element(\n stream_id=self.id,\n data=data))", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def collect_incoming_data(self, data):\n self.__input.append(data)", "def queue_input(self, value):\n self.input_queue.append(value)", "def _got_remote(self, data):\n self._recv_buffer += data", "def _add_to_buffer(self, data):\n for byte in data:\n self.next_fn(byte) \n self._parse_cmds()", "def push_data(self, data):\n self.incoming.write(data)", "def data_received(self, data):\n self.log.debug('data_received: {!r}'.format(data))\n self._last_received = datetime.datetime.now()\n for byte in (bytes([value]) for value in data):\n\n try:\n self.stream.feed_byte(byte)\n except (ValueError, AssertionError):\n e_type, e_value, _ = sys.exc_info()\n map(self.log.warn,\n traceback.format_exception_only(e_type, e_value))\n continue\n\n if self.stream.is_oob:\n continue\n\n # self.reader.feed_byte()\n self.shell.feed_byte(byte)", "def stdin_read(self, data):\n self.write_master(data)", "def put(self, value):\n self.stdin.put(value)", "def sendBuffer():\n dislin.sendbf()", "def send_data(sock):\n while True:\n data = sys.stdin.readline()\n sock.send(data.encode())", "def write(self, data):\n self.buffer.append(data)\n while self.push():\n pass", "async def _send_stream_data(\n self, request: Request, stream_id: int, data: bytes\n ) -> None:\n while data:\n max_flow = await self._wait_for_outgoing_flow(request, stream_id)\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n self._h2_state.send_data(stream_id, chunk)\n await self._write_outgoing_data(request)", "def send(self, value, _control=False):\n if not _control:\n self.increment('out')\n for output in self.outputs:\n output.put(value)", "def dataReceived(self, data):\n if len(self._buffer) + len(data) > self.MAX_BUFFER_SIZE:\n raise ASN1TooMuch(\n 'Call read() or flush() before piping more data.')\n\n self._buffer += data", "def set_input(self, idx, input_stream):\n \n raise NotImplementedError", "def __next__(self):\n return next(self.buffered_streamer)", "def emit(data):", "def _send_from_buffer(cls, buf, stream):\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n stream.write( buf[chunk_start:chunk_stop] )\n remaining_bytes -= next_chunk_bytes", "def send(self, item):\n self.input_queue.put(item)", "def data_came_in(self,data,readlinecallback):\r\n # data may come in in parts, not lines! 
Or multiple lines at same time\r\n \r\n if DEBUG:\r\n print >>sys.stderr,\"fasti2i: data_came_in\",`data`,len(data)\r\n\r\n if len(self.buffer) == 0:\r\n self.buffer = data\r\n else:\r\n self.buffer = self.buffer + data\r\n self.read_lines(readlinecallback)", "def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "async def emit(self, data):\n if type(data) is not str:\n serialized_data = json.dumps(data)\n else:\n serialized_data = data\n try:\n self.write(f\"data: {serialized_data}\\n\\n\")\n await self.flush()\n except StreamClosedError:\n app_log.warning(\"Stream closed while handling %s\", self.request.uri)\n # raise Finish to halt the handler\n raise Finish()", "def worker(self):\n while True:\n item,index = self.inbound.get()\n if index is None:\n self.buffer.append(item)\n self.index.value = self.index.value + 1 #index of next item for buffer\n if len(self.buffer)>self.size:\n del self.buffer[0]\n self.newitem.put(None)\n else:\n self.buffer[len(self.buffer)+(index - self.index.value)] = item", "def send(self, payload):\n self.emitter.input(payload)" ]
[ "0.64942396", "0.633012", "0.6143053", "0.60902137", "0.6055283", "0.59665143", "0.579951", "0.57874286", "0.57657534", "0.575062", "0.574913", "0.57437265", "0.5728528", "0.5725628", "0.57216823", "0.5718914", "0.57108384", "0.57058454", "0.56562674", "0.56273276", "0.56068724", "0.5575357", "0.5552419", "0.55273366", "0.5520968", "0.5520968", "0.5520968", "0.5515022", "0.5499535", "0.5487328" ]
0.690997
0
Return whether there are any buffered inputs remaining.
def has_buffered_inputs(self):
    # Assumed completion: relies on the num_buffered_inputs counter maintained by buffer_input.
    return self.num_buffered_inputs > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bufferIsFull(self):\n return len(self.buffer) == self.bufferSize", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def full(self):\n return len(self.future_buffer) == self.CAPACITY", "def are_buffers_empty(self): \n i = 0\n for i in range(self.no_robots):\n if self.is_buffer_empty_for_robot(i) is True:\n i += 1\n else:\n return False\n if i >= self.no_robots:\n return True\n else:\n pass", "def has_batch(self) -> bool:\n return self._buffer and (self._batch_size is None or sum(\n BlockAccessor.for_block(b).num_rows()\n for b in self._buffer) >= self._batch_size)", "def more(self):\n # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment\n tries = 0\n while self.Q.qsize() == 0 and not self.stopped and tries < 5:\n time.sleep(0.1)\n tries += 1\n\n return self.Q.qsize() > 0", "def check_readings(self):\n # loading data from log file\n if self.filepath is not None:\n if self.all_read is None:\n return False\n else:\n ei = self.curr_indexi + self.read_step\n if ei >= self.all_read.shape[0]:\n return False\n self.curr_read = self.all_read[self.curr_index: ei, :]\n self.curr_index = ei\n return True\n\n # stream of data from beaglebone\n # check that there is new data avalible\n isnew = not all_data.empty()\n\n if isnew:\n # read most current data\n qsize = all_data.qsize()\n curr_read = [all_data.get_nowait() for _ in range(qsize)]\n self.curr_read = np.concatenate(curr_read)\n\n return isnew", "def readable (self):\r\n return len(self.ac_in_buffer) <= self.ac_in_buffer_size", "def _buffer_capacity_reached(self) -> bool:\n return self.size() >= self.buffer_capacity", "def messages_pending(self):\r\n return bool(self._log_buffer)", "def isComplete(self):\n return self.bytesToRead == 0", "def isBufferEmpty(self):\n return self.ecg_buffer.empty()", "def running(self):\r\n return self.__maxlen__ > 0", "def isFull(self):\r\n if (len(self.queue) == self.maxlen):\r\n return True\r\n else:\r\n return False", "def is_full(self):\n return len(self) == self.buffer_size", "def is_full(self):\n return len(self) == self.buffer_size", "def is_full(self):\n return len(self) == self.buffer_size", "def is_full(self):\n return len(self) == self.buffer_size", "def isFull(self):\n return len(self.queue) == self.size", "def any(self) -> bool:\n return len(self.queue) > 0", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "def eof(self):\n\t\treturn not self.is_alive() and self._queue.empty()", "def has_full_batch(self) -> bool:", "def has_next_batch(self):\n return self.current_index + self.batch_size <= self.count", "def isFull(self):\n return self.__size == len(self.__buffer)", "def has_an_incomming_message(self):\n return self.pipe_start.poll(1)", "def is_buffer_empty(self): \n if self.buffer.shape == (0, 5):\n return True\n else:\n return False", "def is_ready_update(self):\n size_of_buffer = len(self.training_buffer.update_buffer['actions'])\n return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)", "def running(self):\n return self.more() or not self.stopped", "def has_next(self):\n return not self.finished_function(self.peek)" ]
[ "0.73473984", "0.7152609", "0.69882053", "0.6975826", "0.69175136", "0.6868512", "0.6840837", "0.68094856", "0.6788063", "0.6750303", "0.6736553", "0.6732254", "0.6728499", "0.6708489", "0.6695467", "0.6695467", "0.6695467", "0.6695467", "0.6689184", "0.66471004", "0.6631454", "0.66148", "0.65974367", "0.65874356", "0.6560377", "0.65514845", "0.6509895", "0.64987236", "0.64819896", "0.6479209" ]
0.77823186
0
Takes in bounds and returns hyperparameters as an array of the same length. The elements of bounds are pairs [lower, upper], and the corresponding hyperparameter is sampled from a uniform distribution in the interval [lower, upper]. If instead of a pair we have a number in bounds, then we assign that value as the appropriate hyperparameter.
def initBoundedParams(bounds, sn=[]):
    hypinit = {
        'cov': np.zeros(len(bounds)),
        'lik': np.atleast_1d(np.log(sn)),
        'mean': np.array([])
    }
    # Sample from a uniform distribution
    for idx, pair in enumerate(bounds):
        # Randomize only if bounds are specified
        if isinstance(pair, collections.Iterable):
            hypinit['cov'][idx] = np.random.uniform(pair[0], pair[1])
        # If no bounds, then keep default value always
        else:
            hypinit['cov'][idx] = pair
    return hypinit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_hyperparameter_values(hyper):\n import ConfigSpace.hyperparameters as CSH\n\n if isinstance(hyper, CSH.CategoricalHyperparameter):\n return hyper.choices, False\n\n if isinstance(hyper, CSH.NumericalHyperparameter):\n return [hyper.lower, hyper.upper], True\n \n if isinstance(hyper, CSH.Constant):\n return [hyper.value, hyper.value], True\n\n raise ValueError(str(type(hyper)) + ' is not supported')", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def initial_samples(lb, ub, method, numSamp):\r\n if not len(lb) == len(ub):\r\n raise AssertionError('Lower and upper bounds have different #s of design variables in initial_samples function.')\r\n assert method == 'random' or method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr' or method == 'lhc' or method == 'rand-wor', 'An invalid method was specified for the initial_samples.'\r\n assert (method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr') and len(ub) >= 2 and len(ub) <= 29, 'The Phase space dimensions are outside of the bounds for initial_samples.'\r\n for case in Switch(method):\r\n if case('random'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = lb + (ub - lb) * rand(len(lb))\r\n\r\n break\r\n if case('rand-wor'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = choice(len(ub), size=len(ub), replace=False)\r\n\r\n break\r\n if case('nolh'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = range(q)\r\n if r != 0:\r\n remove = range(dim - r, dim)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-rp'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = random.sample(range(q), q)\r\n if r != 0:\r\n remove = random.sample(range(q - 1), r)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-cdr'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf, remove = get_cdr_permutations(len(ub))\r\n if remove != []:\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('lhc'):\r\n tmp = lhs(len(lb), samples=numSamp, criterion='center')\r\n s = np.array([ list(lb + (ub - lb) * tmp[i, :]) for i in range(len(tmp[:, 0]))\r\n ])\r\n break\r\n if case():\r\n print 'Somehow you evaded my assert statement - good job!',\r\n print ' However, you still need to use a valid method string.'\r\n\r\n return s", "def grid_sampling():\n rangeset = []\n for dim in dimensions:\n bounds = parameters[dim]\n rangeset.append(np.arange(bounds[0], bounds[1], bounds[2]))\n grid = list(itertools.product(*rangeset))\n \n array = np.zeros(len(grid), dtype={'names' : dimensions, \n 'formats' : ['f8']*len(dimensions)})\n for idx, params in enumerate(grid):\n array[idx] = params\n\n return array", "def sample_parameters_given_hyper(self, gen_seed=0):\n if type(gen_seed) is not int:\n raise TypeError(\"gen_seed should be an int\")\n\n rng = random.Random(gen_seed)\n\n hypers = self.get_hypers()\n s = hypers[b's']\n r = hypers[b'r']\n nu = hypers[b'nu']\n m = hypers[b'mu']\n\n rho = rng.gammavariate(nu/2.0, s)\n mu = rng.normalvariate(m, (r/rho)**.5)\n\n assert(rho > 0)\n\n params = {'mu': mu, 'rho': rho}\n\n return params", "def 
_uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def bs_parameters(T_min, T_max, num):\n T_array = np.linspace(T_min, T_max, num)\n t_array = np.sqrt(T_array)\n rf = np.vectorize(lambda t: sqrt(1 - pow(t, 2)))\n r_array = rf(t_array)\n return t_array, r_array", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def param_generater(**p):\r\n if p[\"type\"] == int:\r\n return np.arange(p[\"min\"], p[\"max\"] + p[\"delta\"], p[\"delta\"], np.int)\r\n elif p[\"type\"] == float:\r\n return np.arange(p[\"min\"], p[\"max\"] + p[\"delta\"], p[\"delta\"], np.float)\r\n elif p[\"type\"] == bool:\r\n return np.array([0, 1])\r\n else:\r\n raise TypeError", "def generate_data(params, sigma):\n rng = random.PRNGKey(0)\n k = len(params) // 2\n a_array = params[:k]\n b_array = params[k:]\n n = 20 * k\n xs = sample_our_uniform(n, 1, rng).reshape((n,))\n ys = onp.zeros(n)\n all_indices = set(onp.arange(n))\n for i in range(k):\n i_idxs = onp.random.choice(list(all_indices), 20, replace=False)\n all_indices = set(all_indices) - set(i_idxs)\n ys[i_idxs] = xs[i_idxs] * a_array[i] + b_array[i] + onp.random.normal(0, sigma, size=(20,))\n return xs, ys", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample", "def get_hypergrid(hyperparams_config: List[HyperParameter]) -> np.ndarray:\n hypervalues = [\n np.arange(hyperparam.lower_bound, hyperparam.upper_bound + hyperparam.stepsize / 2, hyperparam.stepsize)\n for hyperparam in hyperparams_config\n ]\n potential_points = [item for item in itertools.product(*hypervalues)]\n potential_points = np.array(potential_points, dtype=float)\n return potential_points", "def convert_searchspace(self, hyperparameter):\n LOG.debug(\"convert input parameter\\n\\n\\t{}\\n\".format(pformat(hyperparameter)))\n searchspace = [[], []]\n for name, param in hyperparameter.items():\n if param[\"domain\"] != \"categorical\" and \"frequency\" not in param.keys():\n param[\"frequency\"] = DEFAULTGRIDFREQUENCY\n warnings.warn(\"No frequency field found, used default gridsearch frequency {}\".format(DEFAULTGRIDFREQUENCY))\n\n if param[\"domain\"] == \"categorical\":\n searchspace[0].append(name)\n searchspace[1].append(param[\"data\"])\n elif param[\"domain\"] == \"uniform\":\n searchspace[0].append(name)\n searchspace[1].append(get_uniform_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n elif param[\"domain\"] == \"normal\":\n searchspace[0].append(name)\n searchspace[1].append(get_gaussian_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n elif param[\"domain\"] == \"loguniform\":\n searchspace[0].append(name)\n searchspace[1].append(get_logarithmic_axis_sample(param[\"data\"][0],\n param[\"data\"][1],\n param[\"frequency\"],\n param[\"type\"]))\n return searchspace", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, 
out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def initial_sampling(self, params):\n i = params\n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n while rho > self.eps0: \n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n data_list = [np.int(i)]\n\n for i_param in xrange(self.n_params): \n data_list.append(theta_star[i_param])\n data_list.append(1./np.float(self.N))\n data_list.append(rho)\n\treturn np.array(data_list)", "def _create_paramList(numParams):\n paramList = np.random.random_sample( numParams )\n return paramList", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def __init__(self, l_bound, u_bound, rng=None):\n if rng is None:\n self.rng = np.random.RandomState(np.random.randint(0, 10000))\n else:\n self.rng = rng\n self.min = l_bound\n self.max = u_bound\n if not (self.max > self.min):\n raise Exception(\n \"Upper bound of Tophat prior must be greater \\\n than the lower bound!\"\n )", "def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config", "def uniform(feature, bins):\n t = (feature.max()-feature.min())/bins\n return [t*i for i in range(1, bins)]", "def uniform(self, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n return self.gen(uniform, size, low, high, ndim=ndim, dtype=dtype)", "def _get_shear_vals(lower_bound: float,\n upper_bound: float,\n step: float) -> Tuple[float]:\n return tuple(np.arange(lower_bound, upper_bound + step, step))", "def get_hyperparams(self):", "def fill_random_covariance_hyperparameters(hyperparameter_interval, num_hyperparameters, covariance_type=SquareExponential):\n hyper = [numpy.random.uniform(hyperparameter_interval.min, hyperparameter_interval.max)\n for _ in xrange(num_hyperparameters)]\n return covariance_type(hyper)", "def get_uniform_axis_sample(a, b, N, dtype):\n assert a < b, \"condition a < b violated!\"\n assert isinstance(N, int), \"condition N of type int violated!\"\n if dtype is int:\n return list(np.linspace(a, b, N).astype(int))\n elif dtype is float:\n return list(np.linspace(a, b, N))\n else:\n raise AssertionError(\"dtype {} not supported for uniform sampling!\".format(dtype))", "def _SampleInputMatrix(nrows, bl, bu, distname='randomUniform'):\n npars = len(bl)\n x = np.zeros((nrows,npars))\n bound = bu-bl\n for i in range(nrows):\n # x[i,:]= bl + DistSelector([0.0,1.0,npars],distname='randomUniform')*bound # only used in full Vhoeys-framework\n x[i,:]= bl + np.random.rand(1,npars)*bound\n return x", "def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])" ]
[ "0.56640446", "0.55604845", "0.54990536", "0.5466633", "0.545822", "0.5446241", "0.54428786", "0.5440141", "0.5407368", "0.54070425", "0.5364159", "0.5364159", "0.5360082", "0.5337372", "0.53170127", "0.53168845", "0.53137463", "0.5287289", "0.52689487", "0.52573603", "0.52536166", "0.5223812", "0.52110726", "0.51996535", "0.5193132", "0.51921135", "0.51877517", "0.5180546", "0.51578873", "0.51482093" ]
0.57418513
0
Returns number of coordinates in string.
def counting_coordinates(string):\n    num_of_commas = string.count(',')\n    num_of_cords = num_of_commas + 1\n    return num_of_cords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_string_info(string):\n line_count = 1\n column_count = 1\n for char in string:\n if char == '\\n':\n column_count = 1\n line_count += 1\n else:\n column_count += 1\n return Coords(line_count, column_count, len(string))", "def length(s: str) -> int:\n count = 0\n for i in s:\n count += 1\n return count", "def __count_commas__(test_str: str) -> int:\n i = test_str.find(\",\")\n if i == -1:\n return 0\n return 1 + MachineInterface.__count_commas__(test_str[i+1:])", "def get_len(string):\n return len(tokenizer.tokenize(string))", "def getLength(string):\n return (0)", "def point(surface, string):\n match = re.match('(.*?) (.*?)(?: |$)', string)\n if match:\n x, y = match.group(1, 2)\n string = string[match.end():]\n return (size(surface, x, 'x'), size(surface, y, 'y'), string)\n else:\n raise PointError", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def _getNumberOfSpaces(self, str):\n\t\tnum = 0\n\t\tfor char in str:\n\t\t\tif char is \" \":\n\t\t\t\tnum += 1\n\t\treturn num", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def formatCoordinates(string):\n if string == 'N/A':\n return 0\n else:\n return float(string)", "def test_mine_count_coordinates(self):\n pg.font.init()\n font = utils.FONT.render(\"test\", True, utils.FONT_COLOUR)\n mine_count_text_coords = utils.mine_count_coords(font, (10, 10))\n self.assertIsInstance(mine_count_text_coords, tuple)", "def getAlphaNumCharCount(sdata):\n\tacount = 0\n\tncount = 0\n\tscount = 0\n\tocount = 0\n\tassertEqual(type(sdata), str, \"input must be string\")\n\tfor c in sdata:\n\t\tif c.isnumeric():\n\t\t\tncount += 1\n\t\telif c.isalpha():\n\t\t\tacount += 1\n\t\telif c.isspace():\n\t\t\tscount += 1\n\t\telse:\n\t\t\tocount += 1\n\tr = (acount, ncount, ocount)\n\treturn r", "def __get_num_from_str(elements: list, string: str) -> str:\n\n num = list()\n\n element_list = list(elements)\n\n for atom in string.split('-'):\n\n if atom == '*':\n num.append('0')\n else:\n num.append(f'{element_list.index(atom) + 1}')\n\n return ' '.join(num)", "def numCoords(self):\n return self.nCoords", "def gx_coords1(s: str) -> list[float]:\n return numarray(s.split(\" \"))", "def nextString(self, s, start):\r\n parens = 0\r\n quotes = 0\r\n\r\n for pos in range(start,len(s)):\r\n c = s[pos]\r\n if c == \",\" and parens == 0 and quotes == 0:\r\n return pos+1\r\n elif c == \"(\" and quotes == 0:\r\n parens += 1\r\n elif c == \")\" and quotes == 0:\r\n parens -= 1\r\n elif c == \"\\'\" and quotes == 0:\r\n quotes = 1\r\n elif c ==\"\\'\" and quotes == 1:\r\n quotes = 0\r\n \r\n return len(s)+1", "def getNumFromString(self, string):\n \n m = re.search(r'\\d+$', string)\n if m is not None:\n return int(m.group())\n else:\n return 0", "def parse_dimension_string(s: str) -> List[int]:\n dimensions = [int(x) for x in s.split(\"x\")]\n if len(dimensions) != 3:\n raise ValueError(f\"We have a weird number of dimensions for this : {s}\")\n return dimensions", "def srow(string, i):\r\n return string.count('\\n', 0, max(0, i)) + 1", "def countingValleys(n, s):\n\n elevation = 0\n valleys = 0\n\n for char in s:\n if char == 'U':\n elevation +=1\n elif char == 'D':\n if elevation == 0:\n valleys += 1\n elevation -= 1\n\n return valleys", "def get_string_length(self):\n return int(self.read('H')[0])", "def count(text):\n return 
len(text)", "def ncoordinates(self):\n return _coordsys.coordsys_ncoordinates(self)", "def true_length(self,str):\n\t\treturn len(re.sub(r'#\\[.*?\\]','',str))", "def LPSubsequenceLength(str):\n return len(LPSubsequence(str))", "def _parse_location(location_string):\n location_regex = r\"(\\d+)-(\\d+)(\\(+\\)|\\(-\\)|)\"\n match = re.match(location_regex, location_string.strip())\n start, end, strand = match.groups()\n return int(start), int(end), -1 if strand == \"(-)\" else 1", "def length(string):\n number=0#the length of the string starts from 0\n for num in string:#go over all the letter in the string\n number=number+1\n print('the length is',number)", "def convert_coordinates(coordinates):\r\n row = coordinates[1] - 1\r\n column = letters.index(coordinates[0])\r\n return column, row", "def write_width(self, font, string):\n width = 0\n for character in string:\n try:\n char_index = font.MAP.index(character)\n width += font.WIDTHS[char_index]\n\n except ValueError:\n pass\n\n return width", "def stoi(self, s):\n idx = self._stoi.get(s)\n return idx + 2 if idx else self.unk_idx" ]
[ "0.6940831", "0.6308547", "0.62703896", "0.60827315", "0.6065635", "0.6045179", "0.60267836", "0.5949909", "0.59459907", "0.5829819", "0.57608765", "0.5755629", "0.57515347", "0.57390684", "0.57338333", "0.5683465", "0.56755596", "0.5655975", "0.5646916", "0.56432575", "0.55876225", "0.555533", "0.55458015", "0.5527392", "0.5517759", "0.5510653", "0.5462357", "0.54507494", "0.54419535", "0.54227066" ]
0.82952464
0
Copy contents of arr1 to arr2.
def copy_arr1_to_arr2(arr1, arr2, k, num_of_cords):\n    for i in range(k):\n        for j in range(num_of_cords):\n            arr2[i][j] = arr1[i][j]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign(array1, array2):\n for i in range(len(array1)):\n array2[i] = array1[i]", "def cat_arrays(arr1, arr2):\n newarr = [0 for i in range(len(arr1) + len(arr2))]\n for i in range(len(arr1)):\n newarr[i] = arr1[i]\n for i in range(len(arr2)):\n newarr[i + len(arr1)] = arr2[i]\n return newarr", "def array_merge(a1, a2, inplace=False, empty_source=False): \n if inplace:\n out = a1\n else:\n out = copy.deepcopy(a1)\n if empty_source:\n for i in range(len(out)):\n out.pop()\n for k in a2:\n out[k] = a2[k]\n return out", "def swapArray( a1, a2 ):\n assert( len(a1) == len(a2) );\n for i in range( len( a1 ) ):\n tmp = a1[i];\n a1[i] = a2[i];\n a2[i] = tmp;\n # for - end", "def merge(arr1, arr2):\n\tres = []\n\n\ti = j = 0\n\n\twhile i< len(arr1) and j < len(arr2):\n\t\tif arr1[i] < arr2[j]:\n\t\t\tres.append(arr1[i])\n\t\t\ti+=1\n\t\telse:\n\t\t\tres.append(arr2[j])\n\t\t\tj+=1\n\n\twhile i < len(arr1):\n\t\tres.append(arr1[i])\n\t\ti +=1\n\n\twhile j < len(arr2):\n\t\tj +=1\n\t\tres.append(arr2[j])\n\n\treturn res", "def array_insert(arr1, arr2, axis=1):\n arr3 = num.insert(arr1, len(arr1.T), arr2, axis=axis)\n\n return arr3", "def cat_arrays(arr1, arr2):\n res = []\n for i in arr1:\n res.append(i)\n for j in arr2:\n res.append(j)\n return (res)", "def two_way_merge(array1, array2):\n\n longer_arr_len = len(array1) + len(array2)\n merged_arr = []\n\n for i in range(longer_arr_len):\n if len(array1) == 0:\n merged_arr += array2\n break\n if len(array2) == 0:\n merged_arr += array1\n break\n\n if array1[0] < array2[0]:\n merged_arr.append(array1.pop(0))\n else:\n merged_arr.append(array2.pop(0))\n\n return merged_arr", "def solution(array1, array2):\n array1, array2 = np.array(array1), np.array(array2)\n return np.concatenate((array1, array2.flatten()))", "def merge(a1, a2):\n\n i, j = 0, 0\n result = [] # resulting array\n while i < len(a1) and j < len(a2): # both array have iterables\n if a1[i] < a2[j]:\n result.append(a1[i])\n i += 1\n elif a1[i] > a2[j]:\n result.append(a2[j])\n j += 1\n else:\n result.append(a1[i])\n result.append(a2[j])\n i += 1\n j += 1\n\n if i == len(a1): # array a1 was exhaused, append the remaining contents of the second array to the result\n result.extend(a2[j:])\n if j == len(a2): # array a2 was exhaused, append the remaining contents of the first array to the result\n result.extend(a1[i:])\n\n return result", "def merge(arr1, arr2):\n out = []\n # Iterate while neither list is empty\n while arr1 and arr2:\n # Compare heads, pop smallest head and append to output\n if arr1[0] <= arr2[0]:\n out.append(arr1.pop(0))\n else:\n out.append(arr2.pop(0))\n # Concat whichever array has more elements\n if arr1:\n out.extend(arr1)\n else:\n out.extend(arr2)\n return out", "def column_bind(arr1, arr2):\n arr1 = array(arr1, ndmin=2)\n arr2 = array(arr2, ndmin=2)\n _out = concatenate((arr1, arr2), axis=0).transpose()\n return _out", "def cat_arrays(arr1, arr2):\n return [x for x in arr1+arr2]", "def copyto(self, nparray):\n if self.__parity[0]==0:\n np.copyto(self.__np_array1, nparray)\n else:\n np.copyto(self.__np_array2, nparray)", "def merge_two_sorted_arrays(array_0, len_0, array_1, len_1):\n iter_0, iter_1, write_index = len_0 - 1, len_1 - 1, len_0 + len_1 - 1\n\n while iter_0 >= 0 and iter_1 >= 0:\n if array_0[iter_0] > array_1[iter_1]:\n array_0[write_index] = array_0[iter_0]\n iter_0 -= 1\n else:\n array_0[write_index] = array_1[iter_1]\n iter_1 -= 1\n write_index -= 1\n\n while iter_1 >= 0:\n array_0[write_index] = array_1[iter_1]\n write_index -= 1\n iter_1 -= 1", 
"def merge(arr1, arr2):\n i = 0\n j = 0\n sol = []\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n sol.append(arr1[i])\n i += 1\n else:\n sol.append(arr2[j])\n j += 1\n if i < len(arr1):\n sol.extend(arr1[i:])\n if j < len(arr2):\n sol.extend(arr2[j:])\n return sol", "def merge(arr1,arr2):\n i = 0\n j = 0\n new_list = []\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n new_list.append(arr1[i])\n i += 1\n else:\n new_list.append(arr2[j])\n j += 1\n if i == len(arr1):\n new_list.extend(arr2[j:])\n if j == len(arr2):\n new_list.extend(arr1[i:])\n return new_list", "def copytobuffer(self, nparray):\n if self.__parity[0]==0:\n np.copyto(self.__np_array2, nparray)\n else:\n np.copyto(self.__np_array1, nparray)", "def array_interleave(array1, array2, idx):\n array1 = numpy.asanyarray(array1)\n array2 = numpy.asanyarray(array2)\n idx = numpy.asanyarray(idx)\n assert(len(array1.shape) == len(array2.shape))\n assert(array1.shape[1:] == array2.shape[1:])\n assert(array1.dtype == array2.dtype)\n outarray = numpy.empty(dtype=array1.dtype,\n shape=((array1.shape[0] + array2.shape[0],) + array1.shape[1:]))\n outarray[idx, ...] = array1\n idx_comp = numpy.ones((outarray.shape[0],), dtype=numpy.bool)\n idx_comp[idx] = False\n outarray[idx_comp, ...] = array2\n return outarray", "def array_diff(arr1, arr2):\n\n # pretty weird way to do this task =)\n result = []\n list(map(lambda x: check_presence_and_append_if_necessary(generate_appender(result), generate_checker(arr2), x), arr1))\n \n return result", "def unison_arr():\n arr_size = randint(10, 100)\n arr1 = np.asarray([randint(0, 10) for _ in range(arr_size)])\n arr2 = np.asarray([randint(0, 10) for _ in range(arr_size)])\n return arr1, arr2", "def combine(array1, array2):\n array = [0 for _ in range(len(array1) + len(array2))]\n i = j = k = 0\n while i < len(array1) and j < len(array2):\n if array1[i] <= array2[j]:\n array[k] = array1[i]\n i += 1\n else:\n array[k] = array2[j]\n j += 1\n k += 1\n \n while i < len(array1):\n array[k] = array1[i]\n i += 1\n k += 1\n while j < len(array2):\n array[k] = array2[j]\n j += 1\n k += 1\n \n return array", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [arr1[i] + arr2[i] for i in range(len(arr1))]", "def add_arrays(arr1, arr2):\n n = len(arr1)\n m = len(arr2)\n if n != m:\n return None\n return [arr1[i] + arr2[i] for i in range(n)]", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return (None)\n newList = []\n for i in range(len(arr1)):\n newList.append(arr1[i] + arr2[i])\n return (newList)", "def merge(self, nums1, m, nums2, n):\n copy_arr = nums1[:m]\n i, j = 0, 0\n for k in range(m + n):\n if i >= m:\n nums1[k] = nums2[j]\n j += 1\n elif j >= n:\n nums1[k] = copy_arr[i]\n i += 1\n elif copy_arr[i] <= nums2[j]:\n nums1[k] = copy_arr[i]\n i += 1\n else:\n nums1[k] = nums2[j]\n j += 1", "def joinArraysNoKeep(array1, index1, array2, index2):\n nrColumns1 = np.shape(array1)[1]\n nrColumns2 = np.shape(array2)[1]\n outRows = np.shape(array1)[0]\n outColumns = nrColumns1 + nrColumns2 \n out = np.zeros((outRows, outColumns)) - 9999\n\n out[:,0:nrColumns1] = array1\n dict2 = array2dict(array2, index2)\n t=0 \n for row in array1:\n key = row[index1]\n try:\n out[t,nrColumns1:] = dict2[key]\n t+=1\n except KeyError:\n t+=1\n delIndex = np.shape(array1)[1]+index2 \n out = np.delete(out, delIndex, axis=1)\n return out", "def add_mismatched_arrays(array1, array2, truncate=False):\n # Cast these arrays to the largest common type\n array1 = 
np.array(array1, dtype=np.promote_types(array1.dtype, array2.dtype))\n array2 = np.array(array2, dtype=np.promote_types(array1.dtype, array2.dtype))\n\n # TODO: find a more elegant way to do this whole function\n\n if truncate:\n if len(array1) < len(array2):\n result = array1.copy()\n result += array2[:len(array1)]\n else:\n result = array2.copy()\n result += array1[:len(array2)]\n else:\n if len(array1) < len(array2):\n result = array2.copy()\n result[:len(array1)] += array1\n else:\n result = array1.copy()\n result[:len(array2)] += array2\n\n return result", "def _swap(self, node1, node2):\n arr = self._array\n arr[node1._index], arr[node2._index] = arr[node2._index], \\\n arr[node1._index]\n # Swap indices stored in nodes as well\n node1._index, node2._index = node2._index, node1._index", "def swap_inarray(array, idx1, idx2):\n if idx1 != idx2:\n temp = array[idx1]\n array[idx1] = array[idx2]\n array[idx2] = temp\n return array" ]
[ "0.73633987", "0.67356104", "0.6724997", "0.6570111", "0.65166664", "0.6294176", "0.6041236", "0.60094804", "0.5956681", "0.5945453", "0.5928697", "0.5916138", "0.59127474", "0.5909704", "0.5871539", "0.5759969", "0.5749519", "0.57420987", "0.57081336", "0.56879663", "0.56604695", "0.5576985", "0.5574852", "0.5548397", "0.55010426", "0.54945767", "0.5448372", "0.54444104", "0.5420778", "0.5403465" ]
0.7642252
0
Set all slots of currClusters to zero.
def make_all_zero(curr_clusters, k, num_of_cords):\n    for i in range(k):\n        for j in range(num_of_cords):\n            curr_clusters[i][j] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _empty_clusters(clusters):\n for clst in clusters:\n clst.points = []", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def zero_cluster():\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"metric\", \"clear\", \"--cluster\")\n else:\n cmd = _traffic_line(\"-Z\")\n\n return _subprocess(cmd)", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def clear(self):\n self.__indexclusters[:] = []\n self.__sample_size = 0\n self.__samples[:] = []\n self.__simifunc = None", "def reset(self) -> None:\n self.current = 0\n self.num_cuts = 0", "def clear(self):\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def zeroLickCount (self, chanList):\n global gLickArray\n for chan in chanList:\n gLickArray [chan] = 0", "def reset(self):\n self.liidx = 0\n self.clidx = 0", "def reset(self) -> None:\n self._dist['current'] = np.copy(self._dist['initial'])", "def resetCounters(self):\n self.chain.zero_counters()\n counters = self.session.query(Counter).all()\n self.session.query(Counter).delete()", "def zero_scanners(self):\n self.logger.info('Zero all Scanners.')\n self.anc350_instrument.zero_scanners()", "def setNull(self):\n self.components = [0 for i in range(len(self.components))]", "def setZeroes(self, matrix: List[List[int]]) -> None:\r\n import copy\r\n m=len(matrix)\r\n n=len(matrix[0])\r\n m_copy=copy.deepcopy(matrix)\r\n for i in range(m):\r\n for j in range(n):\r\n if m_copy[i][j]==0:\r\n matrix[i]=[0]*n\r\n for x in range(m):\r\n matrix[x][j]=0", "def reset(self) -> None:\n self.true_positives = 0\n self.all_positives = 0", "def zero_negative_weights(self):\n for k in range(len(self)):\n self[k] *= 0 if self[k] < 0 else 1\n self.finalized = True\n return self", "def set_zero_stock(self, warehouse_list):\n for wh in warehouse_list:\n self.starting_stock_by_wh[wh] = 0\n self.stock_by_wh[wh] = 0\n self.stock_in_transit_by_wh[wh] = 0", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset(self) -> List[int]:", "def moveToZero(self):\n\t\tself.grp.a.t.v = [0,0,0]\n\t\tself.grp.a.r.v = [0,0,0]", "def clear(self):\n self._latencies = [0] * len(BUCKETS)", "def reset(self):\n self.z[:] = 0", "def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def reset(self):\n self.nodes = []\n self.start = self.start\n self.end = self.end\n\n for row in self.charMap:\n for c in row:\n if c == \"2\":\n c.c = \"0\"\n self.n_checked = 0", "def reset(self) -> None:\n self.true_positives = 0\n self.actual_positives = 0", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def reset_counters_in_list(origin_champs_counters_to_buy_):\n logging.debug(\"Function reset_counters_in_list() called\")\n\n for champ_counter in origin_champs_counters_to_buy_:\n champ_counter.set(0)\n\n delete_all_buttons()\n\n logging.debug(\"Function reset_counters_in_list() end\")", "def reset_weight_zero(self):\n self.node_embedding = np.random.uniform(low=-0.5, high=0.5, size=(self.vocab_size, self.layer1_size)).astype(\n np.float32)\n self.context_embedding = np.zeros((self.vocab_size, self.layer1_size), dtype=np.float32)\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat 
= np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)\n log.info(\"reset communities data| k: {}\".format(self.k))", "def reset(self) -> List[int]:\n pass" ]
[ "0.6796202", "0.64024293", "0.63158333", "0.62675047", "0.6168588", "0.6164785", "0.6119265", "0.60707", "0.6068551", "0.6046783", "0.595144", "0.5930416", "0.59281564", "0.5927291", "0.59178776", "0.5910214", "0.5910083", "0.5907188", "0.5899487", "0.58834374", "0.585963", "0.5855877", "0.58461255", "0.58246404", "0.5810179", "0.58021665", "0.5787255", "0.5784496", "0.5772636", "0.5763445" ]
0.74820185
0
Check if clusters1 equals clusters2; return True if so, or False if not.
def is_converged(clusters1, clusters2, k, num_of_cords):\n    for i in range(k):\n        for j in range(num_of_cords):\n            if clusters1[i][j] != clusters2[i][j]:\n                return False\n    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def identical_cds(sc1,sc2):\n # Input 2 identical segment chains, return True if cds the same\n if sc1.covers(sc2) and sc2.covers(sc1):\n return True\n else:\n return False", "def are_clusters_similar(cls, c1, c2, proportion=0.8):\n if len(c1.indices) > len(c2.indices):\n c1_idx = set(c1.indices.tolist())\n c2_idx = set(c2.indices.tolist())\n smaller = c2.name\n else:\n c1_idx = set(c2.indices.tolist())\n c2_idx = set(c1.indices.tolist())\n smaller = c1.name\n\n if (len(c1_idx & c2_idx) > proportion * len(c2_idx) and\n len(c2_idx) >= 0.8 * len(c1_idx)) or\\\n len(c1_idx & c2_idx) == len(c2_idx):\n return smaller", "def test_equals_distance_clusters():\n rust_result = rust_force.calculate_distance_between_two_clusters(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n python_result = calculate_distance_between_two_clusters(\n python_first_cluster, python_second_cluster, python_first_cluster_position, python_second_cluster_position)\n assert rust_result == python_result", "def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = set(segment_pair)\n if seg1 in seg_set and seg2 in seg_set:\n return True\n return False", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def same(series1, series2):\n # pylint: disable=protected-access\n return bool(\n series1._data == series2._data and\n Collection.same(series1._collection, series2._collection)\n )", "def equal(series1, series2):\n # pylint: disable=protected-access\n return bool(\n series1._data is series2._data and\n series1._collection is series2._collection\n )", "def check_two_sets_of_krauss_are_same(krauss1, krauss2, numb_qubits, dim_in, dim_out, numb=1000):\n is_same = True\n chann1 = DenseKraus(krauss1, numb_qubits, dim_in, dim_out)\n chann2 = DenseKraus(krauss2, numb_qubits, dim_in, dim_out)\n for _ in range(0, numb):\n # Get random Rho\n rho = np.array(rand_dm_ginibre(2).data.todense())\n rho1 = chann1.channel(rho)\n rho2 = chann2.channel(rho)\n # Compare them\n if np.any(np.abs(rho1 - rho2) > 1e-3):\n is_same = False\n break\n return is_same", "def __eq__(self, other):\n if not isinstance(other, HyperflexClusterAllOf):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, clst):\n return self.centroid == clst.centroid", "def test_equals_distance_clusters_rust():\n rust_result = 
rust_force.calculate_distance_between_two_clusters(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n rust_result_parallel = rust_force.calculate_distance_between_two_clusters_parallel(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n assert rust_result == rust_result_parallel", "def check_components(self, data, _cluster, _linked_clusters):\n\n do_not_merge = []\n clustercoords = data[0:2,_cluster.cluster_members]\n _linked_clusters = [_link.antecessor for _link in _linked_clusters]\n\n if _cluster.number_of_members > 50:\n # This is faster for large numbers of cluster_members but slower when\n # number_of_members is small. A value of 50 is arbitrary but selected\n # empirically.\n for _link in _linked_clusters:\n linkcoords = data[0:2,_link.cluster_members]\n concatcoords = np.concatenate([linkcoords.T, clustercoords.T])\n concatcoords = concatcoords.T\n vals, idx, count = np.unique(concatcoords, return_index=True, return_counts=True, axis = 1)\n idx_vals_repeated = np.where(count > 1)[0]\n if np.size(idx_vals_repeated) > 0:\n do_not_merge.append(True)\n else:\n do_not_merge.append(False)\n\n else:\n for _link in _linked_clusters:\n boolval = []\n for j in range(_cluster.number_of_members):\n # Check all cluster components against those belonging to another cluster\n multiple_components = (data[0,_cluster.cluster_members[j]] == data[0,_link.cluster_members]) & \\\n (data[1,_cluster.cluster_members[j]] == data[1,_link.cluster_members])\n if np.any(multiple_components):\n boolval.append(True)\n else:\n boolval.append(False)\n if np.any(boolval):\n do_not_merge.append(True)\n else:\n do_not_merge.append(False)\n boolval = None\n\n return do_not_merge", "def are_equal(self, sp1, sp2):\n return sp1 == sp2", "def are_equal(self, sp1, sp2):\n return True", "def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c", "def is_same_set(self, item1, item2):\n res = False\n for s in self._data:\n if item1 in s and item2 in s:\n res = True\n break\n return res", "def _are_equal(grid: List[List[str]], other: List[List[str]]) -> bool:\n for row in range(len(grid)):\n for col in range(len(grid[row])):\n if grid[row][col] != other[row][col]:\n return False\n return True", "def meets(self, s2):\n return set(self.keys()).intersection(list(s2.keys())) != set()", "def intersect(a1, b1, a2, b2):\n return ccw(a1, b1, a2) != ccw(a1, b1, b2) and ccw(a2, b2, a1) != ccw(a2, b2, b1)", "def test_equals_normalized_distance_clusters():\n rust_result = rust_force.calculate_normalized_distance_between_two_clusters(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n python_result = calculate_normalized_distance_between_two_clusters(\n python_first_cluster, python_second_cluster, python_first_cluster_position, python_second_cluster_position,\n building_offset_rules)\n assert rust_result == python_result", "def grid_equal(grid1, grid2):\r\n for i in range(len(grid1)):\r\n for j in range(len(grid1[i])):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def compute_cluster_similarities(emb_clusters1, emb_clusters2, compare, order, clmethod, plot):\n def compute_sim(e, e1, cls, cls1):\n sims = np.empty((20, 20))\n xticks, yticks = [], []\n for i, c in enumerate(cls):\n yticks.append(', '.join(c[1]) + (f' {round(c[3], 5)}' if 
order == 'avgfreq' else ''))\n for j, c1 in enumerate(cls1):\n if len(xticks) < 20:\n xticks.append(', '.join(c1[1]) + (f' {round(c1[3], 5)}' if order == 'avgfreq' else ''))\n sims[i, j] = jaccard_similarity_score(c[2], c1[2])\n jaccard_similarities[f'{e}-{e1}'] = sims\n\n if plot:\n if order == 'clustermap':\n similarity_clustermap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}')\n elif order == 'default' or order == 'avgfreq':\n similarity_heatmap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}', order)\n else:\n pass\n\n jaccard_similarities = {}\n if compare == 'cross':\n for ie, (e, cls) in enumerate(emb_clusters1.items()):\n for ie1, (e1, cls1) in enumerate(emb_clusters2.items()):\n if ie < ie1:\n compute_sim(e, e1, cls, cls1)\n elif compare == 'dot':\n for (e, cls), (e1, cls1) in zip(emb_clusters1.items(), emb_clusters2.items()):\n compute_sim(e, e1, cls, cls1)\n\n return jaccard_similarities", "def jacaard(clusters_a, clusters_b):\n return jaccard_similarity_score(clusters_a, clusters_b)", "def is_equal(self, a, b):\n return a == b", "def is_equal(self, state1, state2):\n return self._replace_unks(state1) == self._replace_unks(state2)", "def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n else:\r\n return False", "def __eq__(self, other):\n if not isinstance(other, KubernetesClusterConfiguration):\n return False\n\n return self.__dict__ == other.__dict__", "def are_equal(self, sp1, sp2):\n set1 = set(sp1.elements)\n set2 = set(sp2.elements)\n return set1.issubset(set2) or set2.issubset(set1)", "def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n return False", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]" ]
[ "0.6819774", "0.67053854", "0.6577482", "0.6567598", "0.6566153", "0.6465101", "0.6442963", "0.6380351", "0.6379164", "0.6357021", "0.63215804", "0.6296718", "0.62854797", "0.6203128", "0.62005913", "0.61896455", "0.6144872", "0.61378944", "0.6137696", "0.61299753", "0.61097974", "0.6106333", "0.6104321", "0.60922736", "0.60874516", "0.608256", "0.6068244", "0.6061447", "0.6057758", "0.604725" ]
0.8044068
0
Retrieve an exchange from the database by id
def get_exchange(self, id): return self.exch_repo.get(id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get(self, _id):", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def get(self, id):\n\n\t\treturn MessageStore.get(id)", "def get_by_id(cls, id):\n return cls.query().get(id)", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def get(self, id):\n return self.__model__.query.get(id)", "def get_object(id):", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(self, id):\n return Entry.query.filter(Entry.id == id).one()", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def _get(self, table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)", "def read(id):\n db = core.connect()\n return db[id]", "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "def get_book_by_id(self, id):\n\n try:\n cur = self._db.cursor()\n results = cur.execute('SELECT rowid, * FROM books WHERE rowid = ?', (id, ))\n book_row = results.fetchone()\n return self._row_to_book(book_row)\n except sqlite3.Error as e:\n raise BookError(f'Error getting book ID {id}') from e", "def get_parcel(self, id):\n for p in self.db:\n if p['id'] == id:\n return p, 200\n else:\n return {\"Error\": \"No delivery exists with that id.\"}, 404", "def get_item_by_id(self, id):\n results = self.table_connector.query(\n KeyConditionExpression=Key(self.primary_key).eq(id)\n )\n return results[\"Items\"][0] if \"Items\" in results else []", "def get_entry_by_id(model, id):\n print(model, id)\n return db.session.query(model).filter_by(id=id).first()", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get(self, cls, id):\n pass", "def get_book_by_id(id):\n return Book.query.filter_by(id=id).first()", "def lookup(self, dict_id):\n\n return self.ep.get(\"{0}/{1}\".format(self.endpoint, dict_id))", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def get_object(self, id, **args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)" ]
[ "0.67121446", "0.657574", "0.6478656", "0.64241445", "0.64241445", "0.6390948", "0.6356077", "0.63458675", "0.6295548", "0.6249574", "0.6248896", "0.62442577", "0.61988556", "0.61408186", "0.6100281", "0.6074537", "0.6065128", "0.606222", "0.6061581", "0.6003485", "0.5983606", "0.5972038", "0.5901149", "0.58852935", "0.5877813", "0.58715874", "0.5870999", "0.5857728", "0.58457077", "0.58412343" ]
0.80811805
0
Create a new exchange in the database
def create_exchange(self, exchangename, public_key, private_key, user_id, uid = None, pw = None):\n    if not exchangename or not public_key or not private_key:\n        raise Exception("Exchangename, public key and private key must be given")\n    else:\n        return self.exch_repo.create(exchangename, public_key, private_key, user_id, uid, pw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_exchange(self, exchange_name, exchange_type, internal=None):\n self._channel.exchange_declare(\n exchange=exchange_name,\n durable=True, # Survive reboot\n passive=False, # Perform a declare or just to see if it exists\n internal=internal, # Can only be published to by other exchanges\n exchange_type=exchange_type\n )\n\n self.logger.debug('Created exchange: [name={}, type={}]'.format(\n exchange_name, exchange_type))", "def test_add_exchange(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIs(new_exchange.exchange_name, \"Testing\")", "def create():\n upgrade()\n populate()", "def create(symbol, number_of_shares, purchase_price):\n stock = Stock(symbol, number_of_shares, purchase_price)\n database.session.add(stock)\n database.session.commit()", "def create(self, context=None):\n values = self.obj_get_changes()\n db_host = self.dbapi.host_create(context, values)\n self._from_db_object(context, self, db_host)", "def create(self, path):\n self._getMailbox(path, create=True)", "def test_add_exchange_empty_name(self):\n exchange_name = \"\"\n api_key = \"Testing\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertEqual(new_exchange[\"statuscode\"], 0x1001)", "def _create_exchanges(self):\n # create new picking for exchanged products\n if self.picking_id.picking_type_id.exchange_picking_type_id:\n picking_type_id = self.picking_id.picking_type_id.exchange_picking_type_id.id\n elif self.picking_id.picking_type_id.return_picking_type_id:\n picking_type_id = self.picking_id.picking_type_id.return_picking_type_id.id\n else:\n picking_type_id = self.picking_id.picking_type_id.id\n moves = self.picking_id.move_ids_without_package\n purchase_lines = moves.mapped('purchase_line_id')\n purchase_order = purchase_lines.mapped('order_id')\n new_picking = self.picking_id.copy({\n 'move_lines': [],\n 'sale_id': False,\n 'picking_type_id': picking_type_id,\n 'exchange_sale_id': self.picking_id.sale_id.id,\n 'exchange_purchase_id': purchase_order and purchase_order[0].id,\n 'state': 'draft',\n 'origin': _(\"Exchange of %s\") % self.picking_id.name,\n 'location_id': self.picking_id.location_dest_id.id,\n 'location_dest_id': self.location_id.id\n })\n new_picking.message_post_with_view(\n 'mail.message_origin_link', values={\n 'self': new_picking, 'origin': self.picking_id\n }, subtype_id=self.env.ref('mail.mt_note').id\n )\n exchanged_lines = 0\n invoices_values = []\n for exchange_line in self.exchange_line_ids:\n if not exchange_line.move_id:\n raise UserError(_(\n \"You have manually created product lines, \"\n \"please delete them to proceed.\"\n ))\n if not float_is_zero(\n exchange_line.quantity,\n precision_rounding=exchange_line.uom_id.rounding\n ):\n exchanged_lines += 1\n vals = self._prepare_move_default_values(exchange_line, new_picking)\n move = exchange_line.move_id.copy(vals)\n exchange_line.move_id.exchanged = True\n val = {'exchange_move_id': move.id}\n line = move.sale_line_id or move.purchase_line_id\n invoice_value = self._prepare_invoice_lines(exchange_line, line)\n if invoice_value:\n invoices_values.append(invoice_value)\n line.write(val)\n if not exchanged_lines:\n raise UserError(_(\"Please specify at least one non-zero quantity.\"))\n if invoices_values:\n self.action_create_invoices(invoices_values)\n new_picking.action_confirm()\n new_picking.action_assign()\n return new_picking.id, 
picking_type_id", "def create():\n\tcreate_db()", "def create():", "def create():", "def add_exchange(self, exchange_name, exchange_type, alt_exchange_name=None, passive=False, durable=False,\n arguments = None):\n amqp_session = self.__broker.getAmqpSession()\n if arguments == None:\n arguments = {}\n if alt_exchange_name:\n amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type,\n alternate_exchange=alt_exchange_name, passive=passive, durable=durable,\n arguments=arguments)\n else:\n amqp_session.exchange_declare(exchange=exchange_name, type=exchange_type, passive=passive, durable=durable,\n arguments=arguments)", "def create_essid(self, essid):\n with SessionContext(self.SessionClass) as session:\n essid_obj = ESSID_DBObject(essid)\n session.add(essid_obj)\n session.commit()", "def create(self):\n db.session.add(self)\n db.session.commit()", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n\n pass", "async def create(self, payload):\n\n async with self.db.manager.database.transaction():\n obj = await self._expand(await self.db.create(**payload))\n self.log.info(f\"New {self.db_model_name}: {obj}\")\n return obj", "def create(self):\n ...", "def setup_exchange(self):\n LOGGER.info('Setting the exchange with name :%s and type :%s',\n self._exchange, self._type)\n if self._channel is None:\n raise ChannelDoesntExist('The channel doesn''t exist')\n\n if len(self._exchange) < 3:\n raise ExchangeNameDoesntMatch('This exchange name does''nt match')\n # Check if the channel doesn't exist on rabbit\n\n list_rabbit_exchange = [] # Correct me\n if self._exchange in list_rabbit_exchange:\n raise ExchangeAlreadyExist('This exchange is already exist')\n\n # Check Me : self._channel.basic_qos(prefetch_count=1)\n self._channel.exchange_declare(exchange=self._exchange,\n type=self._type,\n durable=self._durable,\n auto_delete=self._auto_delete)", "def create(self, *args, **kwargs):\n pass", "def create_db(self):", "def create_ticket(event_id, net_id):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"INSERT INTO Ticket VALUES(\"+str(event_id)+\", '\"+net_id+\"')\"\n cursor.execute(sql_string)\n connection.commit()", "def obj_create(self, bundle, **kwargs):\n logger.info(\"Creating a new acknowledgement...\")\n #Create the object\n bundle.obj = Acknowledgement()\n #hydrate\n bundle = self.full_hydrate(bundle)\n \n #Set the customer\n try:\n logger.info(\"Setting customer...\")\n bundle.obj.customer = Customer.objects.get(pk=bundle.data[\"customer\"][\"id\"])\n bundle.obj.discount = bundle.obj.customer.discount\n except:\n logger.error(\"Customer with ID {0} could not be found.\".format(bundle.data['customer']['id']))\n raise\n \n #Set the employee\n try:\n logger.info(\"Setting employee...\")\n bundle.obj.employee = bundle.request.user\n except User.DoesNotExist:\n logger.error(\"User with ID {0} could not be found\".format(bundle.data['employee']['id']))\n raise\n except KeyError:\n logger.critical(\"Missing employee ID.\")\n raise\n \n #Set Status\n bundle.obj.status = \"ACKNOWLEDGED\"\n \n #Set the project or create a new one\n if \"project\" in bundle.data:\n try:\n project = Project.objects.get(pk=bundle.data['project']['id'])\n except KeyError, Project.DoesNotExist:\n try:\n project = Project()\n project.codename = bundle.data['project']['codename']\n project.save()\n except KeyError:\n project = None\n \n bundle.obj.project = project\n \n #Create items without saving them \n 
logger.info(\"Creating items...\")\n self.items = [Item.create(acknowledgement=bundle.obj,\n commit=False,\n **product) for product in bundle.data[\"items\"]]\n \n #Calculate the total price\n logger.info(\"Calculating balance of the order...\")\n bundle.obj.calculate_totals(self.items)\n bundle = self.save(bundle)\n \n #Save the items\n logger.info(\"Saving the items to the database...\")\n for item in self.items:\n item.acknowledgement = bundle.obj\n item.save()\n \n log_message = \"Ack {0} created on {1}. Schedule to be delivered on {1}\"\n log_message = log_message.format(bundle.obj.id,\n bundle.obj.time_created.strftime('%B %d, %Y'),\n bundle.obj.delivery_date.strftime('%B %d, %Y'))\n log = Log(message=log_message,\n delivery_date=bundle.obj.delivery_date,\n acknowledgement=bundle.obj)\n log.save()\n #Create and upload the pdfs to the \n #S3 system. The save the pdfs as\n #Attributes of the acknowledgement\n logger.info(\"Creating PDF documents...\")\n bundle.obj.create_and_upload_pdfs()\n \n \n #Add the url of the pdf to the outgoing data\n #only for when an acknowledgement is create\n try:\n ack = bundle.obj.acknowledgement_pdf\n production = bundle.obj.production_pdf\n bundle.data['pdf'] = {'acknowledgement': ack.generate_url(),\n 'production': production.generate_url()}\n except AttributeError: \n logger.warn('Missing acknowledgement or production pdf')\n \n #Conditionally email ack to Decoroom\n if \"decoroom\" in bundle.obj.customer.name.lower():\n try:\n logger.info(\"Emailing Decoroom Co., Ltd. the order details...\")\n bundle.obj.email_decoroom()\n except Exception as e:\n logger.error(\"Unable to mail decoroom.\")\n logger.error(e)\n \n \n \n logger.info(u\"Acknowledgement #{0} created for {1}\".format(bundle.obj.id, \n bundle.obj.customer.name)) \n return bundle", "def create():\n pass", "def create(self):", "def test_add_exchange_empty_api(self):\n exchange_name = \"Testing\"\n api_key = \"\"\n secret = \"Testing\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def insert(self, name, email, message):\n params = {'name':name, 'email':email, 'date':date.today(), 'message':message}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"insert into guestbook (name, email, signed_on, message) VALUES (:name, :email, :date, :message)\", params)\n\n connection.commit()\n cursor.close()\n return True", "def create(self):\n db.create_all()" ]
[ "0.6696044", "0.6593915", "0.6464137", "0.6299332", "0.6108338", "0.6079601", "0.605643", "0.60084194", "0.5968516", "0.5955321", "0.5955321", "0.5921751", "0.58424836", "0.579992", "0.57557315", "0.57557315", "0.57557315", "0.57540107", "0.569132", "0.56544197", "0.5631779", "0.56270474", "0.56160307", "0.5613753", "0.56046206", "0.56042445", "0.55850965", "0.5567644", "0.5542571", "0.5533915" ]
0.7036039
0
Delete an existing exchange from the database
def delete_exchange(self, exchange_id):\n    if exchange_id:\n        self.exch_repo.delete(exchange_id=exchange_id)\n    else:\n        raise Exception("No exchange_id found for deleting exchange.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_exchange(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n ret = self.app.delete_exchange(new_exchange.id)\n self.assertIn(ret[0], \"success\")", "def test_delete_exchange_not_exists(self):\n ret = self.app.delete_exchange(20)\n self.assertIn(ret[0], \"error\")", "def safe_exchange_delete(self, exchange_name, channel=None):\n channel = channel or self.channel\n full_exchange_name = self.full_name(exchange_name)\n try:\n yield from channel.exchange_delete(full_exchange_name, no_wait=False, timeout=1.0)\n except asyncio.TimeoutError:\n logger.warning('Timeout on exchange %s deletion', full_exchange_name, exc_info=True)\n except Exception:\n logger.error('Unexpected error on exchange %s deletion', full_exchange_name, exc_info=True)", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def test_persist_exchange_and_get_exchange_id(self):\n self.db_handler.persist_exchange(\"TEST\", True)\n test_result = self.db_handler.get_exchange_id(\"TEST\")\n result = self.session.query(Exchange).all()\n for item in result:\n if item.name == \"TEST\":\n result_id = item.id\n\n assert result_id == test_result\n\n self.session.query(Exchange).filter(Exchange.id.__eq__(result_id)).delete()", "def delete_exchange_log_in_db_log():\n log_message = (u\"Loeschen der Exchangelogs von vorgestern\")\n db.write_log_to_db(ac, log_message, \"e\")\n date_log_back = (datetime.datetime.now()\n + datetime.timedelta(days=- 2))\n c_date_log_back = date_log_back.strftime(\"%Y-%m-%d %H:%M\")\n\n sql_command = (\"DELETE FROM EXCHANGE_LOGS WHERE EX_LOG_TIME < '\"\n + c_date_log_back + \"'\")\n\n delete_ok = db.delete_logs_in_db_log(ac, sql_command, log_message)\n if delete_ok is None:\n db.write_log_to_db_a(ac, ac.app_errorslist[5],\n \"x\", \"write_also_to_console\")\n return", "def delete_entry_from_db(entry):\n db.session.delete(entry)\n db.session.commit()", "def delete():", "def delete(self):\r\n db.session.delete(self)\r\n db.session.commit()", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:\n if self.get_exchange(name=name, location=location) is None:\n return False, f'{location!s} exchange {name} is not registered'\n\n exchanges_list = self.connected_exchanges.get(location)\n if exchanges_list is None:\n return False, f'{location!s} exchange {name} is not registered'\n\n if len(exchanges_list) == 1: # if is last exchange of this location\n self.connected_exchanges.pop(location)\n else:\n self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]\n with self.database.user_write() as write_cursor: # Also remove it from the db\n 
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501\n self.database.delete_used_query_range_for_exchange(\n write_cursor=write_cursor,\n location=location,\n exchange_name=name,\n )\n return True, ''", "def delete(self):\n try:\n self.s.delete(self)\n self.s.commit()\n except SQLAlchemyError:\n self.s.rollback()\n raise", "def delete(self):\n db.session.delete(self)\n try:\n db.session.commit()\n return True\n except Exception as error:\n db.session.rollback()\n print(error.args)\n return False", "def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()\n # try:\n # db.session.delete(self)\n # db.session.commit()\n # except exc.IntegrityError:\n # db.session.rollback()", "def delete_expired(cls):\n now = datetime.now(timezone.utc)\n\n sql = \"\"\"DELETE FROM qiita.{0} WHERE exp<%s\"\"\".format(cls._table)\n qdb.sql_connection.perform_as_transaction(sql, [now])", "def _delete_from_db(self, instance: DBModelInstance) -> None:\n self.db.session.delete(instance)\n self.db.session.commit()", "def delete(self):\n db.session.delete(self)\n self.__commit()", "def delete(self):\n pass", "def delete(self):\n pass" ]
[ "0.77815914", "0.6905101", "0.6586099", "0.62237895", "0.61273843", "0.6105402", "0.6093326", "0.60743785", "0.607348", "0.6057913", "0.6057913", "0.60360014", "0.60360014", "0.60360014", "0.60360014", "0.60360014", "0.60360014", "0.60360014", "0.60360014", "0.60360014", "0.60185355", "0.59994864", "0.5966042", "0.59556437", "0.5952565", "0.5949", "0.59328204", "0.58867794", "0.5879357", "0.5879357" ]
0.74982965
1
Checks if an input pair is a valid one for a given exchange
def is_valid_pair(self, pair, exchange):\n    pairs = self.ccxt.get_pairs(exchange)\n    print(pairs)\n    return pair in pairs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_pairs(pairs, historical_pairs):\n if pairs is None:\n return False\n for p in pairs:\n if p in historical_pairs:\n return False\n return True", "def is_exchange_information_valid(exchange_info: Dict[str, Any]) -> bool:\n return exchange_info.get(\"status\", None) == \"TRADING\"", "def test_get_all_currency_pairs_from_exchange_with_invalid_pair(self):\n self.session.query(ExchangeCurrencyPair).delete()\n self.db_handler._persist_exchange_currency_pair(\"invalid\", \"BTC\", \"ETH\", True) # pylint: protected-access\n test_result = self.db_handler.get_all_currency_pairs_from_exchange(\"TESTEXCHANGE\")\n result = []\n assert result == test_result\n\n self.session.query(ExchangeCurrencyPair).delete()\n self.db_handler.persist_exchange_currency_pairs(self.exchange_currency_pairs,\n is_exchange=True)", "def exchange_checker(self, exchanges):\n self.logger.debug(\"Checking exchanges: '%s'\", exchanges)\n exchanges = exchanges.split(\",\")\n reference = self.get_exchanges()\n for exchange in exchanges:\n if exchange in reference:\n pass\n else:\n raise InvalidExchangeError(\"Invalid exchange: '{}'\".format(exchange))", "def test_get_all_currency_pairs_from_exchange_with_no_invalid_pair(self):\n test_result = self.db_handler.get_all_currency_pairs_from_exchange(\"TESTEXCHANGE\")\n test_result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in test_result]\n result = self.session.query(ExchangeCurrencyPair).all()\n result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in result]\n assert result == test_result", "def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False", "def isValidPair(self,s1,s2):\n if (s1 == '(' and s2 == ')'):\n return True\n if (s1 == '[' and s2 == ']'):\n return True\n if (s1 == '{' and s2 == '}'):\n return True\n return False", "def check_pairs(self, all_pr, curr):\n flag = True\n for pair_ox in all_pr:\n if (curr[0] == pair_ox or curr[1] == pair_ox):\n flag = False\n return flag", "def is_pair(pair):\n return isa(pair, Pair) or is_list(pair)", "def valid_ric(ticker, ric):\n split_ric = ric.split('.')\n ticker_ = split_ric[0]\n exchange = split_ric[1]\n database = helper.create_db()\n exchange_list = database.retrieve_column_as_list(\"exchanges\",\n \"exchange_code\")\n return ticker == ticker_ and exchange in exchange_list", "def requires_pairing(cls) -> bool:\n return False", "def validate_exchange(cls, v: str):\n ret = validate_exchange(v)\n if ret is not None:\n raise ValueError(ret)\n return v", "def is_pair(a, b):\n\n\tif (not a.isalpha() or not b.isalpha()):\n\t\treturn False\n\n\ttest = ord(a) - ord(b)\n\tif (test == 32 or test == -32):\n\t\treturn True\n\t\n\treturn False", "def validate_exchange(cls, v: str, field: Field):\n ret = validate_exchange(v)\n if ret is not None:\n raise ValueError(ret)\n if field.name == \"maker_market_trading_pair\":\n cls.__fields__[\"maker_market\"].type_ = ClientConfigEnum( # rebuild the exchanges enum\n value=\"Exchanges\", # noqa: F821\n names={e: e for e in AllConnectorSettings.get_connector_settings().keys()},\n type=str,\n )\n if field.name == \"taker_market_trading_pair\":\n cls.__fields__[\"taker_market\"].type_ = ClientConfigEnum( # rebuild the exchanges enum\n value=\"Exchanges\", # noqa: F821\n names={e: e for e in AllConnectorSettings.get_connector_settings().keys()},\n type=str,\n )\n return v", "def check_exchange_amount(exc):\n if \"amount\" not in exc:\n raise 
InvalidExchange\n if np.isnan(exc[\"amount\"]) or np.isinf(exc[\"amount\"]):\n raise ValueError(\"Invalid amount in exchange {}\".format(exc))", "def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def valid_trade(player, trade):\r\n\t\t\ttry:\r\n\t\t\t\tpl_id = trade['player_id']\r\n\t\t\t\tif pl_id is not None:\r\n\t\t\t\t\tpl = self.get_player(int(pl_id))\r\n\r\n\t\t\t\tport = False\r\n\t\t\t\tif 'port' in trade:\r\n\t\t\t\t\tport = trade['port']\r\n\r\n\t\t\t\tif port:\r\n\t\t\t\t\tif pl_id is not None:\r\n\t\t\t\t\t\t# Can't have both port and target player.\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\t\tif player is not self.current_player:\r\n\t\t\t\t\t\t# Can't port trade on other turn\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\t\tif len([val for val in trade['give'].values() if val > 0]) != 1:\r\n\t\t\t\t\t\t# Can't trade 2 different resources at once with port.\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\t\ttrade_res, trade_count = [(key,val) for (key, val)\r\n\t\t\t\t\t in trade['give'].items()\r\n\t\t\t\t\t if val > 0][0]\r\n\r\n\t\t\t\t\t# You can only trade at the best rate you have.\r\n\t\t\t\t\tif trade_res in player.get_ports():\r\n\t\t\t\t\t\tif trade_count != 2:\r\n\t\t\t\t\t\t\treturn False\r\n\t\t\t\t\telif 'general' in player.get_ports():\r\n\t\t\t\t\t\tif trade_count != 3:\r\n\t\t\t\t\t\t\treturn False\r\n\t\t\t\t\telif trade_count != 4:\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\tturn = int(trade['turn'])\r\n\t\t\t\tif turn != self.action_number or not self.can_trade:\r\n\t\t\t\t\t# Outdated trade\r\n\t\t\t\t\tlogging.debug(\"wrong action number\")\r\n\t\t\t\t\treturn False\r\n\r\n\t\t\t\t# Make sure all the resources are real ones.\r\n\t\t\t\tfor key in itertools.chain(trade['give'], trade['want']):\r\n\t\t\t\t\tif key not in resources:\r\n\t\t\t\t\t\tlogging.debug(\"invalid resource\")\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\t# Check if player has enough resources.\r\n\t\t\t\tfor res in trade['give']:\r\n\t\t\t\t\tif player.cards[res] < trade['give'][res]:\r\n\t\t\t\t\t\tlogging.debug(\"not enough resources\")\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\t\t# You can't have a resource in both give and want\r\n\t\t\t\t\tif res in trade['want'] and trade['give'][res] > 0 and trade['want'][res] > 0:\r\n\t\t\t\t\t\tlogging.debug(\"want and give\")\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t\t# Make sure both giving and wanting something.\r\n\t\t\t\tdef all_zero(dict):\r\n\t\t\t\t\treturn max(list(dict.values()) or [0]) == 0\r\n\r\n\t\t\t\tif all_zero(trade['give']) or all_zero(trade['want']):\r\n\t\t\t\t\tlogging.debug(\"want nothing\")\r\n\t\t\t\t\treturn False\r\n\r\n\t\t\t\t# Make sure people dont try and cheat by offering negative values...\r\n\t\t\t\tdef has_negative(dict):\r\n\t\t\t\t\treturn min(list(dict.values()) or [0]) < 0\r\n\r\n\t\t\t\tif has_negative(trade['give']) or has_negative(trade['want']):\r\n\t\t\t\t\tlogging.debug(\"neg values\")\r\n\t\t\t\t\treturn False\r\n\r\n\t\t\t\treturn True\r\n\t\t\texcept (KeyError, TypeError, IndexError, ValueError) as e:\r\n\t\t\t\tlogging.debug(\"error: {}\".format(e))\r\n\t\t\t\treturn False", "def is_valid_coordinate_pair(coordinates):\n longitude = coordinates[0]\n latitude = coordinates[1]\n\n if longitude == None or latitude == None:\n 
return False\n\n if latitude < -90.0 or latitude > 90.0:\n return False\n\n if longitude < -180.0 or longitude > 180.0:\n return False\n\n return True", "def _perform_exchange_pair_success_test(\n self, exchange_left, exchange_right, exchange_left_type, exchange_right_type\n ):\n match = exchange_right.get_match()\n self.assertEqual(exchange_left, match)\n\n self.assertTrue(exchange_right.matches(match))\n self.assertTrue(match.matches(exchange_right))\n\n if exchange_left.initiator_student:\n self.assertTrue(\n is_exchange_acceptable(exchange_right, exchange_left.initiator_student)\n )\n if exchange_right.initiator_student:\n self.assertTrue(\n is_exchange_acceptable(exchange_left, exchange_right.initiator_student)\n )\n\n # pre-check types\n self.assertEqual(exchange_left.get_type(), exchange_left_type)\n self.assertEqual(exchange_right.get_type(), exchange_right_type)\n\n globally_processed_exchanges_before = sum(\n 1 for e in Exchange.objects.all() if e.is_finalized()\n )\n\n process_exchange_request_matches(match, exchange_right)\n self._finalized_exchange_pair_checks(exchange_left, exchange_right)\n\n # post-check types\n self.assertEqual(exchange_left.get_type(), exchange_left_type)\n self.assertEqual(exchange_right.get_type(), exchange_right_type)\n\n globally_processed_exchanges_after = sum(\n 1 for e in Exchange.objects.all() if e.is_finalized()\n )\n self.assertEqual(\n globally_processed_exchanges_before + 2, globally_processed_exchanges_after\n )", "def _cross_check(self, pub_key):\n if self.curve_name != pub_key.curve.name:\n raise ValueError(\n \"The curve in private key {} and in algorithm {} don't \"\n \"match\".format(pub_key.curve.name, self.curve_name)\n )", "def validate_exchange(cls, v: str, field: Field):\n ret = validate_exchange(v)\n if ret is not None:\n raise ValueError(ret)\n return v", "def test_unsupported_pair(self):\n with pytest.raises(StateError, match=\"The pair of input\"):\n State(\"water\", T=Q_(100.0, \"degC\"), u=Q_(1e6, \"J/kg\"))", "def IsValidCoordinatePair(self, latitude, longitude):\n\n is_valid_latitude = False\n is_valid_longitude = False\n\n if (\n latitude >= CoordinateTransform.LATITUDE_MIN_VALUE and\n latitude <= CoordinateTransform.LATITUDE_MAX_VALUE\n ):\n is_valid_latitude = True\n else:\n self._logger.error(\n \"%s coordinate out of range %s, %s.\",\n latitude,\n CoordinateTransform.LATITUDE_MIN_VALUE,\n CoordinateTransform.LATITUDE_MAX_VALUE)\n\n if (\n longitude >= CoordinateTransform.LONGITUDE_MIN_VALUE and\n longitude <= CoordinateTransform.LONGITUDE_MAX_VALUE\n ):\n is_valid_longitude = True\n else:\n self._logger.error(\n \"%s coordinate out of range %s, %s.\",\n longitude,\n CoordinateTransform.LONGITUDE_MIN_VALUE,\n CoordinateTransform.LONGITUDE_MAX_VALUE)\n\n self._logger.debug(\n \"Latitude %s validity status : %s, Longitude %s validity status: %s.\",\n str(latitude), is_valid_latitude, str(longitude), is_valid_longitude\n )\n\n return is_valid_latitude and is_valid_longitude", "def input_validation(self, prompt):\r\n\r\n while True:\r\n try:\r\n x, y = map(int, input(prompt).split())\r\n except ValueError: # when there is less than or more than 2 input values\r\n print('Invalid input try again.')\r\n continue\r\n if (x != self.selected[0]) or (y != self.selected[1]): # different from first choice\r\n if (0 <= x <= 3) and (0 <= y <= 12): # Valid input\r\n if not ([x, y] in self.bin): # Check if this card is still there or not\r\n break\r\n else:\r\n print('This card has already been taken.')\r\n continue\r\n else: # invalid 
input\r\n print('Row and column should be from 0 to 3 and 1 to 12 respectively.')\r\n continue\r\n else:\r\n print('Choose a card different from your first choice')\r\n continue\r\n return x, y", "def shipvalidator(point1: tuple, point2: tuple, board: list):\n valid = True\n # Is horizontal\n if point1[0] == point2[0]:\n # No collisions\n for i in range(min(point1[1], point2[1]), max(point1[1], point2[1])):\n if board[point1[0]][i] != \" \":\n valid = False\n # Is vertical\n elif point1[1] == point2[1]:\n # Doesn't overlap\n for i in range(min(point1[0], point2[0]), max(point1[0], point2[0])):\n if board[i][point1[1]] != \" \":\n valid = False\n else:\n valid = False\n return valid", "def test_get_currency_pairs_with_second_currency_valid(self):\n # todo : Eingabeparameter in der Methode get_currency_pairs_with_second_currency in db_handler\n # müsste eigentlich eine Liste an Currencies entgegennehmen, wie in der Methode\n # get_currency_pairs_with_first_currency, und nicht nur einen einzelnen String.\n # Ich habe das noch nicht gefixt, da ich nicht genau weiß, ob dann eventuell Fehlermeldungen geworfen werden,\n # bei den vorhandenen Aufrufen der Methode. Diese Aufrufe müssten dann eventuell angepasst werden.\n test_result = self.db_handler.get_currency_pairs_with_second_currency(\"TESTEXCHANGE\", [\"BTC\"])\n test_result = [item.second_id for item in test_result]\n result = self.session.query(ExchangeCurrencyPair).filter(ExchangeCurrencyPair.second_id.__eq__(1)).all()\n result = [item.second_id for item in result]\n assert result == test_result", "def _validate(self, trade):\n LOGGER.debug(\"Validating trade %s\", trade.Oid())\n # Check if provision is applicable for given trade\n if not self._is_valid_trade(trade):\n message = \"Invalid trade\"\n raise ProvisionNotApplicable(message)\n\n # Check if provision is applicable for leg\n leg = self._first_leg(trade.Instrument())\n if not self._is_valid_leg(leg):\n message = \"Invalid leg\"\n raise ProvisionNotApplicable(message)\n\n # Check if forward curve matches\n if self.forward_yield_curve != self._mapped_forward_curve(leg):\n message = \"Forward curve doesn't match. Leg '{0}'. Curves: {1} != {2}\".format(leg.Oid(), \n self.forward_yield_curve.Name(), \n self._mapped_forward_curve(leg).Name())\n raise ProvisionNotApplicable(message)\n\n # Trade validated\n self.leg = leg\n\n # Fill-in calendars for banking days.\n self.pay_calendars = self._get_pay_calendars(leg)", "def has_pair(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n self.rank_per_hand['0'] = \"pair\"\n return True\n return False", "def is_reversed(rate_tuple, from_currency, to_currency):\n if (rate_tuple[1] == from_currency) and (rate_tuple[0] == to_currency):\n return True\n else:\n return False", "def __contains__(self, pair):\n aVal, bVal = pair\n return aVal in self._forwardMap and \\\n bVal in self._forwardMap.__getitem__(aVal)" ]
[ "0.6861681", "0.6571933", "0.6249077", "0.6183069", "0.6036506", "0.5871636", "0.5846162", "0.5783997", "0.5700833", "0.56721383", "0.5660363", "0.5637686", "0.5573187", "0.5572298", "0.5558149", "0.5551392", "0.55479705", "0.55122805", "0.5481704", "0.5452177", "0.53962547", "0.5380818", "0.5379949", "0.5361806", "0.5346372", "0.53225744", "0.5320938", "0.53143054", "0.5306027", "0.5298587" ]
0.8063874
0
Fetches the balance for a pair on an exchange through CCXT
def fetch_balance(self, exchange, pair): return self.ccxt.fetch_balance(exchange, pair)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def fetch_balance(self, params={}):\n await self.load_markets()\n request = {\n 'currency': 'all',\n }\n response = await self.privateGetUserMargin(self.extend(request, params))\n #\n # [\n # {\n # \"account\":1455728,\n # \"currency\":\"XBt\",\n # \"riskLimit\":1000000000000,\n # \"prevState\":\"\",\n # \"state\":\"\",\n # \"action\":\"\",\n # \"amount\":263542,\n # \"pendingCredit\":0,\n # \"pendingDebit\":0,\n # \"confirmedDebit\":0,\n # \"prevRealisedPnl\":0,\n # \"prevUnrealisedPnl\":0,\n # \"grossComm\":0,\n # \"grossOpenCost\":0,\n # \"grossOpenPremium\":0,\n # \"grossExecCost\":0,\n # \"grossMarkValue\":0,\n # \"riskValue\":0,\n # \"taxableMargin\":0,\n # \"initMargin\":0,\n # \"maintMargin\":0,\n # \"sessionMargin\":0,\n # \"targetExcessMargin\":0,\n # \"varMargin\":0,\n # \"realisedPnl\":0,\n # \"unrealisedPnl\":0,\n # \"indicativeTax\":0,\n # \"unrealisedProfit\":0,\n # \"syntheticMargin\":null,\n # \"walletBalance\":263542,\n # \"marginBalance\":263542,\n # \"marginBalancePcnt\":1,\n # \"marginLeverage\":0,\n # \"marginUsedPcnt\":0,\n # \"excessMargin\":263542,\n # \"excessMarginPcnt\":1,\n # \"availableMargin\":263542,\n # \"withdrawableMargin\":263542,\n # \"timestamp\":\"2020-08-03T12:01:01.246Z\",\n # \"grossLastValue\":0,\n # \"commission\":null\n # }\n # ]\n #\n return self.parse_balance(response)", "async def fetch_balance(self, params={}):\n # self api call does not return the 'used' amount - use the v1 version instead(which also returns zero balances)\n # there is a difference between self and the v1 api, namely trading wallet is called margin in v2\n await self.load_markets()\n accountsByType = self.safe_value(self.options, 'v2AccountsByType', {})\n requestedType = self.safe_string(params, 'type', 'exchange')\n accountType = self.safe_string(accountsByType, requestedType, requestedType)\n if accountType is None:\n keys = list(accountsByType.keys())\n raise ExchangeError(self.id + ' fetchBalance() type parameter must be one of ' + ', '.join(keys))\n isDerivative = requestedType == 'derivatives'\n query = self.omit(params, 'type')\n response = await self.privatePostAuthRWallets(query)\n result = {'info': response}\n for i in range(0, len(response)):\n balance = response[i]\n type = self.safe_string(balance, 0)\n currencyId = self.safe_string_lower(balance, 1, '')\n start = len(currencyId) - 2\n isDerivativeCode = currencyId[start:] == 'f0'\n # self will only filter the derivative codes if the requestedType is 'derivatives'\n derivativeCondition = (not isDerivative or isDerivativeCode)\n if (accountType == type) and derivativeCondition:\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['total'] = self.safe_string(balance, 2)\n account['free'] = self.safe_string(balance, 4)\n result[code] = account\n return self.safe_balance(result)", "def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 
'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result", "def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalanceV2(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # \"AVAILABLE_ETH\": 0.0,\n # \"ETH\": 0.0,\n # \"LOCKED_ETH\": 0.0,\n # \"AVAILABLE_BCHSV\": 0.0,\n # \"BCHSV\": 0.0,\n # \"LOCKED_BCHSV\": 0.0,\n # \"AVAILABLE_BCHABC\": 0.0,\n # \"BCHABC\": 0.0,\n # \"LOCKED_BCHABC\": 0.0,\n # \"AVAILABLE_LTC\": 0.0,\n # \"LTC\": 0.0,\n # \"LOCKED_LTC\": 0.0,\n # \"AVAILABLE_ETC\": 0.0,\n # \"ETC\": 0.0,\n # \"LOCKED_ETC\": 0.0,\n # \"AVAILABLE_BTG\": 0.0,\n # \"BTG\": 0.0,\n # \"LOCKED_BTG\": 0.0,\n # \"AVAILABLE_GRIN\": 0.0,\n # \"GRIN\": 0.0,\n # \"LOCKED_GRIN\": 0.0,\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchabcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BtgNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcBtc\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchsvNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"GrinNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0}\n # }\n # }\n #\n return self.parse_balance(response)", "def balances():\n loop.run_until_complete(app.exchanges.fetch_balances())\n print(app.exchanges.balances_str)", "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()", "def getBalance(self, currency=''):\n\n if self.app.getExchange() == 'binance':\n if self.mode == 'live':\n model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret())\n df = model.getAccount()\n if isinstance(df, pd.DataFrame):\n if currency == '':\n # retrieve all balances\n return df\n else:\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n else:\n return 0.0\n else:\n # return dummy balances\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n if self.app.getExchange() == 'binance':\n self.balance = self.balance.replace('QUOTE', currency)\n 
else: \n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n else:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty:\n self.balance.loc[len(self.balance)] = [currency, 0, 0, 0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n\n else:\n if self.mode == 'live':\n # if config is provided and live connect to Coinbase Pro account portfolio\n model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())\n if currency == '':\n # retrieve all balances\n return model.getAccounts()[['currency', 'balance', 'hold', 'available']]\n else:\n df = model.getAccounts()\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n \n else:\n # return dummy balances\n\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n elif currency in ['BCH','BTC','ETH','LTC','XLM']:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty == True:\n self.balance.loc[len(self.balance)] = [currency,0,0,0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))", "def _handle_icx_get_balance(self,\n context: 'IconScoreContext',\n params: dict) -> int:\n address = params['address']\n return self._icx_engine.get_balance(context, address)", "def get_wallet_balance():\n try:\n if CONF.exchange == 'bitmex':\n return EXCHANGE.fetch_balance()['info'][0]['walletBalance'] * CONF.satoshi_factor\n if CONF.exchange == 'kraken':\n asset = CONF.base if CONF.base != 'BTC' else 'XBt'\n return float(EXCHANGE.private_post_tradebalance({'asset': asset})['result']['tb'])\n if CONF.exchange == 'liquid':\n result = EXCHANGE.private_get_accounts_balance()\n if result is not None:\n for bal in result:\n if bal['currency'] == CONF.base:\n return float(bal['balance'])\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as 
error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_wallet_balance()", "def get_balance(self, exchange_id, coin):\n self.check_arguments(exchange_id, coin)\n\n try:\n balance = self.get_balance_helper(exchange_id, coin)\n return balance\n except RetryError:\n raise ServerError(exchange_id)", "def get_margin_balance():\n try:\n if CONF.exchange == 'bitmex':\n bal = EXCHANGE.fetch_balance()[CONF.base]\n elif CONF.exchange == 'kraken':\n bal = EXCHANGE.private_post_tradebalance({'asset': CONF.base})['result']\n bal['free'] = float(bal['mf'])\n bal['total'] = float(bal['e'])\n bal['used'] = float(bal['m'])\n elif CONF.exchange == 'liquid':\n bal = get_crypto_balance()\n return bal\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_margin_balance()", "async def handle_get_trading_balance_response(self, response: RequesterResponse\n ) -> HitbtcTradingCurrencyBalances:", "def get_balance(self, ticker):\n return self.trading_client.account_balance(ticker, 'usd')", "def balance(self, card_number):\n database_cursor.execute(f\"SELECT balance FROM card WHERE number = {card_number};\")\n return database_cursor.fetchone()[0]", "def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def get_trades(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'trades', query_string))\n if r.status_code == 200:\n return r.json()", "async def balance(self, ctx):\n try:\n cash = await ctx.bot.pool.fetchrow(f'select cash from wallet where id={ctx.author.id}')\n\n if cash is None:\n await ctx.bot.pool.execute(f'insert into wallet values ({ctx.author.id}, 0);')\n return await ctx.send('You do not have a wallet yet.')\n\n if cash[0] is None:\n return await ctx.send('You do not have a wallet yet.')\n\n await ctx.send(f'You have {cash[0]} robux.')\n except Exception as e:\n await ctx.send(e)", "def get_balance(self, currency=None):\n if currency:\n return self.__call__('balance', 'getbalance',\n {'currencyname': currency})\n return self.__call__('balance', 'getbalances')", "def get_balance(card):\n data = {\n \"Card.Number\": card[0],\n \"Card.Pin\": card[1],\n }\n\n response = requests.post(BALANCE_URL, data=data, headers=HEADERS)\n if response.status_code == 200:\n match = BALANCE_RE.search(response.text)\n if match:\n return float(match.group(1))", "def get_balance(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting address balances. 
\"\n \"Or rather it has no defined 'get_balance' method.\"\n )", "def account_balance(self, currency_symbol):\n return self.get(f'balances/{currency_symbol}', auth=True)", "def get_balance(self, address):\n balance = 0\n for block in self.chain:\n for t in block['transactions']:\n if t['recipient'] == address:\n balance += t['amount']\n elif t['sender'] == address:\n balance -= t['amount']\n return balance", "def get_market_price(self, exchange, pair, type):\n return self.ccxt.get_market_price(exchange, pair, type)", "async def fetch_balance(self, params={}):\n await self.load_markets()\n response = await self.privateGetUserAssets(params)\n #\n # {\n # \"success\": \"1\",\n # \"data\": {\n # \"assets\": [\n # {\n # \"asset\": \"jpy\",\n # \"amount_precision\": \"4\",\n # \"onhand_amount\": \"0.0000\",\n # \"locked_amount\": \"0.0000\",\n # \"free_amount\": \"0.0000\",\n # \"stop_deposit\": False,\n # \"stop_withdrawal\": False,\n # \"withdrawal_fee\": {\n # \"threshold\": \"30000.0000\",\n # \"under\": \"550.0000\",\n # \"over\": \"770.0000\"\n # }\n # },\n # {\n # \"asset\": \"btc\",\n # \"amount_precision\": \"8\",\n # \"onhand_amount\": \"0.00000000\",\n # \"locked_amount\": \"0.00000000\",\n # \"free_amount\": \"0.00000000\",\n # \"stop_deposit\": False,\n # \"stop_withdrawal\": False,\n # \"withdrawal_fee\": \"0.00060000\"\n # },\n # ]\n # }\n # }\n #\n return self.parse_balance(response)", "def get_balance(self, address: str, erc20_address: str) -> int:\n return get_erc20_contract(self.w3, erc20_address).functions.balanceOf(address).call()", "async def fetch_currencies(self, params={}):\n response = await self.publicGetWalletAssets(params)\n #\n # {\n # \"XBt\": {\n # \"asset\": \"XBT\",\n # \"currency\": \"XBt\",\n # \"majorCurrency\": \"XBT\",\n # \"name\": \"Bitcoin\",\n # \"currencyType\": \"Crypto\",\n # \"scale\": \"8\",\n # # \"mediumPrecision\": \"8\",\n # # \"shorterPrecision\": \"4\",\n # # \"symbol\": \"₿\",\n # # \"weight\": \"1\",\n # # \"tickLog\": \"0\",\n # \"enabled\": True,\n # \"isMarginCurrency\": True,\n # \"minDepositAmount\": \"10000\",\n # \"minWithdrawalAmount\": \"1000\",\n # \"maxWithdrawalAmount\": \"100000000000000\",\n # \"networks\": [\n # {\n # \"asset\": \"btc\",\n # \"tokenAddress\": \"\",\n # \"depositEnabled\": True,\n # \"withdrawalEnabled\": True,\n # \"withdrawalFee\": \"20000\",\n # \"minFee\": \"20000\",\n # \"maxFee\": \"10000000\"\n # }\n # ]\n # },\n # }\n #\n result = {}\n for i in range(0, len(response)):\n currency = response[i]\n asset = self.safe_string(currency, 'asset')\n code = self.safe_currency_code(asset)\n id = self.safe_string(currency, 'currency')\n name = self.safe_string(currency, 'name')\n chains = self.safe_value(currency, 'networks', [])\n depositEnabled = False\n withdrawEnabled = False\n networks = {}\n scale = self.safe_string(currency, 'scale')\n precisionString = self.parse_precision(scale)\n precision = self.parse_number(precisionString)\n for j in range(0, len(chains)):\n chain = chains[j]\n networkId = self.safe_string(chain, 'asset')\n network = self.network_id_to_code(networkId)\n withdrawalFeeRaw = self.safe_string(chain, 'withdrawalFee')\n withdrawalFee = self.parse_number(Precise.string_mul(withdrawalFeeRaw, precisionString))\n isDepositEnabled = self.safe_value(chain, 'depositEnabled', False)\n isWithdrawEnabled = self.safe_value(chain, 'withdrawalEnabled', False)\n active = (isDepositEnabled and isWithdrawEnabled)\n if isDepositEnabled:\n depositEnabled = True\n if isWithdrawEnabled:\n withdrawEnabled = 
True\n networks[network] = {\n 'info': chain,\n 'id': networkId,\n 'network': network,\n 'active': active,\n 'deposit': isDepositEnabled,\n 'withdraw': isWithdrawEnabled,\n 'fee': withdrawalFee,\n 'precision': None,\n 'limits': {\n 'withdraw': {\n 'min': None,\n 'max': None,\n },\n 'deposit': {\n 'min': None,\n 'max': None,\n },\n },\n }\n currencyEnabled = self.safe_value(currency, 'enabled')\n currencyActive = currencyEnabled or (depositEnabled or withdrawEnabled)\n minWithdrawalString = self.safe_string(currency, 'minWithdrawalAmount')\n minWithdrawal = self.parse_number(Precise.string_mul(minWithdrawalString, precisionString))\n maxWithdrawalString = self.safe_string(currency, 'maxWithdrawalAmount')\n maxWithdrawal = self.parse_number(Precise.string_mul(maxWithdrawalString, precisionString))\n minDepositString = self.safe_string(currency, 'minDepositAmount')\n minDeposit = self.parse_number(Precise.string_mul(minDepositString, precisionString))\n result[code] = {\n 'id': id,\n 'code': code,\n 'info': currency,\n 'name': name,\n 'active': currencyActive,\n 'deposit': depositEnabled,\n 'withdraw': withdrawEnabled,\n 'fee': None,\n 'precision': precision,\n 'limits': {\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'withdraw': {\n 'min': minWithdrawal,\n 'max': maxWithdrawal,\n },\n 'deposit': {\n 'min': minDeposit,\n 'max': None,\n },\n },\n 'networks': networks,\n }\n return result", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def query_balances(self) -> ExchangeQueryBalances:\n self.first_connection()\n\n response = self._api_query('wallets')\n if response.status_code != HTTPStatus.OK:\n result, msg = self._process_unsuccessful_response(\n response=response,\n case='balances',\n )\n return result, msg\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError as e:\n msg = f'{self.name} returned invalid JSON response: {response.text}.'\n log.error(msg)\n raise RemoteError(msg) from e\n\n # Wallet items indices\n currency_index = 1\n balance_index = 2\n assets_balance: defaultdict[AssetWithOracles, Balance] = defaultdict(Balance)\n for wallet in response_list:\n if len(wallet) < API_WALLET_MIN_RESULT_LENGTH:\n log.error(\n f'Error processing a {self.name} balance result. '\n f'Found less items than expected',\n wallet=wallet,\n )\n self.msg_aggregator.add_error(\n f'Failed to deserialize a {self.name} balance result. '\n f'Check logs for details. Ignoring it.',\n )\n continue\n\n if wallet[balance_index] <= 0:\n continue # bitfinex can show small negative balances for some coins. Ignore\n\n try:\n asset = asset_from_bitfinex(\n bitfinex_name=wallet[currency_index],\n currency_map=self.currency_map,\n )\n except (UnknownAsset, UnsupportedAsset) as e:\n asset_tag = 'unknown' if isinstance(e, UnknownAsset) else 'unsupported'\n self.msg_aggregator.add_warning(\n f'Found {asset_tag} {self.name} asset {e.identifier} due to: {e!s}. 
'\n f'Ignoring its balance query.',\n )\n continue\n\n try:\n usd_price = Inquirer().find_usd_price(asset=asset)\n except RemoteError as e:\n self.msg_aggregator.add_error(\n f'Error processing {self.name} {asset.name} balance result due to inability '\n f'to query USD price: {e!s}. Skipping balance result.',\n )\n continue\n\n try:\n amount = deserialize_asset_amount(wallet[balance_index])\n except DeserializationError as e:\n self.msg_aggregator.add_error(\n f'Error processing {self.name} {asset.name} balance result due to inability '\n f'to deserialize asset amount due to {e!s}. Skipping balance result.',\n )\n continue\n\n assets_balance[asset] += Balance(\n amount=amount,\n usd_value=amount * usd_price,\n )\n\n return dict(assets_balance), ''", "def balance(self, account_number: int): \n return self._accounts[account_number][1]" ]
[ "0.7030547", "0.68978024", "0.68687195", "0.67780674", "0.6721376", "0.64743924", "0.64189506", "0.6408955", "0.63560534", "0.6347745", "0.63321614", "0.6328665", "0.6324221", "0.6236682", "0.6235676", "0.6205206", "0.61926544", "0.61854434", "0.6158296", "0.6154335", "0.6130548", "0.6120197", "0.6096025", "0.6084404", "0.6081849", "0.6075689", "0.6070944", "0.6060824", "0.6048849", "0.60245943" ]
0.85180926
0
Retrieves the trading fee for a certain pair on a certain exchange
def get_exchange_trading_fee(self, exchange, pair, type): return self.ccxt.get_exchange_trading_fee(exchange, pair, type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fee(self, pair, order_type):\n fees = self.p_state._getvalue()['fees']\n if fees:\n\n return float(fees[self._handler[order_type]][pair]['fee'])\n\n else:\n\n return 0.0", "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()", "async def handle_get_fee_response(self, response: RequesterResponse\n ) -> HitbtcTradingFeeModel:", "def get_market_price(self, exchange, pair, type):\n return self.ccxt.get_market_price(exchange, pair, type)", "def fetch_trading_fees(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalance(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # ...\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # ...\n # }\n # }\n #\n fees = self.safe_value(response, 'Fees', {})\n keys = list(fees.keys())\n result = {}\n for i in range(0, len(keys)):\n marketId = keys[i]\n symbol = self.safe_symbol(marketId)\n fee = self.safe_value(fees, marketId)\n makerString = self.safe_string(fee, 'FeeMaker')\n takerString = self.safe_string(fee, 'FeeTaker')\n maker = self.parse_number(Precise.string_div(makerString, '100'))\n taker = self.parse_number(Precise.string_div(takerString, '100'))\n result[symbol] = {\n 'info': fee,\n 'symbol': symbol,\n 'taker': taker,\n 'maker': maker,\n 'percentage': True,\n 'tierBased': True,\n }\n return result", "def fetch_balance(self, exchange, pair):\n return self.ccxt.fetch_balance(exchange, pair)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def get_trades(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'trades', query_string))\n if r.status_code == 200:\n return r.json()", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # success: '1',\n # data: {\n # pairs: [\n # {\n # name: 'btc_jpy',\n # base_asset: 'btc',\n # quote_asset: 'jpy',\n # maker_fee_rate_base: '0',\n # taker_fee_rate_base: '0',\n # maker_fee_rate_quote: '-0.0002',\n # taker_fee_rate_quote: '0.0012',\n # unit_amount: '0.0001',\n # limit_max_amount: '1000',\n # market_max_amount: '10',\n # market_allowance_rate: '0.2',\n # price_digits: '0',\n # amount_digits: '4',\n # is_enabled: True,\n # stop_order: False,\n # stop_order_and_cancel: False\n # },\n # ...\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n pairs = self.safe_value(data, 'pairs', 
[])\n result = {}\n for i in range(0, len(pairs)):\n pair = pairs[i]\n marketId = self.safe_string(pair, 'name')\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = {\n 'info': pair,\n 'symbol': symbol,\n 'maker': self.safe_number(pair, 'maker_fee_rate_quote'),\n 'taker': self.safe_number(pair, 'taker_fee_rate_quote'),\n 'percentage': True,\n 'tierBased': False,\n }\n return result", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "async def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Optional[Decimal]:\n\n try:\n\n base, quote = trading_pair.split(\"-\")\n side = \"buy\" if is_buy else \"sell\"\n resp = await self._api_request(\"post\", \"terra/price\", {\"base\": base, \"quote\": quote, \"trade_type\": side,\n \"amount\": str(amount)})\n txFee = resp[\"txFee\"] / float(amount)\n price_with_txfee = resp[\"price\"] + txFee if is_buy else resp[\"price\"] - txFee\n return Decimal(str(price_with_txfee))\n # if resp[\"price\"] is not None:\n # return Decimal(str(resp[\"price\"]))\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.logger().network(\n f\"Error getting quote price for {trading_pair} {side} order for {amount} amount.\",\n exc_info=True,\n app_warning_msg=str(e)\n )", "def get_ticker(self, pair):\r\n method = self.public_endpoints['ticker']['method']\r\n url = self.base_url + self.public_endpoints['ticker']['url'].format(pairId=pair)\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def get_rolling_price(self, pair='XBTZAR'):\n\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n while True:\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n print(r.json())", "def fee(self, prices, fee):\n return self.volume(prices) * fee.value / Config.FEE_TOKEN_PRICE", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def getOrderBookPrice(exchange, symbol, side, quantity, order_book=None):\n # TODO test it\n # print(\"obap1\")\n order_book_side = order_book['asks'] \\\n if side == exchange.SIDE_SELL else order_book['bids']\n\n quantity = Decimal(quantity)\n i, orders, price = 0, [], Decimal(0)\n accounted_for_quantity = Decimal(0)\n qtdif = Decimal(1)\n # print(\"obap2\")\n while accounted_for_quantity < quantity or qtdif > Decimal(0.0001):\n try:\n order = order_book_side[i]\n except IndexError:\n raise Exception(\"There are not enough orders in the Order Book.\")\n # return False\n qty = min(Decimal(order[1]), quantity - accounted_for_quantity)\n price += Decimal(order[0]) * qty\n accounted_for_quantity += qty\n qtdif = abs(Decimal(1) - accounted_for_quantity / quantity)\n i += 1\n\n # print(\"obap3\")\n return price / quantity", "def get_fee_pct(self, contract_type: str) -> Tuple[float, float]:\n if contract_type == 'forex':\n return (0.00002, 0.00002)\n elif contract_type == 'crypto':\n if self.CRYPTO_EXCHANGE == 'binance':\n if self.trade_volume < 50_000:\n return (.001, .001)\n elif 
self.trade_volume < 100_000:\n return (.0009, .0009)\n elif self.trade_volume < 5000_000:\n return (.0009, .0008)\n elif self.trade_volume < 1_000_000:\n return (.0008, .0007)\n elif self.trade_volume < 5_000_000:\n return (.0007, .0005)\n elif self.trade_volume < 10_000_000:\n return (.0006, .0004)\n elif self.trade_volume < 25_000_000:\n return (.0006, 0)\n elif self.trade_volume < 100_000_000:\n return (.0005, 0)\n elif self.trade_volume < 250_000_000:\n return (.0004, 0)\n elif self.trade_volume < 500_000_000:\n return (.0003, 0)\n else: return (.0002, 0)\n elif self.CRYPTO_EXCHANGE == 'kraken':\n if self.trade_volume < 50_000:\n return (.0026, .0016)\n elif self.trade_volume < 100_000:\n return (.0024, .0014)\n elif self.trade_volume < 250_000:\n return (.0022, .0012)\n elif self.trade_volume < 500_000:\n return (.002, .001)\n elif self.trade_volume < 1_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 2_500_000:\n return (.0016, .0006)\n elif self.trade_volume < 5_000_000:\n return (.0014, .0004)\n elif self.trade_volume < 10_000_000:\n return (.0012, .0002)\n else: return (.001, 0)\n elif self.CRYPTO_EXCHANGE == 'coinbase':\n if self.trade_volume < 10_000:\n return (.005, .005)\n elif self.trade_volume < 50_000:\n return (.0035, .0035)\n elif self.trade_volume < 100_000:\n return (.0025, .0015)\n elif self.trade_volume < 1_000_000:\n return (.002, .001)\n elif self.trade_volume < 10_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 50_000_000:\n return (.0015, .0005)\n elif self.trade_volume < 300_000_000:\n return (.0007, 0)\n elif self.trade_volume < 500_000_000:\n return (.0005, 0)\n else: return (.0004, 0)\n elif self.CRYPTO_EXCHANGE == 'robinhood':\n return (0.0001, 0.0001)\n return (0, 0)", "def get_transfer_fee(value: float) -> float:\n return (value * (0.99 / 100)) + 4.9", "async def get_order_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:\n return await self.get_quote_price(trading_pair, is_buy, amount)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def do_fee(self,args):\n totalamount,fee,howmanyto,nexttier = bitstamp.fee_schedule()\n print \"Your 30 day volume is: %.5f. 
Your trade fee is: %.2f%%\" % (totalamount,fee)\n print \"You are $%s away from the next tier of: $%s\" % (howmanyto,nexttier)", "async def _get_open_trades_value(self, pair: str) -> float:\n\n total = 0.0\n\n if pair in self.trades:\n for trade in self.trades[pair]['open']:\n total += trade['open_value'] * trade['quantity']\n\n return total", "def get_fee(self):\n fee = round(self.order_payment.amount * Decimal(0.015), 2)\n return fee", "def get_trades(self, pair=\"btc_jpy\"):\n return self.execute_http_call(\"/api/trades?pair={}\".format(pair), \"GET\", headers=None)", "def get_ether_current_prices():\n req = requests.get('https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=BTC,USD,EUR')\n data = req.json()\n\n print('{0}, {1}, {2}'.format(data['EUR'], data['USD'], data['BTC']))", "async def get_price(self) -> PairPrice:\n try:\n data = await self._network.get_response_content_from_get_request(\n url=self._BTC_FEED_URL, format=DataFormat.JSON\n )\n except NetworkError as e:\n msg = f\"Error getting BTC feed from {self._BTC_FEED_URL}\"\n log.exception(msg)\n raise BtcFeedError() from e\n\n try:\n price = 1 / float(data[\"price\"])\n except KeyError as e:\n msg = f\"Missing price field in BTC feed from {self._BTC_FEED_URL}\"\n log.exception(msg)\n raise BtcFeedError() from e\n except ValueError as e:\n msg = f\"Error value in price field in BTC feed from {self._BTC_FEED_URL}: {data['price']}\"\n log.exception(msg)\n raise BtcFeedError() from e\n\n return PairPrice(pair=(self._btc, self._market), price=price)", "def get_sum_after_fees(self, index, way, token):\n sum = self.get_sum(index, way, token)\n if not sum:\n return None\n if self.pair.get_exchange().get_fee_token():\n return sum\n else:\n return int(sum * (1-self.pair.get_exchange().get_fees()))", "def get_fees(self, ordertype, quantity, price):\n params = {\n 'ordertype': ordertype,\n 'quantity': quantity,\n 'price': price\n }\n ret = self.Request.fetch('calculatefees',params=params)\n print ret\n return 0" ]
[ "0.72088563", "0.6718482", "0.6621101", "0.64627856", "0.6384671", "0.630662", "0.6283862", "0.6246754", "0.62267923", "0.6090204", "0.59220403", "0.59198195", "0.58899343", "0.58853585", "0.58453304", "0.58343554", "0.5809528", "0.5797195", "0.5788672", "0.5771605", "0.57653445", "0.57477957", "0.5733175", "0.57282287", "0.5724958", "0.5705684", "0.5675831", "0.567092", "0.56663024", "0.56557554" ]
0.8056572
0
Retrieves the market price for a certain pair on a certain exchange for a certain type (maker or taker)
def get_market_price(self, exchange, pair, type): return self.ccxt.get_market_price(exchange, pair, type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def get_exchange_rate(self, order_type, amount=None, price=None, pair=\"btc_jpy\"):\n if order_type not in [\"sell\", \"buy\"]:\n raise CoinCheckApiException(\"order_type should be \\\"sell\\\" or \\\"buy\\\"\")\n if amount is not None and price is not None:\n raise CoinCheckApiException(\"only one of \\\"amount\\\" and \\\"price\\\" can be provided \")\n\n query = \"order_type={}&pair={}\".format(order_type, pair)\n if amount is not None:\n query += \"&amount={}\".format(amount)\n else:\n query += \"&price={}\".format(price)\n\n return self.execute_http_call(\"/api/exchange/orders/rate?{}\".format(query), \"GET\", headers=None)", "def get_ticker(self, pair):\r\n method = self.public_endpoints['ticker']['method']\r\n url = self.base_url + self.public_endpoints['ticker']['url'].format(pairId=pair)\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def fetch_ticker(self, symbol: str, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = self.publicGetExchangesPairTicker(self.extend(request, params))\n return self.parse_ticker(response, market)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def get_exchange_trading_fee(self, exchange, pair, type):\n return self.ccxt.get_exchange_trading_fee(exchange, pair, type)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def price(self, irc, msg, args, optlist, typeName):\n\n 
try:\n typeID = self._get_typeID(typeName)\n itemType = self._get_type(typeID)\n except:\n irc.error('Unknown type')\n return\n\n if len(optlist) == 1:\n location = optlist[0][1]\n else:\n location = 'Jita'\n\n try:\n locationID = self._get_locationID(location)\n location = self._get_location(locationID)\n except:\n irc.error('Unknown location')\n return\n\n market = self._sql(\"\"\"\n SELECT * FROM evecentral_market\n WHERE \"locationID\"=%s\"\"\", [locationID])\n if not market:\n irc.reply('No data for that market location')\n return\n\n marketitem = self._sql(\"\"\"\n SELECT * FROM evecentral_marketitem\n WHERE \"locationID\"=%s AND \"typeID\"=%s\"\"\", [locationID, typeID])\n if marketitem:\n irc.reply('{0} in {1}: buy max: {2} (volume: {3:,d}). sell min: {4} (volume: {5:,d}).'.format(\n ircutils.bold(itemType['typeName']),\n self._colorize_system(location),\n ircutils.mircColor(\n '{:,.2f}'.format(marketitem['buy_max']),\n fg='green'),\n int(marketitem['buy_volume']),\n ircutils.mircColor(\n '{:,.2f}'.format(marketitem['sell_min']),\n fg='green'),\n int(marketitem['sell_volume']),\n ), prefixNick=False)\n else:\n irc.reply(\"Prices for {0} in {1} isn't updated yet.\".format(\n itemType['typeName'],\n location['itemName']\n ))", "def get(self, price, way):\n for offer in self.book[way]:\n if offer.get_price() == price:\n return offer\n return None", "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # success: '1',\n # data: {\n # pairs: [\n # {\n # name: 'btc_jpy',\n # base_asset: 'btc',\n # quote_asset: 'jpy',\n # maker_fee_rate_base: '0',\n # taker_fee_rate_base: '0',\n # maker_fee_rate_quote: '-0.0002',\n # taker_fee_rate_quote: '0.0012',\n # unit_amount: '0.0001',\n # limit_max_amount: '1000',\n # market_max_amount: '10',\n # market_allowance_rate: '0.2',\n # price_digits: '0',\n # amount_digits: '4',\n # is_enabled: True,\n # stop_order: False,\n # stop_order_and_cancel: False\n # },\n # ...\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n pairs = self.safe_value(data, 'pairs', [])\n result = {}\n for i in range(0, len(pairs)):\n pair = pairs[i]\n marketId = self.safe_string(pair, 'name')\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = {\n 'info': pair,\n 'symbol': symbol,\n 'maker': self.safe_number(pair, 'maker_fee_rate_quote'),\n 'taker': self.safe_number(pair, 'taker_fee_rate_quote'),\n 'percentage': True,\n 'tierBased': False,\n }\n return result", "def fetch_price():\n\n url = \"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}", "async def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Optional[Decimal]:\n\n try:\n\n base, quote = trading_pair.split(\"-\")\n side = \"buy\" if is_buy else \"sell\"\n resp = await self._api_request(\"post\", \"terra/price\", {\"base\": base, \"quote\": quote, \"trade_type\": side,\n \"amount\": str(amount)})\n txFee = resp[\"txFee\"] / float(amount)\n price_with_txfee = resp[\"price\"] + txFee if is_buy else resp[\"price\"] - txFee\n return Decimal(str(price_with_txfee))\n # if resp[\"price\"] is not None:\n # return Decimal(str(resp[\"price\"]))\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.logger().network(\n f\"Error getting quote price for {trading_pair} {side} order for {amount} amount.\",\n exc_info=True,\n app_warning_msg=str(e)\n )", 
"async def get_price(self) -> PairPrice:\n try:\n data = await self._network.get_response_content_from_get_request(\n url=self._BTC_FEED_URL, format=DataFormat.JSON\n )\n except NetworkError as e:\n msg = f\"Error getting BTC feed from {self._BTC_FEED_URL}\"\n log.exception(msg)\n raise BtcFeedError() from e\n\n try:\n price = 1 / float(data[\"price\"])\n except KeyError as e:\n msg = f\"Missing price field in BTC feed from {self._BTC_FEED_URL}\"\n log.exception(msg)\n raise BtcFeedError() from e\n except ValueError as e:\n msg = f\"Error value in price field in BTC feed from {self._BTC_FEED_URL}: {data['price']}\"\n log.exception(msg)\n raise BtcFeedError() from e\n\n return PairPrice(pair=(self._btc, self._market), price=price)", "def get_rolling_price(self, pair='XBTZAR'):\n\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n while True:\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n print(r.json())", "async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = await self.publicGetPairTicker(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n return self.parse_ticker(data, market)", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def sell(self, currency_pair, rate, amount):\n return self.api_query('sell', {\"currencyPair\": currency_pair, \"rate\": rate, \"amount\": amount})", "def query_market_data(self, kind_of_price):\n market_data = pyRofex.get_market_data(\n ticker=self.symbol,\n entries=[kind_of_price]\n )\n return market_data", "def price(self, fsym, tsyms, sign=False, try_conversion=True, exchange='CCCAGG'):\n\n result = {} # Default return value.\n\n # Build URL needed to request data from the server.\n url = self._url + 'price?' 
+ \\\n 'fsym=' + str(fsym) + '&tsyms=' + ','.join(tsyms) + '&sign=' + str(sign).lower() + \\\n '&tryConversion=' + str(try_conversion).lower() + '&e=' + str(exchange)\n\n # Connect and request data.\n with request.urlopen(url) as response:\n\n html = response.read()\n\n try:\n # Convert to Python dictionary.\n result = json.loads(html)\n\n except ValueError as ex:\n print('JSON response could not be parsed.')\n print(ex)\n\n else:\n if 'Response' in result:\n if result['Response'] == 'Error':\n if 'Message' in result:\n raise Exception('CryptoCompare API returned Error: ' + str(result['Message']))\n else:\n raise Exception('CryptoCompare API returned Error without further explanation.')\n else:\n raise Exception('CryptoCompare API returned an unexpected Response field: ' + str(result))\n\n return result", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "async def fetch_markets(self, params={}):\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # \"success\": 1,\n # \"data\": {\n # \"pairs\": [\n # {\n # \"name\": \"btc_jpy\",\n # \"base_asset\": \"btc\",\n # \"quote_asset\": \"jpy\",\n # \"maker_fee_rate_base\": \"0\",\n # \"taker_fee_rate_base\": \"0\",\n # \"maker_fee_rate_quote\": \"-0.0002\",\n # \"taker_fee_rate_quote\": \"0.0012\",\n # \"unit_amount\": \"0.0001\",\n # \"limit_max_amount\": \"1000\",\n # \"market_max_amount\": \"10\",\n # \"market_allowance_rate\": \"0.2\",\n # \"price_digits\": 0,\n # \"amount_digits\": 4,\n # \"is_enabled\": True,\n # \"stop_order\": False,\n # \"stop_order_and_cancel\": False\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data')\n pairs = self.safe_value(data, 'pairs', [])\n result = []\n for i in range(0, len(pairs)):\n entry = pairs[i]\n id = self.safe_string(entry, 'name')\n baseId = self.safe_string(entry, 'base_asset')\n quoteId = self.safe_string(entry, 'quote_asset')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n result.append({\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': self.safe_value(entry, 'is_enabled'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'taker': self.safe_number(entry, 'taker_fee_rate_quote'),\n 'maker': self.safe_number(entry, 'maker_fee_rate_quote'),\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.parse_number(self.parse_precision(self.safe_string(entry, 'amount_digits'))),\n 'price': self.parse_number(self.parse_precision(self.safe_string(entry, 'price_digits'))),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(entry, 'unit_amount'),\n 'max': self.safe_number(entry, 'limit_max_amount'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': entry,\n })\n return result", "def sell(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:\n return self.place_order(False, trading_pair, amount, price)", "def get_ticker(self, pair='btc_jpy'):\n url = 'https://coincheck.com/api/ticker'\n r = requests.get(url, {'pair': pair}, timeout=self.timeout)\n\n return json.loads(r.text)", "def sell(self,\n 
currency_pair,\n rate,\n amount):\n pass", "def get_ether_current_prices():\n req = requests.get('https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=BTC,USD,EUR')\n data = req.json()\n\n print('{0}, {1}, {2}'.format(data['EUR'], data['USD'], data['BTC']))", "def getOrderBookPrice(exchange, symbol, side, quantity, order_book=None):\n # TODO test it\n # print(\"obap1\")\n order_book_side = order_book['asks'] \\\n if side == exchange.SIDE_SELL else order_book['bids']\n\n quantity = Decimal(quantity)\n i, orders, price = 0, [], Decimal(0)\n accounted_for_quantity = Decimal(0)\n qtdif = Decimal(1)\n # print(\"obap2\")\n while accounted_for_quantity < quantity or qtdif > Decimal(0.0001):\n try:\n order = order_book_side[i]\n except IndexError:\n raise Exception(\"There are not enough orders in the Order Book.\")\n # return False\n qty = min(Decimal(order[1]), quantity - accounted_for_quantity)\n price += Decimal(order[0]) * qty\n accounted_for_quantity += qty\n qtdif = abs(Decimal(1) - accounted_for_quantity / quantity)\n i += 1\n\n # print(\"obap3\")\n return price / quantity" ]
[ "0.7793075", "0.69196445", "0.67033094", "0.6688754", "0.6671473", "0.66665936", "0.6616699", "0.6557316", "0.6485801", "0.6459023", "0.6424058", "0.6368259", "0.633056", "0.63264066", "0.6295311", "0.62687165", "0.626795", "0.62592244", "0.6171922", "0.6159443", "0.60960245", "0.60727286", "0.60475117", "0.60404944", "0.60347897", "0.6031588", "0.5984504", "0.5981977", "0.59083235", "0.59058774" ]
0.8632258
0
Place an order through the ccxt library on a certain exchange, for a certain pair (e.g. BTC/USD), with type buy/sell and the amount in the base currency (for BTC/USD this is BTC)
def place_order(self, exchange, pair, type, amount, price = None): return self.ccxt.place_order(exchange, pair, type, amount, price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buy(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:\n return self.place_order(True, trading_pair, amount, price)", "async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n # order types \"limit\" and \"market\" immediatley parsed \"EXCHANGE LIMIT\" and \"EXCHANGE MARKET\"\n # note: same order types exist for margin orders without the EXCHANGE prefix\n orderTypes = self.safe_value(self.options, 'orderTypes', {})\n orderType = type.upper()\n if market['spot']:\n # although they claim that type needs to be 'exchange limit' or 'exchange market'\n # in fact that's not the case for swap markets\n orderType = self.safe_string_upper(orderTypes, type, type)\n stopPrice = self.safe_string_2(params, 'stopPrice', 'triggerPrice')\n timeInForce = self.safe_string(params, 'timeInForce')\n postOnlyParam = self.safe_value(params, 'postOnly', False)\n reduceOnly = self.safe_value(params, 'reduceOnly', False)\n clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')\n params = self.omit(params, ['triggerPrice', 'stopPrice', 'timeInForce', 'postOnly', 'reduceOnly', 'price_aux_limit'])\n amountString = self.amount_to_precision(symbol, amount)\n amountString = amountString if (side == 'buy') else Precise.string_neg(amountString)\n request = {\n # 'gid': 0123456789, # int32, optional group id for the order\n # 'cid': 0123456789, # int32 client order id\n 'type': orderType,\n 'symbol': market['id'],\n # 'price': self.number_to_string(price),\n 'amount': amountString,\n # 'flags': 0, # int32, https://docs.bitfinex.com/v2/docs/flag-values\n # 'lev': 10, # leverage for a derivative orders, the value should be between 1 and 100 inclusive, optional, 10 by default\n # 'price_trailing': self.number_to_string(priceTrailing),\n # 'price_aux_limit': self.number_to_string(stopPrice),\n # 'price_oco_stop': self.number_to_string(ocoStopPrice),\n # 'tif': '2020-01-01 10:45:23', # datetime for automatic order cancellation\n # 'meta': {\n # 'aff_code': 'AFF_CODE_HERE'\n # },\n }\n stopLimit = ((orderType == 'EXCHANGE STOP LIMIT') or ((orderType == 'EXCHANGE LIMIT') and (stopPrice is not None)))\n exchangeStop = (orderType == 'EXCHANGE STOP')\n exchangeMarket = (orderType == 'EXCHANGE MARKET')\n stopMarket = (exchangeStop or (exchangeMarket and (stopPrice is not None)))\n ioc = ((orderType == 'EXCHANGE IOC') or (timeInForce == 'IOC'))\n fok = ((orderType == 'EXCHANGE FOK') or (timeInForce == 'FOK'))\n postOnly = (postOnlyParam or (timeInForce == 'PO'))\n if (ioc or fok) and (price is None):\n raise InvalidOrder(self.id + ' createOrder() requires a price argument with IOC and FOK orders')\n if (ioc or fok) and exchangeMarket:\n raise InvalidOrder(self.id + ' createOrder() does not allow market IOC and FOK orders')\n if (orderType != 'MARKET') and (not exchangeMarket) and (not exchangeStop):\n request['price'] = self.price_to_precision(symbol, price)\n if stopLimit or stopMarket:\n # request['price'] is taken for stop orders\n request['price'] = self.price_to_precision(symbol, stopPrice)\n if stopMarket:\n request['type'] = 'EXCHANGE STOP'\n elif stopLimit:\n request['type'] = 'EXCHANGE STOP LIMIT'\n request['price_aux_limit'] = self.price_to_precision(symbol, price)\n if ioc:\n request['type'] = 'EXCHANGE IOC'\n elif fok:\n request['type'] = 'EXCHANGE FOK'\n # flag values may be summed to combine flags\n flags = 0\n if postOnly:\n flags = self.sum(flags, 4096)\n 
if reduceOnly:\n flags = self.sum(flags, 1024)\n if flags != 0:\n request['flags'] = flags\n if clientOrderId is not None:\n request['cid'] = clientOrderId\n params = self.omit(params, ['cid', 'clientOrderId'])\n response = await self.privatePostAuthWOrderSubmit(self.extend(request, params))\n #\n # [\n # 1653325121, # Timestamp in milliseconds\n # \"on-req\", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')\n # null, # unique ID of the message\n # null,\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325121798, # Millisecond timestamp of creation\n # 1653325121798, # Millisecond timestamp of update\n # -10, # Amount(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Type of the order: LIMIT, EXCHANGE LIMIT, MARKET, EXCHANGE MARKET, STOP, EXCHANGE STOP, STOP LIMIT, EXCHANGE STOP LIMIT, TRAILING STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC.\n # null, # Previous order type(stop-limit orders are converted to limit orders so for them previous type is always STOP)\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # 4096, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price(Stop Price for stop-limit orders, Limit Price for limit orders)\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ]\n # ],\n # null, # CODE(work in progress)\n # \"SUCCESS\", # Status of the request\n # \"Submitting 1 orders.\" # Message\n # ]\n #\n status = self.safe_string(response, 6)\n if status != 'SUCCESS':\n errorCode = response[5]\n errorText = response[7]\n raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')\n orders = self.safe_value(response, 4, [])\n order = self.safe_value(orders, 0)\n return self.parse_order(order, market)", "def place(exchange: str, side: str, amount: float, pair: str, price: float):\n loop.run_until_complete(place_order(exchange, side, amount, pair, price))", "def create_order(self, rate, amount, order_type, pair):\n nonce = nounce()\n payload = {'rate': rate,\n 'amount': amount,\n 'order_type': order_type,\n 'pair': pair\n }\n url = 'https://coincheck.com/api/exchange/orders'\n body = 'rate={rate}&amount={amount}&order_type={order_type}&pair={pair}'.format(**payload)\n message = nonce + url + body\n signature = hmac.new(self.secret_key.encode('utf-8'), message.encode('utf-8'), hashlib.sha256).hexdigest()\n headers = {\n 'ACCESS-KEY': self.access_key,\n 'ACCESS-NONCE': nonce,\n 'ACCESS-SIGNATURE': signature\n }\n r = requests.post(url, headers=headers, data=body, timeout=self.timeout)\n return json.loads(r.text)", "def buy(self,\n currency_pair,\n rate,\n amount):\n pass", "def place_order(self, tradetype, market, amount, 
rate,\n ordertype, timeInEffect, \n conditionType=None, target=None):\n\n if tradetype in ('BUY', 'buy'):\n method = \"tradebuy\"\n elif tradetype in ('SELL', 'sell'):\n method = \"tradesell\"\n\n if not conditionType:\n conditionType = \"CONDITION_NONE\"\n if not target:\n target = \"0\"\n options = {\"marketname\": market, \n \"ordertype\": ordertype, \n \"quantity\": str(amount),\n \"rate\": str(rate),\n \"timeineffect\": str(timeInEffect),\n \"conditiontype\": conditionType,\n \"target\": target}\n\n return self.__call__('market', method, options)", "def place_order(self, pair, side, ttype, size, price=0):\r\n possible_sides = ['bid','ask']\r\n possible_types = ['market', 'limit', 'stop', 'stop_limit']\r\n\r\n side = side.lower()\r\n ttype = ttype.lower()\r\n\r\n if not side in possible_sides:\r\n raise ValueError(\"Side value invalid\")\r\n\r\n if not ttype in possible_types:\r\n raise ValueError(\"Type value invalid\")\r\n\r\n payload = {\r\n \"trading_pair_id\": str(pair),\r\n \"side\": str(side),\r\n \"type\": str(ttype),\r\n \"size\": str(size)\r\n }\r\n\r\n if not price == 0:\r\n payload['price'] = str(price)\r\n\r\n method = self.private_endpoints['place_order']['method']\r\n url = self.base_url + self.private_endpoints['place_order']['url']\r\n req = requests.request(method, url, headers=self.get_auth_headers(nonce=True), json=payload)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def buy(symbol: str,\n quantity: Any,\n order_type: str = \"market\",\n price: Any = None,\n exchange: str = CRYPTO_EXCHANGE,\n api_key: str = CRYPTO_API_KEY,\n api_secret: str = CRYPTO_API_SECRET,\n exchange_password: Any = CRYPTO_API_PASSWORD,\n exchange_uid: Any = CRYPTO_API_UID,\n test_mode: bool = False) -> Any:\n try:\n if test_mode == True:\n url = CRYPTO_URL_TEST\n else:\n url = CRYPTO_URL_LIVE\n payload = {\n 'symbol': symbol.upper(),\n 'quantity': quantity,\n 'order_type': order_type,\n 'limitPrice': price\n }\n response = requests.post('{}/buy/{}'.format(url, exchange),\n headers=crypto_get_headers(\n api_key, api_secret, exchange_password,\n exchange_uid),\n json=payload)\n if response:\n return response.json()\n if response.status_code == 400:\n logger.error('Oops! An error Occurred ⚠️')\n raise BadRequest(response.text)\n if response.status_code == 401:\n logger.error('Oops! An error Occurred ⚠️')\n raise InvalidCredentials(response.text)\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n orderType = self.capitalize(type)\n reduceOnly = self.safe_value(params, 'reduceOnly')\n if reduceOnly is not None:\n if (market['type'] != 'swap') and (market['type'] != 'future'):\n raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + market['type'] + ' orders, reduceOnly orders are supported for swap and future markets only')\n brokerId = self.safe_string(self.options, 'brokerId', 'CCXT')\n qty = self.parse_to_int(self.amount_to_precision(symbol, amount))\n request = {\n 'symbol': market['id'],\n 'side': self.capitalize(side),\n 'orderQty': qty, # lot size multiplied by the number of contracts\n 'ordType': orderType,\n 'text': brokerId,\n }\n if (orderType == 'Stop') or (orderType == 'StopLimit') or (orderType == 'MarketIfTouched') or (orderType == 'LimitIfTouched'):\n stopPrice = self.safe_number_2(params, 'stopPx', 'stopPrice')\n if stopPrice is None:\n raise ArgumentsRequired(self.id + ' createOrder() requires a stopPx or stopPrice parameter for the ' + orderType + ' order type')\n else:\n request['stopPx'] = float(self.price_to_precision(symbol, stopPrice))\n params = self.omit(params, ['stopPx', 'stopPrice'])\n if (orderType == 'Limit') or (orderType == 'StopLimit') or (orderType == 'LimitIfTouched'):\n request['price'] = float(self.price_to_precision(symbol, price))\n clientOrderId = self.safe_string_2(params, 'clOrdID', 'clientOrderId')\n if clientOrderId is not None:\n request['clOrdID'] = clientOrderId\n params = self.omit(params, ['clOrdID', 'clientOrderId'])\n response = await self.privatePostOrder(self.extend(request, params))\n return self.parse_order(response, market)", "def _buy(self, amount, price):\n params = {\"pair\": self.pair, \"type\" : \"buy\", \"rate\" : price, \"amount\" : amount}\n response = self._send_request(\"Trade\", params)\n if \"error\" in response:\n raise TradeException(response[\"error\"])", "def create_order(self, symbol, tradeType, price, amount):\r\n param = {\r\n 'symbol': self.__transfer_symbol(symbol),\r\n 'tradeType': tradeType, #BUY/SELL\r\n 'price': price,\r\n 'amount': amount,\r\n 'appid': self.apiKey,\r\n 'nonce': int(time.time() * 1000),\r\n 'timestamp': int(time.time())\r\n }\r\n return self.__sign_POST('/api/v1/order/create', param, self.timeout)", "def create_market_buy_order(amount_crypto: float):\n try:\n if CONF.exchange == 'bitmex':\n cur_price = get_current_price()\n amount_fiat = round(amount_crypto * cur_price)\n new_order = EXCHANGE.create_market_buy_order(CONF.pair, amount_fiat)\n elif CONF.exchange == 'kraken':\n if CONF.apply_leverage:\n new_order = EXCHANGE.create_market_buy_order(CONF.pair, amount_crypto,\n {'leverage': CONF.leverage_default, 'oflags': 'fcib'})\n else:\n new_order = EXCHANGE.create_market_buy_order(CONF.pair, amount_crypto, {'oflags': 'fcib'})\n elif CONF.exchange == 'liquid':\n new_order = EXCHANGE.create_market_buy_order(CONF.pair, amount_crypto, {'funding_currency': CONF.base})\n norder = Order(new_order)\n LOG.info('Created market %s', str(norder))\n return norder\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n if any(e in str(error.args) for e in STOP_ERRORS):\n LOG.warning('Insufficient available balance - not buying %s', amount_crypto)\n return None\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n 
create_market_buy_order(amount_crypto)", "def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n self.load_markets()\n method = 'privatePostOrderAddOrder'\n market = self.market(symbol)\n request = {\n 'Amount': amount,\n 'Pair': market['id'],\n }\n if type == 'market':\n method += 'MarketPrice' + self.capitalize(side)\n else:\n request['Price'] = price\n amountString = self.number_to_string(amount)\n priceString = self.number_to_string(price)\n request['Total'] = self.parse_number(Precise.string_mul(amountString, priceString))\n request['IsBid'] = (side == 'buy')\n response = getattr(self, method)(self.extend(request, params))\n return self.parse_order(response, market)", "async def _create_order(self,\n trade_type: TradeType,\n order_id: str,\n trading_pair: str,\n amount: Decimal,\n price: Decimal):\n\n amount = self.quantize_order_amount(trading_pair, amount)\n price = self.quantize_order_price(trading_pair, price)\n base, quote = trading_pair.split(\"-\")\n api_params = {\"base\": base,\n \"quote\": quote,\n \"trade_type\": \"buy\" if trade_type is TradeType.BUY else \"sell\",\n \"amount\": str(amount),\n \"secret\": self._terra_wallet_seeds,\n # \"maxPrice\": str(price),\n }\n self.start_tracking_order(order_id, None, trading_pair, trade_type, price, amount)\n try:\n order_result = await self._api_request(\"post\", \"terra/trade\", api_params)\n hash = order_result[\"txHash\"]\n txSuccess = order_result[\"txSuccess\"]\n tracked_order = self._in_flight_orders.get(order_id)\n if tracked_order is not None:\n self.logger().info(f\"Created {trade_type.name} order {order_id} txHash: {hash} \"\n f\"for {amount} {trading_pair}.\")\n tracked_order.update_exchange_order_id(hash)\n if txSuccess:\n tracked_order.fee_asset = order_result[\"fee\"][\"token\"]\n tracked_order.executed_amount_base = amount\n tracked_order.executed_amount_quote = amount * price\n tracked_order.fee_paid = order_result[\"fee\"][\"amount\"]\n event_tag = MarketEvent.BuyOrderCreated if trade_type is TradeType.BUY else MarketEvent.SellOrderCreated\n event_class = BuyOrderCreatedEvent if trade_type is TradeType.BUY else SellOrderCreatedEvent\n self.trigger_event(event_tag, event_class(self.current_timestamp, OrderType.LIMIT, trading_pair, amount,\n price, order_id, hash))\n self.trigger_event(MarketEvent.OrderFilled,\n OrderFilledEvent(\n self.current_timestamp,\n tracked_order.client_order_id,\n tracked_order.trading_pair,\n tracked_order.trade_type,\n tracked_order.order_type,\n price,\n amount,\n TradeFee(0.0, [(tracked_order.fee_asset, tracked_order.fee_paid)]),\n hash\n ))\n\n event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \\\n else MarketEvent.SellOrderCompleted\n event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \\\n else SellOrderCompletedEvent\n self.trigger_event(event_tag,\n event_class(self.current_timestamp,\n tracked_order.client_order_id,\n tracked_order.base_asset,\n tracked_order.quote_asset,\n tracked_order.fee_asset,\n tracked_order.executed_amount_base,\n tracked_order.executed_amount_quote,\n tracked_order.fee_paid,\n tracked_order.order_type))\n self.stop_tracking_order(tracked_order.client_order_id)\n else:\n self.trigger_event(MarketEvent.OrderFailure,\n MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.stop_tracking_order(order_id)\n self.logger().network(\n f\"Error submitting 
{trade_type.name} order to Terra for \"\n f\"{amount} {trading_pair} \"\n f\"{price}.\",\n exc_info=True,\n app_warning_msg=str(e)\n )\n self.trigger_event(MarketEvent.OrderFailure,\n MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))", "async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n 'amount': self.amount_to_precision(symbol, amount),\n 'side': side,\n 'type': type,\n }\n if type == 'limit':\n request['price'] = self.price_to_precision(symbol, price)\n response = await self.privatePostUserSpotOrder(self.extend(request, params))\n data = self.safe_value(response, 'data')\n return self.parse_order(data, market)", "def create_order(self, asset, amount, is_buy, style):\n exchange_symbol = self.get_symbol(asset)\n if isinstance(style, ExchangeLimitOrder) \\\n or isinstance(style, ExchangeStopLimitOrder):\n price = style.get_limit_price(is_buy)\n order_type = 'limit'\n\n elif isinstance(style, ExchangeStopOrder):\n price = style.get_stop_price(is_buy)\n order_type = 'stop'\n\n else:\n raise InvalidOrderStyle(exchange=self.name,\n style=style.__class__.__name__)\n\n req = dict(\n symbol=exchange_symbol,\n amount=str(float(abs(amount))),\n price=\"{:.20f}\".format(float(price)),\n side='buy' if is_buy else 'sell',\n type='exchange ' + order_type, # TODO: support margin trades\n exchange=self.name,\n is_hidden=False,\n is_postonly=False,\n use_all_available=0,\n ocoorder=False,\n buy_price_oco=0,\n sell_price_oco=0\n )\n\n date = pd.Timestamp.utcnow()\n try:\n self.ask_request()\n response = self._request('order/new', req)\n order_status = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_status:\n raise ExchangeRequestError(\n error='unable to create Bitfinex order {}'.format(\n order_status['message'])\n )\n\n order_id = str(order_status['id'])\n order = Order(\n dt=date,\n asset=asset,\n amount=amount,\n stop=style.get_stop_price(is_buy),\n limit=style.get_limit_price(is_buy),\n id=order_id\n )\n\n return order", "def buy(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.buy(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || BUY %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n 
self._getIndexedValue(price),\n otherthing,\n notes,)))", "async def create_order(self, symbol: str, side: str, price: str, amount: str, urgency: int = 0) -> dict:\n base, quote = symbol.lower().split('_')\n spendable = quote if side == 'buy' else base\n params = {\n 'pair': symbol,\n 'type': side,\n 'price': price,\n spendable: amount\n }\n return await self._safe_call(urgency, self._request_private, 'trade', params)", "def sell(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:\n return self.place_order(False, trading_pair, amount, price)", "def orderBuy(self, rate = None, amount = None):\r\n\t\treturn OrderSell(self.pair, rate, amount)", "def create_buy_order(price: float, amount_crypto: float):\n try:\n if CONF.exchange == 'bitmex':\n price = round(price * 2) / 2\n order_size = round(price * amount_crypto)\n new_order = EXCHANGE.create_limit_buy_order(CONF.pair, order_size, price)\n elif CONF.exchange == 'kraken':\n if CONF.apply_leverage:\n new_order = EXCHANGE.create_limit_buy_order(CONF.pair, amount_crypto, price,\n {'leverage': CONF.leverage_default, 'oflags': 'fcib'})\n else:\n new_order = EXCHANGE.create_limit_buy_order(CONF.pair, amount_crypto, price, {'oflags': 'fcib'})\n elif CONF.exchange == 'liquid':\n new_order = EXCHANGE.create_limit_buy_order(CONF.pair, amount_crypto, price,\n {'funding_currency': CONF.base})\n norder = Order(new_order)\n LOG.info('Created %s', str(norder))\n return norder\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n if any(e in str(error.args) for e in STOP_ERRORS):\n if CONF.exchange == 'bitmex':\n LOG.warning('Order submission not possible - not buying %s', order_size)\n else:\n LOG.warning('Order submission not possible - not buying %s', amount_crypto)\n return None\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n create_buy_order(price, amount_crypto)", "def buy_order(self, market_id, quan, direction):\n trading_accout_id = self.user_info()[TRADING_ACCOUNT_ID]\n # Get current rate of this market\n rate = self.get_current_rate(market_id)\n if rate is None:\n print(\"Error occured in Get market rate!\")\n return None\n\n null = None\n false = False\n true = True\n\n request_body = {\n # \"OcoOrder\": null,\n # \"Type\":null,\n # \"LastChangedDateTimeUTCDate\": null,\n # \"ExpiryDateTimeUTC\": null,\n # \"Applicability\": null,\n \"Direction\": direction,\n # \"ExpiryDateTimeUTCDate\": null,\n # \"TriggerPrice\": null,\n \"BidPrice\": rate,\n # \"AuditId\": \"8049808-0-0-0-R\",\n \"AutoRollover\": false,\n \"MarketId\": market_id,\n \"isTrade\": true,\n \"OfferPrice\": rate,\n \"OrderId\": 0,\n # \"LastChangedDateTimeUTC\": null,\n # \"Currency\": null,\n \"Quantity\": quan,\n # \"QuoteId\": null,\n \"TradingAccountId\": trading_accout_id, #402043148,\n #\"MarketName\": market_name,\n \"PositionMethodId\": 1,\n \"Status\": null,\n \"IfDone\": []\n }\n\n parameters = {SESSION: self.auth_token, USERNAME: self.uname}\n\n try:\n res = requests.post(URL_BUY_SELL, json=request_body, params=parameters)\n res_data_json = res.json()\n print(\"Buy order data************\\n\", res_data_json)\n\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(e.strerror)\n\n if res.status_code == 200:\n print(\"Trade Order successful, OrderId is\", res_data_json['OrderId'])\n return res_data_json['OrderId']\n\n return res_data_json['OrderId']", "def place_buy_order(self):\n price = request.form[\"price\"]\n stocks = request.form[\"stocks\"]\n trader_id = 
request.form[\"trader_id\"]\n self.market.place_buy_order(trader_id, price, stocks)\n return \"\"", "def buy(self, currency_pair, rate, amount):\n return self.api_query('buy', {\"CurrencyPair\": currency_pair, \"rate\": rate, \"amount\": amount})", "def sell(symbol: str,\n quantity: Any,\n order_type: str = \"market\",\n price: Any = None,\n exchange: str = CRYPTO_EXCHANGE,\n api_key: str = CRYPTO_API_KEY,\n api_secret: str = CRYPTO_API_SECRET,\n exchange_password: Any = CRYPTO_API_PASSWORD,\n exchange_uid: Any = CRYPTO_API_UID,\n test_mode: bool = False) -> Any:\n try:\n if test_mode == True:\n url = CRYPTO_URL_TEST\n else:\n url = CRYPTO_URL_LIVE\n payload = {\n 'symbol': symbol.upper(),\n 'quantity': quantity,\n 'order_type': order_type,\n 'limitPrice': price\n }\n response = requests.post('{}/sell/{}'.format(url, exchange),\n headers=crypto_get_headers(\n api_key, api_secret, exchange_password,\n exchange_uid),\n json=payload)\n if response:\n return response.json()\n if response.status_code == 400:\n logger.error('Oops! An error Occurred ⚠️')\n raise BadRequest(response.text)\n if response.status_code == 401:\n logger.error('Oops! An error Occurred ⚠️')\n raise InvalidCredentials(response.text)\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception", "def place_order(self, instrument, lower, upper, units=1, side_arg='buy'):\n if (side_arg == 'buy'):\n return self.oanda.create_order(self.account_id, instrument=instrument,\n units=units, side=side_arg,\n stopLoss=lower, takeProfit=upper,\n type='market')\n elif (side_arg == 'sell'):\n return self.oanda.create_order(self.account_id, instrument=instrument,\n units=units, side=side_arg,\n stopLoss=upper, takeProfit=lower,\n type='market')", "def do_orders(self,args):\n try:\n orders = bitstamp.open_orders()\n orders = sorted(orders, key=lambda x: float(x['price']))\n buytotal,selltotal = 0,0\n numbuys,numsells = 0,0\n amtbuys,amtsells = 0,0\n buyavg,sellavg = 0,0\n numorder = 0 \n for order in orders:\n ordertype=\"Sell\" if order['type'] == 1 else \"Buy\"\n numorder += 1\n print '%s = %s | $%s @ %s BTC %s' % (numorder,ordertype,order['price'],order['amount'],order['id']) \n if order['type'] == 0:\n buytotal += D(order['price'])*D(order['amount'])\n numbuys += D('1')\n amtbuys += D(order['amount'])\n elif order['type'] == 1:\n selltotal += D(order['price'])*D(order['amount'])\n numsells += D('1')\n amtsells += D(order['amount'])\n if amtbuys:\n buyavg = D(buytotal/amtbuys).quantize(cPrec)\n if amtsells:\n sellavg = D(selltotal/amtsells).quantize(cPrec)\n print \"There are %s Buys. There are %s Sells\" % (numbuys,numsells)\n print \"Avg Buy Price: $%s. 
Avg Sell Price: $%s\" % (buyavg,sellavg)\n except Exception as e:\n print e", "def order_buy_market(self, symbol, quantity, price_type='ask_price',\n time_in_force='gtc', extend_hours=False, **kwargs):\n\n pass", "def place_order(cls, order: 'Order') -> 'Order':\n counter_order_type = OrderType.SELL.value if order.type == OrderType.BUY.value else OrderType.BUY.value\n counter_orders = None\n with transaction.atomic():\n if counter_order_type == OrderType.SELL.value:\n counter_orders = cls.objects.select_for_update().filter(\n type=counter_order_type,\n instrument=order.instrument,\n price__lte=order.price).order_by('price', 'created_at_dt')\n elif counter_order_type == OrderType.BUY.value:\n counter_orders = cls.objects.select_for_update().filter(\n type=counter_order_type,\n instrument=order.instrument,\n price__gte=order.price).order_by('-price', 'created_at_dt')\n if not counter_orders:\n # place order into the order book\n order.save()\n return order\n for counter_order in counter_orders:\n order, counter_order, *balances = cls._trade_orders(\n order, counter_order)\n order.save()\n counter_order.save()\n for balance in balances:\n balance.save()\n if order.status == OrderStatus.COMPLETED:\n return order\n return order", "def buy(self, price, active, option, direction, expiry=60):\n name = 'buyV2'\n server_timestamp = self._timesync.server_timestamp\n expiration_timestamp = self._timesync.expiration_timestamp + expiry\n data = {'price': price,\n 'act': active,\n 'type': option,\n 'direction': direction,\n 'time': server_timestamp,\n 'exp': expiration_timestamp}\n self._send_websocket_request(name, data)" ]
[ "0.71927655", "0.7107266", "0.7054423", "0.6819442", "0.6688734", "0.66799307", "0.66651535", "0.6650478", "0.6600853", "0.6514789", "0.65064514", "0.64885443", "0.6481737", "0.6471577", "0.6377945", "0.6365583", "0.6359703", "0.63556045", "0.6327038", "0.6295299", "0.6267949", "0.62668985", "0.62557817", "0.617991", "0.61209", "0.60748565", "0.6056555", "0.60436136", "0.60064006", "0.5975261" ]
0.76989853
0
Cancel the order through the ccxt library for a certain exchange
def cancel_order(self, exchange, order_id): return self.ccxt.cancel_order(exchange, order_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_cancel(order):\r\n self.gox.cancel(order.oid)", "def cancelOrder(self, order_number):\n pass", "def _order_cancel(self, bo):\n log.info(\"bo_blotter: order_cancel bracket order bo#%s\" % bo.ticket) \n cancelled = bo.cancel()\n return(cancelled)", "def cancel_order(self, walletId, orderId):\n return", "def market_cancel(self, orderid):\n return self.delete(f'orders/{orderid}', auth=True)", "def cancel(self, uuid):\n return self.__call__('market', 'tradecancel',\n {'orderId': uuid})", "def cancel_order(self):\n self.withdraw_from_delivery()\n if self.amount_paid:\n self.refund_payment()\n return 'refund_payment' if self.amount_paid else 'order_canceled'", "def cancel(self, order_id):\n url = 'https://coincheck.com/api/exchange/orders/' + order_id\n headers = make_header(url, access_key=self.access_key, secret_key=self.secret_key)\n r = requests.delete(url, headers=headers, timeout=self.timeout)\n return json.loads(r.text)", "async def handle_cancel_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:", "def cancel(self, uuid):\n\n result = self.api_query('CancelOrder', {'order_id': uuid})\n return result", "def cancel(self, currency_pair, order_number):\n return self.api_query('cancelOrder', {\"currencyPair\": currency_pair, \"orderNumber\": order_number})", "def cancel_pending_orders(self):\n raise NotImplementedError(\"Broker must implement \\\n `cancel_pending_orders()`\")", "def cancel_order(self, order):\r\n method = self.private_endpoints['cancel_order']['method']\r\n url = self.base_url + self.private_endpoints['cancel_order']['url'].format(orderId=order)\r\n req = requests.request(method, url, headers=self.get_auth_headers(nonce=True))\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return True\r\n else:\r\n return res", "def onCancelOrder(self, item):\n self.frame.mode.cancelMarketOrder(self.lstOrders.getMultiSelectedItems(), self.mySystemDict['id'])", "def cancelMarketOrder(self, orderID):\n try:\n myOrder = self.marketOrders[orderID]\n result = self.refundMarketOrder(myOrder)\n if result == 1:\n del self.marketOrders[orderID]\n return result\n except:\n return 'galaxy->cancelMarketOrder error'", "def cancel_order(self, custom_id=None, **params):\n self.conn.send('cancelOrder', custom_id=custom_id, **params)", "def cancel(self, orderid, account=None):\n if not account:\n if \"default_account\" in config:\n account = config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n\n op = transactions.Limit_order_cancel(**{\n \"owner\": account,\n \"orderid\": orderid,\n })\n return self.dpay.finalizeOp(op, account, \"active\")", "async def cancel_order():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if order_id == 0:\n # TODO: Maybe retrieve all existing (sell, limit) orders\n return None\n\n # -----\n try:\n log.info(f\"Cancelling order id {order_id}\")\n new_order = App.client.cancel_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'cancel_order' {e}\")\n return None\n\n # TODO: There is small probability that the order will be filled just before we want to kill it\n # We need to somehow catch and process this case\n # If we get an error (say, order does not exist and cannot be killed), then after error returned, we could do trade state reset\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n 
else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def _execute_cancel(self) -> None:\n # validation\n if self.position.is_open:\n raise Exception('cannot cancel orders when position is still open. there must be a bug somewhere.')\n\n logger.info('cancel all remaining orders to prepare for a fresh start...')\n\n self.broker.cancel_all_orders()\n\n self._reset()\n\n self._broadcast('route-canceled')\n\n self.on_cancel()\n\n if not jh.is_unit_testing() and not jh.is_live():\n store.orders.storage[f'{self.exchange}-{self.symbol}'].clear()", "def cancel(self, order_id):\n Library.functions.cancel(self._book, order_id)", "def cancel_order(order_id: str,\n exchange: str = CRYPTO_EXCHANGE,\n api_key: str = CRYPTO_API_KEY,\n api_secret: str = CRYPTO_API_SECRET,\n exchange_password: Any = CRYPTO_API_PASSWORD,\n exchange_uid: Any = CRYPTO_API_UID,\n test_mode: bool = False) -> Any:\n try:\n if test_mode == True:\n url = CRYPTO_URL_TEST\n else:\n url = CRYPTO_URL_LIVE\n payload = {'order_id': order_id}\n response = requests.post('{}/cancel_order/{}'.format(url, exchange),\n headers=crypto_get_headers(\n api_key, api_secret, exchange_password,\n exchange_uid),\n json=payload)\n if response:\n return response.json()\n if response.status_code == 400:\n logger.error('Oops! An error Occurred ⚠️')\n raise BadRequest(response.text)\n if response.status_code == 401:\n logger.error('Oops! An error Occurred ⚠️')\n raise InvalidCredentials(response.text)\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception", "def _collab_cancel_swap(self, sender, amount=0):\n\n some_swap_id = 42\n self.result = self.collab.cancel_swap(some_swap_id).interpret(\n storage=self.collab_storage, sender=sender, amount=amount)\n\n assert len(self.result.operations) == 1\n assert self.result.operations[0]['parameters']['entrypoint'] == 'cancel_swap'\n assert self.result.operations[0]['parameters']['value'] == {'int': '42'}\n self.assertEqual(\n self.result.operations[0]['destination'],\n self.collab_storage['marketplaceAddress']\n )", "def onCancelOrder(self, item):\n self.frame.mode.cancelIndustryOrder(self.lstOrders.getMultiSelectedItems(), self.mySystemDict['id'])", "def _do_cancel(self):\r\n\r\n def do_cancel(order):\r\n \"\"\"cancel a single order\"\"\"\r\n self.gox.cancel(order.oid)\r\n\r\n if not len(self.items):\r\n return\r\n if not len(self.selected):\r\n order = self.items[self.item_sel]\r\n do_cancel(order)\r\n else:\r\n for order in self.selected:\r\n do_cancel(order)", "def cancel_order(self, order_param):\n order_id = order_param.id \\\n if isinstance(order_param, Order) else order_param\n\n try:\n self.ask_request()\n response = self._request('order/cancel', {'order_id': order_id})\n status = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in status:\n raise OrderCancelError(\n order_id=order_id,\n exchange=self.name,\n error=status['message']\n )", "def order_cancel(self, ticket):\n bo = self.bo_by_ticket(ticket)\n if bo:\n cancelled = self._order_cancel(bo)\n return(cancelled)\n else:\n return", "def cancel_order(self, order_id: str):\n return self._call_txtrader_api('cancel_order', {'id': order_id})", "async def handle_cancel_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def cancel(self, id):\n self.__init_client()\n order = self.get_open_order(id)\n\n if order is None:\n return False\n\n try:\n retry(lambda: 
self.client.futures_cancel_order(symbol=self.pair, origClientOrderId=order['clientOrderId']))\n except HTTPNotFound:\n return False\n logger.info(f\"Cancel Order : (clientOrderId, type, side, quantity, price, stop) = \"\n f\"({order['clientOrderId']}, {order['type']}, {order['side']}, {order['origQty']}, \"\n f\"{order['price']}, {order['stopPrice']})\")\n return True", "def cancel(self, order_id):\n del self.__pending_orders[order_id]" ]
[ "0.7706166", "0.744995", "0.7211225", "0.70317745", "0.6936221", "0.6923826", "0.6887567", "0.68759936", "0.6835702", "0.6832168", "0.68217963", "0.6820352", "0.68094903", "0.67458665", "0.6741805", "0.67316127", "0.6710419", "0.67094696", "0.66826385", "0.6623428", "0.65751", "0.6564676", "0.6546462", "0.6480709", "0.64662534", "0.6447407", "0.64217085", "0.6414089", "0.6404881", "0.6357382" ]
0.75702757
1
Get the data for a certain exchange for a given pair for the last 700 hours
def get_history_data(self, exchange, pair, timedelta): return self.ccxt.get_history_data(exchange, pair, timedelta)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(pair, other):\n days_ago = 7\n endtime = int(time())\n starttime = endtime - 60 * 60 * 24 * days_ago\n\n geckourl = '%s/markets?vs_currency=%s&ids=%s' % (API, pair[\"currency\"],\n pair[\"coin\"])\n liveprice = requests.get(geckourl).json()[0]\n pricenow = float(liveprice['current_price'])\n alltimehigh = float(liveprice['ath'])\n other['volume'] = float(liveprice['total_volume'])\n\n url_hist = '%s/%s/market_chart/range?vs_currency=%s&from=%s&to=%s' % (\n API, pair[\"coin\"], pair[\"currency\"], str(starttime), str(endtime))\n\n try:\n timeseriesarray = requests.get(url_hist).json()['prices']\n except JSONDecodeError as err:\n print(f'Caught JSONDecodeError: {repr(err)}')\n return None\n timeseriesstack = []\n length = len(timeseriesarray)\n i = 0\n while i < length:\n timeseriesstack.append(float(timeseriesarray[i][1]))\n i += 1\n\n timeseriesstack.append(pricenow)\n if pricenow > alltimehigh:\n other['ATH'] = True\n else:\n other['ATH'] = False\n\n other[\"image\"] = pair[\"image\"]\n other[\"coin\"] = pair[\"coin\"]\n\n return timeseriesstack", "def fetch(pair, time_period=None, interval=None):\n\n if time_period is None:\n url = f'http://platotradeinfo.silencatech.com/main/dashboard/ajaxgetetradedata'\n response = requests.get(url, params={'pair': pair})\n return response.json()['result']\n elif time_period is not None and interval is not None:\n url = 'http://platotradeinfo.silencatech.com/main/dashboard/ajaxgetetradedataforperiod'\n response = requests.get(url, params={'pair': pair,\n 'from': time_period['from'],\n 'to': time_period['to'],\n 'period': interval})\n return response.json()['data']", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def get_recent_trades(self, pair):\r\n method = self.public_endpoints['recent_trades']['method']\r\n url = self.base_url + self.public_endpoints['recent_trades']['url'].format(pairId=pair)\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def download_data(pair):\n\n url = \"https://api.ofx.com/PublicSite.ApiService/SpotRateHistory/10year/{}/{}?DecimalPlaces=6&ReportingInterval=daily&format=json\"\n\n with requests.get(url.format(pair[0], pair[1])) as response:\n\n data_list = [[\"datetime\", \"rate\", \"inverse\"]]\n\n for item in response.json()[\"HistoricalPoints\"]:\n\n iso_date = datetime.fromtimestamp(\n int(item[\"PointInTime\"]) / 1000)\n\n data_list.append(\n [iso_date, item[\"InterbankRate\"], item[\"InverseInterbankRate\"]])\n\n with open(\"{}{}.csv\".format(pair[0], pair[1]), \"w\", encoding=\"utf-8\", newline=\"\") as temp_file:\n csv.writer(temp_file).writerows(data_list)", "def get_rolling_price(self, pair='XBTZAR'):\n\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n while True:\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n print(r.json())", "def get_ticker(self, pair):\r\n method = self.public_endpoints['ticker']['method']\r\n url = self.base_url + 
self.public_endpoints['ticker']['url'].format(pairId=pair)\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def get_trade_history(self, pair, limit=20):\r\n method = self.private_endpoints['trade_history']['method']\r\n url = self.base_url + self.private_endpoints['trade_history']['url'].format(pair=pair, limit=str(limit))\r\n req = requests.request(method, url, headers=self.get_auth_headers())\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()", "def get_trades(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'trades', query_string))\n if r.status_code == 200:\n return r.json()", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def returnChartData(self,\n currency_pair,\n start=datetime.now() - timedelta(days=1),\n end=datetime.now(),\n period=300):\n pass", "def get_data_from_exchange(self, now):\n currency_options = dict(\n currency_pair='USD',\n bid={12.00 : {'guy_1' : 100.00}},\n ask={14.00 : {'guy_2' : 200.00}},\n time=datetime.datetime.now()\n )\n currency_pair_state = CurrencyPairState(**currency_options)\n return [currency_pair_state]", "def get_prices(horizon_host, pairs):\n dated_prices = [(pair[\"name\"], get_price(horizon_host, pair)) for pair in pairs]\n price_dict = {}\n for dated_price in dated_prices:\n if dated_price[0] not in price_dict:\n price_dict[dated_price[0]] = dated_price[1]\n else:\n price_dict[dated_price[0]] = latest_date_price(price_dict[dated_price[0]], dated_price[1])\n return price_dict", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def fetch_data(t0, t1, stock_symbol):\n \n # Alpha vantage timeseries object to fetch data (value every 5 min) in pandas-format\n ts = TimeSeries(API_KEY, output_format=\"pandas\")\n data, _ = ts.get_intraday(symbol=stock_symbol, interval=\"5min\", outputsize=\"full\")\n\n # Cut current time window data\n current_data = data[str(t0):str(t1)]\n\n return current_data", "def get_btcoin_day_data(self):\n cursor = self.__connect().cursor()\n limit = (str(int(time.time() - 
24*60*60)),)\n hashdata = []\n rewarddata = []\n summ = 0\n for row in cursor.execute('SELECT * from btcoin where key > ? ORDER BY key ASC', limit):\n date = int(row[0])\n hashrate = str(row[1])\n hashrate = self.convert_hashrate_to_float(hashrate)\n summ = summ + hashrate\n reward = float(row[2])\n hashdata.append([date, hashrate])\n rewarddata.append([date, reward])\n cursor.close()\n self.__disconnect()\n if len(hashdata) != 0:\n hashaverage = summ / len(hashdata)\n return (hashaverage, hashdata, rewarddata)\n else:\n return (-1, hashdata, rewarddata)", "def return_trade_history(self, currency_pair):\n return self.api_query('returnTradeHistory', {\"currencyPair\": currency_pair})", "async def fetch_hourly_data(self, day=None):\n self._logger.info(\"Fetching hourly data for %s\", day)\n await self._client.select_customer(self.account_id, self.customer_id)\n await self._client.select_customer(self.account_id, self.customer_id)\n\n if day is None:\n # Get yesterday\n yesterday = datetime.now() - timedelta(days=1)\n day_str = yesterday.strftime(\"%Y-%m-%d\")\n elif hasattr(day, \"strftime\"):\n day_str = day.strftime(\"%Y-%m-%d\")\n else:\n try:\n datetime.strptime(day, \"%Y-%m-%d\")\n except ValueError:\n print(\"Start date bad format. It must match %Y-%m-%d\")\n return\n day_str = day\n\n params = {\"dateDebut\": day_str, \"dateFin\": day_str}\n res = await self._client.http_request(HOURLY_DATA_URL_2, \"get\",\n params=params, )\n # We can not use res.json() because the response header are not application/json\n json_res = json.loads(await res.text())\n\n if len(json_res.get('results')) == 0:\n self._hourly_data[day_str] = {\n 'day_mean_temp': None,\n 'day_min_temp': None,\n 'day_max_temp': None,\n 'hours': {},\n }\n tmp_hour_dict = dict((h, {'average_temperature':None}) for h in range(24))\n else:\n self._hourly_data[day_str] = {\n 'day_mean_temp': json_res['results'][0]['tempMoyJour'],\n 'day_min_temp': json_res['results'][0]['tempMinJour'],\n 'day_max_temp': json_res['results'][0]['tempMaxJour'],\n 'hours': {},\n }\n tmp_hour_dict = dict((h, {}) for h in range(24))\n for hour, temp in enumerate(json_res['results'][0]['listeTemperaturesHeure']):\n tmp_hour_dict[hour]['average_temperature'] = temp\n\n raw_hourly_weather_data = []\n if len(json_res.get('results')) == 0:\n # Missing Temperature data from Hydro-Quebec (but don't crash the app for that)\n raw_hourly_weather_data = [None]*24\n else:\n raw_hourly_weather_data = json_res['results'][0]['listeTemperaturesHeure']\n\n params = {\"date\": day_str}\n res = await self._client.http_request(HOURLY_DATA_URL_1, \"get\", params=params)\n # We can not use res.json() because the response header are not application/json\n json_res = json.loads(await res.text())\n for hour, data in enumerate(json_res['results']['listeDonneesConsoEnergieHoraire']):\n tmp_hour_dict[hour]['lower_price_consumption'] = data['consoReg']\n tmp_hour_dict[hour]['higher_price_consumption'] = data['consoHaut']\n tmp_hour_dict[hour]['total_consumption'] = data['consoTotal']\n self._hourly_data[day_str]['hours'] = tmp_hour_dict.copy()\n\n #Also copy the raw hourly data from hydroquebec (This can be used later for commercial accounts, mostly 15 minutes power data)\n self._hourly_data_raw[day_str] = {\n 'Energy': json_res['results']['listeDonneesConsoEnergieHoraire'],\n 'Power': json_res['results']['listeDonneesConsoPuissanceHoraire'],\n 'Weather': raw_hourly_weather_data\n }", "def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n 
PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]", "def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "async def refresh_tick_data(self, pair: str) -> str:\n\n self.base_24hr_volumes[pair] = [array('d'), array('d')]\n\n has_backup = (pair in self.close_times_backup and\n pair in self.close_values_backup and\n pair in self.base_24hr_volumes_backup and\n self.close_times_backup[pair] and\n self.close_values_backup[pair] and\n self.base_24hr_volumes_backup[pair])\n\n if has_backup and self.close_times_backup[pair][-1] >= time.time() - config['tick_interval_secs'] * 2:\n self.close_times[pair] = self.close_times_backup[pair]\n self.close_values[pair] = self.close_values_backup[pair]\n self.base_24hr_volumes[pair][0] = self.base_24hr_volumes_backup[pair]\n\n self.log.info(\"{} Using {} ticks from backup.\", pair, len(self.close_times_backup[pair]))\n return pair\n\n rate_limit = len(self.data_refreshing) * config['api_initial_rate_limit_secs']\n self.data_refreshing.add(pair)\n await asyncio.sleep(rate_limit)\n ticks = await self.api.get_ticks(pair)\n self.data_refreshing.remove(pair)\n\n if not ticks:\n self.log.error(\"{} API returned no tick data.\", pair)\n return None\n\n self.log.debug(\"{} API ticks size {}, start {}, end {}.\", pair, len(ticks), ticks[0]['T'], ticks[-1]['T'])\n\n try:\n _, volume = await self.api.get_last_values(pair)\n self.close_times[pair], self.close_values[pair] = await self._expand_ticks(ticks)\n self.base_24hr_volumes[pair][0] = array('d', (volume for _ in range(len(self.close_times[pair]))))\n\n in_backup = (pair in self.close_times_backup and\n pair in self.close_values_backup and\n pair in self.base_24hr_volumes_backup)\n\n if not in_backup:\n self.close_times_backup[pair] = array('d')\n self.close_values_backup[pair] = array('d')\n self.base_24hr_volumes_backup[pair] = array('d')\n\n await self._truncate_tick_data(pair)\n await self._splice_backup_tick_data(pair)\n self.log.info('{} refreshed tick data.', pair)\n return pair\n\n except (KeyError, IndexError, TypeError) as e:\n self.log.error('Got {} for {}: {}', type(e).__name__, pair, e)\n\n return None", "def half_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_half_hour_data, markets)\n return", "async def refresh_adjusted_tick_data(self, pair: str):\n\n self.base_24hr_volumes[pair][1] = array('d')\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n\n trade_base = config['trade_base']\n pair_base = pair.split('-')[0]\n\n if trade_base 
== pair_base:\n self.adjusted_close_values[pair] = self.close_values[pair]\n await self._refresh_volume_derivatives(pair)\n return\n\n else:\n self.adjusted_close_values[pair] = array('d')\n\n convert_pair = '{}-{}'.format(trade_base, pair_base)\n\n try:\n source_index = len(self.close_times[pair]) - 1\n convert_index = self.close_times[convert_pair].index(self.close_times[pair][-1])\n\n except ValueError:\n try:\n convert_index = len(self.close_times[convert_pair]) - 1\n source_index = self.close_times[pair].index(self.close_times[convert_pair][-1])\n convert_value = self.close_values[convert_pair][-1]\n\n for index in range(len(self.close_times[pair]) - 1, source_index, -1):\n adjusted_value = self.close_values[pair][index] * convert_value\n self.adjusted_close_values[pair].insert(0, adjusted_value)\n\n self.log.debug(\"{} last {} adjusted values are approximate.\", pair,\n len(self.close_times[pair]) - source_index)\n\n except ValueError:\n self.adjusted_close_values[pair] = array('d')\n self.log.error(\"{} ends at {} before start of convert pair {} data at {}.\",\n pair, self.close_times[pair][-1], convert_pair, self.close_times[convert_pair][0])\n return\n\n for index in range(source_index, -1, -1):\n if convert_index > -1:\n convert_value = self.close_values[convert_pair][convert_index]\n else:\n convert_value = self.close_values[convert_pair][0]\n\n adjusted_value = self.close_values[pair][index] * convert_value\n self.adjusted_close_values[pair].insert(0, adjusted_value)\n convert_index -= 1\n\n if convert_index < 0:\n self.log.debug(\"{} first {} adjusted values are approximate.\", pair, convert_index * -1)\n\n await self._refresh_volume_derivatives(pair)", "def get_data_logic():\r\n global input_exchange\r\n global input_symbols\r\n global all_symbols\r\n global input_timeframe\r\n\r\n # create exchange connection\r\n exchange = Exchange(input_exchange)\r\n\r\n # perform check that exchange can grab price data\r\n if exchange.connection.has['fetchOHLCV']:\r\n\r\n # user ticked 'All Symbols?', so includes all symbols in\r\n # exchange_tickers.py for the particular exchange\r\n if all_symbols:\r\n symbol_list = SymbolList(symbols='auto', exchange=exchange)\r\n # user didn't tick 'All Symbols?', so create unpopulated symbol list\r\n else:\r\n symbol_list = SymbolList(exchange=exchange)\r\n # add all symbols user inputted\r\n for s in input_symbols:\r\n symbol_list.input_symbol(s)\r\n\r\n # get auto timeframe and check it is valid\r\n timeframe = Timeframe(timeframe=input_timeframe, exchange=exchange)\r\n while not timeframe.check_timeframe():\r\n timeframe.input_timeframe() # default to asking for input\r\n\r\n print(f\"Pulling data on the {timeframe.tf} timeframe for...\")\r\n print(symbol_list.symbols)\r\n\r\n # get current time in UTC in milliseconds\r\n now = datetime.now().astimezone(pytz.timezone('UTC'))\r\n now = int(now.timestamp()*1000)\r\n\r\n # loop over each symbol and pull new data\r\n for sym in symbol_list.symbols:\r\n # create csv filename and path\r\n file_sym = sym.replace('/', '')\r\n file_sym = file_sym.replace('-', '')\r\n filename = f\"{exchange.name}_{file_sym}_{timeframe.tf}.csv\" # generate filename from given information\r\n csv_path = f\"{exchange.name}/{timeframe.tf}/{filename}\"\r\n\r\n # get most recent price data and append it to existing data\r\n # (if it exists)\r\n price_data = PriceData(exchange=exchange, tf=timeframe.tf,\r\n sym=sym, now=now, path=csv_path)\r\n\r\n # check if price data csv already exists\r\n if price_data.exists():\r\n 
price_data.get_current()\r\n # get new data as far back as possible if csv does not exist\r\n else:\r\n price_data.get_new()\r\n\r\n # keep updating price_data until current time\r\n price_data.update()\r\n\r\n # write to csv\r\n price_data.write()\r\n\r\n print(\"Finished writing files!\")", "def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n url = 'http://mis.nyiso.com/public/csv/ExternalLimitsFlows/{}ExternalLimitsFlows.csv'\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n # In the source CSV, positive is flow into NY, negative is flow out of NY.\n # In Electricity Map, A->B means flow to B is positive.\n if sorted_zone_keys == 'US-NEISO->US-NY' or sorted_zone_keys == 'US-NE-ISNE->US-NY-NYIS':\n direction = 1\n relevant_exchanges = ['SCH - NE - NY', 'SCH - NPX_1385', 'SCH - NPX_CSC']\n elif sorted_zone_keys == 'US-NY->US-PJM':\n direction = -1\n relevant_exchanges = ['SCH - PJ - NY', 'SCH - PJM_HTP', 'SCH - PJM_NEPTUNE', 'SCH - PJM_VFT']\n elif sorted_zone_keys == 'US-MIDA-PJM->US-NY-NYIS':\n direction = 1\n relevant_exchanges = ['SCH - PJ - NY', 'SCH - PJM_HTP', 'SCH - PJM_NEPTUNE', 'SCH - PJM_VFT']\n elif sorted_zone_keys == 'CA-ON->US-NY' or sorted_zone_keys == 'CA-ON->US-NY-NYIS':\n direction = 1\n relevant_exchanges = ['SCH - OH - NY']\n elif sorted_zone_keys == 'CA-QC->US-NY' or sorted_zone_keys == 'CA-QC->US-NY-NYIS':\n direction = 1\n relevant_exchanges = ['SCH - HQ_CEDARS', 'SCH - HQ - NY']\n else:\n raise NotImplementedError('Exchange pair not supported: {}'.format(sorted_zone_keys))\n\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n ny_date = target_datetime.format('YYYYMMDD')\n exchange_url = url.format(ny_date)\n\n try:\n exchange_data = read_csv_data(exchange_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n new_england_exs = exchange_data.loc[exchange_data['Interface Name'].isin(relevant_exchanges)]\n consolidated_flows = new_england_exs.reset_index().groupby(\"Timestamp\").sum()\n\n now = arrow.utcnow()\n\n exchange_5min = []\n for row in consolidated_flows.itertuples():\n flow = float(row[3]) * direction\n # Timestamp for exchange does not include seconds.\n dt = timestamp_converter(row[0] + ':00')\n\n if (dt > now) and ((dt - now) < timedelta(seconds=300)):\n # NYISO exchanges CSV (and only the exchanges CSV) includes data\n # up to 5 minutes in the future (but only 5 minutes in the future).\n # This also happens on their official website.\n # Electricity Map raises error with data in the future, so skip\n # that datapoint. 
If it's more than 5 minutes in the future,\n # it's weird/unexpected and thus worthy of failure and logging.\n continue\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': dt,\n 'netFlow': flow,\n 'source': 'nyiso.com'\n }\n\n exchange_5min.append(exchange)\n\n return exchange_5min", "def fetch_ticker(self, symbol: str, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = self.publicGetExchangesPairTicker(self.extend(request, params))\n return self.parse_ticker(response, market)", "def fetch_balance(self, exchange, pair):\n return self.ccxt.fetch_balance(exchange, pair)", "def test_get_exchange_currency_pairs1(self):\n # currency_pairs = [{\"first\": \"BTC\", \"second\": \"LTC\"}]\n currency_pairs = \"BTC-LTC\"\n firsts = \"DIO\"\n seconds = None\n test_result = self.db_handler.get_exchanges_currency_pairs(\"TESTEXCHANGE\", currency_pairs, firsts, seconds)\n test_result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in test_result]\n result = self.session.query(ExchangeCurrencyPair).filter(ExchangeCurrencyPair.exchange_id.__eq__(1),\n ExchangeCurrencyPair.first_id.__eq__(1),\n ExchangeCurrencyPair.second_id.__eq__(3)).all()\n result.extend(self.session.query(ExchangeCurrencyPair).filter(ExchangeCurrencyPair.exchange_id.__eq__(1),\n ExchangeCurrencyPair.first_id.__eq__(5)).all())\n result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in result]\n assert result == test_result" ]
[ "0.6836399", "0.627741", "0.6234868", "0.62244236", "0.61926", "0.6124068", "0.605874", "0.60258263", "0.5984681", "0.5967804", "0.57844293", "0.5726083", "0.57112515", "0.5710812", "0.5707336", "0.5707336", "0.569271", "0.56634426", "0.5589972", "0.5550989", "0.55486476", "0.5539462", "0.5516538", "0.5503934", "0.5496274", "0.54463816", "0.5440279", "0.5434135", "0.54235977", "0.5422644" ]
0.68828434
0
Used for loading the model and word2vec files from disk. Returns their objects.
def LoadSavedModels(main_model_path="main_model.pkl", cler_model_path="cler_model.pkl", word2vec_path='GoogleNews-vectors-negative300.bin'): model_main = joblib.load(main_model_path) model_cler = joblib.load(cler_model_path) word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True) return model_main,model_cler,word2vec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vectors(path, to_train=False):\n model = Word2Vec.load(path)\n\n if to_train:\n return model\n\n # In case it doesn't need to be trained, delete train code to free up ram\n word_vectors = model.wv\n\n context_vectors = dict()\n if hasattr(model, \"syn1\"):\n # For hierarchical softmax\n context_vectors = model.syn1\n elif hasattr(model, \"syn1neg\"):\n # For negative sampling\n context_vectors = model.syn1neg\n\n del model # Save memory\n return VectorCollection(word_vectors, context_vectors)", "def load_word2vec(path):\n model = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True) \n return model", "def read_word2vec_model():\n file_name = \"word2vec_model.txt\"\n # these are the pre-2018 lines to load a model:\n # from gensim.models.word2vec import Word2Vec\n # m = Word2Vec.load_word2vec_format(file_name, binary=False)\n \n # here are the post-2018 lines to load a model:\n from gensim.models import KeyedVectors\n print(\"Starting to load the model in \", file_name, \"...\")\n m = KeyedVectors.load_word2vec_format(file_name, binary=False)\n print(\"Model loaded.\\n\")\n\n print(\"The model built is\", m, \"\\n\")\n print(\"m.vocab has\", len(m.vocab), \"words\")\n ## The above line should print\n ## m.vocab has 43981 words\n\n print(\"Each word is a vector of size\", m.vector_size)\n ## which should tells us that each word is represented by a 300-dimensional vector\n\n print(\"\\nTry m.get_vector('hello') to see one...!\\n\")\n ## Once the model is built, it can't be changed without rebuilding it; we'll leave it. \n\n return m", "def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))", "def load_embeddings_models():\n\n\t# ---LOADING WORD2VEC MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'word2vec', 'NILC', 'nilc_cbow_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'word2vec', 'NILC', 'nilc_skip_s300.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the word2vec model\")\n\tword2vec_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# word2vec_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING FASTTEXT MODEL---\n\tmodel_path = os.path.join(ROOT_PATH, 'models', 'fastText', 'cc.pt.300_300k.vec')\n\tstart_time = time.time()\n\tprint(\"Started loading the fasttext model\")\n\tfasttext_model = KeyedVectors.load_word2vec_format(model_path)\n\t# fasttext_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\t\n\n\t# ---LOADING PT-LKB MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'ontoPT', 'PT-LKB_embeddings_64', 'ptlkb_64_30_200_p_str.emb')\n\t# model_load_path = os.path.join('models', 'ontoPT', 'PT-LKB_embeddings_128', 'ptlkb_128_80_10_p_str.emb')\n\tstart_time = time.time()\n\tprint(\"Started loading the PT-LKB-64 model\")\n\tptlkb64_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# ptlkb64_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING GLOVE-300 MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'glove', 'glove_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'glove', 'glove_s100.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the GLOVE 300 dimensions model\")\n\tglove300_model = 
KeyedVectors.load_word2vec_format(model_load_path)\n\t# glove300_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING NUMBERBATCH MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'numberbatch', 'numberbatch-17.02_pt_tratado.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the NUMBERBATCH dimensions model\")\n\tnumberbatch_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# numberbatch_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\treturn word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model", "def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier", "def load_word2vec_model():\n logging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s', \n level=logging.INFO)\n model_path = '/playpen/home/tongn/GoogleNews-vectors-negative300.bin'\n model = KeyedVectors.load_word2vec_format(fname=model_path, binary=True)\n return model", "def read_model(self):\n filename=self.name + '_words'\n self.words=file_read(filename)\n\n filename2= self.name+'_word_lengths'\n self.word_lengths=file_read(filename2)\n\n filename3=self.name+'_stems'\n self.stems=file_read(filename3)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n self.setence_lengths=file_read(filename4)\n\n filename5= self.endings+'_endings'\n self.endings=file_read(filename5)", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def doc2vec_model(self):\n file_name = join(PARENT_BASE_DIR, '.files/model.model')\n if isfile(file_name):\n mdl = gensim_document_to_vector.load(file_name)\n return mdl, 'model exist.'\n else:\n return None, 'model not exist in \"base directory/.files/model.model\"'", "def loadOBJModel(file_name):\n file_text = open(file_name)\n text = file_text.readlines()\n vertex = []\n normals = []\n uv = []\n faces_vertex = []\n faces_normal = []\n faces_uv = []\n for line in text:\n info = line.split(\" \")\n if info[0] == \"v\":\n vertex.append(\n (float(info[1]), float(info[2]) - 0.1, float(info[3])))\n elif info[0] == \"vn\":\n normals.append((float(info[1]), float(info[2]), float(info[3])))\n elif info[0] == \"vt\":\n uv.append((float(info[1]), float(info[2])))\n elif info[0] == \"f\":\n p1 = info[1].split(\"/\")\n p2 = info[2].split(\"/\")\n p3 = info[3].split(\"/\")\n faces_vertex.append((int(p1[0]), int(p2[0]), int(p3[0])))\n faces_uv.append((int(p1[1]), int(p2[1]), int(p3[1])))\n faces_normal.append((int(p1[2]), int(p2[2]), int(p3[2])))\n return vertex, normals, uv, faces_vertex, faces_normal, 
faces_uv", "def _load_pretrained_tok2vec(nlp, loc):\n with loc.open(\"rb\") as file_:\n weights_data = file_.read()\n loaded = []\n for name, component in nlp.pipeline:\n if hasattr(component, \"model\") and component.model.has_ref(\"tok2vec\"):\n component.get_ref(\"tok2vec\").from_bytes(weights_data)\n loaded.append(name)\n return loaded", "def load_model(embed_dir):\n # need to have gensim model + syn0.npy + syn1neg.npy\n model = gensim.models.Word2Vec.load(embed_dir)\n return model", "def read_model(self):\n filename = self.name + '_words'\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n d = dict(eval(d_str))\n self.words = d\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'r') \n d2_str = f.read() \n f.close()\n d2 = dict(eval(d2_str))\n self.word_lengths = d2\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'r') \n d3_str = f.read() \n f.close()\n d3 = dict(eval(d3_str))\n self.stems = d3\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'r') \n d4_str = f.read() \n f.close()\n d4 = dict(eval(d4_str))\n self.sentence_lengths = d4\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'r') \n d5_str = f.read() \n f.close()\n d5 = dict(eval(d5_str))\n self.punctuation = d5", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))", "def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)", "def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def read_model(self):\n \n # words 
dictionary\n f = open(self.name + \"_words\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.words = d\n\n # word_lengths dictionary\n f = open(self.name + \"_word_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.word_lengths = d\n\n # stems dictionary\n f = open(self.name + \"_stems\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.stems = d\n\n # sentence_lengths dictionary\n f = open(self.name + \"_sentence_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.sentence_lengths = d\n\n # ten most common words\n f = open(self.name + \"_common_word\", 'r') \n d_str = f.read()\n f.close()\n \n d = list(eval(d_str))\n self.common_word = d", "def load_model():\n prepro = Prepro(PATH_STOPSWORD, PATH_ACRONYM)\n vectorizer = joblib.load(PATH_TFIDF)\n label_encoder = joblib.load(PATH_ENCODER)\n model_svm = joblib.load(PATH_SVM)\n model_nb = joblib.load(PATH_NB)\n model_lr = joblib.load(PATH_LR)\n return prepro, vectorizer, label_encoder, model_svm, model_nb, model_lr", "def load_model(self, filename):\r\n pass", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def load_word2vec_model():\n model = Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True, norm_only=True)\n return model", "def load_vectors(fname):\r\n # taken from: https://fasttext.cc/docs/en/english-vectors.html\r\n vectors_data = vocab.Vectors(name=fname)\r\n\r\n return vectors_data", "def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)", "def load_word2vect(self, file_path):\n self.embeddings = []\n self.word_to_idx = {'<pad>' : 0}\n self.vocab = ['<pad>']\n\n model = w2v.load(file_path)\n self.embedding_size = model.vectors.shape[1]\n pad_embedding = np.zeros(self.embedding_size, \"float32\")\n self.embeddings.append(pad_embedding)\n\n train_words_set = set([word for text in self.train_data for word in\n text[1].split(\" \")])\n\n for w in model.vocab:\n if w in train_words_set:\n self.word_to_idx[w] = len(self.vocab)\n self.vocab.append(w)\n self.embeddings.append(model[w])\n\n del model", "def is_word2vec_model(path):\n if not os.path.isfile(path):\n return False\n parts = os.path.basename(path).split('.')\n if len(parts) not in (3, 4):\n return False\n elif len(parts) == 4 and parts[-1].lower() not in ('bz', 'bz2', 'gz'):\n return False\n return parts[0] == 'vectors' and parts[2] in ('bin', 'txt')", "def load_model(self, file=FILENAME, dim=DIMENSION, normalize=False):\n print(\"Loading pretrained Glove vectors from file {}\".format(FILENAME))\n self.dimension = dim\n self.normalize = normalize\n with open(file, \"r\", encoding=\"utf-8\") as textfile:\n self.num_tokens = 
count_lines(textfile)\n self.tokens_arr = [\"\" for i in range(self.num_tokens)]\n self.embeddings_mat = np.zeros((self.num_tokens, self.dimension))\n\n for idx, line in enumerate(textfile):\n line = line.split()\n token = ''.join(line[:-self.dimension])\n self.tokens_arr[idx] = token\n self.token_to_idx[token] = idx \n vec = list(map(float, line[-self.dimension:]))\n if self.normalize: \n # normalize the vectors as they are put into the matrix\n vec = vec / np.linalg.norm(vec)\n self.embeddings_mat[idx] = vec \n if (idx+1) % 200000 == 0:\n print(\" --{}% loaded.\".format(round(idx/self.num_tokens*100, 2)))\n print(\"Finished loading Glove model. {} vectors loaded\".format(self.num_tokens))", "def load(self, directory):\n\n self.ft = Word2Vec.load(os.path.join(directory, \"w2v.model\"))\n self.dictionary = Dictionary.load(os.path.join(directory, \"dict.model\"))\n self.matrix = SparseTermSimilarityMatrix.load(\n os.path.join(directory, \"stsm.model\")\n )" ]
[ "0.69597363", "0.6947155", "0.6811197", "0.6807579", "0.6805793", "0.6797359", "0.6768113", "0.6706945", "0.65690553", "0.6520081", "0.6514057", "0.6461425", "0.6448351", "0.64457756", "0.64116144", "0.64102066", "0.6408863", "0.63833255", "0.63704264", "0.6347624", "0.63475585", "0.6340077", "0.6282879", "0.6261471", "0.625945", "0.62443906", "0.62298965", "0.6173178", "0.61615926", "0.6158809" ]
0.7026279
0
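The row ending above loads several pretrained embedding models (GloVe-300, Numberbatch) via gensim's KeyedVectors and reports load times. A minimal sketch of the same pattern for a single model follows; the file path is hypothetical, and load_word2vec_format is the only API call used:

import os
import time
from gensim.models import KeyedVectors

# Hypothetical location of a word2vec-format text file.
model_load_path = os.path.join("models", "glove", "glove_300d.txt")

start_time = time.time()
print("Started loading the GLOVE model")
glove_model = KeyedVectors.load_word2vec_format(model_load_path)
print("Model loaded")
print("--- %s seconds ---" % (time.time() - start_time))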
Predict a label using the main model (clinical, clerical or other). Input is a sentence, along w/ model object and word2vec object. These objects can either be loaded from disk using the LoadSavedModels function, or, if you have just completed training, they can be passed in from the Train.Trainer class. May also adjust the probability threshold above which a clerical decision is made (good for reducing false positives). 0.98 makes for very few false positives!
def PredictLabel(sentence, model_main, word2vec, boundary=0.5):
    tokenized_sample = word_tokenize(re.sub("-"," ",sentence))
    features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)
    prediction = model_main.predict_proba(features.reshape(1,-1))[0]
    if model_main.classes_[prediction.argmax()]!="clerical":
        return model_main.classes_[prediction.argmax()]
    else:
        if np.max(prediction)>boundary:
            return "clerical"
        else:
            ranger = range(len(prediction))
            del ranger[prediction.argmax()]
            return model_main.classes_[ranger][prediction[ranger].argmax()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]", "def predict(self, text, threshold=.0):\n if not self.k_model or not self.w2v_model:\n raise RuntimeError(\"Model not in memory, please load it train new model\")\n start_at = time.time()\n x_test = keras.preprocessing.sequence.pad_sequences(\n self.tokenizer.texts_to_sequences(gensim.utils.simple_preprocess(text)),\n maxlen=self.k_max_sequence_len)\n # Predict\n confidences = self.k_model.predict(x_test)[0]\n # Get mex prediction\n idx = np.argmax(confidences)\n elapsed_time = time.time() - start_at\n if float(confidences[idx]) > threshold:\n return {\"label\": self.label_encoder.classes_[idx], \"confidence\": float(confidences[idx]),\n \"elapsed_time\": elapsed_time}\n return {\"label\": \"__OTHER__\", \"confidence\": float(confidences[idx]), \"elapsed_time\": elapsed_time}", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction", "def predict(cls, input):\n clf = cls.get_model() \n\n input.to_csv(data_dir + 'vdok_predction_src_file.csv')\n\n q = qa_serializer_lang_selector(data_dir)\n q.serialize_record('vdok_predction_src_file.csv', task_name)\n q.select_lang([1], task_name).to_csv(data_dir + data_file, encoding= 'latin1')\n\n pipeline=['pos', 'lemma', 'synset', 'hype', 'hypo']\n\n bnlqd = fex_basic_nlp(data_file, data_dir)\n bnlqd.nlp_run(pipeline[0])\n bnlqd.nlp_run(pipeline[1])\n bnlqd.df_ac_lemma.to_csv(data_dir + 'Lemma-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[2])\n bnlqd.df_ac_synset.to_csv(data_dir + 'Synset-' + data_file , encoding= 'latin1')\n bnlqd.nlp_run(pipeline[3])\n bnlqd.df_ac_hypernyms.to_csv(data_dir + 'Hypernyms-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[4])\n bnlqd.df_ac_hyponyms.to_csv(data_dir + 'Hyponyms-' + data_file, encoding= 'latin1')\n\n bnlpd = fex_basic_nlp(def_file, data_dir, task_name)\n bnlpd.nlp_run(pipeline[0])\n bnlpd.nlp_run(pipeline[1])\n bnlpd.df_ac_lemma.to_csv(data_dir + 'Lemma-P-' + data_file, encoding= 'latin1')\n \n btgqd = bi_trigram(data_file, data_dir)\n btgqd.nlp_run(r'bigram')\n btgqd.nlp_run(r'trigram') \n\n stop_words_d = 
cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words)\n\n oanc_shelve = oanc_resource + 'ANC-all-lemma-04262014.db'\n oalqd = odi_oanc_lemma_frequency(data_file, oanc_shelve, None, data_dir, stop_words_d) \n oalqd.oanc_lemma_frequency('Lemma-' + data_file, 'Student_Question_Index', 'Pre_Col_Name')\n \n stop_words_hy_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words_hy)\n\n ovlqd = odi_overlapping(data_file, def_file, data_dir, stop_words_d)\n ovlqd.count_overlapping('Lemma-' + data_file, 'Student_Question_Index',\n 'Pre_Col_Name', 'Question_ID', 'Question_ID_Sec',\n 'Lemma-P-' + data_file, 'Question_ID', 'Question_ID_Sec')\n ovlqd.count_overlapping_synset('Synset-' + data_file)\n ovlqd.count_overlapping_hypernyms('Hypernyms-' + data_file, stop_words_hy_d)\n ovlqd.count_overlapping_hyponyms('Hyponyms-' + data_file, stop_words_hy_d)\n\n df_ac_pmi_dist_bigram = cls.bi_trigram_pmi_distribution(pmi_bigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_bigram, 'bigram')\n df_ac_pmi_dist_trigram = cls.bi_trigram_pmi_distribution(pmi_trigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_trigram, 'Trigram')\n\n df_ac_aggregate = cls.aggregate_plim(bnlqd, oalqd, ovlqd, df_ac_pmi_dist_bigram, df_ac_pmi_dist_trigram,\n bnlpd, specific_count_lemmas, stop_words_pos, task_name)\n df_ac_aggregate.to_csv(data_dir + 'vdok_predction_Aggregate_plim.csv', encoding= 'latin1')\n df_ac_aggregate_item_level = cls.aggregate_item_level_plim(df_ac_aggregate, oalqd.stem_option_name_clm, \n task_name)\n df_ac_aggregate_item_level.to_csv(data_dir + 'vdok_predction_Key_Stem_Passage_Aggregate_plim.csv',\n encoding= 'latin1')\n\n rfrpod = tmv_RF_classify('Independent_Variable_w_Label-Def.csv', data_dir)\n rfrpod.load_data('vdok_predction_Key_Stem_Passage_Aggregate_plim.csv', True, drop_vars, dependent_var)\n clf.perform_prediction(rfrpod.df_ac_modeling_values)\n return clf.df_ac_classified", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def predict (self, model, context, data):\n # 1. Feature engineering\n features_df = FeatureEngineering().do_for_predict(data, joblib.load(model.files['description-vectorizer']), joblib.load(model.files['user-encoder']), context)\n\n # 2. Load model & other required files\n trained_model = joblib.load(model.files['model'])\n categories = pd.read_csv(model.files['categories'], index_col=0)\n\n # 3. Predict\n pred = trained_model.predict(features_df)\n\n # 4. 
Return the prediction\n predicted_category = pd.DataFrame(pred, columns=categories['category']).idxmax(axis=1)[0]\n\n logger.compute(context.correlation_id, '[ {ctx} ] - [ PREDICTION ] - Model {model}.v{version} - Predicted category for description [{desc}]: {c}'.format(ctx=context.process, model=model.info['name'], version=model.info['version'], desc=data['description'], c=predicted_category), 'info')\n \n return ModelPrediction(prediction={\"category\": predicted_category})", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict(self, sentence):\n data = pd.read_csv(StringIO(sentence), names=['review'])\n X = self.preprocess(data)\n Y = self.clf.predict_proba(X)\n \n return np.argmax(Y)", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def PredictReviewScore(self, sentences, label=0):\n AdjR = 0.0\n # if text.startswith(\"For more photos and reviews do check out fourleggedfoodies\"):\n # x = 1\n adjAll = []\n for sentence in sentences:\n adjectives, dependencies = self.ExtractSentDetails(sentence)\n adjAll.extend(adjectives)\n allAdjectives = adjectives | Angel.GlobalAdjList\n AdjS = 0.0\n words = wordpunct_tokenize(sentence[\"Text\"])\n if len(words) <= 3:\n allAdjectives |= set([x.lower() for x in words])\n for i in range(len(words)):\n word = words[i].lower()\n if word in {\"but\", \"if\"}:\n AdjS = 0.0\n elif word in allAdjectives and word in self.lexicon:\n AdjS += float(self.lexicon[word]) * self.PredictMultiplier(word, dependencies[word], words, i)\n AdjR += AdjS\n AdjR *= self.PredictBase(adjAll)\n finalScore = AdjR\n if self.DumpRequested(finalScore, label):\n self.DumpDetails(sentences, label)\n return finalScore", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(self, datafile):\r\n \r\n # We load the test data and lower the text\r\n data_test = pd.read_csv(datafile, sep = \"\\t\", names = [\"polarity\", \"category\", \"word\", \"offsets\", \"sentence\"])\r\n data_test['sentence_l'] = data_test['sentence'].apply(str.lower)\r\n data_test['word'] = data_test['word'].apply(str.lower)\r\n \r\n # We try to keep all the no/nor/not words as this changes radically the sentiment analysis\r\n data_test['sentence_l'] = data_test[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"can\\'t\", \"can not\"))\r\n data_test['sentence_l'] = data_test[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"n\\'t\", \" not\"))\r\n \r\n # We clean the data and stem the words\r\n clean_sentences = []\r\n for row in data_test['sentence_l']:\r\n tokens = word_tokenize(row)\r\n tokens = [word for word in tokens if word.isalpha()]\r\n tokens = [w for w in tokens 
if not w in self.stopwords] \r\n tokens = [self.stemmer.stem(word) for word in tokens]\r\n clean_sentences.append(tokens)\r\n data_test['stems'] = clean_sentences\r\n \r\n # We also stem the target words to be coherent with the stemmed words in the sentences\r\n data_test['word'] = [self.stemmer.stem(word) for word in data_test['word']]\r\n\r\n # We recreate the sentences with the selected and cleaned words\r\n Classifier.create_sentence = staticmethod(Classifier.create_sentence)\r\n data_test.clean_sentence = Classifier.create_sentence(data_test.stems)\r\n \r\n # We create a BOW vector\r\n reviews_new_counts = self.restaurant_vect.transform(data_test.clean_sentence)\r\n \r\n # We transform the BOW vector with the tfidf scores\r\n reviews_new_tfidf = self.tfidf_transformer.transform(reviews_new_counts)\r\n \r\n # We make a prediction with the classifier\r\n self.pred = self.model.predict(reviews_new_tfidf)\r\n \r\n return self.pred", "def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred", "def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, 
dev_s, test_s))", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict_cli(text, config_filepath):\n load_classifier(config_filepath)\n print(classifier.predict(text))", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def label_predict(self, sentence):\n index_words = FileUtils.index_sentence(sentence, self.word_to_index)\n chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)\n result = np.zeros(Settings.class_num)\n if Settings.cuda:\n self.model.cuda()\n \n for chunk in chunks:\n with torch.no_grad():\n chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)\n if Settings.cuda:\n chunk = chunk.cuda()\n \n predict = self.model(chunk)\n if Settings.cuda:\n predict = predict.cpu()\n predict = predict.numpy()[0]\n result += predict\n result /= len(chunks)\n\n target_index = np.argmax(result) + 1\n label = self.index_to_label.get(str(target_index))\n score = np.max(result)\n return label, score", "def predict(OUTPUT_DIR,in_sentences):\n\tPRED_BATCH_SIZE = 64\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\tmodel_config = os.path.join(OUTPUT_DIR,\"model_config.json\")\n\tmodel_config = json.load(open(model_config))\n\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\tconfig = BertConfig(output_config_file)\n\tmodel = BertForSequenceClassification(config, num_labels=model_config[\"num_labels\"])\n\tmodel.load_state_dict(torch.load(output_model_file))\n\tmodel.to(device)\n\ttokenizer = BertTokenizer.from_pretrained(model_config[\"bert_model\"],do_lower_case=model_config[\"do_lower\"])\n\n\tin_examples = [InputExample(guid=\"\", text_a=x, text_b=None, label=\"Adverse effect\") for x in in_sentences]\n\tin_features = convert_examples_to_features(in_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\tall_input_ids = torch.tensor([f.input_ids for f in in_features], dtype=torch.long)\n\tall_input_mask = torch.tensor([f.input_mask for f in in_features], dtype=torch.long)\n\tall_segment_ids = torch.tensor([f.segment_ids for f in in_features], dtype=torch.long)\n\n\tpred_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids) \n\t# # Run prediction for full data\n\tpred_sampler = SequentialSampler(pred_data)\n\tpred_dataloader = DataLoader(pred_data, sampler=pred_sampler, batch_size=PRED_BATCH_SIZE,drop_last = False)\n\tmodel.eval()\n\n\tpreds = []\n\n\tlabel_map = model_config[\"label_map\"]\n\n\tfor input_ids, input_mask, segment_ids in tqdm(pred_dataloader, desc=\"Predicting\"):\n\t\tinput_ids = input_ids.to(device)\n\t\tinput_mask = input_mask.to(device)\n\t\tsegment_ids = segment_ids.to(device)\n\n\t\twith torch.no_grad():\n\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\n\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\tlogits = logits.detach().cpu().numpy() \n\n\t\tpreds.extend(logits)\n\tlabel_map_reverse = {\"1\":\"Adverse effect\",\"2\":\"Not an adverse effect\"}\n\treturn [(sentence,label_map_reverse[str(pred)]) for sentence,pred in zip(in_sentences,preds)]", "def predict(self, X):\r\n return 1 if self.predict_prob(X) > 0.5 else 0", "def predict(self, conf):\n conf.set_int(\"angel.worker.matrix.transfer.request.timeout.ms\", 
60000)\n predict(conf, conf._jvm.com.tencent.angel.ml.classification.lr.LRModel(conf._jconf, None), 'com.tencent.angel.ml.classification.lr.LRPredictTask')", "def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])", "def predict_tweet():\n input_text = request.form['input_text']\n tokenized_tweet = tokenizer(input_text)\n logits = model.predict({k: np.array(tokenized_tweet[k])[None] for k in input_names})[0]\n scores = softmax(logits)\n pred = round(100 * scores.flatten()[1], 2)\n # return render_template('index.html', prediction=pred)\n\n exp = LIME_explainer.explain_instance(input_text, predictor, num_features=len(input_text.split()),\n top_labels=1, num_samples=100).as_html()\n\n if pred >= 50:\n return render_template('index.html', exp=exp, prediction=f'Emergency; confidence ({pred}%)')\n else:\n return render_template('index.html', exp=exp, prediction=f'Non-emergency; confidence ({100 - pred}%)')", "def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predictive_model (train_x, train_y, test_x, model_name):\n \n assert model_name in ['logisticregression', 'nn', 'randomforest',\n 'gaussiannb', 'bernoullinb', 'multinb',\n 'svmlin', 'gbm', 'extra trees',\n 'lda','passive aggressive', 'adaboost',\n 'bagging', 'xgb']\n \n # Define model\n if model_name == 'logisticregression':\n model = LogisticRegression()\n elif model_name == 'nn': \n model = MLPClassifier(hidden_layer_sizes=(200,200))\n elif model_name == 'randomforest': \n model = RandomForestClassifier()\n elif model_name == 'gaussiannb': \n model = GaussianNB()\n elif model_name == 'bernoullinb': \n model = BernoulliNB()\n elif model_name == 'multinb': \n model = MultinomialNB()\n elif model_name == 'svmlin': \n model = svm.LinearSVC()\n elif model_name == 'gbm': \n model = GradientBoostingClassifier() \n elif model_name == 'extra trees':\n model = ExtraTreesClassifier(n_estimators=20)\n elif model_name == 'lda':\n model = LinearDiscriminantAnalysis() \n elif model_name == 'passive aggressive':\n model = PassiveAggressiveClassifier()\n elif model_name == 'adaboost':\n model = AdaBoostClassifier()\n elif model_name == 'bagging':\n model = BaggingClassifier()\n elif model_name == 'xgb':\n model = XGBRegressor() \n \n # Train & Predict\n if model_name in ['svmlin', 'Passive Aggressive']: \n model.fit(train_x, train_y)\n test_y_hat = model.decision_function(test_x)\n \n elif model_name == 'xgb':\n model.fit(np.asarray(train_x), train_y)\n test_y_hat = model.predict(np.asarray(test_x))\n \n else:\n model.fit(train_x, train_y)\n test_y_hat = model.predict_proba(test_x)[:,1]\n \n return model, 
test_y_hat" ]
[ "0.6959012", "0.65132445", "0.65114045", "0.6501456", "0.6449135", "0.6394472", "0.6369494", "0.6363786", "0.63258064", "0.63060385", "0.62783635", "0.62781966", "0.6276569", "0.6266087", "0.62624", "0.61981785", "0.61941963", "0.61721903", "0.6151807", "0.6150158", "0.6136576", "0.61205435", "0.6115925", "0.6079878", "0.6055572", "0.6042505", "0.60340077", "0.60128045", "0.60081905", "0.600785" ]
0.7438309
0
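A minimal usage sketch for the PredictLabel document above. The query mentions a LoadSavedModels helper, but its signature and return order are not shown here, so they are assumptions, as is the example sentence; the 0.98 boundary and the class names come from the query itself:

# Assumption: LoadSavedModels returns the fitted main classifier and the
# gensim KeyedVectors object used for featurisation, in this order.
model_main, word2vec = LoadSavedModels("saved_models/")

sentence = "patient reviewed in clinic and medication dose adjusted"
label = PredictLabel(sentence, model_main, word2vec, boundary=0.98)
print(label)  # one of the main-model classes, e.g. "clinical", "clerical" or "other"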
Predict a label using the clerical model. Input is a sentence, along w/ model object and word2vec object. These objects can either be loaded from disk using the LoadSavedModels function, or, if you have just completed training, they can be passed in from the Train.Trainer class.
def PredictClerLabel(sentence, model_cler, word2vec):
    tokenized_sample = word_tokenize(re.sub("-"," ",sentence))
    features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)
    prediction = model_cler.predict_proba(features.reshape(1,-1))[0]
    return model_cler.classes_[prediction.argmax()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PredictLabel(sentence, model_main, word2vec, boundary=0.5):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_main.predict_proba(features.reshape(1,-1))[0]\n if model_main.classes_[prediction.argmax()]!=\"clerical\":\n return model_main.classes_[prediction.argmax()]\n else:\n if np.max(prediction)>boundary:\n return \"clerical\"\n else:\n ranger = range(len(prediction))\n del ranger[prediction.argmax()]\n return model_main.classes_[ranger][prediction[ranger].argmax()]", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict_model():\n data = request.json\n\n if data:\n predictor.pred_dict[\"model\"] = data[\"model\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def predict(cls, input):\n clf = cls.get_model() \n\n input.to_csv(data_dir + 'vdok_predction_src_file.csv')\n\n q = qa_serializer_lang_selector(data_dir)\n q.serialize_record('vdok_predction_src_file.csv', task_name)\n q.select_lang([1], task_name).to_csv(data_dir + data_file, encoding= 'latin1')\n\n pipeline=['pos', 'lemma', 'synset', 'hype', 'hypo']\n\n bnlqd = fex_basic_nlp(data_file, data_dir)\n bnlqd.nlp_run(pipeline[0])\n bnlqd.nlp_run(pipeline[1])\n bnlqd.df_ac_lemma.to_csv(data_dir + 'Lemma-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[2])\n bnlqd.df_ac_synset.to_csv(data_dir + 'Synset-' + data_file , encoding= 'latin1')\n bnlqd.nlp_run(pipeline[3])\n bnlqd.df_ac_hypernyms.to_csv(data_dir + 'Hypernyms-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[4])\n bnlqd.df_ac_hyponyms.to_csv(data_dir + 'Hyponyms-' + data_file, encoding= 'latin1')\n\n bnlpd = fex_basic_nlp(def_file, data_dir, task_name)\n bnlpd.nlp_run(pipeline[0])\n bnlpd.nlp_run(pipeline[1])\n bnlpd.df_ac_lemma.to_csv(data_dir + 'Lemma-P-' + data_file, encoding= 'latin1')\n \n btgqd = bi_trigram(data_file, data_dir)\n btgqd.nlp_run(r'bigram')\n btgqd.nlp_run(r'trigram') \n\n stop_words_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words)\n\n oanc_shelve = oanc_resource + 'ANC-all-lemma-04262014.db'\n oalqd = odi_oanc_lemma_frequency(data_file, oanc_shelve, None, data_dir, stop_words_d) \n oalqd.oanc_lemma_frequency('Lemma-' + data_file, 'Student_Question_Index', 'Pre_Col_Name')\n \n stop_words_hy_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words_hy)\n\n ovlqd = odi_overlapping(data_file, def_file, data_dir, stop_words_d)\n ovlqd.count_overlapping('Lemma-' + data_file, 'Student_Question_Index',\n 'Pre_Col_Name', 'Question_ID', 'Question_ID_Sec',\n 'Lemma-P-' + data_file, 'Question_ID', 'Question_ID_Sec')\n ovlqd.count_overlapping_synset('Synset-' + data_file)\n ovlqd.count_overlapping_hypernyms('Hypernyms-' + data_file, stop_words_hy_d)\n ovlqd.count_overlapping_hyponyms('Hyponyms-' + data_file, stop_words_hy_d)\n\n df_ac_pmi_dist_bigram = cls.bi_trigram_pmi_distribution(pmi_bigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_bigram, 'bigram')\n df_ac_pmi_dist_trigram = cls.bi_trigram_pmi_distribution(pmi_trigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_trigram, 'Trigram')\n\n df_ac_aggregate = cls.aggregate_plim(bnlqd, oalqd, ovlqd, df_ac_pmi_dist_bigram, df_ac_pmi_dist_trigram,\n bnlpd, specific_count_lemmas, stop_words_pos, task_name)\n 
df_ac_aggregate.to_csv(data_dir + 'vdok_predction_Aggregate_plim.csv', encoding= 'latin1')\n df_ac_aggregate_item_level = cls.aggregate_item_level_plim(df_ac_aggregate, oalqd.stem_option_name_clm, \n task_name)\n df_ac_aggregate_item_level.to_csv(data_dir + 'vdok_predction_Key_Stem_Passage_Aggregate_plim.csv',\n encoding= 'latin1')\n\n rfrpod = tmv_RF_classify('Independent_Variable_w_Label-Def.csv', data_dir)\n rfrpod.load_data('vdok_predction_Key_Stem_Passage_Aggregate_plim.csv', True, drop_vars, dependent_var)\n clf.perform_prediction(rfrpod.df_ac_modeling_values)\n return clf.df_ac_classified", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def predict (self, model, context, data):\n # 1. Feature engineering\n features_df = FeatureEngineering().do_for_predict(data, joblib.load(model.files['description-vectorizer']), joblib.load(model.files['user-encoder']), context)\n\n # 2. Load model & other required files\n trained_model = joblib.load(model.files['model'])\n categories = pd.read_csv(model.files['categories'], index_col=0)\n\n # 3. Predict\n pred = trained_model.predict(features_df)\n\n # 4. Return the prediction\n predicted_category = pd.DataFrame(pred, columns=categories['category']).idxmax(axis=1)[0]\n\n logger.compute(context.correlation_id, '[ {ctx} ] - [ PREDICTION ] - Model {model}.v{version} - Predicted category for description [{desc}]: {c}'.format(ctx=context.process, model=model.info['name'], version=model.info['version'], desc=data['description'], c=predicted_category), 'info')\n \n return ModelPrediction(prediction={\"category\": predicted_category})", "def predict(self, model, context, data):\n pass", "def label_predict(self, sentence):\n index_words = FileUtils.index_sentence(sentence, self.word_to_index)\n chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)\n result = np.zeros(Settings.class_num)\n if Settings.cuda:\n self.model.cuda()\n \n for chunk in chunks:\n with torch.no_grad():\n chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)\n if Settings.cuda:\n chunk = chunk.cuda()\n \n predict = self.model(chunk)\n if Settings.cuda:\n predict = predict.cpu()\n predict = predict.numpy()[0]\n result += predict\n result /= len(chunks)\n\n target_index = np.argmax(result) + 1\n label = self.index_to_label.get(str(target_index))\n score = np.max(result)\n return label, score", "def predict_with_model(request):\n # [START predict_with_model]\n model = get_model(request)\n\n data_response = {}\n\n q = read_request(request)\n data_response[\"q\"] = escape(q)\n print(\"Predict with a model for: {}\".format(q))\n\n print(\"Setup TablesClient\")\n try:\n client = automl.TablesClient(\n project=project_id,\n region=region,\n client_options=client_options\n )\n except Exception as e:\n print(e.message)\n\n print(\"Prediction start\")\n try:\n response = client.predict(\n model=model,\n inputs=[q],\n feature_importance=True\n )\n except Exception as e:\n print(e.message)\n\n print(\"Prediction results\")\n for result in response.payload:\n 
data_response[escape(result.tables.value.string_value)] = round(result.tables.score, 3)\n print(\"Predicted class name: {}, score: {}\".format(\n result.tables.value.string_value,\n result.tables.score)\n )\n\n print(\"Prediction finished\")\n r = write_response(data_response)\n # [END predict_with_model]\n return r", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def predict_word(word_path):\n word = word_path\n reshaped_word = shape_new_img(word)\n pred = model.predict(reshaped_word)\n get_class = np.argmax(pred)\n prediction = labels_list[get_class]\n return prediction", "def predict(self, X):\n # Check is fit had been called\n check_is_fitted(self)\n X = self._clean(X)\n y = self.model_.predict(X)\n return self.map_label_inverse_(y)", "def predict(first_word, second_word=\"\", top_n=10):\r\n\r\n try:\r\n if first_word and second_word:\r\n return predict_currword_given_lastword(first_word, second_word, top_n=top_n)\r\n else:\r\n return predict_currword(first_word, top_n)\r\n except KeyError:\r\n raise Exception(\"Please load predictive models\")", "def _predict_and_return_argmax_label(self, example):\n model_out = self._model.predict([example])\n softmax = list(model_out)[0]['preds']\n argmax = np.argmax(softmax)\n return self._model.output_spec()['preds'].vocab[argmax]", "def predict(self, infile, model_path=None, eval_gold=False, as_text=False):\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tclf, num_labels, cat_labels, multicol_dict, vocab, firsts, lasts = joblib.load(model_path)\n\n\t\tif as_text:\n\t\t\tconllu = infile\n\t\telse:\n\t\t\tconllu = io.open(infile,encoding=\"utf8\").read()\n\n\t\t#tagged = udpipe_tag(conllu,self.udpipe_model)\n\t\ttagged = tt_tag(conllu,self.lang)\n\n\t\ttrain_feats, _, toks, _, _ = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\theaders = sorted(list(train_feats[0].keys()))\n\n\t\tdata = []\n\n\t\tpreds = {}\n\t\tfor e in self.estimators:\n\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\theaders.append(e.name + \"_prob\")\n\n\t\tgenre_warning = False\n\t\tfor i, item in enumerate(train_feats):\n\t\t\titem[\"first\"] = item[\"word\"][0] if item[\"word\"][0] in firsts else \"_\"\n\t\t\titem[\"last\"] = item[\"word\"][-1] if item[\"word\"][-1] in lasts else \"_\"\n\t\t\tif \"genre\" in cat_labels:\n\t\t\t\tif item[\"genre\"] not in multicol_dict[\"encoder_dict\"][\"genre\"].classes_: # New genre not in training data\n\t\t\t\t\tif not genre_warning:\n\t\t\t\t\t\tsys.stderr.write(\"! 
WARN: Genre not in training data: \" + item[\"genre\"] + \"; suppressing further warnings\\n\")\n\t\t\t\t\t\tgenre_warning = True\n\t\t\t\t\titem[\"genre\"] = \"_\"\n\t\t\tif \"pos\" in cat_labels:\n\t\t\t\tif item[\"pos\"] not in multicol_dict[\"encoder_dict\"][\"pos\"].classes_:\n\t\t\t\t\titem[\"pos\"] = \"_\"\n\t\t\tif \"cpos\" in cat_labels:\n\t\t\t\tif item[\"cpos\"] not in multicol_dict[\"encoder_dict\"][\"cpos\"].classes_:\n\t\t\t\t\titem[\"cpos\"] = \"_\"\n\t\t\tif item[\"word\"] not in vocab and \"word\" in multicol_dict[\"encoder_dict\"]:\n\t\t\t\tif item[\"pos\"] in multicol_dict[\"encoder_dict\"][\"word\"].classes_:\n\t\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\t\telse:\n\t\t\t\t\titem[\"word\"] = \"_\"\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, _, _ = self.n_gram(data,headers,[],[])\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded = self.multicol_transform(data,columns=multicol_dict[\"columns\"],all_encoders_=multicol_dict[\"all_encoders_\"])\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tpred = clf.predict(data_x)\n\n\t\t# Ensure first token in document is always a sentence break\n\t\tfor i, x in enumerate(data_encoded[\"tok_id\"].values):\n\t\t\tif x == 1:\n\t\t\t\tpred[i] = 1\n\n\t\tif eval_gold:\n\t\t\tgold_feats, _,_,_,_ = read_conll(conllu,genre_pat=self.genre_pat,mode=\"sent\",as_text=True)\n\t\t\tgold = [int(t['wid'] == 1) for t in gold_feats]\n\t\t\tconf_mat = confusion_matrix(gold, pred)\n\t\t\tsys.stderr.write(str(conf_mat) + \"\\n\")\n\t\t\ttrue_positive = conf_mat[1][1]\n\t\t\tfalse_positive = conf_mat[0][1]\n\t\t\tfalse_negative = conf_mat[1][0]\n\t\t\tprec = true_positive / (true_positive + false_positive)\n\t\t\trec = true_positive / (true_positive + false_negative)\n\t\t\tf1 = 2*prec*rec/(prec+rec)\n\t\t\tsys.stderr.write(\"P: \" + str(prec) + \"\\n\")\n\t\t\tsys.stderr.write(\"R: \" + str(rec) + \"\\n\")\n\t\t\tsys.stderr.write(\"F1: \" + str(f1) + \"\\n\")\n\t\t\twith io.open(\"diff.tab\",'w',encoding=\"utf8\") as f:\n\t\t\t\tfor i in range(len(gold)):\n\t\t\t\t\tf.write(\"\\t\".join([toks[i],str(gold[i]),str(pred[i])])+\"\\n\")\n\t\t\treturn conf_mat, prec, rec, f1\n\t\telse:\n\t\t\treturn pred", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def predict(x):\n model = Model()\n res = model.predict([x])[0][0]\n click.echo(res)", "def predict_cli(text, config_filepath):\n load_classifier(config_filepath)\n print(classifier.predict(text))", "def predict(data, model: str = None, **kwargs):\n\n model_instance = get_model(model)\n log.debug(\"Predict with \" + str(model_instance))\n return model_instance.predict(data, **kwargs)", "def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])", "def predict(self, title):\n \n return self.knn_model.predict(self.target_lang_topics[title])", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load 
checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def predict(model, dataset_info, args):\n dataset_info, model_info = fill_info_dicts(dataset_info, args)\n\n fill_pred_op_info(dataset_info, model, args, model_info)\n # fill_topic_op(args, model_info)\n\n str_ = 'Predictions of the given text data of dataset %s using different ' \\\n 'saved models:' % args.predict_dataset\n labels = [str(i) for i in dataset_info[args.predict_dataset]['labels']]\n if len(labels) == 2 or args.task == 'regression':\n # TODO currently just hard code for binary\n header = 'id\\tlabel\\t' + str(1) + '\\n'\n else:\n header = 'id\\tlabel\\t' + '\\t'.join(labels) + '\\n'\n\n saver = tf.train.Saver(max_to_keep=100)\n\n model_names = args.datasets\n if len(args.datasets) > 1:\n model_names.append('MULT')\n\n for model_name in model_names:\n # load the saved best model\n str_ += '\\nUsing the model that performs the best on (%s)\\n' % model_name\n\n output = header\n str_ += header\n\n data = []\n\n with tf.Session() as sess:\n if model_name == 'MULT':\n checkpoint_path = os.path.join(args.checkpoint_dir, 'MULT',\n 'model')\n else:\n checkpoint_path = model_info[model_name]['checkpoint_path']\n\n saver.restore(sess, checkpoint_path)\n\n dataset_name = args.predict_dataset\n\n # import pdb\n # sess.run(model_info[dataset_name]['pred_iter'].initializer)\n # batch = model_info[dataset_name]['pred_batch']\n # text, weights = sess.run([batch['text'], batch['text_weights']])\n # pdb.set_trace()\n\n _pred_op = model_info[dataset_name]['pred_pred_op']\n _pred_iter = model_info[dataset_name]['pred_iter']\n _ids, _predictions, _scores = get_all_pred_res(sess, _pred_op,\n _pred_iter, args)\n\n for id, pred, score in zip(_ids, _predictions, _scores):\n record = {\n 'id': id,\n 'label': pred\n }\n if args.task == 'classification':\n for l, s in zip(labels, score):\n 
record[str(l)] = s\n else:\n record['score'] = score[0]\n data.append(record)\n\n # output positive score for binary classification\n\n if len(score) == 2:\n score = str(score[1])\n else:\n score = '\\t'.join([str(i) for i in score])\n str_ += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n output += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n\n make_dir(args.predict_output_folder)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.tsv',\n 'w') as file:\n # for i in _predictions:\n # file.write(str(i))\n file.write(output)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.json',\n 'wt') as file:\n json.dump(data, file, ensure_ascii=False)\n\n logging.info(str_)", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def label_text(model, text, batch_size=None, reverse_label_map=None, device=None):\n model.eval()\n if reverse_label_map is None:\n reverse_label_map = {x: y for (x, y) in enumerate(model.labels)}\n if device is None:\n device = next(model.parameters()).device\n\n text = [update_text(s, model.config.wordvec_type) for s in text]\n\n if batch_size is None:\n intervals = [(0, len(text))]\n else:\n # TODO: results would be better if we sort by length and then unsort\n intervals = [(i, min(i+batch_size, len(text))) for i in range(0, len(text), batch_size)]\n labels = []\n for interval in intervals:\n output = model(text[interval[0]:interval[1]], device)\n predicted = torch.argmax(output, dim=1)\n labels.extend(predicted.tolist())\n\n logger.debug(\"Found labels\")\n for (label, sentence) in zip(labels, text):\n logger.debug((label, sentence))\n\n return labels", "def get_model(word_to_idx, label_to_idx, resume=False, use_glove=True):\n\n best_acc = 0 # best test accuracy\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n if resume:\n # load checkpoint\n checkpoint = load_checkpoint()\n model = checkpoint['model']\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n else:\n print('==> Building model {}...'.format(cfg.RUN_MODE))\n if cfg.RUN_MODE in [\"RNN\", \"LSTM\", \"GRU\"]:\n model = BatchRNN(cfg.EMBEDDING_DIM, cfg.HIDDEN_DIM, cfg.BATCH_SIZE,\n len(word_to_idx), len(label_to_idx), rnn_model=cfg.RUN_MODE)\n else:\n model = CNN_NLP(cfg.EMBEDDING_DIM, cfg.HIDDEN_DIM, cfg.BATCH_SIZE,\n len(word_to_idx), len(label_to_idx))\n if use_glove:\n # model.load_glove_model('GloVe-1.2/vectors.txt', word_to_idx)\n model.load_glove_model(cfg.GLOVE_FILE, word_to_idx, regenerate=True)\n return model, best_acc, start_epoch", "def predict(self, input_vec):\n return self.model.predict(input_vec)", "def predict(OUTPUT_DIR,in_sentences):\n\tPRED_BATCH_SIZE = 64\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\tmodel_config = os.path.join(OUTPUT_DIR,\"model_config.json\")\n\tmodel_config = json.load(open(model_config))\n\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\tconfig = BertConfig(output_config_file)\n\tmodel = BertForSequenceClassification(config, num_labels=model_config[\"num_labels\"])\n\tmodel.load_state_dict(torch.load(output_model_file))\n\tmodel.to(device)\n\ttokenizer = 
BertTokenizer.from_pretrained(model_config[\"bert_model\"],do_lower_case=model_config[\"do_lower\"])\n\n\tin_examples = [InputExample(guid=\"\", text_a=x, text_b=None, label=\"Adverse effect\") for x in in_sentences]\n\tin_features = convert_examples_to_features(in_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\tall_input_ids = torch.tensor([f.input_ids for f in in_features], dtype=torch.long)\n\tall_input_mask = torch.tensor([f.input_mask for f in in_features], dtype=torch.long)\n\tall_segment_ids = torch.tensor([f.segment_ids for f in in_features], dtype=torch.long)\n\n\tpred_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids) \n\t# # Run prediction for full data\n\tpred_sampler = SequentialSampler(pred_data)\n\tpred_dataloader = DataLoader(pred_data, sampler=pred_sampler, batch_size=PRED_BATCH_SIZE,drop_last = False)\n\tmodel.eval()\n\n\tpreds = []\n\n\tlabel_map = model_config[\"label_map\"]\n\n\tfor input_ids, input_mask, segment_ids in tqdm(pred_dataloader, desc=\"Predicting\"):\n\t\tinput_ids = input_ids.to(device)\n\t\tinput_mask = input_mask.to(device)\n\t\tsegment_ids = segment_ids.to(device)\n\n\t\twith torch.no_grad():\n\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\n\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\tlogits = logits.detach().cpu().numpy() \n\n\t\tpreds.extend(logits)\n\tlabel_map_reverse = {\"1\":\"Adverse effect\",\"2\":\"Not an adverse effect\"}\n\treturn [(sentence,label_map_reverse[str(pred)]) for sentence,pred in zip(in_sentences,preds)]" ]
[ "0.6949452", "0.6697954", "0.6614793", "0.65763474", "0.6551022", "0.65021735", "0.6459889", "0.6424772", "0.64095986", "0.63946915", "0.6354461", "0.6343086", "0.6327474", "0.62942815", "0.62706435", "0.6258738", "0.6226277", "0.62202674", "0.62170017", "0.62144226", "0.62062186", "0.6192424", "0.61923355", "0.61912537", "0.6189462", "0.6178384", "0.61715066", "0.6142006", "0.61354125", "0.61282593" ]
0.7439432
0
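Both prediction documents above repeat the same featurisation step: average the word2vec vectors of all in-vocabulary tokens after splitting hyphenated words. Pulled out as a standalone sketch (the helper name is an assumption; the individual calls are copied from the rows above):

import re
import numpy as np
from nltk.tokenize import word_tokenize

def mean_word_vector(sentence, word2vec):
    # Average the word2vec vectors of the in-vocabulary tokens.
    tokens = word_tokenize(re.sub("-", " ", sentence))
    return np.mean([word2vec.word_vec(w) for w in tokens if w in word2vec], axis=0)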
Plots the FEniCS mesh as it is
def plot_fenics_mesh(mesh, new_fig=True):
    if(new_fig):
        plt.figure()
    plot(mesh)
    #plt.title("FEniCS mesh")
    plt.show(block=False)
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def plot_multigroup_flux(mesh, state, edges = False) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n edgec = 'none'\n if edges :\n edgec = 'k'\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors=edgec)\n \n else :\n print \"not ready for 3d\"\n return\n # show the plot\n plt.show()", "def plotMesh(verts,tris):\n x = verts[:,0]\n y = verts[:,1]\n\n plt.figure()\n plt.gca().set_aspect('equal')\n plt.triplot(x, y, tris, 'k-')\n plt.title('Unstructured Mesh')\n plt.xlabel('distance (m)')\n plt.ylabel('distance (m)')", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def mplot_mesh(meshtriang: df.Mesh) -> Tuple[plt.Figure, Any]:\n fig, ax = plt.subplots(1)\n ax.triplot(meshtriang, 'ko-', lw=1)\n return fig, ax", "def plot_mesh_function(mesh, f, title=\"\", colormap = \"hot\", edges = False, mybounds = [], myticks = []) :\n if mesh.dimension() == 1 :\n # get the mesh 
points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n # Reshape the function\n f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())\n if edges :\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')\n else :\n plt.pcolor(X, Y, f, cmap=colormap)\n plt.axis(\"scaled\") \n plt.xlabel(\"x [cm]\")\n plt.ylabel(\"y [cm]\")\n if len(myticks) :\n cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)\n else : \n cbar = plt.colorbar()\n else :\n print \"not ready for 3d\"\n return\n plt.title(title)\n # show the plot\n plt.show()", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot(mesh):\n from scipy.spatial import delaunay_plot_2d\n fig = delaunay_plot_2d(SimpleMesh(mesh))\n ax = fig.gca()\n ax.set_aspect(\"equal\")\n return fig, ax", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_mesh(corners):\r\n triangle = tri.Triangulation(corners[:, 0], corners[:, 1])\r\n\r\n refiner = tri.UniformTriRefiner(triangle)\r\n trimesh = refiner.refine_triangulation(subdiv=4)\r\n \r\n plt.figure(figsize=(6, 4))\r\n for i, mesh in enumerate((triangle, trimesh)):\r\n plt.subplot(1, 2, i+1)\r\n plt.triplot(mesh)\r\n plt.axis('off')\r\n plt.axis('equal')", "def PlotMeshNumbering(self, figure=None, show_plot=True):\n\n self.__do_essential_memebers_exist__()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n\n if self.element_type == \"tri\":\n\n if figure is None:\n figure = plt.figure()\n plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])\n plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)\n\n for i in range(0,self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"quad\":\n\n if figure is None:\n figure = plt.figure()\n point_radius = 3.\n\n C = self.InferPolynomialDegree() - 1\n\n edge_elements = self.GetElementsEdgeNumberingQuad()\n reference_edges = NodeArrangementQuad(C)[0]\n reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)\n reference_edges = np.delete(reference_edges,1,1)\n\n self.GetEdgesQuad()\n x_edges = np.zeros((C+2,self.all_edges.shape[0]))\n y_edges = np.zeros((C+2,self.all_edges.shape[0]))\n\n BasesOneD = np.eye(2,2)\n for iedge in range(self.all_edges.shape[0]):\n ielem = edge_elements[iedge,0]\n edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]\n x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T\n\n\n plt.plot(x_edges,y_edges,'-k')\n\n for i in range(self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n\n import 
matplotlib as mpl\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n if figure is None:\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))\n view = mlab.view()\n figure.scene.disable_render = True\n\n color = mpl.colors.hex2color('#F88379')\n\n linewidth = 3.\n # trimesh_h = mlab.triangular_mesh(self.points[:,0],\n # self.points[:,1], self.points[:,2], self.faces[:,:3],\n # line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),\n # representation='wireframe') # representation='surface'\n\n # # CHANGE LIGHTING OPTION\n # trimesh_h.actor.property.interpolation = 'phong'\n # trimesh_h.actor.property.specular = 0.1\n # trimesh_h.actor.property.specular_power = 5\n\n # PLOTTING EDGES\n from Florence.PostProcessing import PostProcess\n tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,\n plot_points=True, plot_edges=True, plot_surfaces=False)\n\n x_edges = tmesh.x_edges\n y_edges = tmesh.y_edges\n z_edges = tmesh.z_edges\n connections = tmesh.connections\n\n src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())\n src.mlab_source.dataset.lines = connections\n h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)\n # AVOID WARNINGS\n # lines = mlab.pipeline.stripper(src)\n # h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)\n\n # ELEMENT NUMBERING\n # for i in range(0,self.elements.shape[0]):\n # coord = self.points[self.elements[i,:],:]\n # x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n # y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n # z_avg = np.sum(coord[:,2])/self.elements.shape[1]\n\n # # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)\n # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)\n\n # POINT NUMBERING\n for i in range(self.elements.shape[0]):\n for j in range(self.elements.shape[1]):\n text_obj = mlab.text3d(self.points[self.elements[i,j],0],\n self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),\n color=(0,0,0.),scale=0.05)\n\n\n figure.scene.disable_render = False\n\n if show_plot:\n # mlab.view(*view)\n mlab.show()", "def plot_plasma(self):\n x = self.geom.x\n fig, axes = plt.subplots(1, 2, figsize=(8, 3),\n constrained_layout=True)\n # plot densities\n ax = axes[0]\n ax.plot(x, self.ne, 'b-')\n ax.plot(x, self.ni, 'r-')\n ax.legend(['E', 'Ion'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Density (m^-3)')\n # plot temperature\n ax = axes[1]\n ax.plot(x, self.Te, 'b-')\n ax.plot(x, self.Ti, 'r-')\n ax.legend(['Te', 'Ti'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Temperature (eV)')\n plt.show()", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), 
radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 
* self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n 
figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()", "def draw_stl_from_mesh(m):\n plt.ion()\n # Create a new plot\n figure = plt.figure()\n axes = mplot3d.Axes3D(figure)\n\n # Render the cube faces\n #for m in meshes:\n axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))\n\n # Auto scale to the mesh size\n scale = m.points.flatten(-1)\n axes.auto_scale_xyz(scale, scale, scale)", "def Draw1D(mesh, coefs, keep=False, n_p=2, figsize=(20,4)):\n if n_p <= 2:\n n_p = 2\n \n eps = 1e-6 \n \n x_v = [p[0] for p in mesh.ngmesh.Points()]\n x_s = []\n f_s = {}\n\n miny = 1e99\n for f, name in coefs:\n f_s[name] = []\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n \n for el in mesh.ngmesh.Elements1D():\n left = mesh.ngmesh.Points()[el.points[0]][0]\n right = mesh.ngmesh.Points()[el.points[1]][0]\n for l in range(n_p):\n y = left + eps + (l / (n_p-1)) * (right - eps -left) \n x_s.append(y)\n for f,name in coefs:\n ff = f(mesh(y))\n miny = min(miny,ff)\n f_s[name].append(ff)\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n\n \n # plt.clf()\n # display.display(plt.gcf())\n plt.figure(figsize=figsize)\n for f,name in coefs:\n plt.plot(x_s,f_s[name],label=name)\n plt.plot(x_v,[miny for v in x_v],'|',label='vertices')\n plt.xlabel(\"x\")\n plt.legend()\n plt.show()\n if keep:\n display.clear_output(wait=True)", "def show2(self):\n #zfactor = 4\n xb, yb = self.bary.T\n sol0 = self.dat[0]['sol'][:,0]\n triangles = self.tri_pnts_b\n import mayavi.mlab as mlab\n fig = mlab.figure(bgcolor = (0.1, 0.1, 0.1),\n size = (1280, 800))\n @mlab.animate()\n def showdat():\n \"\"\"Example from:\n http://github.enthought.com/mayavi/mayavi/tips.html#animating-a-visualization\n \"\"\"\n # triangular_mesh see:\n # http://github.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html?highlight=triangular_mesh#mayavi.mlab.triangular_mesh\n img = mlab.triangular_mesh(xb, yb, sol0, triangles, scalars=sol0)\n #fig = mlab.gcf()\n ms = img.mlab_source\n for t, s in self.dat:\n # see: http://github.enthought.com/mayavi/mayavi/mlab_animating.html?highlight=animating\n ms.set(scalars=s[:,0])\n yield\n a = showdat()", "def plot(self):\n pass", "def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()", "def plot_wavefunctions(xmesh,interval,N,V,eigenvectors,pdf): \n for i in range(10):\n psi=[0]\n for val in eigenvectors[:,i]:\n psi.append(val)\n psi.append(0)\n\n pdfp=[0]\n for val in pdf[:,i]:\n pdfp.append(val)\n pdfp.append(0)\n\n Vplot=[]\n\n for i in range(len(xmesh)):\n \n Vplot.append(V(xmesh[i]))\n \n\n plt.plot(xmesh,psi)\n plt.plot(xmesh,pdfp,'g')\n plt.xlabel('x', fontsize=20, color='black')\n plt.ylabel('psi', fontsize=20, color='black')\n plt.show()\n\n## plt.plot(xmesh,Vplot,'k')\n## plt.xlabel('x', fontsize=20, color='black')\n## plt.ylabel('V(x)', fontsize=20, color='black')\n## plt.ylim(-5,30)\n## plt.show()\n \n return None", "def plot(self):\n self.plotsite()\n 
self.plotbond()\n plt.show()", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def plot3d(self):\n plot_rupture_wire3d(self)", "def show_mesh(self):\n g = self.build_gmsh()\n if g:\n mesh = cfm.GmshMesh(g)\n mesh.el_type = self.el_type\n\n mesh.dofs_per_node = self.dofs_per_node\n mesh.el_size_factor = self.el_size_factor\n self.mesh = mesh\n\n coords, edof, dofs, bdofs, elementmarkers = mesh.create()\n cfv.clf()\n\n cfv.draw_mesh(\n coords=coords,\n edof=edof,\n dofs_per_node=mesh.dofs_per_node,\n el_type=mesh.el_type,\n filled=True\n )\n if self.figure_canvas is not None:\n self.figure_canvas.draw()\n else:\n cfv.show_and_wait()\n return None\n else:\n return \"Canceled\"", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def plot_faces(self, f=None, index_row=0, index_col=0, show=True, plotter=None, cmap='jet', title=None,\n title_location=\"upper_edge\", font_size=10, font_color='black', texture=None, camera=None):\n if not plotter:\n plotter = pv.Plotter()\n plotter.subplot(index_column=index_col, index_row=index_row)\n if title:\n plotter.add_text(title, position=title_location, font_size=font_size, color=font_color)\n if camera is not None:\n plotter.set_position(camera[0])\n plotter.set_focus(camera[1])\n plotter.set_viewup(camera[2])\n if self.texture:\n plotter.add_mesh(self.pv_mesh, texture=self.texture)\n elif texture is None:\n plotter.add_mesh(self.pv_mesh, scalars=f, cmap=cmap, texture=texture, show_scalar_bar=False)\n else:\n if isinstance(texture, np.ndarray):\n tex = pv.numpy_to_texture(texture)\n else:\n tex = pv.read_texture(texture)\n self.pv_mesh.texture_map_to_plane(inplace=True)\n 
plotter.add_mesh(self.pv_mesh, texture=tex)\n if show:\n plotter.show()\n return plotter", "def plot(self):\n\t\tself.plotOfSpect()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot_2d(self):\n fig = plt.figure(figsize=(10,8))\n \n d = int(len(self.a_scale.flat)**0.5)\n a_scale = self.a_scale.reshape(d,d)\n c_scale = self.c_scale.reshape(d,d)\n E_coh = self.E_coh.reshape(d,d)\n plt.pcolormesh(a_scale, c_scale, E_coh)\n plt.xlabel('xy linear deformation coefficient')\n plt.xlabel('z linear deformation coefficient')\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('cohesive energy (eV/atom)',\n fontsize='x-large')\n plt.show()\n \n return fig" ]
[ "0.7348052", "0.7272838", "0.7201923", "0.71683663", "0.6932882", "0.6908609", "0.68675065", "0.6819857", "0.67012626", "0.66765225", "0.66062456", "0.6585177", "0.65265316", "0.6516479", "0.6515228", "0.6511229", "0.6509004", "0.643969", "0.64123374", "0.6407074", "0.63938946", "0.6354461", "0.6350804", "0.63122076", "0.63050175", "0.6289925", "0.6286469", "0.62707883", "0.62491316", "0.6245859" ]
0.7717216
0
plots the mesh/centroids of the mesh as expected in peridynamics; either mesh or cell_cent is to be provided by the user; providing neither mesh nor cell_cent is wrong (an error is raised)
def plot_peridym_mesh(mesh=None, struct_grd=True, cell_cent=None, disp_cent=None, annotate=False):
    if struct_grd:
        cell_centroid_function = structured_cell_centroids
    else:
        cell_centroid_function = get_cell_centroids

    if mesh is None and len(np.shape(cell_cent)) == 0 and len(np.shape(disp_cent)) == 0:
        raise AssertionError("provide either fenics mesh or cell centroid of PD particles")

    if len(np.shape(cell_cent)) != 0 and len(np.shape(disp_cent)) == 0:
        extents = get_domain_bounding_box(cell_cent=cell_cent)
    if len(np.shape(cell_cent)) == 0 and len(np.shape(disp_cent)) != 0:
        extents = get_domain_bounding_box(cell_cent=disp_cent)
    if mesh is not None and (len(np.shape(cell_cent)) == 0 and len(np.shape(disp_cent)) == 0):
        extents = get_domain_bounding_box(mesh=mesh)
        cell_cent = cell_centroid_function(mesh)

    ## we wish to scale the axis according to geometry
    dim = len(cell_cent[0])
    x_min = extents[0][0]; x_max = extents[1][0]
    y_min = extents[0][1]; y_max = extents[1][1]
    x = None; y = None; z = None

    fig = plt.figure()
    if dim == 3:
        z_min = extents[0][2]; z_max = extents[1][2]
        x, y, z = cell_cent.T
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x, y, z, s=70, marker='o', color='b', alpha=1.0, edgecolors='face')
        ax.axis('off')
    if dim == 2:
        ax = fig.add_subplot(111)
        x, y = cell_cent.T
        plt.scatter(x, y, s=300, color='b', marker='o', alpha=0.6)
        plt.axis('off')

    if annotate:
        for idx, cc in enumerate(cell_cent):
            plt.text(cc[0], cc[1], str(idx), color='k', verticalalignment='bottom',
                     horizontalalignment='right', fontsize='medium')

    ax.set_aspect('equal')
    plt.title("peridynamics mesh")
    plt.show(block=False)
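A minimal usage sketch for plot_peridym_mesh (hedged: the helpers it relies on — structured_cell_centroids, get_cell_centroids, get_domain_bounding_box — are assumed to be importable from the same peridynamics utility module, UnitSquareMesh is taken from legacy FEniCS/dolfin, and the call patterns below are illustrative rather than taken from the original source):

    import numpy as np
    from dolfin import UnitSquareMesh

    # Option 1: pass a FEniCS mesh; centroids are computed internally
    # via structured_cell_centroids / get_cell_centroids.
    msh = UnitSquareMesh(10, 10)
    plot_peridym_mesh(mesh=msh, struct_grd=False)

    # Option 2: pass precomputed particle centroids directly.
    cents = np.array([[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]])
    plot_peridym_mesh(cell_cent=cents, annotate=True)

    # Passing neither mesh nor cell_cent raises AssertionError.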
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)", "def Draw1D(mesh, coefs, keep=False, n_p=2, figsize=(20,4)):\n if n_p <= 2:\n n_p = 2\n \n eps = 1e-6 \n \n x_v = [p[0] for p in mesh.ngmesh.Points()]\n x_s = []\n f_s = {}\n\n miny = 1e99\n for f, name in coefs:\n f_s[name] = []\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n \n for el in mesh.ngmesh.Elements1D():\n left = mesh.ngmesh.Points()[el.points[0]][0]\n right = mesh.ngmesh.Points()[el.points[1]][0]\n for l in range(n_p):\n y = left + eps + (l / (n_p-1)) * (right - eps -left) \n x_s.append(y)\n for f,name in coefs:\n ff = f(mesh(y))\n miny = min(miny,ff)\n f_s[name].append(ff)\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n\n \n # plt.clf()\n # display.display(plt.gcf())\n plt.figure(figsize=figsize)\n for f,name in coefs:\n plt.plot(x_s,f_s[name],label=name)\n plt.plot(x_v,[miny for v in x_v],'|',label='vertices')\n plt.xlabel(\"x\")\n plt.legend()\n plt.show()\n if keep:\n display.clear_output(wait=True)", "def PlotMeshNumbering(self, figure=None, show_plot=True):\n\n self.__do_essential_memebers_exist__()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n\n if self.element_type == \"tri\":\n\n if figure is None:\n figure = plt.figure()\n plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])\n plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)\n\n for i in range(0,self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"quad\":\n\n if figure is None:\n figure = plt.figure()\n point_radius = 3.\n\n C = self.InferPolynomialDegree() - 1\n\n edge_elements = self.GetElementsEdgeNumberingQuad()\n reference_edges = NodeArrangementQuad(C)[0]\n 
reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)\n reference_edges = np.delete(reference_edges,1,1)\n\n self.GetEdgesQuad()\n x_edges = np.zeros((C+2,self.all_edges.shape[0]))\n y_edges = np.zeros((C+2,self.all_edges.shape[0]))\n\n BasesOneD = np.eye(2,2)\n for iedge in range(self.all_edges.shape[0]):\n ielem = edge_elements[iedge,0]\n edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]\n x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T\n\n\n plt.plot(x_edges,y_edges,'-k')\n\n for i in range(self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n\n import matplotlib as mpl\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n if figure is None:\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))\n view = mlab.view()\n figure.scene.disable_render = True\n\n color = mpl.colors.hex2color('#F88379')\n\n linewidth = 3.\n # trimesh_h = mlab.triangular_mesh(self.points[:,0],\n # self.points[:,1], self.points[:,2], self.faces[:,:3],\n # line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),\n # representation='wireframe') # representation='surface'\n\n # # CHANGE LIGHTING OPTION\n # trimesh_h.actor.property.interpolation = 'phong'\n # trimesh_h.actor.property.specular = 0.1\n # trimesh_h.actor.property.specular_power = 5\n\n # PLOTTING EDGES\n from Florence.PostProcessing import PostProcess\n tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,\n plot_points=True, plot_edges=True, plot_surfaces=False)\n\n x_edges = tmesh.x_edges\n y_edges = tmesh.y_edges\n z_edges = tmesh.z_edges\n connections = tmesh.connections\n\n src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())\n src.mlab_source.dataset.lines = connections\n h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)\n # AVOID WARNINGS\n # lines = mlab.pipeline.stripper(src)\n # h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)\n\n # ELEMENT NUMBERING\n # for i in range(0,self.elements.shape[0]):\n # coord = self.points[self.elements[i,:],:]\n # x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n # y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n # z_avg = np.sum(coord[:,2])/self.elements.shape[1]\n\n # # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)\n # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)\n\n # POINT NUMBERING\n for i in range(self.elements.shape[0]):\n for j in range(self.elements.shape[1]):\n text_obj = mlab.text3d(self.points[self.elements[i,j],0],\n self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),\n color=(0,0,0.),scale=0.05)\n\n\n figure.scene.disable_render = False\n\n if show_plot:\n # mlab.view(*view)\n mlab.show()", "def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = 
plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()", "def surfaceRender(nodal_mesh, focus, ax=None):\n\t# If no axes were passed, generate new set of axes\n\tif not ax:\n\t\tfig = mplt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\n\t# Sort the mesh by first 3 columns\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 0].argsort()]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 1].argsort(kind='mergesort')]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 2].argsort(kind='mergesort')]\n\t\n\t# Set up number of divisions and calculate e for each division (as a ratio)\n\tnum_div = 20\n\te = [i/num_div for i in range(num_div + 1)]\n\t# Convert angular values from degrees to radians\n\trads = math.pi/180\n\tnodal_mesh[:, 1:3] *= rads\n\t# Store the shapes and sizes of the mesh values\n\tm = nodal_mesh.shape[0]\n\tsize_nodal_nu = np.where(nodal_mesh[:, 2] == 0)[0].size\n\tsize_nodal_phi = m/size_nodal_nu\n\t# Get the mu and theta values from the mesh\n\tnodal_nu = nodal_mesh[:size_nodal_nu, 1]\n\tnodal_phi = nodal_mesh[::size_nodal_nu, 2]\n\t# Convert apex node from prolate to cartesian, then plot with scatter\n\tif min(nodal_nu) == 0:\n\t\tx, y, z = mathhelper.prolate2cart(nodal_mesh[0, 0], nodal_mesh[0, 1], nodal_mesh[0, 2], focus)\n\t\tax.scatter(z, y, -x)\n\t\tstart_nu = 1\n\telse:\n\t\tstart_nu = 0\n\t# Plot circumferential element boundaries\n\tfor i in range(start_nu, size_nodal_nu):\n\t\tfor j in range(int(size_nodal_phi)):\n\t\t\t# Define nodal values for interpolation\n\t\t\tif j == size_nodal_phi-1:\n\t\t\t\tind0 = i\n\t\t\t\tp0 = 2*math.pi\n\t\t\telse:\n\t\t\t\tind0 = (j+1)*size_nodal_nu + i\n\t\t\t\tp0 = nodal_phi[j+1]\n\t\t\tind1 = (j)*size_nodal_nu + i\n\t\t\tp1 = nodal_phi[j]\n\t\t\t# Get mu and dM/dm1\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 3]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 3]\n\t\t\t# Convert to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot the node\n\t\t\tax.scatter(n0z, n0y, -n0x)\n\t\t\t# Plot the arc segments\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine starting point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get theta\n\t\t\t\tp_here = p0 - e[k]*(p0 - p1)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, nodal_nu[i], p_here, focus)\n\t\t\t\t# Create 
vectors\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot segments\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t# Plot longitudinal element boundaries\n\tfor i in range(int(size_nodal_phi)):\n\t\tfor j in range(size_nodal_nu-1):\n\t\t\t# Define nodal values needeed for interpolation\n\t\t\tind0 = i*size_nodal_nu + j\n\t\t\tind1 = ind0 + 1\n\t\t\tn0 = nodal_nu[j]\n\t\t\tn1 = nodal_nu[j+1]\n\t\t\t# Get lambda and dL/de2\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 4]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 4]\n\t\t\t# Convert nodal points to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot arc\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get nu\n\t\t\t\tn_here = n0 + e[k]*(n1-n0)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, n_here, nodal_phi[i], focus)\n\t\t\t\t# Append the vectors for plotting\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot the segment\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t\t\t\t\n\treturn(ax)", "def defineCircleLayout(self):\n # Define a 2-D array representing the position of each mesh point\n self.xPoints = self.frange(0,self.R,self.h)\n self.yPoints = self.frange(0,self.R,self.h)\n\n # Position of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints for j in self.yPoints if (i - self.R)**2 + (j - self.R)**2 < self.R^2] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = xPoints.index(x)\n yLabel = yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Position of the boundary points\n # Find the intersection of each mesh line with the circle\n # For a given vertical mesh line: \n # y = R - sqrt(R^2 - (x-R)^2) & y = R + sqrt(R^2 - (x-R)^2)\n # For a given horizontal mesh line: \n # x = R - sqrt(R^2 - (y-R)^2) & x = R + sqrt(R^2 - (y-R)^2)\n boundary_xyCoord = [(0,self.R),(self.R,0),(self.R,2*self.R),(2*self.R,self.R)] + [(x,self.R - math.sqrt(self.R**2 - (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(x,self.R - math.sqrt(self.R**2 + (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(self.R - math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] + [(self.R + math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n [xLabel,yLabel] = self.findLabel(x,y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def plot_fenics_mesh(mesh, new_fig=True):\n if(new_fig):\n plt.figure()\n\n plot(mesh)\n #plt.title(\"FEniCS mesh\")\n plt.show(block=False)\n\n 
pass", "def plot_mesh_function(mesh, f, title=\"\", colormap = \"hot\", edges = False, mybounds = [], myticks = []) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n # Reshape the function\n f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())\n if edges :\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')\n else :\n plt.pcolor(X, Y, f, cmap=colormap)\n plt.axis(\"scaled\") \n plt.xlabel(\"x [cm]\")\n plt.ylabel(\"y [cm]\")\n if len(myticks) :\n cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)\n else : \n cbar = plt.colorbar()\n else :\n print \"not ready for 3d\"\n return\n plt.title(title)\n # show the plot\n plt.show()", "def plotWholeRoom(mesh):\r\n fig = plt.figure()\r\n ax = fig.gca(projection='3d')\r\n X = np.arange(0, mesh.xLength+mesh.meshsize, mesh.meshsize)\r\n Y = np.arange(0, mesh.yLength+mesh.meshsize, mesh.meshsize)\r\n X, Y = np.meshgrid(X,Y)\r\n numberOfXNodes = mesh.x_res#round(mesh.xLength/mesh.meshsize)+1\r\n numberOfYNodes = mesh.y_res#round(mesh.yLength/mesh.meshsize)+1\r\n Z = np.array([[mesh.grid[i,j].funcVal for i in range(numberOfYNodes)] for j in range(numberOfXNodes)])\r\n if mesh.y_res==2:\r\n print()\r\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\r\n linewidth=0, antialiased=False)\r\n # add vmin=4, vmax=41, to define lower and upper value for the color-scheme\r\n # set limits for z-axis\r\n ax.set_zlim(np.amin(Z)-mesh.meshsize, np.amax(Z)+mesh.meshsize)\r\n # don't know what these two lines are for\r\n # x.zaxis.set_major_locator(LinearLocator(10))\r\n # ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\r\n # don't know what these two lines are for\r\n fig.colorbar(surf, shrink=0.5, aspect=5)\r\n plt.show() \r\n return fig", "def mesh_axes(mesh) :\n \n if (mesh.dimension() == 1) :\n # for 1D, we take the cell center points\n x = np.zeros(mesh.number_cells_x())\n x[0] = mesh.dx(0) * 0.5\n for i in range(0, mesh.number_cells_x()-1) :\n x[i + 1] = x[i] + 0.5*(mesh.dx(i) + mesh.dx(i+1))\n return x \n \n else :\n # for 2D, we take the mesh edges\n x = np.zeros(mesh.number_cells_x()+1)\n y = np.zeros(mesh.number_cells_y()+1)\n for i in range(0, mesh.number_cells_x()) :\n x[i + 1] = x[i] + mesh.dx(i)\n for j in range(0, mesh.number_cells_y()) :\n y[j + 1] = y[j] + mesh.dy(j)\n return (x, y)", "def plot_initial_geometry(ni=0.0, mu=0.5):", "def createMesh(self, chem, coord_x_start, coord_y_start) :\r\n init_conc = .0\r\n self.compParDiff(chem)\r\n comp.Comp.createMeshHomo(self, 'SC', chem, init_conc, coord_x_start, coord_y_start)\r\n #self.meshes[0].setConc(1)\r", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def cfdProcessGeometry(self):\r\n \r\n # self.faceCentroids']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceSf']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceAreas']= [[] for i in 
range(self.numberOfFaces'])]\r\n \r\n ## Linear weight of distance from cell center to face\r\n self.faceWeights= [[0] for i in range(self.numberOfFaces)]\r\n\r\n ## Not\r\n self.faceCF= [[0, 0, 0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceCf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceFf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDist= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDistLimited= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.elementCentroids= [[] for i in range(self.numberOfElements)]\r\n self.elementVolumes= [[] for i in range(self.numberOfElements)]\r\n \r\n \"\"\"\r\n Calculate:\r\n -face centroids (faceCentroids)\r\n -face normal (Sf)\r\n -face areas (faceAreas)\r\n \"\"\"\r\n \r\n #find cell with largest number of points\r\n maxPoints=len(max(self.faceNodes, key=len))\r\n forCross1 = [[] for i in range(maxPoints)]\r\n forCross2 = [[] for i in range(maxPoints)]\r\n local_faceCentroid=[[] for i in range(maxPoints)]\r\n \r\n for iFace in range(self.numberOfFaces):\r\n theNodeIndices = self.faceNodes[iFace]\r\n theNumberOfFaceNodes = len(theNodeIndices)\r\n \r\n #compute a rough centre of the face\r\n local_centre = [0,0,0]\r\n \r\n for iNode in theNodeIndices:\r\n local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n \r\n local_centre = local_centre/theNumberOfFaceNodes\r\n \r\n for iTriangle in range(theNumberOfFaceNodes):\r\n \r\n point1 = local_centre\r\n point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n \r\n if iTriangle < theNumberOfFaceNodes-1:\r\n point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n else:\r\n point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n \r\n local_faceCentroid[iTriangle].append((point1+point2+point3)/3)\r\n \r\n left=point2-point1\r\n right=point3-point1\r\n \r\n forCross1[iTriangle].append(left)\r\n forCross2[iTriangle].append(right)\r\n \r\n \r\n local_Sf=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n local_area=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n \r\n centroid=np.zeros([self.numberOfFaces,3])\r\n area=np.zeros([self.numberOfFaces])\r\n Sf=np.zeros([self.numberOfFaces,3])\r\n \r\n #cells with fewer faces than others are full of zeros\r\n for i in range(maxPoints):\r\n \r\n forCrossLeft=np.vstack(np.array(forCross1[i]))\r\n forCrossRight=np.vstack(np.array(forCross2[i]))\r\n \r\n local_Sf[i]=0.5*np.cross(forCrossLeft,forCrossRight)\r\n local_area[i]=np.linalg.norm(local_Sf[i],axis=1)\r\n \r\n centroid = centroid + np.array(local_faceCentroid[i])*local_area[i][:,None]\r\n Sf=Sf+local_Sf[i]\r\n area=area+local_area[i]\r\n \r\n self.faceCentroids=centroid/area[:,None]\r\n self.faceSf=Sf\r\n self.faceAreas=area \r\n \r\n \r\n \"\"\"\r\n Pure python version - causes slowness due to iterative np.cross()\r\n \"\"\"\r\n \r\n # for iFace in range(self.numberOfFaces):\r\n # theNodeIndices = self.faceNodes[iFace]\r\n # theNumberOfFaceNodes = len(theNodeIndices)\r\n # \r\n # #compute a rough centre of the face\r\n # local_centre = [0,0,0]\r\n # \r\n # for iNode in theNodeIndices:\r\n # \r\n # local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n # \r\n # local_centre = local_centre/theNumberOfFaceNodes\r\n # centroid = [0, 0, 0]\r\n # Sf = [0,0,0]\r\n # area = 0\r\n # \r\n # #finds area of virtual triangles and adds them to the find to find face area\r\n # #and direction (Sf)\r\n # \r\n # \r\n # \r\n # for iTriangle in range(theNumberOfFaceNodes):\r\n # point1 = local_centre\r\n # 
point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n # \r\n # if iTriangle < theNumberOfFaceNodes-1:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n # else:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n # \r\n # local_centroid = (point1 + point2 + point3)/3\r\n # \r\n # left=point2-point1\r\n # right=point3-point1\r\n # x = 0.5*((left[1] * right[2]) - (left[2] * right[1]))\r\n # y = 0.5*((left[2] * right[0]) - (left[0] * right[2]))\r\n # z = 0.5*((left[0] * right[1]) - (left[1] * right[0]))\r\n # local_Sf=np.array([x,y,z])\r\n # \r\n # local_area = np.linalg.norm(local_Sf)\r\n # \r\n # centroid = centroid + local_area*local_centroid\r\n # Sf = Sf + local_Sf\r\n # area = area + local_area\r\n # centroid = centroid/area\r\n # self.faceCentroids[iFace]=centroid\r\n # self.faceSf[iFace]=Sf\r\n # self.faceAreas[iFace]=area\r\n \r\n \r\n \"\"\"\r\n Calculate:\r\n -element centroids (elementCentroids)\r\n -element volumes (elementVolumes)\r\n \"\"\"\r\n for iElement in range(self.numberOfElements):\r\n \r\n theElementFaces = self.elementFaces[iElement]\r\n \r\n #compute a rough centre of the element\r\n local_centre = [0,0,0]\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n local_centre = local_centre + self.faceCentroids[faceIndex]\r\n \r\n local_centre = local_centre/len(theElementFaces)\r\n \r\n localVolumeCentroidSum = [0,0,0]\r\n localVolumeSum = 0\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n \r\n Cf = self.faceCentroids[faceIndex]-local_centre\r\n \r\n faceSign = -1\r\n if iElement == self.owners[faceIndex]:\r\n faceSign = 1\r\n \r\n local_Sf = faceSign*self.faceSf[faceIndex]\r\n \r\n localVolume = np.dot(local_Sf,Cf)/3\r\n \r\n localCentroid = 0.75*self.faceCentroids[faceIndex]+0.25*local_centre\r\n \r\n localVolumeCentroidSum = localVolumeCentroidSum + localCentroid*localVolume\r\n \r\n localVolumeSum = localVolumeSum + localVolume\r\n \r\n self.elementCentroids[iElement]=localVolumeCentroidSum/localVolumeSum\r\n self.elementVolumes[iElement]=localVolumeSum\r\n \r\n \r\n for iFace in range(self.numberOfInteriorFaces):\r\n \r\n n=self.faceSf[iFace]/np.linalg.norm(self.faceSf[iFace])\r\n own=self.owners[iFace]\r\n nei = self.neighbours[iFace]\r\n \r\n self.faceCF[iFace]=self.elementCentroids[nei]-self.elementCentroids[own]\r\n self.faceCf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[own]\r\n self.faceFf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[nei]\r\n self.faceWeights[iFace]=(-np.dot(self.faceFf[iFace],n))/(-np.dot(self.faceFf[iFace],n)+np.dot(self.faceCf[iFace],n))\r\n \r\n for iBFace in range(self.numberOfInteriorFaces, self.numberOfFaces):\r\n \r\n \r\n n=self.faceSf[iBFace]/np.linalg.norm(self.faceSf[iBFace])\r\n own=self.owners[iBFace]\r\n \r\n self.faceCF[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own]\r\n self.faceCf[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own] \r\n self.faceWeights[iBFace]=1\r\n self.wallDist[iBFace]= max(np.dot(self.faceCf[iBFace], n), 1e-24)\r\n self.wallDistLimited[iBFace]= max(self.wallDist[iBFace], 0.05*np.linalg.norm(self.faceCf[iBFace]))", "def plotMesh(verts,tris):\n x = verts[:,0]\n y = verts[:,1]\n\n plt.figure()\n plt.gca().set_aspect('equal')\n plt.triplot(x, y, tris, 'k-')\n plt.title('Unstructured Mesh')\n plt.xlabel('distance (m)')\n plt.ylabel('distance (m)')", "def show_mesh(self):\n g = self.build_gmsh()\n if g:\n mesh = cfm.GmshMesh(g)\n mesh.el_type 
= self.el_type\n\n mesh.dofs_per_node = self.dofs_per_node\n mesh.el_size_factor = self.el_size_factor\n self.mesh = mesh\n\n coords, edof, dofs, bdofs, elementmarkers = mesh.create()\n cfv.clf()\n\n cfv.draw_mesh(\n coords=coords,\n edof=edof,\n dofs_per_node=mesh.dofs_per_node,\n el_type=mesh.el_type,\n filled=True\n )\n if self.figure_canvas is not None:\n self.figure_canvas.draw()\n else:\n cfv.show_and_wait()\n return None\n else:\n return \"Canceled\"", "def plot_multigroup_flux(mesh, state, edges = False) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n edgec = 'none'\n if edges :\n edgec = 'k'\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors=edgec)\n \n else :\n print \"not ready for 3d\"\n return\n # show the plot\n plt.show()", "def prog(args):\r\n i_fname, o_fname, pedestal_params, split_list, Num_W = args\r\n mesh = stl.mesh.Mesh.from_file(i_fname)\r\n #rotate mesh since by default the rotation axis is along X\r\n mesh.rotate([0,1,0],np.pi/2)\r\n\r\n v_arr = np.round(np.vstack(mesh.vectors).astype(float), decimals=1)\r\n\r\n splt0_arr = np.array(split_list)\r\n splt1_arr = np.roll(splt0_arr,-1)\r\n\r\n pos = cf.cartesian2cylyndrical(v_arr, Num_W)\r\n\r\n #make splits\r\n pos_list=[]\r\n for splt0, splt1 in zip(splt0_arr[:-1], splt1_arr[:-1]):\r\n pos_idx = np.where((splt0<=pos[:,:,2]) & (splt1>pos[:,:,2]))[0]\r\n print(splt0, splt1)\r\n #pos = [r, th, z] sectionwise\r\n pos_list.append(pos[pos_idx])\r\n #add pedestal mesh\r\n\r\n for sect_num, pos in enumerate(pos_list):\r\n pos = cf.add_pedestal(pos, pedestal_params)\r\n profiles=np.zeros_like(pos)\r\n\r\n for i in np.arange(np.shape(pos)[0]):\r\n profiles[i] = cf.cylyndrical2cartesian(pos[i])\r\n\r\n strokes = np.flipud(np.rot90(profiles))\r\n #transform data from longeron nodes [xyz] to:\r\n #a_arr - rotation angle around the rotation axis\r\n #r_arr - length of a segment perpenticular to the rotation axis and corresponding lateral mesh edge\r\n #z_arr - corresponding z coordiantes\r\n #v_arr - direction vector of the coresponding lateral mesh edge\r\n a_arr, r_arr, z_arr, v_arr = cf.transform(strokes, add_pedestal_bottom=True,add_pedestal_top=True)\r\n\r\n #make a summary plots\r\n cf.plot_loft_paths(profiles)\r\n cf.plot_loft_paths(pos)\r\n cf.plot_surf(a_arr,z_arr,r_arr)\r\n\r\n #collect data to the dictionary longeron wise\r\n res_dict = {'a_arr':np.rot90(a_arr, k=-1),\r\n 'r_arr':np.rot90(r_arr, k=-1),\r\n 'z_arr':np.rot90(z_arr, k=-1),\r\n 'v_arr':np.rot90(v_arr, k=-1)}\r\n\r\n #save result dictionary\r\n if not o_fname:\r\n o_fname = i_fname\r\n\r\n fname='{}_{}.pickle'.format(o_fname, sect_num)\r\n with open(fname, 'wb') as f:\r\n # Pickle the 'data' dictionary using the highest protocol available.\r\n pickle.dump(res_dict, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n print(fname, ' saved')", "def mplot_mesh(meshtriang: df.Mesh) -> Tuple[plt.Figure, Any]:\n fig, ax = plt.subplots(1)\n ax.triplot(meshtriang, 'ko-', lw=1)\n return fig, ax", "def visualize(self, reduced_data):\n\t\t# Step size of the mesh. Decrease to increase the quality of the VQ.\n\t\th = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].\n\t\t\n\t\t# Plot the decision boundary. 
For that, we will assign a color to each\n\t\tx_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1\n\t\ty_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1\n\t\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n\t\t# Obtain labels for each point in mesh. Use last trained model.\n\t\tZ = self.estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n\t\t# Put the result into a color plot\n\t\tZ = Z.reshape(xx.shape)\n\t\t\n\t\tplt.figure(1)\n\t\tplt.clf()\n\t\tplt.imshow(Z, interpolation='nearest',\n\t\t extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n\t\t cmap=plt.cm.Paired,\n\t\t aspect='auto', origin='lower')\n\n\t\tplt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=4)\n\t\t# Plot the centroids as a white X\n\t\tcentroids = self.estimator.cluster_centers_\n\t\tplt.scatter(centroids[:, 0], centroids[:, 1],\n\t\t marker='x', s=169, linewidths=3,\n\t\t color='w', zorder=10)\n\t\tplt.title('K-means clustering with random data (PCA-reduced data)\\n'\n\t\t 'Centroids are marked with white cross')\n\t\tplt.xlim(x_min, x_max)\n\t\tplt.ylim(y_min, y_max)\n\t\tplt.xticks(())\n\t\tplt.yticks(())\n\t\tplt.show()", "def cmesh(self):\n return numpy.meshgrid(*self.centers, indexing='ij')", "def segment_mesh(mesh, k, coefficients, action, ev_method, kmeans_init):\n\n # set coefficients\n global delta\n global eta\n delta = coefficients[0]\n eta = coefficients[1]\n ev_method = 'sparse'\n kmeans_init = 'Liu'\n\n # affinity matrix\n W = _create_affinity_matrix(mesh)\n # print(\"---------------W:---------------\")\n # print(W)\n print(\"mesh_segmentation: Calculating graph laplacian...\")\n # degree matrix\n Dsqrt = numpy.sqrt(numpy.reciprocal(W.sum(1))) # 每一行的和,取倒数在开根号。\n # print(\"---------------Dsqrt:---------------\")\n # print(Dsqrt)\n # graph laplacian\n L = ((W * Dsqrt).transpose() * Dsqrt).transpose()\n # print(\"---------------L:---------------\")\n # print(L)\n print(\"mesh_segmentation: Calculating eigenvectors...\")\n # get eigenvectors\n if ev_method == 'dense':\n _, V = scipy.linalg.eigh(L, eigvals=(L.shape[0] - k, L.shape[0] - 1))\n # print(\"L.shape[0] - k: \", L.shape[0] - k, \", L.shape[0] - 1: \", L.shape[0] - 1)\n # print(\"---------------V:---------------\")\n # print(V)\n\n else:\n # 默认\n k = int(k)\n E, V = scipy.sparse.linalg.eigsh(L, k)\n # print(\"---------------V:---------------\")\n # print(V)\n # print(\"---------------E:---------------\")\n # print(E)\n # normalize each row to unit length\n V /= numpy.linalg.norm(V, axis=1)[:, None]\n # print(\"---------------norm V:---------------\")\n # print(V)\n start = time.time()\n if kmeans_init == 'kmeans++':\n print(\"mesh_segmentation: Applying kmeans...\")\n _, idx = scipy.cluster.vq.kmeans2(V, k, minit='++', iter=50)\n else:\n print(\"mesh_segmentation: Preparing kmeans...\")\n # compute association matrix\n\n Q = V.dot(V.transpose())\n # print(\"---------------Q:---------------\")\n # print(Q)\n # compute initial guess for clustering\n initial_centroids = _initial_guess(Q, k)\n\n print(\"mesh_segmentation: Applying kmeans...\")\n # print(\"-----------V[initial_centroids,:]-------------\")\n # print(V[initial_centroids, :])\n _, idx = scipy.cluster.vq.kmeans2(V, V[initial_centroids, :], iter=50)\n end = time.time()\n print(\"mesh_segmentation: Done clustering!: \", end - start)\n # perform action with the clustering result\n print(\"---------------idx:---------------\")\n print(idx)\n\n # 为每个面的材质索引赋值,就是类别\n faces = polygons(mesh.faces, mesh.vertices, 
mesh.face_normals, mesh.area_faces)\n for i, id in enumerate(idx):\n faces.polygons[i].material_index = id\n # for i in range(len(mesh.faces)):\n # print(faces.polygons[i].material_index)\n return faces", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim = len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for i, mesh in enumerate(self.all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n points = vtkPoints()\n for j in range(mesh.channel.size):\n # points.InsertNextPoint([0, 0, 0])\n points.InsertNextPoint(mesh.data[:3, j, 0].tolist())\n\n # Create an array for each triangle\n draw_patch = not mesh.automatic_triangles and not self.force_wireframe\n if draw_patch:\n poly_type = vtkPolygon\n n_ids = 3\n color = self.patch_color[i]\n else:\n poly_type = vtkPolyLine\n n_ids = 4\n color = self.mesh_color\n cells = vtkCellArray()\n\n # Create the polygons\n for j in range(mesh.triangles.shape[1]):\n poly = poly_type()\n poly.GetPointIds().SetNumberOfIds(n_ids) # make a tri\n for k in range(len(mesh.triangles[:, j])):\n poly.GetPointIds().SetId(k, mesh.triangles[k, j])\n if not draw_patch:\n poly.GetPointIds().SetId(3, mesh.triangles[0, j]) # Close the triangle\n cells.InsertNextCell(poly)\n\n poly_data = vtkPolyData()\n poly_data.SetPoints(points)\n if draw_patch:\n poly_data.SetPolys(cells)\n else:\n poly_data.SetLines(cells)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_data)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n self.mesh_actors[i].GetProperty().SetColor(color)\n self.mesh_actors[i].GetProperty().SetOpacity(self.mesh_opacity)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n\n # Update marker position\n self.update_mesh(self.all_meshes)", "def visualise(self):\n\n # Initialise figure\n params = {\"figure.figsize\": (5, 5)}\n pylab.rcParams.update(params)\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111)\n\n # Add particles if selected\n print(self.crds)\n cmap=cm.get_cmap('coolwarm')\n norm=Normalize(0,20)\n print(np.max(self.radii))\n print(np.max(self.weights))\n if self.vis_particles:\n if self.vis_vortype==-2:\n radii=self.weights\n if self.param>10:\n self.param=(self.param-10)/2+10\n colour=cmap(norm(self.param))\n else:\n radii=self.radii\n radii=self.weights\n colour='orange'\n colour=(0.8,0.687,0.287,1)\n colour='gold'\n patches = []\n patches_pnts = []\n patches_absent = []\n for i,c in enumerate(self.crds):\n patches.append(Circle(c,radius=radii[i]))\n if radii[i]>0:\n patches_pnts.append(Circle(c,radius=0.1))\n else:\n patches_absent.append(Circle(c,radius=0.1))\n self.ax.add_collection(PatchCollection(patches, facecolor=colour, edgecolor='k', alpha=0.5))\n 
self.ax.add_collection(PatchCollection(patches_pnts, facecolor='k', alpha=1,zorder=1))\n if self.vis_vortype==2:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=0.5,zorder=1))\n else:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=1,zorder=1))\n\n # Add voronoi\n if self.vis_vortype!=0:\n patches = []\n colours = []\n if self.vis_cellcolour==1:\n cell_colours = self.init_cell_colours()\n else:\n cell_colours = [(0,0,0,0)]*100\n for i in range(self.m):\n patches.append(Polygon(self.rings[i],True))\n colours.append(cell_colours[self.rings[i][:,0].size])\n self.ax.add_collection(PatchCollection(patches, facecolor=colours, edgecolor='k', linewidth=1, zorder=0))\n\n # Sandbox\n # print(np.max(self.radii))\n # cmap=cm.get_cmap('coolwarm')\n # norm=Normalize(0,np.max(20))\n sandbox=False\n if sandbox:\n # z=16\n # w=np.zeros_like(self.radii)\n # mask=2*self.radii>z\n # w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n # patches = []\n # for i,c in enumerate(self.crds):\n # patches.append(Circle(c,radius=w[i]))\n # self.ax.add_collection(PatchCollection(patches, facecolor=cmap(norm(z)), edgecolor='k'))\n with open('./phi.dat','w') as f:\n for z in np.arange(0,np.max(self.radii)*2+0.5,0.01):\n w=np.zeros_like(self.radii)\n mask=2*self.radii>z\n w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n phi=np.sum(np.pi*w**2)/52359.9\n # phi=np.sum(np.pi*w**2)/1309\n f.write('{:.6f} {:.6f}\\n'.format(z,phi))\n\n\n\n # Set axes\n buffer = 1.6\n lim = buffer*np.max(np.abs(self.crds))\n self.ax.set_xlim((-lim,lim))\n self.ax.set_ylim((-lim,lim))\n self.ax.set_axis_off()\n\n # Show figure\n if self.vis_save:\n plt.savefig('{}_{}_{}.png'.format(self.prefix,self.frame,self.vis_vortype),dpi=400)\n plt.show()", "def P_AI_Rocky(in_dict):\n # START\n fs = 16\n plt.rc('font', size=fs)\n fig = plt.figure(figsize=(14,12))\n ds = nc.Dataset(in_dict['fn'])\n\n # PLOT CODE\n aa = [-122.8, -122.54, 47.92, 48.22]\n import cmocean\n cmap = cmocean.cm.balance\n # cmap = 'RdYlBu_r'\n\n from warnings import filterwarnings\n filterwarnings('ignore') # skip some warning messages\n \n # plot Code\n \n # calculate divergence and vorticity\n uu = ds['u'][0, -1, :, :]\n vv = ds['v'][0, -1, :, :]\n u = zfun.fillit(uu)\n v = zfun.fillit(vv)\n u[np.isnan(u)] = 0\n v[np.isnan(v)] = 0\n \n G = zrfun.get_basic_info(in_dict['fn'], only_G=True)\n \n dive = ((np.diff(u, axis=1)/G['DX'][:, 1:-1])[1:-1, :]\n + (np.diff(v, axis = 0)/G['DY'][1:-1, :])[:, 1:-1])\n #dive[G['mask_rho'][1:-1,1:-1]==False] = np.nan\n \n vort = np.diff(v, axis=1)/G['DX'][1:,1:] - np.diff(u, axis=0)/G['DY'][1:,1:]\n #vort[G['mask_rho'][1:,1:]==False] = np.nan\n \n scl = 2e-3\n \n # panel 1\n ax = fig.add_subplot(121)\n # cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], dive/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_rho'][1:-1,1:-1], G['lat_rho'][1:-1,1:-1], dive/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Divergence (%0.1e $s^{-1}$)' % (scl))\n #pfun.add_bathy_contours(ax, ds, txt=True)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_title(tstr)\n pfun.add_info(ax, in_dict['fn'])\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([48, 48.1, 48.2])\n #\n # panel 2\n ax = fig.add_subplot(122)\n # cs = plt.pcolormesh(G['lon_rho'], G['lat_rho'], vort/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], vort/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n 
tstr = (r'Surface Vorticity (%0.1e $s^{-1}$)' % (scl))\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([])\n #fig.colorbar(cs)\n \n # Inset colorbar\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n cbaxes = inset_axes(ax, width=\"4%\", height=\"40%\", loc='lower left')\n fig.colorbar(cs, cax=cbaxes, orientation='vertical')\n \n #pfun.add_bathy_contours(ax, ds)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_title(tstr) \n \n #fig.tight_layout()\n # FINISH\n ds.close()\n if len(in_dict['fn_out']) > 0:\n plt.savefig(in_dict['fn_out'])\n plt.close()\n else:\n plt.show()\n plt.rcdefaults()", "def get_displaced_soln(cell_cent, u_disp, horizon, dim, data_dir=None, plot_=False, save_fig=False, zoom=40):\n disp_cent = cell_cent + u_disp\n if plot_ or save_fig:\n dpi = 2\n legend_size = {'size': str(6*dpi)}\n fig = plt.figure()\n if dim == 2:\n ax = fig.add_subplot(111)\n x, y = cell_cent.T\n #plt.scatter(x,y, s=300, color='r', marker='o', alpha=0.1, label='original config')\n x,y = (cell_cent + zoom*u_disp).T \n plt.scatter(x,y, s=150, color='b', marker='o', alpha=0.6, label=r'$\\delta$ = '+ format(horizon, '4.5g'))\n # plt.legend(prop=legend_size)\n #plt.xlim(x_min - fact*x_min, x_max + fact*x_max)\n #plt.ylim(y_min - fact*y_min, y_max + fact*y_max)\n\n if dim == 3:\n #z_min = corners[0][2]; z_max = corners[1][2]\n from mpl_toolkits.mplot3d import Axes3D \n x, y, z = cell_cent.T\n fig = plt.figure() \n ax = fig.add_subplot(111, projection='3d') \n ax.scatter(x,y,z, s=150, color='r', marker='o', alpha=0.1, label='original config')\n x,y,z = (cell_cent + zoom*u_disp)\n\n ax.scatter(x,y,z,s=150, color='g', marker='o', alpha=1.0, label='deformed config')\n ax.axis('off')\n plt.legend()\n\n ax.set_aspect('equal')\n\n if plot_:\n plt.show(block=False)\n\n if save_fig:\n plt.savefig(data_dir)\n plt.close(fig)\n\n return disp_cent", "def plot_entities(ax, cmesh, edim, color='b', size=10, show=False):\n coors = cmesh.get_centroids(edim)\n dim = cmesh.dim\n\n ax = _get_axes(ax, dim)\n\n if dim == 3:\n ax.scatter(coors[:, 0], coors[:, 1], coors[:, 2], s=size, c=color)\n\n else:\n ax.scatter(coors[:, 0], coors[:, 1], s=size, c=color)\n\n if show:\n plt.show()\n\n return ax", "def plot_nodes_over_data_1d_components(fig, X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov, saveplot = False):\n\n idim = X.shape[1]\n odim = Y.shape[1]\n numplots = idim + odim\n \n for i in range(idim):\n # ax = fig.add_subplot(gs[i,0])\n ax = fig.axes[i]\n ax.clear()\n ax.hist(X[:,i], bins=20)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n yran = ylim[1] - ylim[0]\n offset1 = yran * -0.1\n offset2 = yran * -0.25\n # print(\"offsets 1,2 = %f, %f\" % (offset1, offset2))\n ax.plot(X[:,i], np.ones_like(X[:,i]) * offset1, \"ko\", alpha=0.33)\n for j,node in enumerate(e_nodes[:,i]):\n myms = 2 + 30 * np.sqrt(e_nodes_cov[i,i,i])\n # print(\"node\", j, node, myms)\n ax.plot([node], [offset2], \"ro\", alpha=0.33, markersize=10)\n # ax.plot([node], [offset2], \"r.\", alpha=0.33, markersize = myms)\n # x1, x2 = gmm.\n ax.text(node, offset2, \"n%d\" % j, fontsize=6)\n # plt.plot(e_nodes[:,i], np.zeros_like(e_nodes[:,i]), \"ro\", alpha=0.33, markersize=10)\n \n for i in range(idim, numplots):\n # ax = fig.add_subplot(gs[i,0])\n ax = fig.axes[i]\n ax.clear()\n ax.hist(Y[:,i-idim], bins=20)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n yran = ylim[1] - ylim[0]\n offset1 = yran * -0.1\n offset2 = yran * -0.25\n # print(\"offsets 1,2 = %f, %f\" % (offset1, offset2))\n 
ax.plot(Y[:,i-idim], np.ones_like(Y[:,i-idim]) * offset1, \"ko\", alpha=0.33)\n for j,node in enumerate(p_nodes[:,i-idim]):\n myms = 2 + 30 * np.sqrt(p_nodes_cov[i-idim,i-idim,i-idim])\n # print(\"node\", j, node, myms)\n ax.plot([node], [offset2], \"ro\", alpha=0.33, markersize=10)\n # ax.plot([node], [offset2], \"r.\", alpha=0.33, markersize = myms)\n ax.text(node, offset2, \"n%d\" % j, fontsize=6)\n \n # plt.plot(p_nodes[:,i-idim], np.zeros_like(p_nodes[:,i-idim]), \"ro\", alpha=0.33, markersize=10)\n\n plt.draw()\n plt.pause(1e-9)\n \n if saveplot:\n filename = \"plot_nodes_over_data_1d_components_%s.jpg\" % (mdl.__class__.__name__,)\n savefig(fig, filename)\n \n fig.show()\n # plt.show()", "def label_global_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):\n coors = cmesh.get_centroids(edim)\n dim = cmesh.dim\n\n ax = _get_axes(ax, dim)\n\n for ii, cc in enumerate(coors):\n if dim == 3:\n ax.text(cc[0], cc[1], cc[2], ii,\n color=color, fontsize=fontsize)\n\n else:\n ax.text(cc[0], cc[1], ii,\n color=color, fontsize=fontsize)\n\n if show:\n plt.show()\n\n return ax" ]
[ "0.6864378", "0.6407954", "0.6205415", "0.61812276", "0.61798674", "0.60916793", "0.609019", "0.60803694", "0.60406137", "0.59588164", "0.59557277", "0.59142935", "0.58919984", "0.58745694", "0.58541936", "0.5786406", "0.57543874", "0.5720695", "0.5686189", "0.5673838", "0.5662324", "0.5644268", "0.56275815", "0.5613371", "0.5609961", "0.5581986", "0.5564785", "0.5553642", "0.55450815", "0.554236" ]
0.7505498
0
plots the displaced cell centroids after a solution step. Additionally returns the final cell centroid after addition of the displacement field in the original configuration
def get_displaced_soln(cell_cent, u_disp, horizon, dim, data_dir=None, plot_=False, save_fig=False, zoom=40): disp_cent = cell_cent + u_disp if plot_ or save_fig: dpi = 2 legend_size = {'size': str(6*dpi)} fig = plt.figure() if dim == 2: ax = fig.add_subplot(111) x, y = cell_cent.T #plt.scatter(x,y, s=300, color='r', marker='o', alpha=0.1, label='original config') x,y = (cell_cent + zoom*u_disp).T plt.scatter(x,y, s=150, color='b', marker='o', alpha=0.6, label=r'$\delta$ = '+ format(horizon, '4.5g')) # plt.legend(prop=legend_size) #plt.xlim(x_min - fact*x_min, x_max + fact*x_max) #plt.ylim(y_min - fact*y_min, y_max + fact*y_max) if dim == 3: #z_min = corners[0][2]; z_max = corners[1][2] from mpl_toolkits.mplot3d import Axes3D x, y, z = cell_cent.T fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(x,y,z, s=150, color='r', marker='o', alpha=0.1, label='original config') x,y,z = (cell_cent + zoom*u_disp) ax.scatter(x,y,z,s=150, color='g', marker='o', alpha=1.0, label='deformed config') ax.axis('off') plt.legend() ax.set_aspect('equal') if plot_: plt.show(block=False) if save_fig: plt.savefig(data_dir) plt.close(fig) return disp_cent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotcenterrange():\n plist1 = np.arange(0.02,0.1,0.02)\n plist = np.arange(0.1,1,0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n plt.plot(np.hstack((plist1,plist)),infectlist)\n plt.title(\"centerplot\")\n plt.xlabel(\"p\")\n plt.ylabel(\"total number of individuals infected\")\n plt.title(\"Total Number of Individuals Infected vs p\")\n plt.show()", "def plt_gm_clusters(df_all, model):\n\n # color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])\n\n color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])\n\n df = df_all[featureSet_dic[clus_params['feat_list']]].copy()\n\n XX = df.values\n Y_ = model.predict(XX) # predict labels for each model\n\n plt.figure(figsize=(8, 6))\n splot = plt.subplot(1, 1, 1)\n\n for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):\n\n if \"MEAN\" in clus_params['feat_list']:\n v, w = linalg.eigh(cov)\n else:\n\n subset = [0, 5] # mean torque L & R\n v, w = linalg.eigh(cov[np.ix_(subset, subset)])\n mean = np.array([mean[0], mean[5]])\n\n if not np.any(Y_ == i):\n continue\n\n if \"MEAN\" in clus_params['feat_list']:\n plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)\n else:\n plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan2(w[0][1], w[0][0])\n angle = 180. * angle / np.pi # convert to degrees\n v = 2. * np.sqrt(2.) * np.sqrt(v)\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)\n ell.set_clip_box(splot.bbox)\n ell.set_alpha(.5)\n splot.add_artist(ell)\n\n plt.xticks(())\n plt.yticks(())\n\n plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))\n plt.subplots_adjust(hspace=.35, bottom=.02)\n plt.show()", "def plot_cell_grid_partitioning(\n output, cellsize_lon=5.0, cellsize_lat=5.0, figsize=(12, 6)\n):\n mp.rcParams[\"font.size\"] = 10\n mp.rcParams[\"text.usetex\"] = True\n plt.figure(figsize=figsize, dpi=300)\n ax = plt.axes([0, 0, 1, 1])\n\n map = Basemap(\n projection=\"cyl\",\n llcrnrlat=-90,\n urcrnrlat=90,\n llcrnrlon=-180,\n urcrnrlon=180,\n ax=ax,\n )\n map.drawparallels(\n np.arange(-90, 90, cellsize_lat), labels=[1, 0, 0, 0], linewidth=0.5\n )\n map.drawmeridians(\n np.arange(-180, 180, cellsize_lon),\n labels=[0, 0, 0, 1],\n rotation=\"vertical\",\n linewidth=0.5,\n )\n # fill continents 'coral' (with zorder=0), color wet areas 'aqua'\n map.drawmapboundary(fill_color=\"aqua\")\n map.fillcontinents(color=\"0.6\", lake_color=\"aqua\")\n label_lats = np.arange(-90 + cellsize_lat / 2.0, 90, cellsize_lat)\n label_lons = np.arange(-180 + cellsize_lon / 2.0, 180, cellsize_lon)\n lons, lats = np.meshgrid(label_lons, label_lats)\n x, y = map(lons.flatten(), lats.flatten())\n cells = grids.lonlat2cell(\n lons.flatten(),\n lats.flatten(),\n cellsize_lon=cellsize_lon,\n cellsize_lat=cellsize_lat,\n )\n for xt, yt, cell in zip(x, y, cells):\n plt.text(\n xt,\n yt,\n \"{:}\".format(cell),\n fontsize=4,\n va=\"center\",\n ha=\"center\",\n weight=\"bold\",\n )\n plt.savefig(output, format=\"png\", dpi=300)\n plt.close()", "def plot_auto_manual_corr(neighbor_df, cell_index, expt_name):\n\n collection_interval = 10\n n_auto_buds = len(neighbor_df.auto_bud_frame[~neighbor_df.auto_bud_frame.isnull()])\n 
n_manual_buds = len(neighbor_df.manual_bud_frame[~neighbor_df.manual_bud_frame.isnull()])\n ylim = (0, 70)\n xlim = ylim\n s=f'Cell {cell_index}\\n# of Auto Buds: {n_auto_buds}\\n# of Manual Buds: {n_manual_buds}'\n xy=(xlim[1]*0.33, xlim[1]*0.8)\n\n hidden_spines = ['top', 'right']\n filename = f'{expt_name}_cell{cell_index}_manual_vs_auto_bud_correlation'\n filetype = 'png'\n\n x = (neighbor_df.auto_bud_frame*collection_interval)/60\n y = (neighbor_df.nearest_manual_frame*collection_interval)/60\n\n xlabel = 'Auto Bud Hr.'\n ylabel = 'Nearest Manual Bud Hr.'\n\n fig = plt.figure(figsize=(2.5, 2.5), tight_layout=True)\n fig.set_dpi(300)\n\n ax = fig.add_subplot()\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n ax.set_xticks(np.linspace(xlim[0], xlim[1], 8))\n ax.set_yticks(np.linspace(xlim[0], xlim[1], 8))\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n\n ax.plot(np.linspace(0, np.max(y), np.max(y)),\n linewidth=1, color='blue', linestyle='--',\n alpha=0.7)\n\n ax.scatter(x, y,\n s=8, color='white', edgecolor='black', linewidths=0.8)\n\n for spine in [ax.spines[name] for name in hidden_spines]:\n spine.set_visible(False)\n\n ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box')\n ax.annotate(s=s,\n xy=xy,\n fontsize=8)\n\n fig.savefig(f'{filename}.{filetype}')", "def _show_cell_excerpts(self, fovsubset, radius, gs, gs_rows, gs_col):\n excerpts = extract_cells_from_tif(\n fovsubset.results_file,\n str(fovsubset.tif_file),\n fovsubset.indices,\n num=100,\n cell_radius=radius,\n data_channel=TiffChannels.ONE,\n number_of_channels=1,\n )\n cell_means = np.nanmean(excerpts, axis=1)\n if fovsubset.colabel_stack is not None:\n colabeled_excerpts = extract_cells_from_tif(\n fovsubset.results_file,\n str(fovsubset.colabel_img),\n fovsubset.indices,\n num=100,\n cell_radius=radius,\n data_channel=TiffChannels.ONE,\n number_of_channels=1,\n )\n colabeled_means = np.nanmean(colabeled_excerpts, axis=1)\n else:\n colabeled_means = [np.array([]) for item in cell_means]\n axis_rows = range(gs_rows.start, gs_rows.stop)[::-1]\n for cell, colabel, ax_row in zip(cell_means, colabeled_means, axis_rows):\n ax_img = plt.subplot(gs[ax_row, gs_col])\n ax_img.imshow(cell, cmap='gray')\n ax_img.imshow(colabel, cmap='cool', alpha=0.4)\n ax_img.axis('off')", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n 
ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def dim_reduction_plot(data, label, block_flag):\n \n PCA_model = TruncatedSVD(n_components=3).fit(data)\n data_PCA = PCA_model.transform(data)\n idxc1 = np.where(label==0)\n idxc2 = np.where(label==1)\n plt.scatter(data_PCA[idxc1,0],data_PCA[idxc1,1],s=80,c='r', marker='^',linewidths = 0, label='healthy')\n plt.scatter(data_PCA[idxc2,0],data_PCA[idxc2,1],s=80,c='y', marker='o',linewidths = 0, label='infected')\n plt.gca().axes.get_xaxis().set_ticks([])\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.title('PCA of the codes')\n plt.legend(scatterpoints=1,loc='best')\n plt.show(block=block_flag)", "def draw_cells(cells, min_y=0.05, max_y=0.95, label_loc=location_ura_bp,\n cen_loc=location_cen5_bp, chr_size=chrv_size_bp,\n label_colors=None, ax=None):\n def chr_coords(s):\n \"\"\"Map from [0, 1] to locaion on plot.\"\"\"\n return max_y - (max_y - min_y)*s\n # rescale linkages to [0, 1]\n cells = [np.array(links) / chr_size for links in cells]\n n_cells = len(cells)\n # and all relevant locations\n locus_frac = label_loc / chr_size\n centromere_frac = cen_loc / chr_size\n if ax is None:\n # fill entire figure with invisible axes to draw in\n fig = plt.figure(figsize=(col_width, col_width/golden_ratio))\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n # center each of N \"cells\" directly between N+1 fenceposts spanning [0, 1]\n n_fences = n_cells + 1\n fence_posts = np.linspace(0, 1, n_fences)\n width_per_cell = np.diff(fence_posts)[0]\n cell_centers = (fence_posts[1:] + fence_posts[:-1]) / 2\n # (1/2) times the spacing between centers of two chromosomes in each \"cell\"\n width_to_chr_center = width_per_cell / 5\n chr_width = 15\n # only works with mixed backends, where 72\"PX\"/in is always true, otherwise\n # you need to do something 
like:\n # transAxes.inverted().transform(dpi_scale_trans.transform([1/72, 1/72])\n pt_to_ax = ax.transAxes.inverted().transform(\n ax.get_figure().dpi_scale_trans.transform([1/72, 1/72])\n )\n for i, x in enumerate(cell_centers):\n for dx in [width_to_chr_center, -width_to_chr_center]:\n cap_radius_ax = chr_width/2 * pt_to_ax[1]\n # draw the chromosomes\n ax.plot(\n [[x + dx, x + dx], [x + dx, x + dx]],\n [[chr_coords(0), chr_coords(centromere_frac) - cap_radius_ax],\n [chr_coords(centromere_frac) + cap_radius_ax, chr_coords(1)]],\n transform=ax.transAxes, linewidth=chr_width,\n solid_capstyle='round', color=[50/255, 50/255, 50/255]\n )\n ax.plot(\n [[x + dx, x + dx], [x + dx, x + dx]],\n [[chr_coords(0), chr_coords(centromere_frac) - cap_radius_ax],\n [chr_coords(centromere_frac) + cap_radius_ax, chr_coords(1)]],\n transform=ax.transAxes, linewidth=chr_width-2,\n solid_capstyle='round', color=[197/255, 151/255, 143/255]\n )\n # draw the centromere black dot\n ax.scatter([x + dx], [chr_coords(centromere_frac)],\n zorder=10, transform=ax.transAxes, s=200, color='k')\n # draw the label, green star\n ax.scatter([x + dx], [chr_coords(locus_frac)],\n zorder=15, transform=ax.transAxes, s=500, color='g',\n marker='*', edgecolors='k')\n for linkage in cells[i]:\n ax.plot([x - width_to_chr_center, x + width_to_chr_center],\n 2*[chr_coords(linkage)],\n color=(0, 0, 1), transform=ax.transAxes,\n linewidth=5, solid_capstyle='round')\n num_linkages = len(cells[i])\n j = np.searchsorted(cells[i], locus_frac)\n closest_links = []\n if j != 0:\n closest_links.append(cells[i][j - 1])\n if j != num_linkages:\n closest_links.append(cells[i][j])\n closest_links = np.array(closest_links)\n if len(closest_links) > 0:\n linewidths = 1.2*np.ones_like(closest_links)\n closestest_link = np.argmin(np.abs(closest_links - locus_frac))\n linewidths[closestest_link] = 3.5\n for k, linkage in enumerate(closest_links):\n ax.plot([x - width_to_chr_center, x - width_to_chr_center,\n x + width_to_chr_center, x + width_to_chr_center],\n [chr_coords(locus_frac), chr_coords(linkage),\n chr_coords(linkage), chr_coords(locus_frac)],\n color=(1, 1, 1), transform=ax.transAxes,\n linewidth=linewidths[k], linestyle='--',\n dash_capstyle='butt', zorder=100)\n if label_colors:\n # add extra height above chromosomes to account for rounded end\n # caps and then some extra\n ax.transAxes\n ax.text(x, max_y, f'Cell {i}\\n', ha='center',\n va='bottom', color=label_colors[i],\n transform=ax.transAxes,\n fontsize=mpl.rcParams['axes.titlesize'])\n return ax", "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def finalize(self):\n\n if self.finalized:\n return\n\n # Instantiate radial cells\n for pin in self.pincells:\n if isinstance(pin, InfinitePinCell) and not pin.finalized:\n pin.finalize()\n \n # Instantiate the axial cells\n for i, (pin, plane) in enumerate(zip(self.pincells, self.axials)):\n\n label = \"{0} axial {1}: {2}\".format(self.name, i, pin.name)\n cell = openmc.Cell(name=label, fill=pin)\n\n if i == 0:\n # Bottom section\n cell.region = -plane\n\n else:\n # Middle section\n cell.region = -plane & +self.axials[i-1]\n\n self.add_cell(cell)\n\n # Top section\n label = \"{0} axial top: {1}\".format(self.name, self.pincells[-1].name)\n cell = openmc.Cell(name=label, fill=self.pincells[-1])\n cell.region = +self.axials[-1]\n \n 
self.add_cell(cell)\n \n self.finalized = True", "def plot_raw_assess(raw_data, figure_output, stat):\n sns.set_style(\"white\")\n raw_max = pd.read_table(raw_data)\n raw_max = raw_max.drop_duplicates()\n\n raw_edit = raw_max.pivot('Motif', 'Cell_lab', stat)\n raw_edit.sort(columns=\"Average\", axis=0, ascending=False, inplace=True)\n cg = sns.clustermap(raw_edit, method='single', metric=\"euclidean\", z_score=None,\n annot=True, row_cluster=False, col_cluster=True, linewidths=.15)\n # to rotate the y-axis labels correctly\n test = plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\n test = plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\n\n f = plt.gcf()\n f.savefig(figure_output, bbox_inches='tight')", "def run(self):\n # fill the x_values,y_values,z_values dictionaries\n if not self.__fillCoordinatesFromSource():\n self.raiseAWarning('Nothing to Plot Yet. Returning.')\n return\n\n self.counter += 1\n if self.counter > 1:\n self.actcm = None\n clusterDict = deepcopy(self.outStreamTypes)\n\n # start plotting.... loop over the plots that need to be included in this figure\n for pltIndex in range(len(self.outStreamTypes)):\n plotSettings = self.options['plotSettings']['plot'][pltIndex]\n if 'gridLocation' in plotSettings:\n x = None\n y = None\n if 'x' in plotSettings['gridLocation']:\n x = list(map(int, plotSettings['gridLocation']['x'].strip().split(' ')))\n else:\n x = None\n if 'y' in plotSettings['gridLocation'].keys():\n y = list(map(int, plotSettings['gridLocation']['y'].strip().split(' ')))\n else:\n y = None\n if pltIndex == 0:\n self.ax.remove() # remove axis so that there is not an extra axis on plot with subplots\n if (len(x) == 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]], projection='3d')\n elif (len(x) == 1 and len(y) != 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]], projection='3d')\n elif (len(x) != 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]], projection='3d')\n else:\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]], projection='3d')\n\n if 'gridSpace' in self.options['plotSettings']:\n self.ax.locator_params(axis='y', nbins=4)\n self.ax.locator_params(axis='x', nbins=2)\n if 'range' in plotSettings:\n axes_range = plotSettings['range']\n if 'ymin' in axes_range:\n self.ax.set_ylim(bottom=ast.literal_eval(axes_range['ymin']))\n if 'ymax' in axes_range:\n self.ax.set_ylim(top=ast.literal_eval(axes_range['ymax']))\n if 'xmin' in axes_range:\n self.ax.set_xlim(left=ast.literal_eval(axes_range['xmin']))\n if 'xmax' in axes_range:\n self.ax.set_xlim(right=ast.literal_eval(axes_range['xmax']))\n if self.dim == 3:\n if 'zmin' in axes_range.options['plotSettings']['plot'][pltIndex]:\n if 'zmax' not in axes_range.options['plotSettings']:\n self.raiseAWarning('zmin inputted but not zmax. zmin ignored! ')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(self.options['plotSettings']['zmax']))\n if 'zmax' in axes_range:\n if 'zmin' not in axes_range:\n self.raiseAWarning('zmax inputted but not zmin. zmax ignored! 
')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(axes_range['zmax']))\n if 'xlabel' not in plotSettings:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(plotSettings['xlabel'])\n if 'ylabel' not in plotSettings:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(plotSettings['ylabel'])\n if 'zlabel' in plotSettings:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(plotSettings['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n else:\n if 'xlabel' not in self.options['plotSettings']:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(self.options['plotSettings']['xlabel'])\n if 'ylabel' not in self.options['plotSettings']:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(self.options['plotSettings']['ylabel'])\n if 'zlabel' in self.options['plotSettings']:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(self.options['plotSettings']['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n\n if 'legend' in self.options['plotSettings']:\n if 'label' not in plotSettings.get('attributes', {}):\n if 'attributes' not in plotSettings:\n plotSettings['attributes'] = {}\n plotSettings['attributes']['label'] = self.outStreamTypes[pltIndex] + ' ' + str(pltIndex)\n #################\n # SCATTER PLOT #\n #################\n self.raiseADebug(f'creating plot {self.name}')\n if self.outStreamTypes[pltIndex] == 'scatter':\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n if self.colorMapCoordinates[pltIndex] is not None:\n # Find the max and min colormap values\n firstKey = utils.first(self.xValues[pltIndex].keys())\n vmin = np.amin(self.colorMapValues[pltIndex][firstKey])\n vmax = np.amax(self.colorMapValues[pltIndex][firstKey])\n for key in self.xValues[pltIndex]:\n vmin = min(vmin,np.amin(self.colorMapValues[pltIndex][key]))\n vmax = max(vmax,np.amax(self.colorMapValues[pltIndex][key]))\n plotSettings['norm'] = matplotlib.colors.Normalize(vmin,vmax)\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n scatterPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['norm'] = plotSettings['norm']\n scatterPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][xIndex]\n scatterPlotOptions['cmap'] = matplotlib.cm.get_cmap(\"winter\")\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if 
first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n try:\n self.actcm.draw_all()\n # this is not good, what exception will be thrown?\n except:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m, ax=self.ax)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][zIndex]\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], 
self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n #################\n # LINE PLOT #\n #################\n elif self.outStreamTypes[pltIndex] == 'line':\n minV = 0\n maxV = 0\n # If the user does not define an appropriate cmap, then use matplotlib's default.\n if 'cmap' not in plotSettings or plotSettings['cmap'] not in matplotlib.cm.datad:\n plotSettings['cmap'] = None\n if bool(self.colorMapValues):\n for key in self.xValues[pltIndex]:\n minV = min(minV,self.colorMapValues[pltIndex][key][-1][-1])\n maxV = max(maxV,self.colorMapValues[pltIndex][key][-1][-1])\n cmap = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(minV, maxV, True), plotSettings['cmap'])\n cmap.set_array([minV,maxV])\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n plotSettings['interpPointsX'] = str(max(200, len(self.xValues[pltIndex][key][xIndex])))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n if self.yValues[pltIndex][key][yIndex].size < 2:\n return\n xi, yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings, returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(xi, yi, c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(xi, yi, **plotSettings.get('attributes', {}))\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n **plotSettings.get('attributes', {}))\n ##################\n # HISTOGRAM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'histogram':\n if 'bins' not in plotSettings:\n if self.dim == 2:\n plotSettings['bins'] = '10'\n else:\n plotSettings['bins'] = '4'\n if 'normed' not in plotSettings:\n plotSettings['normed'] = 'False'\n if 'weights' not in plotSettings:\n plotSettings['weights'] = 'None'\n if 'cumulative' not in plotSettings:\n plotSettings['cumulative'] = 'False'\n if 'histtype' not in plotSettings:\n plotSettings['histtype'] = 'bar'\n if 'align' not in plotSettings:\n plotSettings['align'] = 'mid'\n if 'orientation' not in plotSettings:\n plotSettings['orientation'] = 'vertical'\n if 'rwidth' not in plotSettings:\n plotSettings['rwidth'] = 'None'\n if 'log' not in plotSettings:\n plotSettings['log'] = 'None'\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'stacked' not in plotSettings:\n plotSettings['stacked'] = 'None'\n if 
self.sourceData[0].type.strip() == 'HistorySet':\n #####################################################################################################################################\n # @MANDD: This 'if' condition has been added in order to allow the user the correctly create an histogram out of an historySet #\n # If the histogram is created out of the input variables, then the plot has an identical meaning of the one generated by a pointSet #\n # However, if the histogram is created out of the output variables, then the plot consider only the last value of the array #\n #####################################################################################################################################\n data = {}\n data['x'] = np.empty(0)\n data['y'] = np.empty(0)\n for index in range(len(self.outStreamTypes)):\n for key in self.xValues[index]:\n data['x'] = np.append(data['x'], self.xValues[index][key][0][-1])\n if self.dim == 3:\n data['y'] = np.append(data['y'], self.yValues[index][key][0][-1])\n del self.xValues[index]\n self.xValues = {}\n self.xValues[index] = {}\n self.xValues[index][0] = []\n self.xValues[index][0].append(deepcopy(data['x']))\n if self.dim == 3:\n del self.yValues[index]\n self.yValues = {}\n self.yValues[index] ={ }\n self.yValues[index][0] = []\n self.yValues[index][0].append(deepcopy(data['y']))\n\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n try:\n colorss = ast.literal_eval(plotSettings['color'])\n # unknown what specific error is anticipated here, but I don't like a bare except...\n # ast.literal_eval can raise the exceptions listed below (see library docs):\n except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):\n colorss = plotSettings['color']\n if self.dim == 2:\n self.ax.hist(self.xValues[pltIndex][key][xIndex],\n bins=ast.literal_eval(plotSettings['bins']),\n density=ast.literal_eval(plotSettings['normed']),\n weights=ast.literal_eval(plotSettings['weights']),\n cumulative=ast.literal_eval(plotSettings['cumulative']),\n histtype=plotSettings['histtype'],\n align=plotSettings['align'],\n orientation=plotSettings['orientation'],\n rwidth=ast.literal_eval(plotSettings['rwidth']),\n log=ast.literal_eval(plotSettings['log']),\n color=colorss,\n stacked=ast.literal_eval(plotSettings['stacked']),\n **plotSettings.get('attributes', {}))\n else:\n for yIndex in range(len(self.yValues[pltIndex][key])):\n hist, xedges, yedges = np.histogram2d(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n bins=ast.literal_eval(plotSettings['bins']))\n elements = (len(xedges) - 1) * (len(yedges) - 1)\n if 'x_offset' in plotSettings:\n xoffset = float(plotSettings['x_offset'])\n else:\n xoffset = 0.25\n if 'y_offset' in plotSettings:\n yoffset = float(plotSettings['y_offset'])\n else:\n yoffset = 0.25\n if 'dx' in plotSettings:\n dxs = float(plotSettings['dx'])\n else:\n dxs = (self.xValues[pltIndex][key][xIndex].max() - self.xValues[pltIndex][key][xIndex].min()) / float(plotSettings['bins'])\n if 'dy' in plotSettings:\n dys = float(plotSettings['dy'])\n else:\n dys = (self.yValues[pltIndex][key][yIndex].max() - self.yValues[pltIndex][key][yIndex].min()) / float(plotSettings['bins'])\n xpos, ypos = np.meshgrid(xedges[:-1] + xoffset, yedges[:-1] + yoffset)\n self.actPlot = self.ax.bar3d(xpos.flatten(),\n ypos.flatten(),\n np.zeros(elements),\n dxs*np.ones_like(elements),\n dys*np.ones_like(elements),\n hist.flatten(),\n color=colorss,\n zsort='average',\n 
**plotSettings.get('attributes', {}))\n ##################\n # STEM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'stem':\n if 'linefmt' not in plotSettings:\n plotSettings['linefmt'] = 'b-'\n if 'markerfmt' not in plotSettings:\n plotSettings['markerfmt'] = 'bo'\n if 'basefmt' not in plotSettings:\n plotSettings['basefmt'] = 'r-'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n self.actPlot = self.ax.stem(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n linefmt=plotSettings['linefmt'],\n markerfmt=plotSettings['markerfmt'],\n basefmt = plotSettings['linefmt'],\n use_line_collection=True,\n **plotSettings.get('attributes', {}))\n else:\n # it is a basic stem plot constructed using a standard line plot. For now we do not use the previous defined keywords...\n for zIndex in range(len(self.zValues[pltIndex][key])):\n for xx, yy, zz in zip(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex]):\n self.ax.plot([xx, xx], [yy, yy], [0, zz], '-')\n ##################\n # STEP PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'step':\n if self.dim == 2:\n if 'where' not in plotSettings:\n plotSettings['where'] = 'mid'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][xIndex].size < 2:\n xi = self.xValues[pltIndex][key][xIndex]\n else:\n xi = np.linspace(self.xValues[pltIndex][key][xIndex].min(), self.xValues[pltIndex][key][xIndex].max(), ast.literal_eval(plotSettings['interpPointsX']))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][yIndex].size <= 3:\n return\n yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings)\n self.actPlot = self.ax.step(xi, yi, where=plotSettings['where'], **plotSettings.get('attributes', {}))\n else:\n self.raiseAWarning('step Plot not available in 3D')\n return\n ########################\n # PSEUDOCOLOR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'pseudocolor':\n if self.dim == 2:\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if not self.colorMapCoordinates:\n self.raiseAMessage('pseudocolor Plot needs coordinates for color map... 
Returning without plotting')\n return\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][zIndex].size <= 3:\n return\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n else:\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(ma.masked_where(np.isnan(Ci), Ci))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n actcm = self.fig.colorbar(m)\n actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.raiseAWarning('pseudocolor Plot is considered a 2D plot, not a 3D!')\n return\n ########################\n # SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'surface':\n if self.dim == 2:\n self.raiseAWarning('surface Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n if 'antialiased' not in plotSettings:\n plotSettings['antialiased'] = 'False'\n if 'linewidth' not in plotSettings:\n plotSettings['linewidth'] = '0'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n facecolors=matplotlib.cm.get_cmap(name=plotSettings['cmap'])(ma.masked_where(np.isnan(Ci), Ci)),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n ########################\n # TRI-SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'tri-surface':\n if self.dim == 2:\n self.raiseAWarning('TRI-surface Plot is NOT available for 2D plots, it is 3D!')\n return\n else:\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'shade' not in plotSettings:\n plotSettings['shade'] = 'False'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, 
so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n metric = (self.xValues[pltIndex][key][xIndex] ** 2 + self.yValues[pltIndex][key][yIndex] ** 2) ** 0.5\n metricIndeces = np.argsort(metric)\n xs = np.zeros(self.xValues[pltIndex][key][xIndex].shape)\n ys = np.zeros(self.yValues[pltIndex][key][yIndex].shape)\n zs = np.zeros(self.zValues[pltIndex][key][zIndex].shape)\n for sindex in range(len(metricIndeces)):\n xs[sindex] = self.xValues[pltIndex][key][xIndex][metricIndeces[sindex]]\n ys[sindex] = self.yValues[pltIndex][key][yIndex][metricIndeces[sindex]]\n zs[sindex] = self.zValues[pltIndex][key][zIndex][metricIndeces[sindex]]\n surfacePlotOptions = {'color': plotSettings['color'],\n 'shade': ast.literal_eval(plotSettings['shade'])}\n surfacePlotOptions.update(plotSettings.get('attributes', {}))\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n surfacePlotOptions['cmap'] = matplotlib.cm.get_cmap(name = plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] != 'None':\n surfacePlotOptions[\"cmap\"] = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n ########################\n # WIREFRAME PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'wireframe':\n if self.dim == 2:\n self.raiseAWarning('wireframe Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning(f'Currently, ax.plot_wireframe() in MatPlotLib version: {matplotlib.__version__} does not support a colormap! Wireframe plotted on a surface plot...')\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n alpha=0.4,\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n ########################\n # CONTOUR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'contour' or self.outStreamTypes[pltIndex] == 'filledContour':\n if self.dim == 2:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n for key in self.xValues[pltIndex]:\n if not self.colorMapCoordinates:\n self.raiseAWarning(self.outStreamTypes[pltIndex] + ' Plot needs coordinates for color map... 
Returning without plotting')\n return\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink=0.8, extend='both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n self.raiseAWarning('contour/filledContour is a 2-D plot, where x,y are the surface coordinates and colorMap vector is the array to visualize!\\n contour3D/filledContour3D are 3-D! ')\n return\n # These should be combined: ^^^ & vvv\n elif self.outStreamTypes[pltIndex] == 'contour3D' or self.outStreamTypes[pltIndex] == 'filledContour3D':\n if self.dim == 2:\n self.raiseAWarning('contour3D/filledContour3D Plot is NOT available for 2D plots, IT IS A 2D! Check \"contour/filledContour\"!')\n return\n else:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n if 'extend3D' in plotSettings:\n ext3D = bool(plotSettings['extend3D'])\n else:\n ext3D = False\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour3D':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n extend3d=ext3D,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n extend3d=ext3D,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink = 0.8, extend = 'both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n ########################\n # DataMining PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'dataMining':\n colors = cycle(['#88CCEE', '#DDCC77', '#AA4499', '#117733', '#332288', '#999933', '#44AA99', '#882255', '#CC6677', '#CD6677', '#DC6877', '#886677', '#AA6677', '#556677', '#CD7865'])\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n clusterDict[pltIndex] = {}\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n dataMiningPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning('ColorMap values supplied, however DataMining plots do not use colorMap from input.')\n if plotSettings['cmap'] == 'None':\n self.raiseAWarning('ColorSet supplied, however DataMining plots do not use color set from input.')\n if 'cluster' == plotSettings['SKLtype']:\n # TODO: include the cluster Centers to the plot\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = 
np.amax(self.clusterValues[pltIndex][1][0]) + 1\n dataMiningPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n clusterDict[pltIndex]['clusterValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['clusterValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if self.dim == 2:\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n clusterDict[pltIndex]['clusterValues'][:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n elif 'bicluster' == plotSettings['SKLtype']:\n self.raiseAnError(IOError, 'SKLType Bi-Cluster Plots are not implemented yet!..')\n elif 'mixture' == plotSettings['SKLtype']:\n if 'noMixtures' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noMixtures'] = int(plotSettings.get('attributes', {})['noMixtures'])\n plotSettings.get('attributes', {}).pop('noMixtures')\n else:\n clusterDict[pltIndex]['noMixtures'] = np.amax(self.mixtureValues[pltIndex][1][0]) + 1\n if self.dim == 3:\n self.raiseAnError(IOError, 'SKLType Mixture Plots are only available in 2-Dimensions')\n 
else:\n clusterDict[pltIndex]['mixtureValues'] = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n clusterDict[pltIndex]['mixtureValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['mixtureValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'mixtureCovars' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureCovars', (pltIndex, 0))\n # mixtureCovars = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureCovars')\n # else:\n # mixtureCovars = None\n if 'mixtureMeans' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureMeans', (pltIndex, 0))\n # mixtureMeans = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureMeans')\n # else:\n # mixtureMeans = None\n # mixtureCovars.reshape(3, 4)\n # mixtureMeans.reshape(3, 4)\n # for i, (mean, covar, col) in enumerate(zip(mixtureMeans, mixtureCovars, colors)):\n for i, col in zip(range(clusterDict[pltIndex]['noMixtures']), colors):\n if not np.any(self.mixtureValues[pltIndex][1][0] == i):\n continue\n myMembers = self.mixtureValues[pltIndex][1][0] == i\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['mixtureValues'][myMembers, 0],\n clusterDict[pltIndex]['mixtureValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n elif 'manifold' == plotSettings['SKLtype']:\n if self.dim == 2:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n manifoldValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n manifoldValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'clusterLabels' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('clusterLabels', (pltIndex, 0))\n clusterDict[pltIndex]['clusterLabels'] = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('clusterLabels')\n else:\n clusterDict[pltIndex]['clusterLabels'] = None\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n manifoldValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n if self.dim == 2:\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n 
manifoldValues[:, 2],\n **dataMiningPlotOptions)\n elif 'decomposition' == plotSettings['SKLtype']:\n if self.dim == 2:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 3))\n decompositionValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n decompositionValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n decompositionValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n # no ClusterLabels\n if self.dim == 2:\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n decompositionValues[:, 2],\n **dataMiningPlotOptions)\n else:\n # Let's try to \"write\" the code for the plot on the fly\n self.raiseAWarning('Trying to create a non-predefined plot of type ' + self.outStreamTypes[pltIndex] + '. If this fails, please refer to the and/or the related matplotlib method specification.')\n kwargs = {}\n for kk in plotSettings:\n if kk != 'attributes' and kk != self.outStreamTypes[pltIndex]:\n try:\n kwargs[kk] = ast.literal_eval(plotSettings[kk])\n except ValueError:\n kwargs[kk] = plotSettings[kk]\n try:\n if self.dim == 2:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n else:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, '<' + str(ae) + '> -> in execution custom plot \"' + self.outStreamTypes[pltIndex] + '\" in Plot ' + self.name + '.\\nSTREAM MANAGER: ERROR -> command has been called in the following way: ' + 'ax.' 
+ self.outStreamTypes[pltIndex])\n\n if 'legend' in self.options['plotSettings']:\n self.fig.legend(**self.options['plotSettings']['legend'])\n\n # SHOW THE PICTURE\n self.__executeActions()\n self.fig.canvas.draw_idle()\n\n if 'screen' in self.destinations and display:\n def handle_close(event):\n \"\"\"\n This method is aimed to handle the closing of figures (overall when in interactive mode)\n @ In, event, instance, the event to close\n @ Out, None\n \"\"\"\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')\n self.fig.canvas.mpl_connect('close_event', handle_close)\n # self.plt.pause(1e-6)\n # The following code is extracted from pyplot.pause without actually\n # needing to force the code to sleep, according to MPL's documentation,\n # this feature is experimental, hopefully by not calling the pause\n # function, we can obtain consistent results.\n # We are skipping a few of the sanity checks done in that function,\n # since we are sure we have an interactive backend and access to the\n # correct type of canvas and figure.\n self.fig.canvas.draw()\n # If your graphs are unresponsive to user input, you may want to consider\n # adjusting this timeout, to allow more time for the input to be handled.\n self.fig.canvas.start_event_loop(1e-3)\n\n # self.fig.canvas.flush_events()\n\n for fileType in self.destinations:\n if fileType == 'screen':\n continue\n\n if not self.overwrite:\n prefix = str(self.counter) + '-'\n else:\n prefix = ''\n\n if len(self.filename) > 0:\n name = self.filename\n else:\n name = prefix + self.name + '_' + str(self.outStreamTypes).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"-\").replace(\" \", \"\")\n\n if self.subDirectory is not None:\n name = os.path.join(self.subDirectory,name)\n\n self.fig.savefig(name + '.' 
+ fileType, format=fileType)\n\n if 'screen' not in self.destinations:\n plt.close(fig=self.fig)\n\n gc.collect()", "def pretty_plot_confusion_matrix(df_cm, annot=True, cmap=\"Oranges\", fmt='.2f', fz=14,\n lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='y'):\n if(pred_val_axis in ('col', 'x')):\n xlbl = 'Predicted'\n ylbl = 'Actual'\n else:\n xlbl = 'Actual'\n ylbl = 'Predicted'\n df_cm = df_cm.T\n\n # create \"Total\" column\n insert_totals(df_cm)\n\n #this is for print allways in the same window\n fig, ax1 = get_new_fig('Conf matrix default', figsize)\n\n #thanks for seaborn\n ax = sn.heatmap(df_cm, annot=annot, annot_kws={\"size\": fz}, linewidths=lw, ax=ax1,\n cbar=cbar, cmap=cmap, linecolor='w', fmt=fmt)\n\n #set ticklabels rotation\n ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, fontsize = 10)\n ax.set_yticklabels(ax.get_yticklabels(), rotation = 25, fontsize = 10)\n\n # Turn off all the ticks\n for t in ax.xaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n for t in ax.yaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n\n #face colors list\n quadmesh = ax.findobj(QuadMesh)[0]\n facecolors = quadmesh.get_facecolors()\n\n #iter in text elements\n array_df = np.array( df_cm.to_records(index=False).tolist() )\n text_add = []; text_del = [];\n posi = -1 #from left to right, bottom to top.\n for t in ax.collections[0].axes.texts: #ax.texts:\n pos = np.array( t.get_position()) - [0.5,0.5]\n lin = int(pos[1]); col = int(pos[0]);\n posi += 1\n\n #set text\n txt_res = configcell_text_and_colors(array_df, lin, col, t, facecolors, posi, fz, fmt, show_null_values)\n\n text_add.extend(txt_res[0])\n text_del.extend(txt_res[1])\n\n #remove the old ones\n for item in text_del:\n item.remove()\n #append the new ones\n for item in text_add:\n ax.text(item['x'], item['y'], item['text'], **item['kw'])\n\n #titles and legends\n ax.set_title('Confusion matrix')\n ax.set_xlabel(xlbl)\n ax.set_ylabel(ylbl)\n plt.tight_layout() #set layout slim\n plt.show()", "def plot_centroid_movement(name, mu_arr):\n plt.clf()\n\n for vals in mu_arr:\n x, y = list(range(len(vals[\"mu\"]))), vals[\"mu\"]\n plt.plot(x, y, color=vals[\"color\"], label=vals[\"label\"])\n\n plt.title(\"Centroid Movement Convergence\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Max Change\")\n plt.legend(loc=\"upper right\")\n plt.savefig(f\"{name}/centroid_convergence.jpeg\")", "def gen_centers(self):\n\n \"\"\"x_track = self.cs.discrete_rollout()\n t = np.arange(len(x_track))*self.dt\n # choose the points in time we'd like centers to be at\n c_des = np.linspace(0, self.cs.run_time, self.n_bfs)\n self.c = np.zeros(len(c_des))\n for ii, point in enumerate(c_des):\n diff = abs(t - point)\n self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]\"\"\"\n\n # desired activations throughout time\n des_c = jnp.linspace(0, self.cs.run_time, self.n_bfs)\n\n self.c = np.ones(len(des_c))\n for n in range(len(des_c)):\n # finding x for desired times t\n self.c[n] = jnp.exp(-self.cs.ax * des_c[n])\n self.c = jnp.array(self.c)", "def pretty_plot_confusion_matrix(df_cm, annot=True, cmap=\"Oranges\", fmt='.2f', fz=11,\n lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='y'):\n if(pred_val_axis in ('col', 'x')):\n xlbl = 'Predicted'\n ylbl = 'Actual'\n else:\n xlbl = 'Actual'\n ylbl = 'Predicted'\n df_cm = df_cm.T\n\n # create \"Total\" column\n insert_totals(df_cm)\n\n #this is for print allways in the same window\n fig, ax1 = get_new_fig('Conf matrix default', figsize)\n\n 
#thanks for seaborn\n ax = sn.heatmap(df_cm, annot=annot, annot_kws={\"size\": fz}, linewidths=lw, ax=ax1,\n cbar=cbar, cmap=cmap, linecolor='w', fmt=fmt)\n\n #set ticklabels rotation\n ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, fontsize = 10)\n ax.set_yticklabels(ax.get_yticklabels(), rotation = 25, fontsize = 10)\n\n # Turn off all the ticks\n for t in ax.xaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n for t in ax.yaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n\n #face colors list\n quadmesh = ax.findobj(QuadMesh)[0]\n facecolors = quadmesh.get_facecolors()\n\n #iter in text elements\n array_df = np.array( df_cm.to_records(index=False).tolist() )\n text_add = []; text_del = [];\n posi = -1 #from left to right, bottom to top.\n for t in ax.collections[0].axes.texts: #ax.texts:\n pos = np.array( t.get_position()) - [0.5,0.5]\n lin = int(pos[1]); col = int(pos[0]);\n posi += 1\n #print ('>>> pos: %s, posi: %s, val: %s, txt: %s' %(pos, posi, array_df[lin][col], t.get_text()))\n\n #set text\n txt_res = configcell_text_and_colors(array_df, lin, col, t, facecolors, posi, fz, fmt, show_null_values)\n\n text_add.extend(txt_res[0])\n text_del.extend(txt_res[1])\n\n #remove the old ones\n for item in text_del:\n item.remove()\n #append the new ones\n for item in text_add:\n ax.text(item['x'], item['y'], item['text'], **item['kw'])\n\n #titles and legends\n ax.set_title('Confusion matrix')\n ax.set_xlabel(xlbl)\n ax.set_ylabel(ylbl)\n plt.tight_layout() #set layout slim\n plt.show()", "def __init__(self, centroid):\n self.label = ''\n self.centroid = centroid\n self.points = []\n self.radius = 0.0 # used to draw plot\n self.neighbour = {}\n self.inter_cost = 0\n self.intra_cost = 0\n self.dm_cost = 0", "def DisplayCentroids(Centroids,outputs,ax,N=1,sections=1):\r\n\r\n SliceValues = np.linspace(float(min(Centroids[:,0])),float(max(Centroids[:,0])),sections+1) # Create boundaries in x for each slice.\r\n idx1 = np.asarray((Centroids[:,0]>=SliceValues[N-1]))*np.asarray((Centroids[:,0]<=SliceValues[N]))\r\n\r\n idx1 = idx1.flatten() \r\n\r\n CentroidSlice = Centroids[idx1,:]\r\n \r\n outputSlice = outputs[idx1,:]\r\n\r\n # Plot Data-------------------------------------------------------------------------------------------------------\r\n ax.scatter(CentroidSlice[:,0],CentroidSlice[:,1],CentroidSlice[:,2],c = [float(N) for N in outputSlice],cmap = 'bwr')\r\n ax.set_zlabel('z')\r\n ax.set_ylabel('y')\r\n ax.set_xlabel('x')", "def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))", "def concent_graph(self):\n r_big = self['M_RSMALL']\n r_small = self['M_RBIG']\n C = self['M_C']\n \n xcenter = self['X_IMAGE'] ; ycenter = self['Y_IMAGE']\n xcenter = xcenter - self['MXMIN_IMAGE']\n ycenter = ycenter - self['MYMIN_IMAGE']\n center = (xcenter,ycenter)\n \n ellip = self['ELLIPTICITY'] \n q = 1. 
- ellip\n pa = self['THETA_IMAGE'] # Astronomical position angle.\n\n stamp = self['STAMP'].copy()\n mask = self['MASKOTHER'].copy()\n sky = self['BACKGROUND']\n Img = stamp - sky\n Img[num.where(mask != 0)] = 0.\n \n id = self._getGraphId()\n root = 'C_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n doStamp(Img,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawEllipse(center,r_big,q,pa,color='red',linewidth=2)\n Painted.DrawEllipse(center,r_small,q,pa,color='green',linewidth=2)\n \n text = 'C=%5.2f' % (self['M_C'])\n # Painted.Graffiti(text,commtextpos)\n Painted.save(jpgname) \n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n \n self['figures']['C'] = epsname\n self['figcomms']['C'] = text", "def cfdProcessGeometry(self):\r\n \r\n # self.faceCentroids']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceSf']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceAreas']= [[] for i in range(self.numberOfFaces'])]\r\n \r\n ## Linear weight of distance from cell center to face\r\n self.faceWeights= [[0] for i in range(self.numberOfFaces)]\r\n\r\n ## Not\r\n self.faceCF= [[0, 0, 0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceCf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceFf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDist= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDistLimited= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.elementCentroids= [[] for i in range(self.numberOfElements)]\r\n self.elementVolumes= [[] for i in range(self.numberOfElements)]\r\n \r\n \"\"\"\r\n Calculate:\r\n -face centroids (faceCentroids)\r\n -face normal (Sf)\r\n -face areas (faceAreas)\r\n \"\"\"\r\n \r\n #find cell with largest number of points\r\n maxPoints=len(max(self.faceNodes, key=len))\r\n forCross1 = [[] for i in range(maxPoints)]\r\n forCross2 = [[] for i in range(maxPoints)]\r\n local_faceCentroid=[[] for i in range(maxPoints)]\r\n \r\n for iFace in range(self.numberOfFaces):\r\n theNodeIndices = self.faceNodes[iFace]\r\n theNumberOfFaceNodes = len(theNodeIndices)\r\n \r\n #compute a rough centre of the face\r\n local_centre = [0,0,0]\r\n \r\n for iNode in theNodeIndices:\r\n local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n \r\n local_centre = local_centre/theNumberOfFaceNodes\r\n \r\n for iTriangle in range(theNumberOfFaceNodes):\r\n \r\n point1 = local_centre\r\n point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n \r\n if iTriangle < theNumberOfFaceNodes-1:\r\n point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n else:\r\n point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n \r\n local_faceCentroid[iTriangle].append((point1+point2+point3)/3)\r\n \r\n left=point2-point1\r\n right=point3-point1\r\n \r\n forCross1[iTriangle].append(left)\r\n forCross2[iTriangle].append(right)\r\n \r\n \r\n local_Sf=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n local_area=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n \r\n centroid=np.zeros([self.numberOfFaces,3])\r\n area=np.zeros([self.numberOfFaces])\r\n Sf=np.zeros([self.numberOfFaces,3])\r\n \r\n #cells with fewer faces than others are full of zeros\r\n for i in range(maxPoints):\r\n \r\n forCrossLeft=np.vstack(np.array(forCross1[i]))\r\n forCrossRight=np.vstack(np.array(forCross2[i]))\r\n \r\n local_Sf[i]=0.5*np.cross(forCrossLeft,forCrossRight)\r\n 
local_area[i]=np.linalg.norm(local_Sf[i],axis=1)\r\n \r\n centroid = centroid + np.array(local_faceCentroid[i])*local_area[i][:,None]\r\n Sf=Sf+local_Sf[i]\r\n area=area+local_area[i]\r\n \r\n self.faceCentroids=centroid/area[:,None]\r\n self.faceSf=Sf\r\n self.faceAreas=area \r\n \r\n \r\n \"\"\"\r\n Pure python version - causes slowness due to iterative np.cross()\r\n \"\"\"\r\n \r\n # for iFace in range(self.numberOfFaces):\r\n # theNodeIndices = self.faceNodes[iFace]\r\n # theNumberOfFaceNodes = len(theNodeIndices)\r\n # \r\n # #compute a rough centre of the face\r\n # local_centre = [0,0,0]\r\n # \r\n # for iNode in theNodeIndices:\r\n # \r\n # local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n # \r\n # local_centre = local_centre/theNumberOfFaceNodes\r\n # centroid = [0, 0, 0]\r\n # Sf = [0,0,0]\r\n # area = 0\r\n # \r\n # #finds area of virtual triangles and adds them to the find to find face area\r\n # #and direction (Sf)\r\n # \r\n # \r\n # \r\n # for iTriangle in range(theNumberOfFaceNodes):\r\n # point1 = local_centre\r\n # point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n # \r\n # if iTriangle < theNumberOfFaceNodes-1:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n # else:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n # \r\n # local_centroid = (point1 + point2 + point3)/3\r\n # \r\n # left=point2-point1\r\n # right=point3-point1\r\n # x = 0.5*((left[1] * right[2]) - (left[2] * right[1]))\r\n # y = 0.5*((left[2] * right[0]) - (left[0] * right[2]))\r\n # z = 0.5*((left[0] * right[1]) - (left[1] * right[0]))\r\n # local_Sf=np.array([x,y,z])\r\n # \r\n # local_area = np.linalg.norm(local_Sf)\r\n # \r\n # centroid = centroid + local_area*local_centroid\r\n # Sf = Sf + local_Sf\r\n # area = area + local_area\r\n # centroid = centroid/area\r\n # self.faceCentroids[iFace]=centroid\r\n # self.faceSf[iFace]=Sf\r\n # self.faceAreas[iFace]=area\r\n \r\n \r\n \"\"\"\r\n Calculate:\r\n -element centroids (elementCentroids)\r\n -element volumes (elementVolumes)\r\n \"\"\"\r\n for iElement in range(self.numberOfElements):\r\n \r\n theElementFaces = self.elementFaces[iElement]\r\n \r\n #compute a rough centre of the element\r\n local_centre = [0,0,0]\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n local_centre = local_centre + self.faceCentroids[faceIndex]\r\n \r\n local_centre = local_centre/len(theElementFaces)\r\n \r\n localVolumeCentroidSum = [0,0,0]\r\n localVolumeSum = 0\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n \r\n Cf = self.faceCentroids[faceIndex]-local_centre\r\n \r\n faceSign = -1\r\n if iElement == self.owners[faceIndex]:\r\n faceSign = 1\r\n \r\n local_Sf = faceSign*self.faceSf[faceIndex]\r\n \r\n localVolume = np.dot(local_Sf,Cf)/3\r\n \r\n localCentroid = 0.75*self.faceCentroids[faceIndex]+0.25*local_centre\r\n \r\n localVolumeCentroidSum = localVolumeCentroidSum + localCentroid*localVolume\r\n \r\n localVolumeSum = localVolumeSum + localVolume\r\n \r\n self.elementCentroids[iElement]=localVolumeCentroidSum/localVolumeSum\r\n self.elementVolumes[iElement]=localVolumeSum\r\n \r\n \r\n for iFace in range(self.numberOfInteriorFaces):\r\n \r\n n=self.faceSf[iFace]/np.linalg.norm(self.faceSf[iFace])\r\n own=self.owners[iFace]\r\n nei = self.neighbours[iFace]\r\n \r\n self.faceCF[iFace]=self.elementCentroids[nei]-self.elementCentroids[own]\r\n self.faceCf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[own]\r\n 
self.faceFf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[nei]\r\n self.faceWeights[iFace]=(-np.dot(self.faceFf[iFace],n))/(-np.dot(self.faceFf[iFace],n)+np.dot(self.faceCf[iFace],n))\r\n \r\n for iBFace in range(self.numberOfInteriorFaces, self.numberOfFaces):\r\n \r\n \r\n n=self.faceSf[iBFace]/np.linalg.norm(self.faceSf[iBFace])\r\n own=self.owners[iBFace]\r\n \r\n self.faceCF[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own]\r\n self.faceCf[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own] \r\n self.faceWeights[iBFace]=1\r\n self.wallDist[iBFace]= max(np.dot(self.faceCf[iBFace], n), 1e-24)\r\n self.wallDistLimited[iBFace]= max(self.wallDist[iBFace], 0.05*np.linalg.norm(self.faceCf[iBFace]))", "def grid_inflation(self):\n for obs in self.obstacle_list:\n\n inflation_x1 = round((obs[0][0]-self._inflation_radius)/self.step_size)\n\n inflation_y2 = round((obs[0][1] + obs[2] +self._inflation_radius)/self.step_size)\n\n inflation_x2 = round((obs[0][0] + obs[1] +self._inflation_radius)/self.step_size)\n\n inflation_y1 = round((obs[0][1] -self._inflation_radius)/self.step_size)\n\n self.grid[1, inflation_x1:inflation_x2+1,\n inflation_y1:inflation_y2+1] = INFLATION_COST\n\n # border inflation\n self.grid[1, 0:self.gridwidth+1, 0:round(self._inflation_radius/self.step_size)+1] = INFLATION_COST\n self.grid[1, 0:self.gridwidth+1, self.gridheight-round(self._inflation_radius / self.step_size):self.gridheight+1] = INFLATION_COST\n self.grid[1, 0:round(self._inflation_radius/self.step_size)+1, 0:self.gridheight+1] = INFLATION_COST\n self.grid[1, self.gridwidth-round(self._inflation_radius/self.step_size):self.gridwidth+1, 0:self.gridheight+1] = INFLATION_COST\n\n # if NEED_DRAW_INFLATED_GRID:\n # for i in range(self.gridwidth):\n # plt.scatter(i,0)\n # plt.scatter(i,self.gridheight)\n # for j in range(self.gridheight):\n # plt.scatter(0,j)\n # plt.scatter(self.gridwidth,j)\n # if self.grid[i, j] != 0:\n # plt.scatter(i,j)\n # plt.show()\n\n return self.grid", "def step_by_step_plot(\n centroidss: List[Centroids], clusterss: List[Clusters], eps: float,\n bottom_left: Point, top_right: Point, circles: MaybeCircles = None):\n\n plot_init(bottom_left, top_right)\n\n # show the window on screen\n plot_refresh()\n\n points = concat(clusterss[0])\n plot_points(points, True, color=DEFAULT_POINT_COLOR, marker=POINT_MARKER)\n\n input(\"Put centroids ?\")\n plot_centroids(centroidss[0])\n plot_circles(circles, True)\n\n n = len(clusterss)\n for iteration in range(1, n + 1):\n input(\"Change clusters ? ({})\".format(iteration))\n plot_clear()\n plot_centroids(centroidss[iteration - 1])\n plot_clusters(clusterss[iteration - 1])\n plot_circles(circles, True)\n\n input(\"Move centroids ? 
({})\".format(iteration))\n plot_clear()\n plot_centroids(centroidss[iteration])\n plot_clusters(clusterss[iteration - 1])\n plot_circles(circles, True)\n\n print(\"\\n--> Done with final eps = {}\".format(eps))\n plot_finalize()", "def cells_center(self,refresh=False,mode='first3'):\n if refresh is True:\n to_update=slice(None)\n elif refresh is not False:\n to_update=refresh\n else:\n to_update = np.isnan(self.cells['_center'][:,0])\n\n if np.sum(to_update) > 0:\n if mode=='first3':\n p1,p2,p3 = [self.nodes['x'][self.cells['nodes'][to_update,i]] for i in [0,1,2]]\n self.cells['_center'][to_update] = circumcenter(p1,p2,p3)\n elif mode=='sequential':\n for c in np.arange(self.Ncells())[to_update]:\n points=self.nodes['x'][self.cell_to_nodes(c)]\n self.cells['_center'][c] = poly_circumcenter(points)\n \n return self.cells['_center']", "def plot_connected_components(c_components, output_file):\n c_components_len = [len(k) for k in c_components]\n\n logging.info(\"Number of cc %d\" % (len(c_components_len)))\n logging.info(\"First five cc\" + str(c_components_len[0:5]))\n logging.info(\"Mean length of cc %d\" % (np.mean(c_components_len)))\n\n fig, axes = plt.subplots(1, figsize=(10, 10))\n g1 = sns.distplot(c_components_len, hist=True, kde=False,ax=axes,norm_hist=False)\n g1 = sns.distplot([np.max(c_components_len)], hist=False, kde=False, rug=True, color='r', ax=axes,norm_hist=False)\n\n axes.annotate('LCC: %d' %np.max(c_components_len), xy=(np.max(c_components_len), 0),\n xytext=(np.max(c_components_len)-10,axes.dataLim.y1/4), arrowprops=dict(arrowstyle=\"->\"))\n\n sns.despine(ax=axes, top=True, bottom=False, right=True, left=True)\n g1.set_ylabel(\"Number of CC\")\n g1.set_xlabel(\"Size of CC\")\n\n if output_file.endswith('.pdf'):\n plt.savefig(output_file, format=\"pdf\")\n elif output_file.endswith('.png'):\n plt.savefig(output_file, format=\"png\")\n else:\n logging.warning('The null distribution figure can only be saved in pdf or png, forced to png')\n fig.savefig(output_file+'.png', format=\"png\")", "def compute_cell_center(seg_img: np.ndarray, labels: np.ndarray, results: np.ndarray) \\\n -> np.ndarray:\n for label in labels:\n if label != 0:\n all_points_z, all_points_x, all_points_y = np.where(seg_img == label)\n avg_z = np.round(np.mean(all_points_z))\n avg_x = np.round(np.mean(all_points_x))\n avg_y = np.round(np.mean(all_points_y))\n results[label] = [avg_z, avg_x, avg_y]\n\n return results", "def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))", "def plot_clusters(self):\n pass" ]
[ "0.61388534", "0.6034441", "0.58881974", "0.5873742", "0.58667403", "0.58461356", "0.5768717", "0.5750362", "0.56975144", "0.5666557", "0.5653945", "0.56459886", "0.5611346", "0.558924", "0.55840623", "0.5583146", "0.55808353", "0.5573108", "0.5564739", "0.5562984", "0.5537126", "0.5536546", "0.55120015", "0.5502637", "0.54958457", "0.5489455", "0.5485451", "0.5479614", "0.5474268", "0.54719466" ]
0.63715124
0
Creates structured cell centroids and cell volumes of a square or cubic lattice using the FEniCS mesh by averaging the appropriate number of 2D/3D triangles
def structured_cell_centroids(mesh):
    dim = mesh.topology().dim()
    stride = fact(dim)
    cents = get_cell_centroids(mesh)
    num_cells = int(mesh.num_cells()/stride)
    cell_cents_struct = np.zeros((num_cells,dim),dtype=float)
    for i in range(num_cells):
        start = int(stride*i)
        end = int(stride*i)+stride
        cell_cents_struct[i] = np.average(cents[start:end],axis=0)
    return cell_cents_struct
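A minimal usage sketch for the function above (illustrative only; it assumes the legacy FEniCS `dolfin` API is installed, and the bindings of `fact` and `get_cell_centroids` below are assumptions, not taken from this row):

# Illustrative usage sketch -- assumed helper bindings, not part of the original code.
import numpy as np
from math import factorial as fact
from dolfin import UnitSquareMesh

def get_cell_centroids(mesh):
    # Vertex-averaged centroid of every triangle/tetrahedron in the mesh.
    coords, cells = mesh.coordinates(), mesh.cells()
    return np.array([coords[c].mean(axis=0) for c in cells])

mesh = UnitSquareMesh(10, 10)            # 10x10 structured grid -> 200 triangles
cents = structured_cell_centroids(mesh)  # averages 2 triangle centroids per square cell
print(cents.shape)                       # (100, 2): one centroid per lattice cell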
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfdProcessGeometry(self):\r\n \r\n # self.faceCentroids']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceSf']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceAreas']= [[] for i in range(self.numberOfFaces'])]\r\n \r\n ## Linear weight of distance from cell center to face\r\n self.faceWeights= [[0] for i in range(self.numberOfFaces)]\r\n\r\n ## Not\r\n self.faceCF= [[0, 0, 0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceCf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceFf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDist= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDistLimited= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.elementCentroids= [[] for i in range(self.numberOfElements)]\r\n self.elementVolumes= [[] for i in range(self.numberOfElements)]\r\n \r\n \"\"\"\r\n Calculate:\r\n -face centroids (faceCentroids)\r\n -face normal (Sf)\r\n -face areas (faceAreas)\r\n \"\"\"\r\n \r\n #find cell with largest number of points\r\n maxPoints=len(max(self.faceNodes, key=len))\r\n forCross1 = [[] for i in range(maxPoints)]\r\n forCross2 = [[] for i in range(maxPoints)]\r\n local_faceCentroid=[[] for i in range(maxPoints)]\r\n \r\n for iFace in range(self.numberOfFaces):\r\n theNodeIndices = self.faceNodes[iFace]\r\n theNumberOfFaceNodes = len(theNodeIndices)\r\n \r\n #compute a rough centre of the face\r\n local_centre = [0,0,0]\r\n \r\n for iNode in theNodeIndices:\r\n local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n \r\n local_centre = local_centre/theNumberOfFaceNodes\r\n \r\n for iTriangle in range(theNumberOfFaceNodes):\r\n \r\n point1 = local_centre\r\n point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n \r\n if iTriangle < theNumberOfFaceNodes-1:\r\n point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n else:\r\n point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n \r\n local_faceCentroid[iTriangle].append((point1+point2+point3)/3)\r\n \r\n left=point2-point1\r\n right=point3-point1\r\n \r\n forCross1[iTriangle].append(left)\r\n forCross2[iTriangle].append(right)\r\n \r\n \r\n local_Sf=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n local_area=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n \r\n centroid=np.zeros([self.numberOfFaces,3])\r\n area=np.zeros([self.numberOfFaces])\r\n Sf=np.zeros([self.numberOfFaces,3])\r\n \r\n #cells with fewer faces than others are full of zeros\r\n for i in range(maxPoints):\r\n \r\n forCrossLeft=np.vstack(np.array(forCross1[i]))\r\n forCrossRight=np.vstack(np.array(forCross2[i]))\r\n \r\n local_Sf[i]=0.5*np.cross(forCrossLeft,forCrossRight)\r\n local_area[i]=np.linalg.norm(local_Sf[i],axis=1)\r\n \r\n centroid = centroid + np.array(local_faceCentroid[i])*local_area[i][:,None]\r\n Sf=Sf+local_Sf[i]\r\n area=area+local_area[i]\r\n \r\n self.faceCentroids=centroid/area[:,None]\r\n self.faceSf=Sf\r\n self.faceAreas=area \r\n \r\n \r\n \"\"\"\r\n Pure python version - causes slowness due to iterative np.cross()\r\n \"\"\"\r\n \r\n # for iFace in range(self.numberOfFaces):\r\n # theNodeIndices = self.faceNodes[iFace]\r\n # theNumberOfFaceNodes = len(theNodeIndices)\r\n # \r\n # #compute a rough centre of the face\r\n # local_centre = [0,0,0]\r\n # \r\n # for iNode in theNodeIndices:\r\n # \r\n # local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n # \r\n # local_centre = local_centre/theNumberOfFaceNodes\r\n # centroid = [0, 0, 0]\r\n # Sf = [0,0,0]\r\n # area = 0\r\n # \r\n # 
#finds area of virtual triangles and adds them to the find to find face area\r\n # #and direction (Sf)\r\n # \r\n # \r\n # \r\n # for iTriangle in range(theNumberOfFaceNodes):\r\n # point1 = local_centre\r\n # point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n # \r\n # if iTriangle < theNumberOfFaceNodes-1:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n # else:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n # \r\n # local_centroid = (point1 + point2 + point3)/3\r\n # \r\n # left=point2-point1\r\n # right=point3-point1\r\n # x = 0.5*((left[1] * right[2]) - (left[2] * right[1]))\r\n # y = 0.5*((left[2] * right[0]) - (left[0] * right[2]))\r\n # z = 0.5*((left[0] * right[1]) - (left[1] * right[0]))\r\n # local_Sf=np.array([x,y,z])\r\n # \r\n # local_area = np.linalg.norm(local_Sf)\r\n # \r\n # centroid = centroid + local_area*local_centroid\r\n # Sf = Sf + local_Sf\r\n # area = area + local_area\r\n # centroid = centroid/area\r\n # self.faceCentroids[iFace]=centroid\r\n # self.faceSf[iFace]=Sf\r\n # self.faceAreas[iFace]=area\r\n \r\n \r\n \"\"\"\r\n Calculate:\r\n -element centroids (elementCentroids)\r\n -element volumes (elementVolumes)\r\n \"\"\"\r\n for iElement in range(self.numberOfElements):\r\n \r\n theElementFaces = self.elementFaces[iElement]\r\n \r\n #compute a rough centre of the element\r\n local_centre = [0,0,0]\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n local_centre = local_centre + self.faceCentroids[faceIndex]\r\n \r\n local_centre = local_centre/len(theElementFaces)\r\n \r\n localVolumeCentroidSum = [0,0,0]\r\n localVolumeSum = 0\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n \r\n Cf = self.faceCentroids[faceIndex]-local_centre\r\n \r\n faceSign = -1\r\n if iElement == self.owners[faceIndex]:\r\n faceSign = 1\r\n \r\n local_Sf = faceSign*self.faceSf[faceIndex]\r\n \r\n localVolume = np.dot(local_Sf,Cf)/3\r\n \r\n localCentroid = 0.75*self.faceCentroids[faceIndex]+0.25*local_centre\r\n \r\n localVolumeCentroidSum = localVolumeCentroidSum + localCentroid*localVolume\r\n \r\n localVolumeSum = localVolumeSum + localVolume\r\n \r\n self.elementCentroids[iElement]=localVolumeCentroidSum/localVolumeSum\r\n self.elementVolumes[iElement]=localVolumeSum\r\n \r\n \r\n for iFace in range(self.numberOfInteriorFaces):\r\n \r\n n=self.faceSf[iFace]/np.linalg.norm(self.faceSf[iFace])\r\n own=self.owners[iFace]\r\n nei = self.neighbours[iFace]\r\n \r\n self.faceCF[iFace]=self.elementCentroids[nei]-self.elementCentroids[own]\r\n self.faceCf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[own]\r\n self.faceFf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[nei]\r\n self.faceWeights[iFace]=(-np.dot(self.faceFf[iFace],n))/(-np.dot(self.faceFf[iFace],n)+np.dot(self.faceCf[iFace],n))\r\n \r\n for iBFace in range(self.numberOfInteriorFaces, self.numberOfFaces):\r\n \r\n \r\n n=self.faceSf[iBFace]/np.linalg.norm(self.faceSf[iBFace])\r\n own=self.owners[iBFace]\r\n \r\n self.faceCF[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own]\r\n self.faceCf[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own] \r\n self.faceWeights[iBFace]=1\r\n self.wallDist[iBFace]= max(np.dot(self.faceCf[iBFace], n), 1e-24)\r\n self.wallDistLimited[iBFace]= max(self.wallDist[iBFace], 0.05*np.linalg.norm(self.faceCf[iBFace]))", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim 
= len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def init_grid_geometry(self):\n self.vc = self.grid.cells_center() # circumcenters\n self.ec = self.grid.edges_center()\n \n self.c1 = self.grid.edges['cells'][:,0]\n self.c2 = self.grid.edges['cells'][:,1]\n\n # distance between cell centers\n self.d_j = utils.mag( self.vc[self.c1] - self.vc[self.c2] )\n bdry=self.c2<0\n # grid has a few places where vc is coincident with outer boundary, thanks\n # to janet\n self.d_j[bdry] = 2*utils.mag( self.vc[self.c1[bdry]] - self.ec[bdry] ).clip(self.d_j_min,np.inf)\n self.l_j = self.grid.edges_length()\n\n self.normal_j = self.grid.edges_normals()\n self.area_c = self.grid.cells_area()\n\n self.K_j = 100*np.ones(self.grid.Nedges())\n\n j_valid=~self.grid.edges['deleted']\n\n print(\"Checking finite geometry\")\n assert np.all( np.isfinite(self.d_j[j_valid]))\n assert np.all( np.isfinite(self.l_j[j_valid]))\n assert np.all( np.isfinite(self.area_c))\n assert np.all( np.isfinite(self.normal_j[j_valid]))\n assert np.all( self.d_j[j_valid] > 0 )\n assert np.all( self.l_j[j_valid] > 0 )\n assert np.all( self.area_c > 0 )", "def createFccLattice(nx, ny, nz, lat, atoms, boxes, domain):\n nb = 4 # number of atoms in this basis\n\n basis = [ (0.25, 0.25, 0.25),\n (0.25, 0.75, 0.75),\n (0.75, 0.25, 0.75),\n (0.75, 0.75, 0.25)\n ]\n\n begin = [int(x) for x in np.floor(domain.localMin/lat)]\n end = [int(x) for x in np.ceil(domain.localMax/lat)]\n\n idx = 0\n for ix in range(begin[0], end[0]):\n for iy in range(begin[1], end[1]):\n for iz in range(begin[2], end[2]):\n for ib in range(nb):\n rx = (ix+basis[ib][0])*lat\n ry = (iy+basis[ib][1])*lat\n rz = (iz+basis[ib][2])*lat\n if rx < domain.localMin[0] or rx >= domain.localMax[0]:\n continue\n if ry < domain.localMin[1] or ry >= domain.localMax[1]:\n continue\n if rz < domain.localMin[2] or rz >= domain.localMax[2]:\n continue\n\n gid = ib + nb*(iz + nz*(iy + ny*ix))\n boxes.putAtomInBox(atoms, gid, 0, rx, ry, rz)\n idx += 1\n\n nlocal = np.zeros(1, dtype=np.int)\n nglobal = np.zeros(1, dtype=np.int)\n nlocal[0] = idx\n\n parallel.addParallel(nlocal, nglobal)\n\n atoms.nGlobal = nglobal[0]\n if atoms.nGlobal != nb*nx*ny*nz:\n print 'nGlobal = ',atoms.nGlobal\n print 'nb,nx,ny,nz,product',nb,nx,ny,nz,nb*nx*ny*nz\n assert atoms.nGlobal == nb*nx*ny*nz", "def __init__(self, Region):\r\n \r\n self.Region=Region\r\n \r\n ## Path to points files\r\n self.pointsFile = r\"%s/constant/polyMesh/points\" % self.Region.caseDirectoryPath\r\n\r\n ## Path to faces file\r\n self.facesFile = r\"%s/constant/polyMesh/faces\" % self.Region.caseDirectoryPath\r\n\r\n ## Path to owner file\r\n self.ownerFile = r\"%s/constant/polyMesh/owner\" % self.Region.caseDirectoryPath\r\n\r\n ## Path to neighbour file\r\n self.neighbourFile = r\"%s/constant/polyMesh/neighbour\" % self.Region.caseDirectoryPath\r\n\r\n ## Path to boundary file\r\n self.boundaryFile = r\"%s/constant/polyMesh/boundary\" % self.Region.caseDirectoryPath \r\n \r\n print('\\n')\r\n print('Reading contents of ./constant/polyMesh folder ...')\r\n \r\n self.cfdReadPointsFile()\r\n self.cfdReadFacesFile()\r\n self.cfdReadOwnerFile()\r\n self.cfdReadNeighbourFile()\r\n\r\n #maybe these should go in a function?\r\n self.numberOfBFaces=self.numberOfFaces-self.numberOfInteriorFaces\r\n self.numberOfElements = max(self.neighbours)+1 
#because of zero indexing in Python\r\n self.numberOfBElements=self.numberOfFaces-self.numberOfInteriorFaces #seems strange that subtracting faces gives elements ...\r\n\r\n self.cfdReadBoundaryFile() \r\n self.cfdCheckIfCavity()\r\n \r\n print('Processing mesh ... please wait ....')\r\n \r\n self.cfdProcessElementTopology()\r\n self.cfdProcessNodeTopology()\r\n self.cfdProcessGeometry()\r\n \r\n self.cfdGetBoundaryElementsSubArrayForBoundaryPatch()\r\n self.cfdGetOwnersSubArrayForBoundaryPatch()\r\n self.cfdGetFaceSfSubArrayForBoundaryPatch()\r\n self.cfdGetFaceCentroidsSubArrayForBoundaryPatch()\r\n \r\n ## (list) 1D, indices refer to an interior face, list value is the face's owner\r\n self.interiorFaceOwners = self.owners[0:self.numberOfInteriorFaces]\r\n\r\n ## (list) 1D, indices refer to an interior face, list value is the face's neighbor cell\r\n self.interiorFaceNeighbours = self.neighbours[0:self.numberOfInteriorFaces]\r\n\r\n ## (list) 1D, face weighting factors. Values near 0.5 mean the face's centroid is approximately halfway between the center of the owner and neighbour cell centers, values less than 0.5 mean the face centroid is closer to the owner and those greater than 0.5 are closer to the neighbour cell).\r\n self.interiorFaceWeights = self.faceWeights[0:self.numberOfInteriorFaces]\r\n\r\n ## (array) 2D, normal vectors (Sf) of the interior faces (indices refer to face index)\r\n self.interiorFaceSf = self.faceSf[0:self.numberOfInteriorFaces]\r\n \r\n ## (array) 2D, CF vectors of the interior faces (indices refer to face index)\r\n self.interiorFaceCF = self.faceCF[0:self.numberOfInteriorFaces]\r\n \r\n ## (list) 1D, indices refer to an boundary face, list value refers to the face's owner\r\n self.owners_b = self.owners[self.numberOfInteriorFaces:self.numberOfFaces]\r\n\r\n ## (list) 1D, normal vectors (Sf) of the boundary faces (indices refer to face index). Boundary face normals always point out of the domain. 
\r\n self.Sf_b=self.faceSf[self.numberOfInteriorFaces:self.numberOfFaces]", "def mesh_geometry(mesh_file):\n\n # Reading mesh results\n # msh = datread(mesh_file) # Deprecated\n\n with open(mesh_file, 'r') as fr:\n msh = np.array([list(map(float, l.replace('T', '').split())) for l in fr.readlines()])\n\n nnodes = int(msh[0][0])\n nelem = int(msh[1][1])\n ncol = int(msh[0][3])\n nlin = int(msh[0][4])\n\n nodes = np.array(msh[4:(nnodes + 4)])\n xn = np.array(list(chunks([nodes[i][1] for i in range(nnodes)], ncol + 1))).flatten()\n yn = np.array(list(chunks([nodes[i][2] for i in range(nnodes)], nlin + 1))).flatten()\n\n xy = np.array([[xn[i], yn[i]] for i in range(len(xn))])\n\n layers = np.array(list(chunks(xy, ncol + 1)))\n\n # Computing the 4-corners coordinates of each blocks based on the node position\n\n blocks = []\n\n s = layers.shape\n\n for i in range(s[0] - 1):\n for j in range(s[1] - 1):\n blocks.append([\n [layers[i, j, 0], layers[i, j, 1]],\n [layers[i + 1, j, 0], layers[i + 1, j, 1]],\n [layers[i + 1, j + 1, 0], layers[i + 1, j + 1, 1]],\n [layers[i + 1, j + 1, 0], layers[i, j + 1, 1]]\n ])\n\n blocks = np.array(blocks)\n\n centerxy = np.array([[np.mean(blocks[i, :, 0]), np.mean(blocks[i, :, 1])] for i in range(nelem)])\n\n return ncol, nlin, nelem, blocks, centerxy", "def nt_3d_centers(cif_file, consider_all_atoms):\n result =[]\n try:\n structure = MMCIFParser().get_structure(cif_file, cif_file)\n except Exception as e:\n warn(f\"\\n{cif_file.split('/')[-1]} : {e}\", error=True)\n with open(runDir + \"/errors.txt\", \"a\") as f:\n f.write(f\"Exception in nt_3d_centers({cif_file.split('/')[-1]})\\n\")\n f.write(str(e))\n f.write(\"\\n\\n\")\n return result\n for model in structure:\n for chain in model:\n for residue in chain:\n if consider_all_atoms:\n temp_list = []\n for atom in residue:\n temp_list.append(atom.get_coord())\n lg = len(temp_list)\n summ = np.sum(temp_list, axis = 0)\n res_isobaricentre = [summ[0]/lg, summ[1]/lg, summ[2]/lg]\n result.append([res_isobaricentre[0], res_isobaricentre[1], res_isobaricentre[2]])\n else:\n coordinates = None\n for atom in residue:\n if atom.get_name() == \"C1'\":\n coordinates = atom.get_coord()\n if coordinates is None:\n # Residue has no C1'\n res = np.nan\n else:\n res = [coordinates[0], coordinates[1], coordinates[2]]\n result.append(res)\n return(result)", "def populate(self, compound_dict=None, x=None, y=None, z=None):\n if self.dimension == 3:\n a = self.lattice_spacings[0]\n b = self.lattice_spacings[1]\n c = self.lattice_spacings[2]\n if x is None:\n x = 1\n if y is None:\n y = 1\n if z is None:\n z = 1\n if x < 1 or y < 1 or z < 1:\n raise ValueError('Incorrect populate value: X, Y, or Z is < 1.'\n ' Cannot replicate unit cell less than 1')\n elif self.dimension == 2:\n a = self.lattice_spacings[0]\n b = self.lattice_spacings[1]\n if x is None:\n x = 1\n if y is None:\n y = 1\n if z is None:\n pass\n else:\n raise ValueError('Z is defined although dimension is 2D')\n if x < 1 or y < 1:\n raise ValueError('Incorrect populate value: X or Y is < 1. '\n ' Cannot replicate unit cell less than 1')\n elif self.dimension == 1:\n a = self.lattice_spacings[0]\n if x is None:\n x = 1\n if y is None:\n pass\n else:\n raise ValueError('Y is defined although dimension is 1D')\n if z is None:\n pass\n if z is not None:\n raise ValueError('Z is defined although dimension is 2D')\n if x < 1:\n raise ValueError('Incorrect populate value: X < 1. 
'\n ' Cannot replicate unit cell less than 1')\n else:\n raise ValueError('Dimension not defined.')\n\n cell = defaultdict(list)\n for key, val in self.basis_vectors.items():\n for val_item in range(len(val)):\n if self.dimension == 3:\n for i in range(x):\n for j in range(y):\n for k in range(z):\n tmpx = (val[val_item][0] + i) * a\n tmpy = (val[val_item][1] + j) * b\n tmpz = (val[val_item][2] + k) * c\n tmp_tuple = tuple((tmpx, tmpy, tmpz))\n cell[key].append(((tmp_tuple)))\n elif self.dimension == 2:\n for i in range(x):\n for j in range(y):\n tmpx = (val[val_item][0] + i) * a\n tmpy = (val[val_item][1] + j) * b\n tmp_tuple = tuple((tmpx, tmpy))\n cell[key].append(((tmp_tuple)))\n else:\n for i in range(x):\n tmpx = (val[val_item][0] + i) * a\n tmp_tuple = tuple((tmpx))\n cell[key].append(((tmp_tuple)))\n\n ret_lattice = mb.Compound()\n if compound_dict is None:\n for key_id, all_pos in cell.items():\n particle = mb.Particle(name=key_id, pos=[0, 0, 0])\n for pos in all_pos:\n particle_to_add = mb.clone(particle)\n mb.translate(particle_to_add, list(pos))\n ret_lattice.add(particle_to_add)\n else:\n for key_id, all_pos in cell.items():\n if isinstance(compound_dict[key_id], mb.Compound):\n compound_to_move = compound_dict[key_id]\n for pos in all_pos:\n tmp_comp = mb.clone(compound_to_move)\n mb.translate(tmp_comp, list(pos))\n ret_lattice.add(tmp_comp)\n else:\n err_type = type(compound_dict.get(key_id))\n TypeError('Invalid type in provided Compound Dictionary. '\n 'For key {}, type: {} was provided, '\n 'not Compound.'.format(key_id, err_type))\n return ret_lattice", "def create_start_data(self):\n\t\tdef inputMesh(feature_size):\n\t\t\tc1= np.expand_dims(np.array([0,-0.9]),0)\n\t\t\tc2= np.expand_dims(np.array([-0.9,0.9]),0)\n\t\t\tc3= np.expand_dims(np.array([0.9,0.9]),0)\n\t\t\tx1 = np.expand_dims(np.pad(np.array([0,-0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx2 = np.expand_dims(np.pad(np.array([-0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx3 = np.expand_dims(np.pad(np.array([0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tedge_index = np.transpose(np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) # COO format\n\t\t\treturn np.concatenate((c1,c2,c3),axis=0), np.concatenate((x1,x2,x3),axis=0),edge_index\n\n\t\tc, x, edge_index = inputMesh(self.params.feature_size)# x is c with zeros appended, x=f ..pixel2mesh\n\t\tdata_list_x = []\n\t\tdata_list_c = []\n\t\tdata_list_pid = []\n\t\tfor i in range(self.params.batch_size):\n\t\t\tdata_list_x.append(Data(x=torch.Tensor(x).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_c.append(Data(x=torch.Tensor(c).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_pid.append(Data(x=torch.zeros(c.shape[0],1).type(dtypeL).requires_grad_(False)))\n\t\tbatch_x = Batch.from_data_list(data_list_x)\n\t\tbatch_c = Batch.from_data_list(data_list_c)\n\t\tbatch_pid = Batch.from_data_list(data_list_pid)\n\t\treturn batch_x, batch_c, batch_pid", "def cfdProcessElementTopology(self):\r\n ## (list of lists) List where each index represents an element in the domain. Each index has an associated list which contains the elements for which is shares a face (i.e. the neighouring elements). Do not confuse a faces 'neighbour cell', which refers to a face's neighbour element, with the neighbouring elements of a cell. 
\r\n self.elementNeighbours = [[] for i in range(0,self.numberOfElements)]\r\n\r\n ## (list of lists) list of face indices forming each element\r\n self.elementFaces = [[] for i in range(0,self.numberOfElements)]\r\n \r\n #populates self.elementNeighbours\r\n for iFace in range(self.numberOfInteriorFaces):\r\n own=self.owners[iFace]\r\n nei=self.neighbours[iFace]\r\n \r\n #adds indices of neighbour cells\r\n self.elementNeighbours[own].append(nei)\r\n self.elementNeighbours[nei].append(own)\r\n \r\n #adds interior faces\r\n self.elementFaces[own].append(iFace)\r\n self.elementFaces[nei].append(iFace)\r\n \r\n #adds boundary faces ('patches')\r\n for iFace in range(self.numberOfInteriorFaces,self.numberOfFaces):\r\n own=self.owners[iFace]\r\n self.elementFaces[own].append(iFace)\r\n \r\n ## List of lists containing points forming each element\r\n self.elementNodes = [[] for i in range(0,self.numberOfElements)]\r\n \r\n for iElement in range(self.numberOfElements):\r\n \r\n for faceIndex in self.elementFaces[iElement]:\r\n self.elementNodes[iElement].append(self.faceNodes[faceIndex])\r\n \r\n self.elementNodes[iElement] = list(set([item for sublist in self.elementNodes[iElement] for item in sublist]))\r\n \r\n ## Upper coefficient indices (owners)\r\n self.upperAnbCoeffIndex=[[] for i in range(0,self.numberOfInteriorFaces)]\r\n \r\n ## Lower coefficient indices (owners)\r\n self.lowerAnbCoeffIndex=[[] for i in range(0,self.numberOfInteriorFaces)]\r\n \r\n for iElement in range(self.numberOfElements):\r\n ## Element number from 1 to numberOfElements + 1\r\n iNb=1\r\n for faceIndex in self.elementFaces[iElement]:\r\n \r\n #skip if it is a boundary face\r\n if faceIndex > self.numberOfInteriorFaces-1:\r\n continue\r\n \r\n own = self.owners[faceIndex]\r\n nei = self.neighbours[faceIndex]\r\n \r\n if iElement == own:\r\n self.upperAnbCoeffIndex[faceIndex] = iNb\r\n elif iElement == nei:\r\n self.lowerAnbCoeffIndex[faceIndex] = iNb\r\n \r\n iNb = iNb +1", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def _cellTopology(self):\n cellTopology = numerix.empty((self.mesh.numberOfCells,), dtype=numerix.ubyte)\n\n t = self._elementTopology\n cellTopology[:] = t[\"polygon\"]\n\n facesPerCell = self.mesh._facesPerCell\n cellTopology[facesPerCell == 3] = t[\"triangle\"]\n cellTopology[facesPerCell == 4] = t[\"quadrangle\"]\n\n return cellTopology", "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hypercubes[coords].center[index]\n hypercube.center = [x / 4 for x in sums]", "def compute_mesh(nrow, ncol, nele):\n tri_index = np.zeros((nele, 3))\n for i in range(nrow-1):\n for j in range(NUM):\n if j == 0:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)\n tri_index[i*4*NUM+j*4, 2] = (i+2)\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n else:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4, 2] = (i+2)+(2*j-1)*nrow\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)+(2*j-1)*nrow\n 
tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n \n tri_index[i*4*NUM+j*4+2, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+2, 1] = (i+1)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+2, 2] = (i+2)+2*(j+1)*nrow\n\n tri_index[i*4*NUM+j*4+3, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+3, 1] = (i+2)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+3, 2] = (i+2)+2*j*nrow\n return tri_index", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be averaged and the point to be set. \n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def clusters_allocate_cells(self):\n for cluster in self.clusters:\n cluster.cells[:] = []\n for cell in self.block_proc:\n wdists = []\n for cluster in self.clusters:\n s = cluster.size\n d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n (cell.z-cluster.z)**2 )\n d = numpy.sqrt(d)\n c = self.c\n # TODO: choose a better distance function below\n r = d*(c+(1-c)*numpy.exp(-s/d))\n r = numpy.clip(r,0,r)\n wdists.append(r)\n self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def _cellTopology(self):\n facesPerCell = self.mesh._facesPerCell\n nodesPerFace = self.mesh._nodesPerFace\n\n def faceCountsMatch(targetCounts):\n if len(targetCounts) > nodesPerFace.shape[0]:\n # pad nodesPerFace with zeros\n paddedNodesPerFace = numerix.zeros((len(targetCounts), nodesPerFace.shape[1]), dtype=numerix.INT_DTYPE)\n paddedNodesPerFace[:nodesPerFace.shape[0],:] = nodesPerFace\n\n paddedTargetCounts = numerix.array(targetCounts)[..., numerix.newaxis]\n else:\n # pad target face node count with zeros\n paddedTargetCounts = numerix.concatenate((targetCounts,\n [0] * (self.mesh._maxFacesPerCell - len(targetCounts))))\n paddedTargetCounts = paddedTargetCounts[..., numerix.newaxis]\n\n paddedNodesPerFace = nodesPerFace\n\n return ((facesPerCell == len(targetCounts))\n & (paddedNodesPerFace == paddedTargetCounts).all(axis=0))\n\n cellTopology = numerix.empty((self.mesh.numberOfCells,), dtype=numerix.ubyte)\n\n t = self._elementTopology\n\n if self.mesh.dim == 1:\n cellTopology[:] = t[\"line\"]\n elif self.mesh.dim == 2:\n cellTopology[:] = t[\"polygon\"]\n cellTopology[faceCountsMatch([2, 2, 2])] = 
t[\"triangle\"]\n cellTopology[faceCountsMatch([2, 2, 2, 2])] = t[\"quadrangle\"]\n else:\n cellTopology[:] = t[\"unknown\"]\n cellTopology[faceCountsMatch([3, 3, 3, 3])] = t[\"tetrahedron\"]\n cellTopology[faceCountsMatch([4, 4, 4, 4, 4, 4])] = t[\"hexahedron\"]\n cellTopology[faceCountsMatch([4, 4, 4, 3, 3])] = t[\"prism\"]\n cellTopology[faceCountsMatch([4, 3, 3, 3, 3])] = t[\"pyramid\"]\n\n return cellTopology", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def createFccLattice(nx, ny, nz, lat, atoms):\n nb = 4 # number of atoms in this basis\n\n basis = [ (0.25, 0.25, 0.25),\n (0.25, 0.75, 0.75),\n (0.75, 0.25, 0.75),\n (0.75, 0.75, 0.25)\n ]\n\n idx = 0\n # loop over ix,iy,iz\n for ix in range(nx):\n for iy in range(ny):\n for iz in range(nz):\n for ib in range(nb):\n rx = (ix+basis[ib][0])*lat\n ry = (iy+basis[ib][1])*lat\n rz = (iz+basis[ib][2])*lat\n\n atoms.r[idx,0] = rx\n atoms.r[idx,1] = ry\n atoms.r[idx,2] = rz\n idx += 1\n\n #idx should equal nx*ny*nz*nb", "def local_composition(self, outfile):\n # TODO Rewrite if I ever need this again\n radius = 3.6 * 2\n npix = 64\n #mat = np.zeros((npix,npix,npix),dtype=np.float)\n #mat = np.zeros((npix,npix,npix),dtype={'names':['col1', 'col2', 'col3'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['40', '13', '29'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['id','data'], 'formats':['f4','f4']})\n #names = ['id','data']\n #formats = ['i4',('f4','f4','f4')]\n #mat = np.zeros((npix,npix,npix),dtype=dict(names = names, formats=formats))\n #mat = np.zeros((npix,npix,npix),dtype={'40':('i4',0), '29':('f4',0), '13':('f4',0)})\n print(\"Creating matrix...\")\n mat = [[[{} for i in range(npix)] for j in range(npix)] for k in range(npix)]\n print(\"Finished creating matrix.\")\n #print(repr(mat))\n dx = self.xsize/npix\n dy = self.ysize/npix\n dz = self.zsize/npix\n for ii,i in enumerate(drange(-npix/2*dx,npix/2*dx-dx,dx)):\n print(\"On ii = {0}\".format(ii))\n for jj,j in enumerate(drange(-npix/2*dy,npix/2*dy-dy,dy)):\n for kk,k in enumerate(drange(-npix/2*dz,npix/2*dz-dz,dz)):\n atoms = self.get_atoms_in_cutoff( (i,j,k), radius )\n comp = {}\n for atom in atoms:\n comp[str(atom.z)] = comp.get(str(atom.z),0) + 1.0\n for key in comp:\n comp[key] /= len(atoms)\n #print(comp)\n #mat[ii][jj][kk] = copy.copy(comp)\n mat[ii][jj][kk] = comp\n of = open(outfile,'w')\n of.write('IGOR\\n')\n for atomtype in self.atomtypes:\n of.write('\\nWAVES/N=({0},{1},{2})\\t {3}\\nBEGIN\\n'.format(npix,npix,npix,'partial_comp_'+znum2sym.z2sym(atomtype)))\n for layer in mat:\n for column in layer:\n for value in column:\n try:\n of.write(\"{0} \".format(value[str(atomtype)]))\n except KeyError:\n of.write(\"{0} \".format(0.0))\n of.write(\"\\n\")\n of.write('END\\n')\n of.write('X SetScale/P x 0,1,\"\", {0}; SetScale/P y 0,1,\"\", {0}; SetScale/P z 0,1,\"\", {0}; SetScale d 0,0,\"\", {0}\\n'.format('partial_comp_'+znum2sym.z2sym(atomtype)))\n of.close()\n return mat", "def 
computeCenters3d(self, data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]", "def run(self):\n config = self.config\n logger = self.logger\n\n timeStart = time.time()\n\n section = config['horizontal_grid']\n nx = section.getint('nx')\n ny = section.getint('ny')\n dc = section.getfloat('dc')\n\n dsMesh = make_planar_hex_mesh(nx=nx, ny=ny, dc=dc, nonperiodic_x=True,\n nonperiodic_y=False)\n write_netcdf(dsMesh, 'base_mesh.nc')\n\n dsMesh = cull(dsMesh, logger=logger)\n dsMesh = convert(dsMesh, graphInfoFileName='culled_graph.info',\n logger=logger)\n write_netcdf(dsMesh, 'culled_mesh.nc')\n\n section = config['vertical_grid']\n maxDepth = section.getfloat('bottom_depth')\n nVertLevels = section.getint('vert_levels')\n\n section = config['solitary_wave']\n config_eos_linear_alpha = section.getfloat('eos_linear_alpha')\n config_eos_linear_Tref = section.getfloat('eos_linear_Tref')\n config_eos_linear_Sref = section.getfloat('eos_linear_Sref')\n config_eos_linear_densityref = section.getfloat(\n 'eos_linear_densityref')\n h1 = section.getfloat('h1')\n deltaRho = section.getfloat('deltaRho')\n interfaceThick = section.getfloat('interfaceThick')\n amplitude = section.getfloat('amplitude')\n wavelenght = section.getfloat('wavelenght')\n\n # comment('obtain dimensions and mesh variables')\n # vertical_coordinate = 'uniform'\n\n ds = dsMesh.copy()\n nCells = ds.nCells.size\n nEdges = ds.nEdges.size\n nVertices = ds.nVertices.size\n\n xCell = ds.xCell\n angleEdge = ds.angleEdge\n\n # initialize velocity field\n u = np.zeros([1, nEdges, nVertLevels])\n\n # comment('create and initialize variables')\n time1 = time.time()\n\n surfaceStress = np.nan * np.ones(nCells)\n atmosphericPressure = np.nan * np.ones(nCells)\n boundaryLayerDepth = np.nan * np.ones(nCells)\n\n ds['bottomDepth'] = maxDepth * xarray.ones_like(xCell)\n ds['ssh'] = xarray.zeros_like(xCell)\n\n init_vertical_coord(config, ds)\n\n # initial salinity, density, temperature\n ds['salinity'] = (config_eos_linear_Sref *\n xarray.ones_like(ds.zMid)).where(ds.cellMask)\n ds['density'] = \\\n (config_eos_linear_densityref -\n (0.5*deltaRho)*(np.tanh(\n (2/interfaceThick)*np.arctanh(0.99) *\n (ds.zMid + amplitude*np.exp(\n -(ds.xCell/wavelenght)*(ds.xCell/wavelenght)) + h1))))\n # T = Tref - (rho - rhoRef)/alpha\n ds['temperature'] = \\\n (config_eos_linear_Tref\n - (ds.density - config_eos_linear_densityref) /\n config_eos_linear_alpha)\n\n # initial velocity on edges\n ds['normalVelocity'] = (('Time', 'nEdges', 'nVertLevels',),\n np.zeros([1, nEdges, nVertLevels]))\n normalVelocity = ds['normalVelocity']\n for iEdge in range(0, nEdges):\n normalVelocity[0, iEdge, :] = u[0, iEdge, :] * \\\n math.cos(angleEdge[iEdge])\n\n # Coriolis parameter\n ds['fCell'] = (('nCells', 'nVertLevels',),\n np.zeros([nCells, nVertLevels]))\n ds['fEdge'] = (('nEdges', 'nVertLevels',),\n np.zeros([nEdges, nVertLevels]))\n ds['fVertex'] = (('nVertices', 'nVertLevels',),\n np.zeros([nVertices, nVertLevels]))\n\n # surface fields\n surfaceStress[:] = 0.0\n atmosphericPressure[:] = 0.0\n boundaryLayerDepth[:] = 0.0\n print(f' time: {time.time() - time1}')\n\n # comment('finalize and write file')\n time1 = time.time()\n\n # If you prefer not to have NaN as the fill value, you should consider\n # using mpas_tools.io.write_netcdf() instead\n write_netcdf(ds, 
'initial_state.nc')\n print(f' time: {time.time() - time1}')\n print(f'Total time: {time.time() - timeStart}')", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def cmesh(self):\n return numpy.meshgrid(*self.centers, indexing='ij')", "def calculate_molecular_contents(self, scale, toler, covalent_radii):\n # Calculate the contents of all the cells adjacent to the central cell\n adjacents = ( 0, -1, 1 )\n translations = [ (i, j, k) for i in adjacents for j in adjacents for k in adjacents ]\n fractional_supercell = []\n index_supercell = []\n for tr in translations:\n for l,a in enumerate(self.fractional_coordinates):\n i,j,k = tr\n index_supercell.append( l )\n new_position = [ (xyz1 + xyz2) for xyz1, xyz2 in zip(a, tr) ]\n fractional_supercell.append( new_position )\n #jk print('New positions ',new_position,l,i,j,k)\n # Convert fractional supercell coordinates to xyz\n # xyz_supercell will be an np array\n xyz_supercell = np.empty_like(fractional_supercell)\n for i,abc in enumerate(fractional_supercell):\n xyz_supercell[i] = self.convert_abc_to_xyz(abc)\n # put the atoms into boxes of boxSize\n BoxAtoms = {}\n BoxNeighbours = {}\n boxSize = 0.0\n # calculate boxsize\n rmax = 0.0\n for el in self.element_names:\n #jk print(\"Element name\",el)\n if covalent_radii[el] > rmax:\n rmax = covalent_radii[el]\n boxSize = 2.0*scale*rmax + 0.5 + toler\n #jk print('rmax = ',rmax)\n # Put atoms into boxes and store the box info in Atom_box_id\n Atom_box_id = []\n for i,xyz in enumerate(xyz_supercell):\n a = int( math.floor(xyz[0]/boxSize) )\n b = int( math.floor(xyz[1]/boxSize) )\n c = int( math.floor(xyz[2]/boxSize) )\n abc = (a,b,c)\n Atom_box_id.append(abc)\n try:\n BoxAtoms[abc].append(i)\n except:\n BoxAtoms[abc] = [i]\n # Calculate the neighbouring boxes for each occupied box\n for abc in BoxAtoms:\n #jk print('Box ',abc, BoxAtoms[abc])\n a,b,c = abc\n BoxNeighbours[abc] = []\n for i in [ -1, 0, 1]:\n for j in [ -1, 0, 1]:\n for k in [ -1, 0, 1]:\n BoxNeighbours[abc].append( (a+i,b+j,c+k) )\n # end for abc in Box1\n \n # Calculate the bonding the supercell \n bondedToAtom = {}\n for i,xyzi in enumerate(xyz_supercell):\n #jk print('Calculating bonding to ',i,xyzi)\n bondedToAtom[i] = []\n # Find the element name for this atom in the supercell\n ip = index_supercell[i]\n i_el = self.element_names[ip]\n # lookup all of the boxes that might hold a bonding atom\n cell = Atom_box_id[i]\n for abc in BoxNeighbours[cell]:\n # lookup all the atoms that are in that cell\n try: \n for j in BoxAtoms[abc]:\n if j < i:\n # Find the element name for this atom in the supercell\n jp = index_supercell[j]\n j_el = self.element_names[jp]\n dist1 = scale*( covalent_radii[i_el] + covalent_radii[j_el]) + toler\n dist2 = calculate_distance(xyzi,xyz_supercell[j])\n if dist2 < dist1:\n bondedToAtom[i].append(j)\n bondedToAtom[j].append(i)\n #jk print('new bond', i, j, i_el, j_el, xyzi, xyz_supercell[j], dist1, dist2)\n # 
end if dist2 < dist1\n # end if j < i\n # end for j\n except KeyError: \n pass\n # end for abc \n # end for i,a\n # \n # Now we have to find how many molecules we have in the cell\n # There must be at least one molecule in the cell and it must contain the first atom\n #\n # BelongsToMolecule[i] holds which molecule the ith atom belongs to\n belongsToMolecule = {}\n # molecules is a dictionary of molecules each entry is a is a list of atoms\n molecules = {}\n molID = -1\n # We stop when all the atoms in the original cell belong to a molecule\n remainingAtoms = [ atom for atom in range(self.nions) ]\n bonds = []\n while len(remainingAtoms) > 0:\n #jk print(\"Remaining atoms\")\n #jk print(remainingAtoms)\n # create a new molecule from the first atom which has no molecule assigned to it\n molID += 1\n useAtom = remainingAtoms[0]\n belongsToMolecule[useAtom] = molID\n molecules[molID] = [ useAtom ]\n remainingAtoms.remove(useAtom)\n # Now using this new molecule with its only atom as a seed find any atoms connected to it\n # We need to continue searching for bonded atoms until there are no more to be found\n moreAtomsToBeFound = True\n while moreAtomsToBeFound:\n moreAtomsToBeFound = False\n for i in range(len(xyz_supercell)):\n # has this atom been assigned a molecule yet?\n if i in belongsToMolecule:\n # atom i is already assigned to a molecule\n useThisMolecule = belongsToMolecule[i]\n #jk print(\"Using this molecule\", useThisMolecule)\n # Go through all the atoms bonded to i and add to the current molecule\n for j in bondedToAtom[i]:\n jx = index_supercell[j]\n #jk print(\"atom j / jx is bonded to atom i\",j,jx,i)\n # The image of j in the original cell might not be available, and j might be bonded\n if jx in remainingAtoms and not j in belongsToMolecule:\n # if j was not already in a molecule then we have new information\n moreAtomsToBeFound = True\n molecules[useThisMolecule].append(j)\n belongsToMolecule[j] = useThisMolecule\n #jk print(\"Removing atom index(j) from remaining atoms\",index_supercell[j])\n remainingAtoms.remove(jx)\n # The j'th atom could be already specified and we have a ring....\n # We also need to make sure that we have unique bonds\n if j in belongsToMolecule:\n if i > j and (i,j) not in bonds:\n bonds.append( (i,j) )\n elif i < j and (j,i) not in bonds:\n bonds.append( (j,i) )\n # end for j\n # end if i in\n # end for i\n # while moreAtomsToBeFound\n # until all the atoms belong to a molecule\n #jk print('Number of molecules', molID+1)\n self.centres_of_mass = []\n self.total_mass = 0.0\n for mol_index in molecules:\n #jk print('Molecule ',mol_index)\n #jk print('Atoms ',molecules[mol_index])\n for atom_index in molecules[mol_index]:\n index = index_supercell[atom_index]\n #jk print('New atom index, old index', atom_index, index, self.element_names[index])\n # Calculate centre of mass\n mass = 0.0\n cm = np.zeros(3)\n for atom_index in molecules[mol_index]:\n index = index_supercell[atom_index]\n mass += self.atomic_masses[index]\n cm = cm + self.atomic_masses[index] * xyz_supercell[atom_index]\n cm_xyz = cm / mass\n cm_fractional = self.convert_xyz_to_abc(cm_xyz)\n self.centres_of_mass.append( cm_fractional )\n #jk print('Mass', mass)\n #jk print('Centre of mass', cm_fractional)\n # Create a new unit cell with the atoms shifted so that whole molecules are ordered and within the cell\n new_molecules = []\n new_fractional = np.empty_like(self.fractional_coordinates)\n new_element_names = []\n new_index = 0\n new_masses = []\n old_order = []\n for mol_index in 
molecules:\n new_atom_index = []\n cm = self.centres_of_mass[mol_index]\n shift = np.array( [ 0.0, 0.0, 0.0] )\n if cm[0] < 0.0:\n shift += [ 1.0, 0.0, 0.0 ]\n elif cm[0] > 1.0:\n shift += [-1.0, 0.0, 0.0 ]\n if cm[1] < 0.0:\n shift += [ 0.0, 1.0, 0.0 ]\n elif cm[1] > 1.0:\n shift += [ 0.0,-1.0, 0.0 ]\n if cm[2] < 0.0:\n shift += [ 0.0, 0.0, 1.0 ]\n elif cm[2] > 1.0:\n shift += [ 0.0, 0.0,-1.0 ]\n for atom_index in molecules[mol_index]:\n old_index = index_supercell[atom_index]\n old_order.append(old_index)\n new_fractional[new_index] = shift + np.array(fractional_supercell[atom_index])\n new_element_names.append(self.element_names[old_index])\n new_masses.append(self.atomic_masses[old_index])\n new_atom_index.append(new_index)\n new_index += 1\n new_molecules.append(new_atom_index)\n # as well as being able to go from the new order and look up the old order\n # we need to be able to take the old order and look up what the new order is\n invert_old_order = np.zeros_like(old_order)\n for i,j in enumerate(old_order):\n invert_old_order[j] = i\n new_bonds = []\n for bond in bonds:\n i,j = bond\n ix = invert_old_order[index_supercell[i]]\n jx = invert_old_order[index_supercell[j]]\n new_bonds.append( (ix,jx) )\n new_unit_cell = UnitCell( self.a, self.b, self.c, self.alpha, self.beta, self.gamma )\n new_unit_cell.set_fractional_coordinates(new_fractional.tolist())\n new_unit_cell.set_element_names(new_element_names)\n new_unit_cell.set_atomic_masses(new_masses)\n new_unit_cell.set_molecules(new_molecules)\n new_unit_cell.set_bonds(new_bonds)\n return new_unit_cell, len(new_molecules), old_order", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def initialize_system(self):\n self.mfd.set_mesh(self.mesh)\n [[div_data, div_row, div_col], \n [div_t_data, div_t_row, div_t_col]] = self.mfd.build_div()\n [self.m_x_coo_data, \n m_x_coo_row, \n m_x_coo_col] = self.mfd.build_m(save_update_info=True)\n\n self.m_x_coo_length = len(self.m_x_coo_data)\n \n # The data for the bottom right should be zeros. \n [c_data, c_row, c_col] = self.mfd.build_bottom_right()\n \n [coupling_data, coupling_row, coupling_col] = self.mfd.build_coupling_terms()\n\n self.div = sparse.coo_matrix((np.array(div_data), \n (np.add(np.array(div_row), \n -self.mesh.get_number_of_faces()), \n np.array(div_col))))\n self.div = self.div.tocsr()\n\n lhs_data = self.m_x_coo_data\n lhs_row = m_x_coo_row\n lhs_col = m_x_coo_col\n \n lhs_data += div_data\n lhs_row += div_row\n lhs_col += div_col\n\n lhs_data += div_t_data\n lhs_row += div_t_row\n lhs_col += div_t_col \n \n self.c_start = len(lhs_data)\n \n lhs_data += c_data\n lhs_row += c_row\n lhs_col += c_col \n\n self.c_end = len(c_data)\n\n lhs_data += coupling_data\n lhs_row += coupling_row\n lhs_col += coupling_col\n\n # Convert m_x_coo_data to numpy array. \n self.m_x_coo_data = np.array(self.m_x_coo_data)\n\n self.lhs_coo = sparse.coo_matrix((np.array(lhs_data), \n (np.array(lhs_row), \n np.array(lhs_col))))\n\n # RHS construction is for Neumann and Dirichlet \n # boundaries specified by the mesh. 
\n self.rhs_mfd = self.mfd.build_rhs()", "def test_2_1_3D_rec_init(self):\n check = [(0.0, -20.0, 0.0), (4.0, -10.0, 1.0), (4.0, -20.0, 0.0),\n (4.0, -10.0, 0.0), (4.0, -20.0, 1.0), (0.0, -10.0, 0.0),\n (0.0, -10.0, 1.0), (0.0, -20.0, 1.0), (2.0, -15.0, 0.5)]\n nn_checks = {\n (4.0, -20.0, 0.0): [(4, -10, 1), (4, -10, 0), (2.0, -15.0, 0.5),\n (0, -20, 0), (4, -20, 1)],\n (4.0, -10.0, 0.0): [(4, -10, 1), (4, -20, 0), (0, -10, 0),\n (2.0, -15.0, 0.5), (0, -20, 0)],\n (4.0, -20.0, 1.0): [(4, -10, 1), (4, -20, 0), (0, -20, 1),\n (2.0, -15.0, 0.5), (0, -20, 0)]}\n\n init_triangulation(3, 0, check, nn_checks, bounds=[(0, 4), (-20, -10), (0, 1)])", "def genLattice(structure,in_network,dim,supercell,prec=1E-4,\n seed_index=0,c_mag=60,y_dist=-1):\n\n # Generate vectors in plane/line, relative to\n # the first atom in the network of atoms\n \n if y_dist==-1:\n y_dist=c_mag/3\n \n new = [x for x in in_network if abs(x[2])<np.pi/2]\n return_structure=False\n mat = np.array(structure.lattice.as_dict()['matrix'])\n coords = np.array([np.dot(mat.T,x.frac_coords%1) for x in structure.sites])\n specs = structure.species\n ref_ele_d = getUniqueCount(specs)\n for i in ref_ele_d:\n ref_ele_d[i]/=(supercell**dim)\n coords = coords-coords[seed_index]\n \n\n\n\n\n for lat_vectors in sorted(new,key=itemgetter(3)):\n\n # Create lattice matrix to fit atomic coordinates against\n # In 2D\n if dim==2:\n new_c = np.cross(lat_vectors[0],lat_vectors[1])\n scale_c = c_mag/magni(new_c)\n\n latt_attempt = np.array([lat_vectors[0],lat_vectors[1],\\\n new_c*scale_c])\n \n # In 1D\n elif dim==1:\n unitV = lat_vectors[0]/magni(lat_vectors[0])\n if unitV[0]==0:\n perp1 = [1,0,0]\n elif unitV[1]==0:\n perp1 = [0,1,0]\n elif unitV[2]==0:\n perp1 = [0,0,1]\n else:\n perp1 = [1,1,-1*(unitV[0]+unitV[1])/unitV[2]]\n perp1 = perp1/np.linalg.norm(perp1)*c_mag\n perp2 = np.cross(unitV,perp1)\n perp2 = perp2/np.linalg.norm(perp2)*c_mag\n latt_attempt = np.array([lat_vectors[0],perp1,perp2])\n \n # Fit atomic sites to new lattice\n temp_fracs = np.linalg.solve(latt_attempt.T,np.array(coords).T)\n \n \n\n # Make list of all fractional positions, ignoring\n # which axis\n new_fracs = list([list(x) for x in temp_fracs.T])\n\n if len([x for x in np.array(new_fracs).T if \n np.all([(y>=0 and y<1) for y in np.around(x[:dim],3)]) and\n np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) for \n y in np.around(x[dim:],3)])])==len(new_fracs[0])/supercell**dim:\n \n fit_fracs=[]\n new_fracs_t = np.around(new_fracs.T,6)\n for i in range(len(new_fracs[0])):\n if np.all([(y>=0 and y<1) for y in np.around(new_fracs_t[i][:dim],3)]) \\\n and np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) \n for y in np.around(new_fracs_t[i][dim:],3)]):\n fit_fracs.append([new_fracs_t[i],specs[i]])\n fit_fracs = np.array(fit_fracs).T\n new_ele_d = getUniqueCount(fit_fracs[1])\n unequal=False\n for k in new_ele_d:\n if new_ele_d[k]!=ref_ele_d[k]:\n unequal=True\n\n break\n if not unequal:\n\n return_structure=True\n break\n\n\n\n # If match found\n if return_structure:\n return(np.array(latt_attempt),fit_fracs)\n # If no match found\n else:\n return([],[])" ]
[ "0.7054485", "0.6528232", "0.64348644", "0.6230865", "0.61347985", "0.61224085", "0.60659236", "0.6059065", "0.59654796", "0.5924792", "0.5886461", "0.587063", "0.58324575", "0.5809455", "0.5807334", "0.5799182", "0.57891464", "0.5780056", "0.5752148", "0.5748409", "0.57453036", "0.5741508", "0.57368034", "0.57295537", "0.5675748", "0.5647771", "0.5640436", "0.563948", "0.55800164", "0.55706096" ]
0.68255717
1
generates a 3D box mesh with tetrahedral elements with a cylindrical hole in it input
def box_mesh_with_hole(point1=Point(0,0,0), point2=Point(2,1,1), cyl_cent1 = Point(1, -10, 0.5), cyl_cent2= Point(1, 10, 0.5), cyl_rad=0.25, numpts=15): Router = mshr.Box(point1, point2) Rinner = mshr.Cylinder(cyl_cent1, cyl_cent2, cyl_rad, cyl_rad) domain = Router - Rinner mesh = mshr.generate_mesh(domain, numpts) print_mesh_stats(mesh) return mesh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_lattice(box):\n from quippy.atoms import make_lattice\n if box.shape == (3, 3):\n\t# http://lammps.sandia.gov/doc/Section_howto.html#howto-12 Describes the\n\t# methodology (look for the section entitled \"6.12. Triclinic\n\t# (non-orthogonal) simulation boxes\") The [a, b, c, alpha, beta, gamma]\n\t# vector can be passed to the ase.Atoms object as a definition for the\n\t# triclinic box (note that the quippy.Atoms class inherits from\n\t# ase.Atoms) Make sure that you note that the data is provided:\n\t# \n\t# ITEM: BOX BOUNDS xy xz yz ....\n\t# xlo_bound xhi_bound xy\n\t# ylo_bound yhi_bound xz\n\t# zlo_bound zhi_bound yz\n\t# \n\t# whereas we need xlo, xhi, etc. not xlo_bound, xhi_bound, etc.\n\txlo = box[0][0] - min(0.0, box[0][2], box[1][2], box[0][2] + box[1][2])\n\txhi = box[0][1] - max(0.0, box[0][2], box[1][2], box[0][2] + box[1][2])\n\tylo = box[1][0] - min(0.0, box[2][2])\n\tyhi = box[1][1] - max(0.0, box[2][2])\n\tzlo = box[2][0]\n\tzhi = box[2][1]\n\n\ta = (xhi - xlo)\n\tb = np.sqrt((yhi - ylo)**2 + (box[0][2])**2)\n\tc = np.sqrt((zhi - zlo)**2 + (box[1][2])**2 + (box[2][2])**2)\n\talpha = np.arccos((box[0][2] * box[1][2] + (yhi - ylo) * box[2][2]) / (b * c))\n\tbeta = np.arccos(box[1][2] / c)\n\tgamma = np.arccos(box[0][2] / b)\n\treturn make_lattice(a, b, c, alpha, beta, gamma)\n \n elif box.shape == (3, 2):\n\treturn make_lattice(box[0][1] - box[0][0],\n box[1][1] - box[1][0],\n box[2][1] - box[2][0])\n else:\n raise ValueError(\"Unexpected box size/parameters: {}\".format(box))", "def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n 
else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, B4, B5, B6, B7, B8]", "def subdivision(mesh):\n\t\n\t\n\t# 1. 
generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = 
mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. 
update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. 
- beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh", "def volume_tetrahedron(xyz, A, B, C, D):\n\n AD = xyz[A, :] - xyz[D, :]\n BD = xyz[B, :] - xyz[D, :]\n CD = xyz[C, :] - xyz[D, :]\n\n V = (\n (BD[:, 0] * CD[:, 1] - BD[:, 1] * CD[:, 0]) * AD[:, 2]\n - (BD[:, 0] * CD[:, 2] - BD[:, 2] * CD[:, 0]) * AD[:, 1]\n + (BD[:, 1] * CD[:, 2] - BD[:, 2] * CD[:, 1]) * AD[:, 0]\n )\n return V / 6", "def Sphere_Bezier(refinements=0):\n \n S = np.array([0,0,0])\n T = np.array([0,0,1])\n V = np.array([0,0,0,0.5,0.5,1,1,1])\n q = 2\n m = 4\n Pj = np.array([[0,0,1],[1,0,1],[1,0,0],[1,0,-1],[0,0,-1]])\n wj = np.array([1,1/np.sqrt(2),1,1/np.sqrt(2),1])\n\n sphere = Bezier.MakeRevolvedSurface(S,T,2*np.pi,q,V,m,Pj,wj)\n \n # Make knot refinements before decomposing\n if refinements:\n uniqueU = np.unique(sphere.U)\n for i in xrange(uniqueU.size-1):\n XI = np.linspace(uniqueU[i],uniqueU[i+1],refinements+2)[1:-1]\n sphere.refineknotvector(XI,'U')\n uniqueV = np.unique(sphere.V)\n for i in xrange(uniqueV.size-1):\n XI = np.linspace(uniqueV[i],uniqueV[i+1],refinements+2)[1:-1]\n sphere.refineknotvector(XI,'V')\n \n sphere=sphere.decompose()\n\n element_list = [Bezier.BezierElement(sphere[i,:,:,:]) for i in xrange(sphere.shape[0])]\n \n \n sphere_domain = Bezier.Domain(element_list)\n mesh = Bezier.Mesh([sphere_domain])\n \n mesh.dList[0].edges = mesh.numElements * 2\n mesh.dList[0].corners = 8\n mesh.dList[0].extraordinary_points = 8\n\n \n return mesh", "def setBorder3D():\n dislin.box3d()", "def create_3d_patch(model,orgin,size,N,setname='default'):\n \n N = map(int,N)\n # create the coordinates and connectivity based on one\n bxyz,bcube = block3d(orgin,size,N)\n \n # add node to model starting with current highest node seq\n nn = int(model.node(bxyz))\n update_bcube = bcube + int(nn)\n \n pelemset = model.element(update_bcube,setname)\n \n \n nx = N[0] + 1\n ny = N[1] + 1\n nz = N[2] + 1\n \n \n nodeline = {}\n\n nodeline['1'] = [nn + 1]\n nodeline['2'] = [nn + 1 + ny*nz*(nx-1)]\n nodeline['3'] = [nn + 1 + ny*nz*(nx-1) + (ny-1)*nz]\n nodeline['4'] = [nn + 1 + nz*(ny-1)]\n nodeline['5'] = [nodeline['1'][0] + (nz-1)]\n nodeline['6'] = [nodeline['2'][0] + (nz-1)]\n nodeline['7'] = [nodeline['3'][0] + (nz-1)]\n nodeline['8'] = [nodeline['4'][0] + (nz-1)]\n \n nodeline['1-2'] = range(nodeline['1'][0],nodeline['2'][0]+1,ny*nz)\n nodeline['2-3'] = range(nodeline['2'][0],nodeline['3'][0]+1,nz)\n nodeline['3-4'] = range(nodeline['4'][0],nodeline['3'][0]+1,ny*nz)\n nodeline['1-4'] = range(nodeline['1'][0],nodeline['4'][0]+1,nz)\n\n nodeline['5-6'] = range(nodeline['5'][0],nodeline['6'][0]+1,ny*nz)\n nodeline['6-7'] = range(nodeline['6'][0],nodeline['7'][0]+1,nz)\n nodeline['7-8'] = range(nodeline['8'][0],nodeline['7'][0]+1,ny*nz)\n nodeline['5-8'] = range(nodeline['5'][0],nodeline['8'][0]+1,nz)\n \n nodeline['1-5'] = range(nodeline['1'][0],nodeline['5'][0]+1,1)\n nodeline['4-8'] = range(nodeline['4'][0],nodeline['8'][0]+1,1)\n nodeline['2-6'] = range(nodeline['2'][0],nodeline['6'][0]+1,1)\n nodeline['3-7'] = range(nodeline['3'][0],nodeline['7'][0]+1,1)\n \n nodeline['1-2-3-4'] = []\n nodeline['5-6-7-8'] = []\n nodeline['1-2-6-5'] = []\n nodeline['4-3-7-8'] = []\n nodeline['1-4-8-5'] = []\n nodeline['2-3-7-6'] = []\n \n for i in range(0,nx):\n for j in nodeline['1-4']:\n nodeline['1-2-3-4'].append(j + i*ny*nz)\n for j in 
nodeline['5-8']:\n nodeline['5-6-7-8'].append(j + i*ny*nz) \n \n for j in nodeline['1-5']:\n nodeline['1-2-6-5'].append(j + i*ny*nz) \n for j in nodeline['4-8']:\n nodeline['4-3-7-8'].append(j + i*ny*nz)\n \n for i in range(0,ny):\n for j in nodeline['1-5']:\n nodeline['1-4-8-5'].append(j + i*nz)\n for j in nodeline['2-6']:\n nodeline['2-3-7-6'].append(j + i*nz)\n #\n for key in nodeline:\n nodesetname = '-'.join([setname , key])\n model.nodeset(nodesetname,{'nodelist':nodeline[key]})\n\n return model", "def box_mesh(point1=Point(0,0,0), point2=Point(2,1,1),\n numptsX=8, numptsY=4, numptsZ=4):\n mesh = BoxMesh(point1, point2, numptsX, numptsY, numptsZ)\n print_mesh_stats(mesh)\n\n return mesh", "def createCylinder( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' )\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n \r\n faces = [ range(0,samples), range(samples,2*samples) ]\r\n for i in range(0,2*samples,2) :\r\n base = 2*samples\r\n size = 2*samples\r\n faces.append( (base+i, base+i+1, base+(i+3)%size, base+(i+2)%size ) )\r\n \r\n return create( vertices, faces, colour )", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n 
data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def CreateSurface2DMeshfrom3DMesh(self):\n\n self.__do_memebers_exist__()\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n if self.element_type == \"hex\":\n mm.element_type = \"quad\"\n elif self.element_type == \"tet\":\n mm.element_type = \"tri\"\n else:\n raise ValueError(\"Cannot make a 2D mesh from the 3D mesh of type {}\".format(self.element_type))\n\n unique_faces, inv_faces = np.unique(self.faces,return_inverse=True)\n mm.points = self.points[unique_faces,:]\n mm.nnode = mm.points.shape[0]\n aranger = np.arange(mm.nnode)\n mm.elements = aranger[inv_faces].reshape(self.faces.shape)\n mm.nelem = mm.elements.shape[0]\n mm.GetBoundaryEdges()\n\n return mm", "def createBox( size=(1,1,1), position=(0,0,0), colour=(0.6,0.6,0.6) ):\r\n \r\n size = PyUtils.toVector3d(size)\r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n delta = MathLib.Vector3d()\r\n for repeat in range(3):\r\n for x in (-0.5,0.5) :\r\n delta.x = size.x * x\r\n for y in (-0.5,0.5) :\r\n delta.y = size.y * y\r\n for z in (-0.5,0.5) :\r\n delta.z = size.z * z\r\n vertices.append( position + delta )\r\n \r\n faces = [(0,1,3,2),(5,4,6,7), # YZ Faces\r\n (9,13,15,11),(12,8,10,14), # XY Faces\r\n (18,19,23,22),(17,16,20,21)] # XZ Faces\r\n \r\n return create( vertices, faces, colour )", "def CreateDummyUpperDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tri\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"tet\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"quad\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"hex\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"line\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n sys.stdout = sys.__stdout__\n\n return mesh", "def render_wireframe_3d(self, **kwds):\n wireframe = [];\n for l in self.lines:\n l_coords = self.coordinates_of(l)\n wireframe.append( line3d(l_coords, **kwds))\n for a in self.arrows:\n a_coords = self.coordinates_of(a)\n wireframe.append(arrow3d(a_coords[0], a_coords[1], **kwds))\n return sum(wireframe)", "def test_elliptic_special_triangles(self):\n import itertools\n\n s = space(curvature=1)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t3_ref = t1_ref / 3\n t4_ref = t1_ref / 4\n t5_ref = t1_ref / 5\n t6_ref = t1_ref / 6\n # random number\n magic = 7.77733337337373737373\n tm_ref = t1_ref / magic\n nagic = magic - 4 # strangely named other magic constant\n tn_ref = t1_ref / nagic\n # tetrahedron edge central angle\n p4_ref = 1.91063323624901855632771420503144 # = acos(-1/3)\n # icosahedron edge central angle\n p20_ref = 1.10714871779409050301706546017856 # = atan(2)\n # area constant\n sm = space(0).sphere_s2(1)\n\n # test with each known triangle\n for a, C, b, A, c, B, m in (\n (t3_ref, t2_ref, t3_ref, t2_ref, t3_ref, t2_ref, sm / 2), # literally a hemisphere, which takes up the entire space\n (t2_ref, t4_ref, t4_ref, t2_ref, t4_ref, t4_ref, sm / 4), # diangle which is 1/4 of the sphere\n (t2_ref, tm_ref, t3_ref, t2_ref, t6_ref, tm_ref, 
sm / magic), # a different diangle\n (t2_ref, tn_ref, t3_ref, t2_ref, t6_ref, tn_ref, sm / nagic), # a different diangle, obtuse angle this time\n (t4_ref, t4_ref, t4_ref, t4_ref, t4_ref, t4_ref, sm / 8), # triangle with 3 right angles\n (t4_ref, tm_ref, t4_ref, t4_ref, tm_ref, t4_ref, sm / magic / 2), # different slice of the previous one, has 2 right angles\n (t4_ref, tn_ref, t4_ref, t4_ref, tn_ref, t4_ref, sm / nagic / 2), # another one but with an obtuse angle\n (p4_ref, t3_ref) * 3 + (sm / 4,), # regular tetrahedron face, projected onto the sphere\n (p20_ref, t5_ref) * 3 + (sm / 20,) # regular icosahedron face, projected onto the sphere\n ):\n # go through all vertex permutations\n for (a, A), (b, B), (c, C) in itertools.permutations([(a, A), (b, B), (c, C)], 3):\n self.assertTrue(isclose(\n s.cosine_law_side(a, b, C),\n c\n ))\n self.assertTrue(t2_ref in (A, B) or isclose(\n s.cosine_law_angle(a, b, c),\n C,\n rel_tol = 1e-5\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_angle(A, B, c),\n C,\n rel_tol = 1e-5\n ))\n self.assertTrue(t2_ref in (A, B) or isclose(\n s.dual_cosine_law_side(A, B, C),\n c\n ))\n self.assertTrue(A == t2_ref or isclose(\n s.sine_law_side(a, A, B),\n b,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ) or isclose(\n s.sine_law_side(a, A, B),\n t2_ref - b,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ))\n self.assertTrue(A == t2_ref or isclose(\n s.sine_law_angle(a, A, b),\n B,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ) or B > t4_ref and isclose( # SSA triangle solving strangeness\n s.sine_law_angle(a, A, b),\n t2_ref - B,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ))\n self.assertTrue((A, B, C).count(t2_ref) == 1 or isclose(\n s.triangle_area_from_sides(a, b, c),\n m,\n rel_tol = 1e-5\n ))\n self.assertTrue(isclose(\n s.triangle_area_from_angles(A, B, C),\n m\n ))", "def create_tetrahedron():\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n try:\n glutSolidTetrahedron()\n except:\n if not _ERRS[5]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidTetrahedron\")\n _ERRS[5] = True\n glPopMatrix()\n glEndList()\n return obj", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. 
Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def get_quad_mesh(q, dx):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n\n # Get nx based on length of top edge, minimum allowed is 2\n toplen_km = get_quad_length(q)\n nx = int(np.max([round(toplen_km / dx, 0) + 1, 2]))\n\n # Get array of points along top and bottom edges\n xfac = np.linspace(0, 1, nx)\n topp = [p0 + (p1 - p0) * a for a in xfac]\n botp = [p3 + (p2 - p3) * a for a in xfac]\n\n # Get ny based on mean length of vectors connecting top and bottom points\n ylen_km = np.ones(nx)\n for i in range(nx):\n ylen_km[i] = (topp[i] - botp[i]).mag() / 1000\n ny = int(np.max([round(np.mean(ylen_km) / dx, 0) + 1, 2]))\n yfac = np.linspace(0, 1, ny)\n\n # Build mesh: dict of ny by nx arrays (x, y, z):\n mesh = {'x': np.zeros([ny, nx]), 'y': np.zeros(\n [ny, nx]), 'z': np.zeros([ny, nx])}\n for i in range(nx):\n mpts = [topp[i] + (botp[i] - topp[i]) * a for a in yfac]\n mesh['x'][:, i] = [a.x for a in mpts]\n mesh['y'][:, i] = [a.y for a in mpts]\n mesh['z'][:, i] = [a.z for a in mpts]\n\n # Make arrays of pixel corners\n mesh['llx'] = mesh['x'][1:, 
0:-1]\n mesh['lrx'] = mesh['x'][1:, 1:]\n mesh['ulx'] = mesh['x'][0:-1, 0:-1]\n mesh['urx'] = mesh['x'][0:-1, 1:]\n mesh['lly'] = mesh['y'][1:, 0:-1]\n mesh['lry'] = mesh['y'][1:, 1:]\n mesh['uly'] = mesh['y'][0:-1, 0:-1]\n mesh['ury'] = mesh['y'][0:-1, 1:]\n mesh['llz'] = mesh['z'][1:, 0:-1]\n mesh['lrz'] = mesh['z'][1:, 1:]\n mesh['ulz'] = mesh['z'][0:-1, 0:-1]\n mesh['urz'] = mesh['z'][0:-1, 1:]\n mesh['cpx'] = np.zeros_like(mesh['llx'])\n mesh['cpy'] = np.zeros_like(mesh['llx'])\n mesh['cpz'] = np.zeros_like(mesh['llx'])\n\n # i and j are indices over subruptures\n ni, nj = mesh['llx'].shape\n for i in range(0, ni):\n for j in range(0, nj):\n # Rupture corner points\n pp0 = Vector(\n mesh['ulx'][i, j], mesh['uly'][i, j], mesh['ulz'][i, j])\n pp1 = Vector(\n mesh['urx'][i, j], mesh['ury'][i, j], mesh['urz'][i, j])\n pp2 = Vector(\n mesh['lrx'][i, j], mesh['lry'][i, j], mesh['lrz'][i, j])\n pp3 = Vector(\n mesh['llx'][i, j], mesh['lly'][i, j], mesh['llz'][i, j])\n # Find center of quad\n mp0 = pp0 + (pp1 - pp0) * 0.5\n mp1 = pp3 + (pp2 - pp3) * 0.5\n cp = mp0 + (mp1 - mp0) * 0.5\n mesh['cpx'][i, j] = cp.x\n mesh['cpy'][i, j] = cp.y\n mesh['cpz'][i, j] = cp.z\n return mesh", "def _create_tecplot_solids(grid, model, nsolids, ntets, tets, nhexas, hexas, is_surface=True):\n if is_surface:\n if nhexas:\n free_faces = np.array(model.get_free_faces(), dtype='int32')# + 1\n nfaces = len(free_faces)\n nelements = nfaces\n unused_elements = free_faces\n grid.Allocate(nfaces, 1000)\n\n #elem.GetCellType() = 9 # vtkQuad\n for face in free_faces:\n elem = vtkQuad()\n epoints = elem.GetPointIds()\n epoints.SetId(0, face[0])\n epoints.SetId(1, face[1])\n epoints.SetId(2, face[2])\n epoints.SetId(3, face[3])\n grid.InsertNextCell(9, epoints)\n else:\n # is_volume\n grid.Allocate(nsolids, 1000)\n if ntets:\n for node_ids in tets:\n elem = vtkTetra()\n epoints = elem.GetPointIds()\n epoints.SetId(0, node_ids[0])\n epoints.SetId(1, node_ids[1])\n epoints.SetId(2, node_ids[2])\n epoints.SetId(3, node_ids[3])\n #elem.GetCellType() = 5 # vtkTriangle\n grid.InsertNextCell(elem.GetCellType(), epoints)\n\n\n if nhexas:\n for node_ids in hexas:\n elem = vtkHexahedron()\n epoints = elem.GetPointIds()\n epoints.SetId(0, node_ids[0])\n epoints.SetId(1, node_ids[1])\n epoints.SetId(2, node_ids[2])\n epoints.SetId(3, node_ids[3])\n epoints.SetId(4, node_ids[4])\n epoints.SetId(5, node_ids[5])\n epoints.SetId(6, node_ids[6])\n epoints.SetId(7, node_ids[7])\n #elem.GetCellType() = 5 # vtkTriangle\n grid.InsertNextCell(elem.GetCellType(), epoints)\n return nelements", "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def create_mesh(self):\n print(\"create_mesh\")\n faces = self.get_faces()\n print(\"num faces: {}\".format(len(faces)))\n\n # TODO: perform face filtering to remove long edges in Z direction\n # filtered_faces = self.get_filtered_faces(faces)\n # print(\"num filtered faces: {}\".format(len(filtered_faces)))\n\n vertices = self.xyz_points.T\n\n # handle texture mappings\n vertex_index_to_texture = []\n for j in range(0, 
self.height):\n for i in range(0, self.width):\n # vertex_index = (j * self.width) + ij\n w = i / self.width\n h = (self.height - j - 1) / self.height\n vertex_index_to_texture.append(\n (w, h)\n )\n\n # Create material.\n # TODO: make the string/filename randomly generated and unique\n file0 = open(os.path.join(self.args.path, \"triangle_mesh.obj.mtl\"), \"w\") # write mode\n file0.write(\"newmtl material_0\\n\")\n # Save image here.\n cv2.imwrite(os.path.join(self.args.path, \"triangle_mesh.png\"), self.bgr)\n file0.write(\"map_Kd triangle_mesh.png\\n\")\n file0.close()\n\n # https://en.wikipedia.org/wiki/Wavefront_.obj_file\n # https://github.com/mmatl/pyrender/blob/master/examples/models/fuze.obj\n obj_path = os.path.join(self.args.path, \"triangle_mesh.obj\")\n file1 = open(obj_path, \"w\") # write mode\n file1.write(\"mtllib ./triangle_mesh.obj.mtl\\n\")\n for vertex in vertices:\n x, y, z = vertex\n file1.write(\"v {} {} {}\\n\".format(x, y, z))\n file1.write(\"usemtl material_0\\n\")\n for w, h in vertex_index_to_texture:\n file1.write(\"vt {} {}\\n\".format(w, h))\n for face in faces:\n a, b, c = face\n a += 1\n b += 1\n c += 1\n file1.write(\"f {}/{} {}/{} {}/{}\\n\".format(\n a, a, b, b, c, c\n )\n )\n file1.close()\n\n # Load the trimesh from OBJ file.\n trimesh_mesh = trimesh.load(obj_path)\n # trimesh_mesh.show()\n\n mesh = pyrender.Mesh.from_trimesh(trimesh_mesh, smooth=False)\n self.scene = pyrender.Scene(ambient_light=[3.0, 3.0, 3.0])\n\n camera = pyrender.IntrinsicsCamera(\n self.focal_length, self.focal_length, self.width / 2, self.height / 2\n )\n self.camera_pose = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # https://pyrender.readthedocs.io/en/latest/examples/cameras.html#creating-cameras\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html\n r = R.from_rotvec(np.array([0, np.pi, 0]))\n r = R.from_rotvec(np.array([0.0, 0, np.pi])) * r\n matrix = r.as_matrix()\n self.camera_pose[:3, :3] = matrix\n\n light = pyrender.PointLight(\n color=[1.0, 1.0, 1.0],\n intensity=0.0\n )\n\n self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))\n self.nl = pyrender.Node(light=light, matrix=np.eye(4))\n self.nc = pyrender.Node(camera=camera, matrix=np.eye(4))\n self.scene.add_node(self.nm)\n self.scene.add_node(self.nl)\n self.scene.add_node(self.nc)\n\n # Set the pose and show the image.\n temppose = self.extrinsics @ self.camera_pose\n self.scene.set_pose(self.nl, pose=temppose)\n self.scene.set_pose(self.nc, pose=temppose)\n pyrender.Viewer(self.scene, use_raymond_lighting=True,\n viewport_size=(self.width, self.height))", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 
0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 
1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def unoriented_cube():\n faces = get_oriented_cube_faces()\n for face in faces:\n np.random.shuffle(face)\n poly = Polyhedron(get_cube_points(), faces, faces_are_convex=True)\n poly.sort_faces()\n return poly", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def CreateDummy3DMeshfrom2DMesh(self):\n\n self.__do_memebers_exist__()\n\n sys.stdout = open(os.devnull, \"w\")\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n if self.element_type == \"quad\":\n mm.element_type = \"hex\"\n mm.elements = np.zeros((1,int((p+1)**3))).astype(np.uint64)\n elif self.element_type == \"tri\":\n mm.element_type = \"tet\"\n mm.elements = np.zeros((1,int((p+1)*(p+2)*(p+3)/6))).astype(np.uint64)\n else:\n raise ValueError(\"Cannot make a 3D mesh from the 2D mesh of type {}\".format(self.element_type))\n\n mm.edges = np.zeros((1,p+1)).astype(np.uint64)\n mm.points = np.copy(self.points)\n mm.nelem = 1\n mm.nnode = mm.points.shape[0]\n mm.faces = np.copy(self.elements)\n mm.boundary_face_to_element = np.zeros((mm.faces.shape[0],2)).astype(np.int64)\n mm.boundary_face_to_element[:,0] = 1\n\n sys.stdout = sys.__stdout__\n\n return mm", "def tessellation(mesh):\n # create random barycentric coordinates for each face\n # pad all coordinates by a small amount to bias new vertex towards center\n barycentric = np.random.random(mesh.faces.shape) + .05\n barycentric /= barycentric.sum(axis=1).reshape((-1, 1))\n\n # create one new vertex somewhere in a face\n vertex_face = (barycentric.reshape((-1, 3, 1))\n * mesh.triangles).sum(axis=1)\n vertex_face_id = np.arange(len(vertex_face)) + len(mesh.vertices)\n\n # new vertices are the old vertices stacked on the vertices in the faces\n vertices = np.vstack((mesh.vertices, vertex_face))\n # there are three new faces per old face, and we maintain correct winding\n faces = np.vstack((np.column_stack((mesh.faces[:, [0, 1]], vertex_face_id)),\n np.column_stack(\n (mesh.faces[:, [1, 2]], vertex_face_id)),\n np.column_stack((mesh.faces[:, [2, 0]], vertex_face_id))))\n # make sure the order of the faces is permutated\n faces = np.random.permutation(faces)\n\n mesh_type = util.type_named(mesh, 'Trimesh')\n permutated = mesh_type(vertices=vertices,\n faces=faces)\n return permutated", "def mesher(cse):\n # get dimensionality.\n ndim = 2\n # determine meshing template file name.\n tmplfn = '%s.gmsh.tmpl' % ('cube' if 3 == ndim else 'square')\n # determine characteristic length of mesh.\n try:\n itv = float(cse.io.basefn.split('_')[-1])/1000\n except ValueError:\n itv = 0.2\n # load the meshing commands.\n cmds = open(tmplfn).read() % itv\n cmds = [cmd.strip() for cmd in cmds.strip().split('\\n')]\n # make the original mesh object.\n mobj = sc.helper.Gmsh(cmds)()\n # convert the mesh to block.\n blk = 
mobj.toblock(bcname_mapper=cse.condition.bcmap,\n use_incenter=cse.solver.use_incenter)\n # return the converted block.\n return blk", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()" ]
[ "0.63809776", "0.62516385", "0.6212014", "0.61972636", "0.6192103", "0.6190166", "0.6146428", "0.60585725", "0.604773", "0.6045741", "0.6044307", "0.5990217", "0.59825504", "0.59320945", "0.59280205", "0.5914633", "0.59137803", "0.5881047", "0.5878436", "0.58624035", "0.58605313", "0.58569574", "0.5853986", "0.5850429", "0.58402497", "0.5839555", "0.5830531", "0.58234525", "0.5797246", "0.5780854" ]
0.6957271
0
returns the cell centroids lying within given geometric extents
def get_cell_centroid2(cents, extents): cells_in_ee = np.empty(0,int) for i in range(len(cents)): c = cents[i] if( (c > extents[0]).all() and (c <= extents[1]).all() ): cells_in_ee = np.append(cells_in_ee, [i], axis=0) return cells_in_ee
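A minimal usage sketch for the get_cell_centroid2 function above; the centroid coordinates and extents below are made-up illustrative values, only numpy is assumed, and the extents follow the (min_corner, max_corner) stacking the function body expects.

    import numpy as np

    # hypothetical centroids of four 2D cells
    cents = np.array([[0.0, 0.5],
                      [0.4, 0.6],
                      [0.9, 0.2],
                      [1.5, 1.5]])

    # extents stacked as (min_corner, max_corner)
    extents = np.array([[0.0, 0.0],
                        [1.0, 1.0]])

    # indices of centroids with min_corner < c <= max_corner (strict lower, inclusive upper)
    idx = get_cell_centroid2(cents, extents)
    print(idx)  # -> [1 2]; [0.0, 0.5] is excluded because the lower bound is strict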
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim = len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def find_centroid_for_each(self):", "def get_centroids(self):\n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def get_centroids(self):\n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def get_centroids(self):\n if not self._learned:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def get_cell_center_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_center_coordinates)\n if self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z", "def get_element_centroids(self):\n if self.centroids is None:\n self.centroids = np.vstack((\n np.mean(self.grid['x'], axis=1),\n np.mean(self.grid['z'], axis=1)\n )).T\n\n return self.centroids", "def _get_centre(self, gdf):\n bounds = gdf[\"geometry\"].bounds\n centre_x = (bounds[\"maxx\"].max() + bounds[\"minx\"].min()) / 2\n centre_y = (bounds[\"maxy\"].max() + bounds[\"miny\"].min()) / 2\n return centre_x, centre_y", "def get_object_centers(data, north_offset, east_offset, drone_altitude, safety_distance):\n points = []\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n points.append([north - north_offset, east - east_offset])\n return points;", "def center(self):\n cyl = (len(self.cells) - 1) / 2 # Lower and upper bound of list slices\n cxl = (len(self.cells[0]) - 1) / 2\n cyu = len(self.cells) / 2 + 1\n cxu = len(self.cells[0]) / 2 + 1\n\n # candidates are all the cells in the middle,\n # accounting for even dimensions\n candidates = []\n\n for r in self.cells[cyl:cyu]:\n candidates += r[cxl:cxu]\n\n # center is the candidate with the most carrots\n center = max(candidates, key=lambda c: c.carrots)\n\n return center", "def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])", "def 
get_geom_center(coordlist):\n return sum(coordlist) / len(coordlist)", "def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs", "def sub_block_centroids(self):\n if self.sub_block_corners is None or self.sub_block_sizes is None:\n return None\n return self.sub_block_corners.array + self.sub_block_sizes.array / 2", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def centroids2(img_segm,liste):\n m = len(liste)\n xs = np.zeros(m)\n ys = np.zeros(m)\n j=0\n for elt in liste:\n pos_list = np.where(img_segm==elt)\n xs[j] = np.mean(pos_list[0])\n ys[j] = np.mean(pos_list[1])\n j+=1\n return xs,ys", "def centroids(img_segm):\n m = np.amax(img_segm)\n xs = np.zeros(m)\n ys = np.zeros(m)\n \n for i in range(0,m):\n pos_list = np.where(img_segm==i+1)\n xs[i] = np.mean(pos_list[0])\n ys[i] = np.mean(pos_list[1])\n return xs,ys", "def get_center_location(self):\n latitude = 0\n longitude = 0\n for centroid in self.centroids:\n latitude += centroid[0]\n longitude += centroid[1]\n return [latitude / len(self.centroids), longitude / len(self.centroids)]", "def centroid(self): # -> BaseGeometry:\n ...", "def calculate_centroids(self, data, clusters):\n centroids = []\n for i in range(self.n_clusters):\n mask = clusters == i \n centroids.append(np.mean(data[mask, :], axis = 0)) \n return centroids", "def centroids(img_segm):\n m = int(np.amax(img_segm))\n xs = np.zeros(m)\n ys = np.zeros(m)\n \n for i in range(0,m):\n pos_list = np.where(img_segm==i+1)\n xs[i] = np.mean(pos_list[0])\n ys[i] = np.mean(pos_list[1])\n return xs,ys", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def get_square_centers(self):\n x_values = np.arange(-2, 4, 2) * np.ones(self.GRID_SHAPE)\n y_values = 
np.arange(2, -4, -2).reshape((3, 1)) * np.ones(self.GRID_SHAPE)\n x_values *= self.spacing\n x_values += self.center[0] # add x-coordinate for grid center\n y_values *= self.spacing\n y_values += self.center[1] # add y-coordinate for grid center\n return np.dstack((x_values, y_values))", "def getCellCenter(self):\n x = np.zeros(self.nElements)\n for iElt in range(self.nElements):\n x[iElt] = Elements._all[iElt].center\n return x", "def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid", "def centroids(img):\n _, _, _, centr = cv2.connectedComponentsWithStats(img)\n return centr[1:]", "def c_centers(self):\n self.compute_c_centers(self)\n return self._c_centers" ]
[ "0.6934343", "0.688382", "0.672464", "0.66895455", "0.6638991", "0.65940636", "0.65940636", "0.6519114", "0.6447519", "0.63558304", "0.6343967", "0.62743145", "0.6219808", "0.6177495", "0.6171115", "0.6169734", "0.6126709", "0.6084704", "0.60784847", "0.60754013", "0.6071603", "0.60671103", "0.60461307", "0.60350555", "0.60162175", "0.60155046", "0.6012674", "0.600454", "0.599861", "0.599833" ]
0.79571277
0
given a fenics mesh/mshr.mesh as argument, returns the centroids of the cells in the input mesh
def get_cell_centroids(mesh): num_els = mesh.num_cells() coords = mesh.coordinates() cells = mesh.cells() dim = len(coords[0]) cell_cent = np.zeros((num_els, dim), dtype=float, order='c') for i in range(num_els): pts = [coords[idx] for idx in cells[i]] cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles return cell_cent
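A small, hedged example of calling get_cell_centroids above on a FEniCS mesh; it assumes a legacy DOLFIN/FEniCS installation providing UnitSquareMesh and that the function above is in scope.

    from dolfin import UnitSquareMesh  # FEniCS/DOLFIN assumed available

    # 2x2 unit-square triangulation -> 8 triangular cells
    mesh = UnitSquareMesh(2, 2)

    cell_cent = get_cell_centroids(mesh)
    print(cell_cent.shape)  # (8, 2): one 2D centroid per cell
    print(cell_cent[0])     # centroid (vertex average) of the first triangle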
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def structured_cell_centroids(mesh):\n dim = mesh.topology().dim()\n stride = fact(dim)\n cents = get_cell_centroids(mesh)\n num_cells = int(mesh.num_cells()/stride)\n cell_cents_struct = np.zeros((num_cells,dim),dtype=float)\n\n for i in range(num_cells):\n start = int(stride*i)\n end = int(stride*i)+stride\n cell_cents_struct[i] = np.average(cents[start:end],axis=0)\n\n return cell_cents_struct", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def centroid(self):\n return self.contours_to_matrix().mean(axis=0)", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def centroid(self): # -> BaseGeometry:\n ...", "def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide", "def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid", "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def get_centroids(self, dim):\n cdef np.ndarray[float64, mode='c', ndim=2] out\n\n if dim == 0:\n return self.coors\n\n else:\n out = np.empty((self.mesh.topology.num[dim], self.dim),\n dtype=np.float64)\n mesh_get_centroids(self.mesh, &out[0, 0], dim)\n\n return out", "def get_molecule_centroid(molecule_xyz):\n return np.mean(molecule_xyz, axis=0)", "def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)", "def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = 
zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid", "def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num", "def getCentroid(cluster):\n try:\n return np.mean(cluster, axis = 0)\n except:\n return None", "def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid", "def getCentroid(self):\n centroid = 0.0\n sumMagnitude = 0.0\n\n for i in range(0,self.nUniquePoints):\n freq,magnitude = self.fDomain[i]\n\n centroid += freq*magnitude\n sumMagnitude += magnitude\n \n centroid /= sumMagnitude\n return centroid", "def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid", "def get_cell_center_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_center_coordinates)\n if self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z", "def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))", "def compute_centroid(data):\n return sum(data[:]) / len(data)", "def get_region_centroid(mask, region):\n coords = np.column_stack(np.where(mask == region))\n coords = np.apply_along_axis(np.mean, 0, coords).round()\n coords = np.uint8(coords)\n return(coords)", "def ComputeCentroid(self, vtkPoints, int_tuple, p_float=..., p_float=..., p_float=...):\n ...", "def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)", "def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])", "def centroid(self) -> Point:\n points = self.normalized_array\n centroids = [np.average(points[[0, i, i + 1], :-1], axis=0) for i in range(1, points.shape[0] - 1)]\n weights = [det(self._normalized_projection()[[0, i, i + 1]]) / 2 for i in range(1, points.shape[0] - 1)]\n return Point(*np.average(centroids, weights=weights, axis=0))", "def get_centroid(M):\t\n\treturn int(M['m10']/M['m00']), int(M['m01']/M['m00'])", "def getContourCentroid(x, y, w, h):\n coordXCentroid = (x+x+w)/2\n coordYCentroid = (y+y+h)/2\n objectCentroid = (int(coordXCentroid),int(coordYCentroid))\n return objectCentroid", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def getCentroid(self):\n if len(self.points) == 0:\n # 
None\n return None\n elif len(self.points) == 1:\n # Same point\n return self.points[0]\n elif len(self.points) == 2:\n # Middle of a segment\n return Segment(*self.points).middle\n elif len(self.points) == 3:\n # Intersection point of 2 medians\n return Point.average(self.points)\n else:\n # Geometric decomposition to compute centroids (wikipedia)\n n = len(self.points) # n is the number of points\n # There are n-2 forms\n forms = [Form([self.points[0]] + self.points[i:i + 2]) for i in range(1, n - 1)]\n # So n-2 centroids and areas, except if some of the points are one upon another, no area is null\n centroids = [form.center for form in forms]\n areas = [form.area for form in forms]\n # we compute the average centroid weighted by the areas\n weighted_centroid = Point.sum([a * c for (c, a) in zip(centroids, areas)])\n centroid = weighted_centroid / sum(areas)\n return centroid" ]
[ "0.7213677", "0.68678993", "0.68678993", "0.68211424", "0.67176604", "0.6685038", "0.6652162", "0.6585276", "0.65823853", "0.6560574", "0.65416116", "0.65348834", "0.64790154", "0.64459264", "0.6443465", "0.6432527", "0.64168453", "0.63676035", "0.63556105", "0.6349497", "0.634873", "0.6343642", "0.633743", "0.6322268", "0.6282419", "0.62707585", "0.6260525", "0.62571555", "0.62451124", "0.62248635" ]
0.7951323
0
given a fenics mesh, this function returns the bounding_box that fits around the domain
def get_domain_bounding_box(mesh=None, cell_cent=None): def local_bbox_method(coords): dim = len(coords[0]) corner_min = np.zeros(dim ,float) corner_max = np.zeros(dim, float) for d in range(dim): corner_min[d] = min(coords[:,d]) corner_max[d] = max(coords[:,d]) return np.vstack((corner_min, corner_max)) if mesh==None and len(np.shape(cell_cent)) == 0: raise AssertionError("provide either fenics mesh or cell centroid of PD particles") if mesh != None and len(np.shape(cell_cent)) == 0: coords = mesh.coordinates() return local_bbox_method(coords) if cell_cent.all() and not mesh: coords = cell_cent return local_bbox_method(coords)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounding_box(self):\n return self._domain.get_bounding_box()", "def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb", "def get_peridym_mesh_bounds(mesh, struct_grd=False):\n if(struct_grd):\n cell_cent = structured_cell_centroids(mesh)\n max_edge_len = np.diff(cell_cent[0:2][:,0])\n range_fact = 2.001*max_edge_len \n else:\n cell_cent = get_cell_centroids(mesh)\n max_edge_len = mesh.hmax()\n range_fact = 1.5001*max_edge_len\n\n dim = len(cell_cent[0])\n corner_min, corner_max = get_domain_bounding_box(mesh)\n num_els = len(cell_cent)\n\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n bound_cents = {} #dict to store the node centroids corresponding to node numbers above\n\n for d in range(dim):\n \"\"\"\n index to direction along which the normal to boundary occurs:#\n 0 - x_min\n 1 - x_max\n 2 - y_min\n 3 : y_max\n 4 : z_min\n 5 : z_max\n Note: z-normal not applicable to 2d problems\n \"\"\"\n bound_range[2*d] = corner_min[d] + range_fact #min bound for d\n bound_range[2*d +1] = corner_max[d] - range_fact #max bound for d\n bound_nodes[(2*d)] = np.where(cell_cent[:,d] <= bound_range[2*d]) #node nums for min bound\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1]) # node nums for max bound\n\n bound_cents[(2*d)] = cell_cent[bound_nodes[2*d][0]] #node centroids for min bound\n bound_cents[(2*d+1)] = cell_cent[bound_nodes[2*d+1][0]] #node centroids for min bound\n\n return bound_nodes, bound_cents #convert list to np array ", "def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max", "def get_bounding_box(current_building_contour):\n x, y, w, h, = cv.boundingRect(current_building_contour[0])\n return x, y, w, h", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def boundingBox(self):\n minx, miny, maxx, maxy = self.substrates.bounds\n return pcbnew.BOX2I(\n pcbnew.VECTOR2I(int(minx), int(miny)),\n pcbnew.VECTOR2I(int(maxx - minx), int(maxy - miny)))", "def bounding_box(self):\n latlon00 = self.ij_to_latlon(-1,-1)\n latlon01 = self.ij_to_latlon(-1,self.domain_size[1]+1)\n latlon11 = self.ij_to_latlon(self.domain_size[0]+1,self.domain_size[1]+1)\n latlon10 = self.ij_to_latlon(self.domain_size[0]+1,-1)\n return (latlon00,latlon01,latlon11,latlon10)", "def get_bounding_box(im):\n 
coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])", "def get_bounding_box(self):\n if len(self.elements) == 0:\n return None\n if not (self._bb_valid and\n all(ref._bb_valid for ref in self.get_dependencies(True))):\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for element in self.elements:\n if isinstance(element, PolygonSet):\n all_polygons.extend(element.polygons)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n element_bb = element.get_bounding_box()\n if element_bb is not None:\n bb[0, 0] = min(bb[0, 0], element_bb[0, 0])\n bb[0, 1] = min(bb[0, 1], element_bb[0, 1])\n bb[1, 0] = max(bb[1, 0], element_bb[1, 0])\n bb[1, 1] = max(bb[1, 1], element_bb[1, 1])\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bb_valid = True\n _bounding_boxes[self] = bb\n return _bounding_boxes[self]", "def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))", "def get_bounds(ds):\n\n trans = get_transform(ds)\n if trans is not None:\n if isinstance(ds, xr.Dataset):\n dims = ds.dims\n elif isinstance(ds, xr.DataArray):\n dims = dict(zip(ds.dims, ds.shape))\n nrows = dims['y']\n ncols = dims['x']\n corners = (np.array([0, 0, ncols-1, ncols-1]),\n np.array([0, nrows-1, 0, nrows-1]))\n corner_x, corner_y = trans * corners\n return BoundingBox(\n left=corner_x.min(),\n bottom=corner_y.min(),\n right=corner_x.max(),\n top=corner_y.max()\n )\n else:\n return BoundingBox(\n left=ds['x'].min(),\n bottom=ds['y'].min(),\n right=ds['x'].max(),\n top=ds['y'].max()\n )", "def get_bounding_box(self):\n deps_still_valid = all(ref._bb_valid for ref in self.get_dependencies(True))\n cached_bbox_still_valid = self._bb_valid and deps_still_valid\n if not cached_bbox_still_valid:\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for polygon in self.polygons:\n all_polygons.extend(polygon.polygons)\n for path in self.paths:\n all_polygons.extend(path.to_polygonset().polygons)\n for reference in self.references:\n reference_bb = reference.get_bounding_box()\n if reference_bb is not None:\n all_polygons.append(reference_bb)\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bounding_box = bb\n else:\n self._bounding_box = None\n self._bb_valid = True\n\n if self._bounding_box is None:\n return None\n else:\n # return a *copy* of the cached bounding box to ensure it doesn't get inadvertently modified\n return numpy.array(self._bounding_box)", "def __CalculateBoundingBox(self, contour):\r\n return cv2.boundingRect(contour)", "def mesh_boundary(mesh):\n adja = edges_to_adjacency_matrix(mesh)\n r = sparse.extract.find(adja)\n li = r[0][np.where(r[2] == 1)]\n lj = r[1][np.where(r[2] == 1)]\n edges_boundary = np.vstack([li, lj]).T\n \"\"\"\n # alternative implementation based 
on edges and grouping from trimesh\n # instead of adjacency matrix\n from trimesh import grouping\n groups = grouping.group_rows(mesh.edges_sorted, require_count=1)\n # vertex_boundary = np.unique(open_mesh.edges_sorted[groups])\n edges_boundary = mesh.edges_sorted[groups]\n \"\"\"\n if li.size == 0:\n print('No holes in the surface !!!!')\n return np.array()\n else:\n return edges_to_boundary(edges_boundary)", "def bounding_box(self, grid=1):\n supp = self.support\n grid = [np.linspace(s[0], s[1], grid+1) for s in supp]\n X = self.grid_eval(grid)\n X.shape = (-1, self.dim)\n return tuple((X[:, d].min(), X[:, d].max()) for d in range(self.dim))", "def boundingBoxArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)", "def bounding_box(alpha):\n assert alpha.ndim == 2\n\n # Take the bounding box of the support, with a certain threshold.\n #print(\"Using alpha\", self.use_alpha, \"support\", self.support)\n supp_axs = [alpha.max(axis=1-i) for i in range(2)]\n\n th = 0.5 \n # Check first and last value of that threshold\n bb = [np.where(supp_axs[i] > th)[0][[0,-1]] for i in range(2)]\n\n # This bb looks like [(x0, x1), (y0, y1)], when we want it as (x0, y0, x1, y1)\n #psize = self.settings['subsample_size']\n #ret = (bb[0][0]/psize[0], bb[1][0]/psize[1], bb[0][1]/psize[0], bb[1][1]/psize[1])\n\n return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])", "def get_bounds(shape, affine):\n adim, bdim, cdim = shape\n adim -= 1\n bdim -= 1\n cdim -= 1\n # form a collection of vectors for each 8 corners of the box\n box = np.array([[0., 0, 0, 1],\n [adim, 0, 0, 1],\n [0, bdim, 0, 1],\n [0, 0, cdim, 1],\n [adim, bdim, 0, 1],\n [adim, 0, cdim, 1],\n [0, bdim, cdim, 1],\n [adim, bdim, cdim, 1]]).T\n box = np.dot(affine, box)[:3]\n return zip(box.min(axis=-1), box.max(axis=-1))", "def bounding_box(self):\n return None", "def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def calc_bounding_box(self):\n self.BB = self.geos.abs_el(0).BB\n for geo in self.geos.abs_iter():\n self.BB = self.BB.joinBB(geo.BB)", "def bounding_box(self, integral=False):\n box_min = []\n box_max = []\n if self.n_vertices==0:\n raise ValueError('Empty polytope is not allowed')\n for i in range(0,self.ambient_dim()):\n coords = [ v[i] for v in self.Vrep_generator() ]\n max_coord = max(coords)\n min_coord = min(coords)\n if integral:\n box_max.append(ceil(max_coord))\n box_min.append(floor(min_coord))\n else:\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def bounding_box(points: np.matrix):\n return points.min(axis=0), points.max(axis=0)", "def get_bounding_box(uv_coor, shape):\r\n\txmin = ymin = 99999\r\n\txmax = ymax = 0\r\n\tfor x, y in uv_coor:\r\n\t\txmin = min(xmin, int(x))\r\n\t\txmax = max(xmax, int(x))\r\n\t\tymin = min(ymin, int(y))\r\n\t\tymax = max(ymax, int(y))\r\n\txmin = max(0, xmin - 20)\r\n\tymin = max(0, ymin - 20)\r\n\r\n\txmax = min(shape[1], xmax + 20)\r\n\tymax = min(shape[0], ymax + 20)\r\n\r\n\treturn xmin, xmax, ymin, ymax", "def getBoundingBox(cls, dagPath):\n\n box = BoundingBox3D()\n box.fromExactWorldBoundingBox(cmds.exactWorldBoundingBox(dagPath))\n return box", "def getBoundingBox(self):\n lX, lY = 
self.lX(), self.lY()\n return min(lX), min(lY), max(lX), max(lY)", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)" ]
[ "0.68726236", "0.6643377", "0.66258126", "0.6624291", "0.66229665", "0.6602934", "0.659407", "0.6578977", "0.6496308", "0.6475542", "0.63955826", "0.6387074", "0.6363856", "0.63531804", "0.63405913", "0.6319945", "0.6304606", "0.62939525", "0.6264529", "0.62618446", "0.6259569", "0.62301904", "0.62266105", "0.6205296", "0.6191743", "0.61798316", "0.6166346", "0.61537033", "0.61393917", "0.61005396" ]
0.7623161
0
given a set of cell centroid beloning to regular (Square/Tri) discretization in 2D/3D, the method returns the edge length
def get_peridym_edge_length(cell_cent, struct_grd=False): dim = len(cell_cent[0]) el = np.zeros(dim, dtype = float) if(struct_grd): el_fact = 1.0 else: el_fact = 3.0 for d in range(dim): xx = np.unique(cell_cent[:,d]) el[d] = el_fact*np.max(np.abs(np.diff(xx[0:2]))) return el
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def cell_edges(self):", "def sub_block_centroids(self):\n if self.sub_block_corners is None or self.sub_block_sizes is None:\n return None\n return self.sub_block_corners.array + self.sub_block_sizes.array / 2", "def edge_perimeter_length(c, stencil=nn_stencil):\n\n return np.sum(np.logical_not(c) * coordination(c, stencil=stencil))", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def _geodesic_distance(mesh, face1, face2, edge):\n edge_center = (mesh.vertices[edge[0]] + mesh.vertices[edge[1]]) / 2\n return _list_length(_list_minus(edge_center, _face_center(mesh, face1))) + \\\n _list_length(_list_minus(edge_center, _face_center(mesh, face2)))", "def cable_length(self):\n skel = self.physical_space(copy=False)\n\n v1 = skel.vertices[skel.edges[:,0]]\n v2 = skel.vertices[skel.edges[:,1]]\n\n delta = (v2 - v1)\n delta *= delta\n dist = np.sum(delta, axis=1)\n dist = np.sqrt(dist)\n\n return np.sum(dist)", "def edge_cluster_rectangle(cluster):\n x_starts = []\n x_stops = []\n y_starts = []\n y_stops = []\n for area in cluster:\n x, y, w, h = area\n x_starts.append(x)\n x_stops.append(x + w)\n y_starts.append(y)\n y_stops.append(y + h)\n return min(x_starts), min(y_starts), max(x_stops) - min(x_starts), max(y_stops) - min(y_starts)", "def nspatials(self):\n return int(len(self)/2)", "def get_grid_cell_widths(edge_heights_m_agl):\n\n error_checking.assert_is_geq_numpy_array(edge_heights_m_agl, 0.)\n error_checking.assert_is_numpy_array(edge_heights_m_agl, num_dimensions=1)\n\n return numpy.diff(edge_heights_m_agl)", "def bin_edges_to_centres(edges):\r\n if edges.ndim == 1:\r\n steps = (edges[1:] - edges[:-1]) / 2\r\n return edges[:-1] + steps\r\n else:\r\n steps = (edges[1:, 1:] - edges[:-1, :-1]) / 2\r\n centres = edges[:-1, :-1] + steps\r\n return centres", "def __len__(self):\n return len(self.centroid_vector)", "def calc_incenters(triangles):\n\n # Calculate the side lengths and make the array 3D.\n lengths = calc_side_lengths(triangles)\n lengths3 = np.atleast_3d(lengths)\n\n # Calculate the weights, make them 2D\n weights = lengths.sum(axis=1)\n weights2 = np.atleast_2d(weights)\n\n # Calculate the centers, divide by weights to get incenters\n centers = np.sum(triangles * lengths3, axis=1)\n incenters = centers/weights2.T\n return incenters", "def hypergraph_cut_size(hypergraph, vertex_set):\n cut_size = 0\n\n # Iterate through the edges to find those in the cut\n for edge in hypergraph.edges():\n edge_intersects_vertex_set = len([v for v in edge.elements if v in vertex_set]) > 0\n edge_entirely_inside_vertex_set = len([v for v in edge.elements if v not in vertex_set]) == 0\n\n # If this edge is on the cut, add one to the total\n if edge_intersects_vertex_set and not edge_entirely_inside_vertex_set:\n cut_size += 
1\n\n return cut_size", "def EdgeLengths(self,which_edges='boundary'):\n\n assert self.points is not None\n assert self.element_type is not None\n\n\n lengths = None\n if which_edges == 'boundary':\n if self.edges is None:\n self.GetBoundaryEdges()\n\n edge_coords = self.points[self.edges[:,:2],:]\n lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)\n\n elif which_edges == 'all':\n if self.all_edges is None:\n self.GetEdges()\n\n edge_coords = self.points[self.all_edges[:,:2],:]\n lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)\n\n return lengths", "def find_centroid_for_each(self):", "def centers(self):\n return tuple(0.5 * (e[1:] + e[:-1]) for e in self.edges)", "def networkx_directed_cut_size(graph, vertex_set_l, vertex_set_r=None):\n edges = nx.edge_boundary(graph, vertex_set_l, vertex_set_r, data=\"weight\", default=1)\n return sum(weight for u, v, weight in edges)", "def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide", "def edge_centers(self):\n x0, y0, width, height = self._rect_bbox\n w = width / 2.\n h = height / 2.\n xe = x0, x0 + w, x0 + width, x0 + w\n ye = y0 + h, y0, y0 + h, y0 + height\n transform = self._get_rotation_transform()\n coords = transform.transform(np.array([xe, ye]).T).T\n return coords[0], coords[1]", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim = len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def computeEdgeArcLengths(self):\n LIB.mnt_grid_computeEdgeArcLengths.argtypes = [POINTER(c_void_p)]\n ier = LIB.mnt_grid_computeEdgeArcLengths(self.obj)\n if ier:\n error_handler(FILE, 'computeEdgeArcLengths', ier)", "def calc_face_dimensions(face):\n vertical = filter_vertical_edges(face.edges, face.normal).pop()\n horizontal = filter_horizontal_edges(face.edges, face.normal).pop()\n return horizontal.calc_length(), vertical.calc_length()", "def inner_distance(self, distances):\n cluster_distances = distances[np.ix_(self.cluster_indices, self.cluster_indices)]\n number_of_connections = np.power(cluster_distances.shape[0] - 1, 2)\n return np.sum(cluster_distances) // number_of_connections if number_of_connections > 0 else 0", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def findClosetCentroids(X, centroids):\n\tm, n = X.shape\n\tK = centroids.shape[0]\n\tidx = np.zeros(m) # m\n\n\tfor i in range(m):\n\t\ttemp = np.tile(X[i, :], K).reshape(centroids.shape)\n\t\tidx[i] = np.argmin(np.sum((centroids - temp) ** 2, axis=1))\n\treturn idx", "def e_size(self) -> int:\n return self.edgesize", "def cfdProcessGeometry(self):\r\n \r\n # self.faceCentroids']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceSf']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceAreas']= [[] for i in range(self.numberOfFaces'])]\r\n \r\n ## Linear weight of distance from cell center to face\r\n self.faceWeights= 
[[0] for i in range(self.numberOfFaces)]\r\n\r\n ## Not\r\n self.faceCF= [[0, 0, 0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceCf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceFf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDist= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDistLimited= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.elementCentroids= [[] for i in range(self.numberOfElements)]\r\n self.elementVolumes= [[] for i in range(self.numberOfElements)]\r\n \r\n \"\"\"\r\n Calculate:\r\n -face centroids (faceCentroids)\r\n -face normal (Sf)\r\n -face areas (faceAreas)\r\n \"\"\"\r\n \r\n #find cell with largest number of points\r\n maxPoints=len(max(self.faceNodes, key=len))\r\n forCross1 = [[] for i in range(maxPoints)]\r\n forCross2 = [[] for i in range(maxPoints)]\r\n local_faceCentroid=[[] for i in range(maxPoints)]\r\n \r\n for iFace in range(self.numberOfFaces):\r\n theNodeIndices = self.faceNodes[iFace]\r\n theNumberOfFaceNodes = len(theNodeIndices)\r\n \r\n #compute a rough centre of the face\r\n local_centre = [0,0,0]\r\n \r\n for iNode in theNodeIndices:\r\n local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n \r\n local_centre = local_centre/theNumberOfFaceNodes\r\n \r\n for iTriangle in range(theNumberOfFaceNodes):\r\n \r\n point1 = local_centre\r\n point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n \r\n if iTriangle < theNumberOfFaceNodes-1:\r\n point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n else:\r\n point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n \r\n local_faceCentroid[iTriangle].append((point1+point2+point3)/3)\r\n \r\n left=point2-point1\r\n right=point3-point1\r\n \r\n forCross1[iTriangle].append(left)\r\n forCross2[iTriangle].append(right)\r\n \r\n \r\n local_Sf=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n local_area=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n \r\n centroid=np.zeros([self.numberOfFaces,3])\r\n area=np.zeros([self.numberOfFaces])\r\n Sf=np.zeros([self.numberOfFaces,3])\r\n \r\n #cells with fewer faces than others are full of zeros\r\n for i in range(maxPoints):\r\n \r\n forCrossLeft=np.vstack(np.array(forCross1[i]))\r\n forCrossRight=np.vstack(np.array(forCross2[i]))\r\n \r\n local_Sf[i]=0.5*np.cross(forCrossLeft,forCrossRight)\r\n local_area[i]=np.linalg.norm(local_Sf[i],axis=1)\r\n \r\n centroid = centroid + np.array(local_faceCentroid[i])*local_area[i][:,None]\r\n Sf=Sf+local_Sf[i]\r\n area=area+local_area[i]\r\n \r\n self.faceCentroids=centroid/area[:,None]\r\n self.faceSf=Sf\r\n self.faceAreas=area \r\n \r\n \r\n \"\"\"\r\n Pure python version - causes slowness due to iterative np.cross()\r\n \"\"\"\r\n \r\n # for iFace in range(self.numberOfFaces):\r\n # theNodeIndices = self.faceNodes[iFace]\r\n # theNumberOfFaceNodes = len(theNodeIndices)\r\n # \r\n # #compute a rough centre of the face\r\n # local_centre = [0,0,0]\r\n # \r\n # for iNode in theNodeIndices:\r\n # \r\n # local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n # \r\n # local_centre = local_centre/theNumberOfFaceNodes\r\n # centroid = [0, 0, 0]\r\n # Sf = [0,0,0]\r\n # area = 0\r\n # \r\n # #finds area of virtual triangles and adds them to the find to find face area\r\n # #and direction (Sf)\r\n # \r\n # \r\n # \r\n # for iTriangle in range(theNumberOfFaceNodes):\r\n # point1 = local_centre\r\n # point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n # \r\n # if iTriangle < theNumberOfFaceNodes-1:\r\n 
# point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n # else:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n # \r\n # local_centroid = (point1 + point2 + point3)/3\r\n # \r\n # left=point2-point1\r\n # right=point3-point1\r\n # x = 0.5*((left[1] * right[2]) - (left[2] * right[1]))\r\n # y = 0.5*((left[2] * right[0]) - (left[0] * right[2]))\r\n # z = 0.5*((left[0] * right[1]) - (left[1] * right[0]))\r\n # local_Sf=np.array([x,y,z])\r\n # \r\n # local_area = np.linalg.norm(local_Sf)\r\n # \r\n # centroid = centroid + local_area*local_centroid\r\n # Sf = Sf + local_Sf\r\n # area = area + local_area\r\n # centroid = centroid/area\r\n # self.faceCentroids[iFace]=centroid\r\n # self.faceSf[iFace]=Sf\r\n # self.faceAreas[iFace]=area\r\n \r\n \r\n \"\"\"\r\n Calculate:\r\n -element centroids (elementCentroids)\r\n -element volumes (elementVolumes)\r\n \"\"\"\r\n for iElement in range(self.numberOfElements):\r\n \r\n theElementFaces = self.elementFaces[iElement]\r\n \r\n #compute a rough centre of the element\r\n local_centre = [0,0,0]\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n local_centre = local_centre + self.faceCentroids[faceIndex]\r\n \r\n local_centre = local_centre/len(theElementFaces)\r\n \r\n localVolumeCentroidSum = [0,0,0]\r\n localVolumeSum = 0\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n \r\n Cf = self.faceCentroids[faceIndex]-local_centre\r\n \r\n faceSign = -1\r\n if iElement == self.owners[faceIndex]:\r\n faceSign = 1\r\n \r\n local_Sf = faceSign*self.faceSf[faceIndex]\r\n \r\n localVolume = np.dot(local_Sf,Cf)/3\r\n \r\n localCentroid = 0.75*self.faceCentroids[faceIndex]+0.25*local_centre\r\n \r\n localVolumeCentroidSum = localVolumeCentroidSum + localCentroid*localVolume\r\n \r\n localVolumeSum = localVolumeSum + localVolume\r\n \r\n self.elementCentroids[iElement]=localVolumeCentroidSum/localVolumeSum\r\n self.elementVolumes[iElement]=localVolumeSum\r\n \r\n \r\n for iFace in range(self.numberOfInteriorFaces):\r\n \r\n n=self.faceSf[iFace]/np.linalg.norm(self.faceSf[iFace])\r\n own=self.owners[iFace]\r\n nei = self.neighbours[iFace]\r\n \r\n self.faceCF[iFace]=self.elementCentroids[nei]-self.elementCentroids[own]\r\n self.faceCf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[own]\r\n self.faceFf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[nei]\r\n self.faceWeights[iFace]=(-np.dot(self.faceFf[iFace],n))/(-np.dot(self.faceFf[iFace],n)+np.dot(self.faceCf[iFace],n))\r\n \r\n for iBFace in range(self.numberOfInteriorFaces, self.numberOfFaces):\r\n \r\n \r\n n=self.faceSf[iBFace]/np.linalg.norm(self.faceSf[iBFace])\r\n own=self.owners[iBFace]\r\n \r\n self.faceCF[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own]\r\n self.faceCf[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own] \r\n self.faceWeights[iBFace]=1\r\n self.wallDist[iBFace]= max(np.dot(self.faceCf[iBFace], n), 1e-24)\r\n self.wallDistLimited[iBFace]= max(self.wallDist[iBFace], 0.05*np.linalg.norm(self.faceCf[iBFace]))" ]
[ "0.6159054", "0.610381", "0.6095435", "0.6027462", "0.6002276", "0.59965074", "0.5987988", "0.58697575", "0.58489704", "0.5763668", "0.574332", "0.5738635", "0.57146585", "0.5661359", "0.5644903", "0.56421226", "0.5631822", "0.5622291", "0.55991775", "0.55730665", "0.5567386", "0.5554577", "0.55401915", "0.55369866", "0.55245453", "0.55088353", "0.54990643", "0.54940766", "0.5463517", "0.54616576" ]
0.6498209
0
returns list of markers to be used for plt functions; max markers allowed = 18
def get_markers(num_markers):
    markers = ['^','o','P','X','*', 'd','<', '>', ',','|', '1','2','3','4','s','p','*','h','+']
    if(num_markers>18):
        sys.exit("cannot create more than 18 markers, refactor your code; force exiting")
    return markers[0:num_markers]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_markerstyles(n=None):\n all_markers = ['o', 'D', 's', '2', '*', 'h', '8', 'v', 'x', '+', 5, 'd', '>', 7, '.', '1', 'p', '3',\n 6, 0, 1, 2, 3, 4, '4', '<', 'H', '^']\n # Note: 0: 'tickleft', 1: 'tickright', 2: 'tickup', 3: 'tickdown', 4: 'caretleft', 'D': 'diamond', 6: 'caretup',\n # 7: 'caretdown', 's': 'square', '|': 'vline', '': 'nothing', 'None': 'nothing', 'x': 'x', 5: 'caretright',\n # '_': 'hline', '^': 'triangle_up', ' ': 'nothing', 'd': 'thin_diamond', 'h': 'hexagon1', '+': 'plus', '*': 'star',\n # ',': 'pixel', 'o': 'circle', '.': 'point', '1': 'tri_down', 'p': 'pentagon', '3': 'tri_left', '2': 'tri_up',\n # '4': 'tri_right', 'H': 'hexagon2', 'v': 'triangle_down', '8': 'octagon', '<': 'triangle_left', None: 'nothing',\n # '>': 'triangle_right'\n # all_markers = ['circle', 'diamond', 'square', 'tri_up', 'star',\n # 'hexagon1', 'octagon', 'triangle_down', 'x', 'plus',\n # 'caretright', 'thin_diamond', 'triangle_right', 'caretdown',\n # 'point', 'tri_down', 'pentagon', 'tri_left', 'caretup',\n # 'tickleft', 'tickright', 'tickup', 'tickdown', 'caretleft',\n # 'tri_right', 'triangle_left', 'hexagon2', 'triangle_up']\n # Note: markers can be found via\n # import matplotlib.pyplot as plt\n # import matplotlib\n # d = matplotlib.markers.MarkerStyle.markers\n # def find_symbol(syms):\n # outsyms = []\n # for sym in syms:\n # for key in d:\n # if d[key] == sym:\n # outsyms.append(key)\n # return outsyms\n\n if n is None:\n markerlist = all_markers\n elif n > len(all_markers):\n markerlist = all_markers\n markerlist.append(all_markers[0:n - len(all_markers)])\n else:\n markerlist = all_markers[0:n]\n\n return markerlist", "def markers (self):\n return self._markers", "def get_marker_elements(self):\r\n # Fixme: Currently only arrowheads\r\n return [item[1] for item in self.arrows.values()]", "def getMarkerGenes(self):\n try:\n self.marker_genes = Utils.findMarkers(self.sc_annot, self.de_dict)\n except:\n print(\"Please run getCellTypes first to get cell annotations. 
This step is needed for marker gene finding.\")", "def _add_markers(figure_data, molecules, plot_type=\"scatter3d\"):\n\n drug_data = figure_data[0]\n list_of_drugs = drug_data[\"text\"]\n\n # get the axis index for each drug\n indices = [index for index, value in enumerate(list_of_drugs) if value in molecules]\n\n if plot_type == \"histogram2d\":\n plot_type = \"scatter\"\n\n traces = []\n for point_number in indices:\n trace = {\n \"x\": [drug_data[\"x\"][point_number]],\n \"y\": [drug_data[\"y\"][point_number]],\n \"marker\": {\"color\": \"red\", \"size\": 16, \"opacity\": 0.6, \"symbol\": \"cross\"},\n \"type\": plot_type,\n }\n if plot_type == \"scatter3d\":\n trace[\"z\"] = [drug_data[\"z\"][point_number]]\n traces.append(trace)\n return traces", "def Markers(cls):\n return cls._markers", "def markers(iterables, nyc_map):\n markers = list(map(lambda iterable: folium.Marker(location = iterable[:2],\n popup = f'{iterable[2]}: {iterable[3]}'), iterables))\n for marker in markers:\n marker.add_to(nyc_map)\n \n return nyc_map", "def markers(iterables, nyc_map):\n markers = list(map(lambda iterable: folium.Marker(location = iterable[:2],\n popup = f'{iterable[2]}: {iterable[3]}'), iterables))\n for marker in markers:\n marker.add_to(nyc_map)\n \n return nyc_map", "def generateMarkers(self, *args, **kwargs): \n return 'var PloneMapMarkers = [' + \\\n ''.join([\"{'type': '%s','options': { 'position': new google.maps.LatLng( %s, %s ), 'title' : '%s', 'title_' : '%s' }},\" \n % (object.markerIcon, object.latitude, object.longitude, object.Title(), object.getId()) \n for object in self.context.objectValues() \n if hasattr(object, 'latitude') and len(object.latitude) > 0 ])[:-1] \\\n + '];'", "def get_marking_values():\n array = np.arange(0, MAX_MARK_VALUE +1).tolist()\n return array", "def plot_points(ax, d, nu, t, f, marker, name=None):\n lums = []\n for ii,nuval in enumerate(nu):\n if nuval > 90E9:\n lum = plot_point(ax, d, nuval, t[ii], f[ii], marker, name=name)\n lums.append(lum)\n else:\n lum = plot_point(ax, d, nuval, t[ii], f[ii], marker)\n lums.append(lum)\n ax.plot(\n t, lums, ls='--', c='k', zorder=0)\n return lums", "def plot_markers(adata, key, markers = None, basis = 'umap', n_max = 10,\n use_raw = True, multi_line = True, ignore_case = True,\n protein= False, min_cutoff = None, max_cutoff = None, clustering = 'louvain',\n colorbar = False, prot_key = 'prot', prot_names_key = 'prot_names', **kwags):\n\n # check wether this basis exists\n if 'X_' + basis not in adata.obsm.keys():\n raise ValueError('You have not computed the basis ' + basis + ' yet. ')\n\n X_em = adata.obsm['X_' + basis]\n if basis == 'diffmap': X_em = X_em[:, 1:]\n\n # give some feedback\n print('Current key: {}'.format(key))\n print('Basis: {}'.format(basis))\n\n # get the gene names\n if use_raw:\n try:\n print('Using the rawdata')\n var_names = adata.raw.var_names\n except:\n var_names = adata.var_names\n use_raw = False\n print('adata.raw does not seem to exist')\n else:\n var_names = adata.var_names\n\n # obtain the subset of genes we would like to plot\n if markers is not None and protein is False:\n\n if key not in markers.keys():\n\n print('Key not in the markers dict. 
Searching in the var names.')\n if ignore_case:\n reg_ex = re.compile(key, re.IGNORECASE)\n else:\n reg_ex = re.compile(key, re.IGNORECASE)\n genes = [l for l in var_names \\\n for m in [reg_ex.search(l)] if m]\n\n else:\n\n print('Key found in the markers dict.')\n genes_pre = markers[key]\n genes = list()\n not_found = list()\n\n # search through the list of genes\n for gene in genes_pre:\n if ignore_case:\n reg_ex = re.compile('^' + gene + '$', re.IGNORECASE)\n else:\n reg_ex = re.compile('^' + gene + '$')\n result = [l for l in var_names \\\n for m in [reg_ex.search(l)] if m]\n if len(result)> 0:\n genes.append(result[0])\n else:\n not_found.append(gene)\n if len(not_found)> 0:\n print('Could not find the following genes: ' + str(not_found))\n\n elif protein is False:\n print('No markers dict given. Searching in the var names.')\n genes = []\n for gene in key:\n if ignore_case:\n reg_ex = re.compile(gene, re.IGNORECASE)\n else:\n reg_ex = re.compile(gene)\n genes_ = [l for l in var_names \\\n for m in [reg_ex.search(l)] if m]\n genes.append(*genes_)\n elif protein is True:\n # we will internally refer to the proteins as genes\n print('Looking for a protein with this name.')\n\n if (prot_names_key not in adata.uns.keys()) or (prot_key not in adata.obsm.keys()):\n raise ValueError('Requires a filed \\'{}\\' in adata.uns and a field \\'{}\\' in adata.obsm'.format(prot_names_key, prot_key))\n proteins = adata.obsm[prot_key]\n protein_names = adata.uns[prot_names_key]\n\n # combine to a dataframe\n proteins = pd.DataFrame(data = proteins, columns=protein_names)\n if ignore_case:\n reg_ex = re.compile(key, re.IGNORECASE)\n else:\n reg_ex = re.compile(key)\n genes = [l for l in protein_names \\\n for m in [reg_ex.search(l)] if m]\n\n\n\n if len(genes) == 0:\n raise ValueError('Could not find any gene or protein to plot.')\n\n # make sure it is not too many genes\n if len(genes) > n_max:\n print('Found ' + str(len(genes)) + ' matches.')\n genes = genes[:n_max]\n if not protein:\n print('Plotting the following genes:' + str(genes))\n else:\n print('Plotting the following proteins:' + str(genes))\n\n # create a gridspec\n n_genes = len(genes)\n\n if multi_line:\n n_col = 3\n n_row = int(np.ceil(n_genes+1/n_col))\n else:\n n_col = n_genes + 1\n n_row = 1\n\n gs = plt.GridSpec(n_row, n_col, figure = plt.figure(None, (12, n_row*12/(n_col+1) ), dpi = 150))\n\n\n # plot the genes\n plt.title(key)\n\n for i in range(n_genes+ 2):\n plt.subplot(gs[i])\n\n # genes\n if i < n_genes:\n # get the color vector for this gene\n if not protein:\n if use_raw:\n color = adata.raw[:, genes[i]].X\n else:\n color = adata[:, genes[i]].X\n plt.title('Gene: ' + genes[i])\n else:\n color = proteins[genes[i]]\n plt.title('Protein: ' + genes[i])\n\n # quantile normalisation\n if min_cutoff is not None:\n color_min = np.quantile(color, np.float(min_cutoff[1:])/100)\n else:\n color_min = np.min(color)\n if max_cutoff is not None:\n color_max = np.quantile(color, np.float(max_cutoff[1:])/100)\n else:\n color_max = np.max(color)\n color = np.clip(color, color_min, color_max)\n\n plt.scatter(X_em[:, 0], X_em[:, 1], marker = '.', c = color, **kwags)\n\n # add a colorbar\n if colorbar: plt.colorbar()\n elif i == n_genes: #louvain\n ax = sc.pl.scatter(adata, basis = basis, color = clustering,\n show = False, ax = plt.subplot(gs[i]),\n legend_loc = 'right margin')\n elif i > n_genes: #condition\n if 'color' in adata.obs.keys():\n print('found key')\n ax = sc.pl.scatter(adata, basis = basis, color = 'color',\n show = False, ax = 
plt.subplot(gs[i]),\n legend_loc = 'right margin')\n plt.axis(\"off\")\n plt.plot()", "def open_markers(filename):\n markers = {}\n try:\n with open(filename, \"r\") as f:\n lines = f.readlines()\n cur_marker = \"\"\n cur_marker_name = \"\"\n for i in range(len(lines)):\n if i >= 7:\n cur_line = lines[i]\n if cur_line.startswith(\" \"):\n cur_marker += cur_line.replace(\" \", \"\").strip()\n else:\n if i != 7:\n markers[cur_marker_name] = [cur_marker]\n cur_marker_name = cur_line.split(\" \")[0]\n cur_marker = \"\"\n except IOError:\n print(\"Error loading file.\")\n return markers", "def plot_instructions_lim_mags(self):\n return self.__plot_instructions_lim_mags", "def plot_markers_2d(im, markers, newfig=True):\n\n if newfig:\n plt.figure()\n plt.imshow((im - np.min(im)) / (np.max(im) - np.min(im)))\n\n for mark in range(markers.shape[-1]):\n ind = np.unravel_index(\n np.argmax(markers[:, :, mark], axis=None), markers[:, :, mark].shape\n )\n plt.plot(ind[1], ind[0], \".r\")", "def plot_markers_3d(stack, nonan=True):\n x = []\n y = []\n z = []\n for mark in range(stack.shape[-1]):\n ind = np.unravel_index(\n np.argmax(stack[:, :, :, mark], axis=None), stack[:, :, :, mark].shape\n )\n if ~np.isnan(stack[0, 0, 0, mark]) and nonan:\n x.append(ind[1])\n y.append(ind[0])\n z.append(ind[2])\n elif ~np.isnan(stack[0, 0, 0, mark]) and not nonan:\n x.append(ind[1])\n y.append(ind[0])\n z.append(ind[2])\n elif not nonan:\n x.append(np.nan)\n y.append(np.nan)\n z.append(np.nan)\n return x, y, z", "def getAndSortFiducialPoints(self, center):\r\n # self.__registrationStatus.setText('Registration processing...')\r\n # pNode = self.parameterNode()\r\n # fixedAnnotationList = slicer.mrmlScene.GetNodeByID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if fixedAnnotationList != None:\r\n # fixedAnnotationList.RemoveAllChildrenNodes()\r\n markerCenters = center\r\n nbCenter = len(center)\r\n for k in range(nbCenter):\r\n point = [0]\r\n for i in range(nbCenter):\r\n U,V,W = 0,0,0\r\n for j in range(nbCenter):\r\n d = 0\r\n if i != j and markerCenters[i]!=(0,0,0):\r\n d2 = (markerCenters[i][0]-markerCenters[j][0])**2+(markerCenters[i][1]-markerCenters[j][1])**2+(markerCenters[i][2]-markerCenters[j][2])**2\r\n d = d2**0.5\r\n # print markerCenters[i],markerCenters[j]\r\n #print d\r\n if d >=45 and d<=53:\r\n U += 1\r\n elif d >53 and d<60:\r\n V +=1\r\n elif d >=70 and d<80:\r\n W +=1\r\n #print U,V,W\r\n if U+V+W>=3:\r\n #print markerCenters[i]\r\n point.extend([i])\r\n point.remove(0)\r\n minX = [999,999,999,999]\r\n maxX = [-999,-999,-999,-999]\r\n sorted = [[0,0,0] for l in range(4)]\r\n sortedConverted = [[0,0,0] for l in range(4)]\r\n for i in range(2):\r\n for k in point:\r\n if markerCenters[k][0]<= minX[0]:\r\n minX[0] = markerCenters[k][0]\r\n minX[1] = k\r\n elif markerCenters[k][0]<= minX[2]:\r\n minX[2] = markerCenters[k][0]\r\n minX[3] = k\r\n if markerCenters[k][0]>= maxX[0]:\r\n maxX[0] = markerCenters[k][0]\r\n maxX[1] = k\r\n elif markerCenters[k][0]>= maxX[2]:\r\n maxX[2] = markerCenters[k][0]\r\n maxX[3] = k\r\n if markerCenters[minX[1]][1] < markerCenters[minX[3]][1]:\r\n sorted[0] = minX[1]\r\n sorted[1] = minX[3]\r\n else:\r\n sorted[0] = minX[3]\r\n sorted[1] = minX[1]\r\n if markerCenters[maxX[1]][1]>markerCenters[maxX[3]][1]:\r\n sorted[2] = maxX[1]\r\n sorted[3] = maxX[3]\r\n else:\r\n sorted[2] = maxX[3]\r\n sorted[3] = maxX[1]\r\n sorted2 = [0,0,0,0]\r\n if 1:#self.horizontalTemplate.isChecked():\r\n sorted2[0]=sorted[2]\r\n sorted2[2]=sorted[0]\r\n sorted2[1]=sorted[3]\r\n 
sorted2[3]=sorted[1]\r\n else:\r\n sorted2[0]=sorted[3]\r\n sorted2[2]=sorted[1]\r\n sorted2[1]=sorted[0]\r\n sorted2[3]=sorted[2]\r\n # logic = slicer.modules.annotations.logic()\r\n # logic.SetActiveHierarchyNodeID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if pNode.GetParameter(\"Template\")=='4points':\r\n # nbPoints=4\r\n # elif pNode.GetParameter(\"Template\")=='3pointsCorners':\r\n # nbPoints=3\r\n l = slicer.modules.annotations.logic()\r\n l.SetActiveHierarchyNodeID(slicer.util.getNode('Fiducial List_fixed').GetID())\r\n for k in range(4) :\r\n fiducial = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n fiducial.SetReferenceCount(fiducial.GetReferenceCount()-1)\r\n fiducial.SetFiducialCoordinates(markerCenters[sorted2[k]])\r\n fiducial.SetName(str(k))\r\n fiducial.Initialize(slicer.mrmlScene)\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed ==None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n # sRed.SetSliceVisible(1)\r\n m= sRed.GetSliceToRAS()\r\n m.SetElement(0,3,sortedConverted[3][0])\r\n m.SetElement(1,3,sortedConverted[3][1])\r\n m.SetElement(2,3,sortedConverted[3][2])\r\n sRed.Modified()\r\n return sorted2", "def drawDetectedMarkers(image, corners, ids=None, borderColor=None):\n pass", "def addMarkers(self):\r\n\r\n for x, y in zip(*self.raw_coordinates):\r\n\r\n # put airfoil contour points as graphicsitem\r\n points = gc.GraphicsCollection()\r\n points.pen.setColor(QtGui.QColor(60, 60, 80, 255))\r\n points.brush.setColor(QtGui.QColor(217, 63, 122, 255))\r\n points.pen.setCosmetic(True) # no pen thickness change when zoomed\r\n\r\n points.Circle(x, y, 0.003, marker=True)\r\n\r\n marker = PGraphicsItem.GraphicsItem(points, self.scene)\r\n self.markers.addToGroup(marker)\r\n\r\n self.contour_group.addToGroup(self.markers)", "def detectMarkers(image, dictionary, corners=None, ids=None, parameters=None, rejectedImgPoints=None):\n pass", "def rplots(*args, marker:str=',', T:tuple=None):\n for arg in args:\n plt.figure()\n raster_plot(arg, marker=marker, T=T)", "def get_landmarks(self):\n lm_x = [x for x, y in self.landmarks]\n lm_y = [y for x, y in self.landmarks]\n return [lm_x, lm_y]", "def plot_timeres(timeres):\n tint = 24 / timeres\n step = 255 / (tint + 1)\n\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n mxcont = np.max(cont)\n\n if distrib:\n cont = cont / mxcont\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=cont[i, j] * (circlesize / scale),\n line_color='#000000',\n fill_color='#110000', fill_opacity=0.3)\n # mymap.addradpoint(minLat+(((scale - i)-0.5)/normLat), minLon+((j+1.5)/normLon),\n # cont[i,j]*(circlesize/scale), \"#FF0000\")\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=30,\n line_color='#000000',\n fill_color='#110000', fill_opacity=0.3)\n # mymap.addradpoint(minLat+(((scale - i )-0.5)/normLat),\n # minLon+((j+1.5)/normLon), 30, \"#FF0000\")\n for t in range(tint):\n color = '#' + (str(hex((t + 1) * step))[2:]) 
+ (\n str(hex((t + 1) * step))[2:]) + 'FF' # (str(hex((t+1)*step))[2:])\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n if (evtime / timeres) == t:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n if distrib:\n cont = cont / mxcont\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=cont[i, j] * (circlesize / scale),\n line_color=color,\n fill_color='#110000', fill_opacity=0.2)\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=30,\n line_color=color,\n fill_color='#110000', fill_opacity=0.2)", "def plot(self):\n #prepare the marker list\n marker = itertools.cycle((',', '+', '.', 'o', '*',\n '^', 'v', '<', '>', '8',\n 's', 'p', 'h', 'H', 'D',\n 'd'))\n # first categorised with plane\n for each_plane in self.plane_list:\n if self.is_literal:\n label = \"[\" + \"{0} {1} {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"]\"\n else:\n label = \"{\"+\"{0}, {1}, {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"}\"\n x_list = []\n y_list = []\n if self.is_literal:\n tmp = [each_plane]\n opposite_plane = [-item for item in each_plane]\n tmp.append(opposite_plane)\n else:\n tmp = PoleFigure.get_permutations(each_plane)\n # second categorised with grain ID\n my_marker = \".\" # default marker\n for i in range(len(self.__data)):\n each_euler = self.__data[i]\n if self.unique_marker:\n my_marker = marker.next()\n plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble\n euler = EulerAngle(each_euler[0], each_euler[1], each_euler[2])\n rot_m = np.dot(self.__ref, euler.rotation_matrix)\n self.__data[i] = RotationMatrix(rot_m).euler_angle\n for each_pole in tmp:\n tmp_pole = np.array(each_pole) / self.lattice_vector\n tmp_pole /= np.linalg.norm(tmp_pole)\n coord = np.dot(rot_m, tmp_pole)\n if coord[2] < 0:\n continue # not pointing up, moving on\n else:\n x = coord[0] / (1.0 + float(coord[2]))\n y = coord[1] / (1.0 + float(coord[2]))\n # need to rotate 90 degree\n x_list.append(y)\n y_list.append(-x)\n # start plotting\n if self.__clr_list is not None:\n clr = self.__clr_list.next()\n else:\n clr = np.random.rand(3, 1)\n plt.scatter(x_list, y_list, marker=my_marker, c=clr, label=label, edgecolor='none')\n # label x/y axis\n plt.text(1.1, 0.0, \"y\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n plt.text(0.0, -1.1, \"x\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n # set legend\n plt.legend(loc='upper left', numpoints=1, ncol=6, fontsize=8, bbox_to_anchor=(0, 0))\n plt.title(self.title)\n plt.savefig(self.title + \".\" + self.output)\n plt.close()", "def DefineMarkers(self):\n # Get the colours for the various markers\n style = self.GetItemByName('foldmargin_style')\n back = style.GetFore()\n rgb = eclib.HexToRGB(back[1:])\n back = wx.Colour(red=rgb[0], green=rgb[1], blue=rgb[2])\n\n fore = style.GetBack()\n rgb = eclib.HexToRGB(fore[1:])\n fore = wx.Colour(red=rgb[0], green=rgb[1], blue=rgb[2])\n\n # Buffer background 
highlight\n caret_line = self.GetItemByName('caret_line').GetBack()\n rgb = eclib.HexToRGB(caret_line[1:])\n clback = wx.Colour(*rgb)\n\n # Code Folding markers\n folder = ed_marker.FoldMarker()\n folder.Foreground = fore\n folder.Background = back\n folder.RegisterWithStc(self)\n\n # Bookmarks\n ed_marker.Bookmark().RegisterWithStc(self)\n\n # Breakpoints\n ed_marker.Breakpoint().RegisterWithStc(self)\n ed_marker.BreakpointDisabled().RegisterWithStc(self)\n step = ed_marker.BreakpointStep()\n step.Background = clback\n step.RegisterWithStc(self)\n ed_marker.StackMarker().RegisterWithStc(self)\n\n # Other markers\n errmk = ed_marker.ErrorMarker()\n errsty = self.GetItemByName('error_style')\n rgb = eclib.HexToRGB(errsty.GetBack()[1:])\n errmk.Background = wx.Colour(*rgb)\n rgb = eclib.HexToRGB(errsty.GetFore()[1:])\n errmk.Foreground = wx.Colour(*rgb)\n errmk.RegisterWithStc(self)\n # Lint Marker\n ed_marker.LintMarker().RegisterWithStc(self)\n ed_marker.LintMarkerWarning().RegisterWithStc(self)\n ed_marker.LintMarkerError().RegisterWithStc(self)", "def pytest_pilot_markers():", "def _build_marker_array(header, objects):\n marker_array = MarkerArray()\n\n clean_all_marker = Marker()\n clean_all_marker.header = header\n clean_all_marker.action = Marker.DELETEALL\n marker_array.markers.append(clean_all_marker)\n\n markers = [obj.linelist() for obj in objects]\n markers.extend([obj.min_text() for obj in objects])\n markers.extend([obj.max_text() for obj in objects])\n markers.extend([obj.name_id_text() for obj in objects])\n for idx, marker in enumerate(markers):\n marker.id = idx\n marker_array.markers.extend(markers)\n\n return marker_array", "def getDefaultColorMarkerComboList(self):\n combo_list = list()\n num_markers = len(MplLineMarkers)\n num_colors = len(MplBasicColors)\n\n for i in xrange(num_markers):\n marker = MplLineMarkers[i]\n for j in xrange(num_colors):\n color = MplBasicColors[j]\n combo_list.append((marker, color))\n # ENDFOR (j)\n # ENDFOR(i)\n\n return combo_list", "def set_neighbor_markers(self):\n marker_texture_path = self.PATHS[\"MINIMAP_BG_TEXTURE\"]\n marker_texture = self.loader.loadTexture(marker_texture_path)\n for location in self.locations:\n location_pos = location.get_position()\n for neighbor_id in location.get_neighbors():\n neighbor = next(self.find_location_by_id(neighbor_id))\n neighbor_pos = neighbor.get_position()\n neighbor_displaced = self.calculate_displacement(location_pos, neighbor_pos).tolist()\n neighbor_displaced_x, neighbor_displaced_y = neighbor_displaced\n reference_displaced = self.calculate_displacement(location_pos, self.reference_point).tolist()\n reference_displaced_x, reference_displaced_y = reference_displaced\n angle = self.calculate_angle(neighbor_displaced, reference_displaced)\n\n def reference_line(x_pos):\n slope = reference_displaced_y / reference_displaced_x\n return slope * x_pos\n\n if reference_line(neighbor_displaced_x) > neighbor_displaced_y:\n angle = 360-angle\n\n location.add_neighbor_marker(neighbor, angle, marker_texture)", "def marker(self):\r\n return _marker_of(self.api.paramstyle)" ]
[ "0.65396273", "0.6017809", "0.5980015", "0.5968961", "0.5907878", "0.5887821", "0.5838874", "0.5838874", "0.5829966", "0.58082724", "0.5748896", "0.5732162", "0.5673139", "0.5570676", "0.5556668", "0.5437964", "0.5424193", "0.5405786", "0.53380895", "0.53265303", "0.5313803", "0.5293885", "0.5277419", "0.52519315", "0.5245267", "0.5242303", "0.52372944", "0.52243435", "0.5213944", "0.5201935" ]
0.74688494
0
Basic attach/detach IPv6 test with single UE
def test_attach_detach_ipv6(self):
    num_ues = 2
    detach_type = [
        s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
        s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value,
    ]
    wait_for_s1 = [True, False]
    self._s1ap_wrapper.configUEDevice(num_ues)

    # Default apn over-write
    magma_default_apn = {
        "apn_name": "magma.ipv4",  # APN-name
        "qci": 9,  # qci
        "priority": 15,  # priority
        "pre_cap": 1,  # preemption-capability
        "pre_vul": 0,  # preemption-vulnerability
        "mbr_ul": 200000000,  # MBR UL
        "mbr_dl": 100000000,  # MBR DL
        "pdn_type": 2,  # PDN Type 0-IPv4,1-IPv6,2-IPv4v6
    }
    apn_list = [magma_default_apn]

    for i in range(num_ues):
        req = self._s1ap_wrapper.ue_req
        print(
            "************************* Running End to End attach for ",
            "UE id ",
            req.ue_id,
        )
        self._s1ap_wrapper.configAPN(
            "IMSI" + "".join([str(j) for j in req.imsi]),
            apn_list,
            default=False,
        )
        # Now actually complete the attach
        self._s1ap_wrapper._s1_util.attach(
            req.ue_id,
            s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
            s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
            s1ap_types.ueAttachAccept_t,
            pdn_type=2,
        )
        # Wait on EMM Information from MME
        self._s1ap_wrapper._s1_util.receive_emm_info()
        print(
            "************************* Running UE detach for UE id ",
            req.ue_id,
        )
        # Now detach the UE
        self._s1ap_wrapper.s1_util.detach(
            req.ue_id,
            detach_type[i],
            wait_for_s1[i],
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mgre6(self):\n\n self.pg0.config_ip6()\n self.pg0.resolve_ndp()\n\n e = VppEnum.vl_api_tunnel_encap_decap_flags_t\n\n for itf in self.pg_interfaces[3:]:\n #\n # one underlay nh for each overlay/tunnel peer\n #\n itf.config_ip6()\n itf.generate_remote_hosts(4)\n itf.configure_ipv6_neighbors()\n\n #\n # Create an L3 GRE tunnel.\n # - set it admin up\n # - assign an IP Addres\n # - Add a route via the tunnel\n #\n gre_if = VppGreInterface(\n self,\n itf.local_ip6,\n \"::\",\n mode=(VppEnum.vl_api_tunnel_mode_t.TUNNEL_API_MODE_MP),\n flags=e.TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP,\n )\n\n gre_if.add_vpp_config()\n gre_if.admin_up()\n gre_if.config_ip6()\n gre_if.generate_remote_hosts(4)\n\n #\n # for-each peer\n #\n for ii in range(1, 4):\n route_addr = \"4::%d\" % ii\n\n #\n # Add a TEIB entry resolves the peer\n #\n teib = VppTeib(\n self,\n gre_if,\n gre_if._remote_hosts[ii].ip6,\n itf._remote_hosts[ii].ip6,\n )\n teib.add_vpp_config()\n\n #\n # route traffic via the peer\n #\n route_via_tun = VppIpRoute(\n self,\n route_addr,\n 128,\n [VppRoutePath(gre_if._remote_hosts[ii].ip6, gre_if.sw_if_index)],\n )\n route_via_tun.add_vpp_config()\n\n #\n # Send a packet stream that is routed into the tunnel\n # - packets are GRE encapped\n #\n tx_e = self.create_stream_ip6(\n self.pg0, \"5::5\", route_addr, dscp=2, ecn=1\n )\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_6o6(\n self.pg0, rx, tx_e, itf.local_ip6, itf._remote_hosts[ii].ip6, dscp=2\n )\n tx_i = self.create_tunnel_stream_6o6(\n self.pg0,\n itf._remote_hosts[ii].ip6,\n itf.local_ip6,\n self.pg0.local_ip6,\n self.pg0.remote_ip6,\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_6o6(self.pg0, rx, tx_i)\n\n #\n # delete and re-add the TEIB\n #\n teib.remove_vpp_config()\n self.send_and_assert_no_replies(self.pg0, tx_e)\n\n teib.add_vpp_config()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_6o6(\n self.pg0, rx, tx_e, itf.local_ip6, itf._remote_hosts[ii].ip6, dscp=2\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_6o6(self.pg0, rx, tx_i)\n\n gre_if.admin_down()\n gre_if.unconfig_ip4()\n itf.unconfig_ip6()\n self.pg0.unconfig_ip6()", "def test_lb_ip6_gre6(self):\n\n self.cli(0, \"lb vip 2001::/16 encap gre6\")\n for asid in self.ass:\n self.cli(0, \"lb as 2001::/16 2002::%u\" % (asid))\n\n self.pg_add_stream(0, self.generatePackets(0))\n self.pg_enable_capture([0,1])\n self.pg_start()\n\n # Scapy fails parsing IPv6 over GRE and IPv6 over GRE.\n # This check is therefore disabled for now.\n #self.checkCapture(0, 0)\n\n for asid in self.ass:\n self.cli(0, \"lb as 2001::/16 2002::%u del\" % (asid))\n self.cli(0, \"lb vip 2001::/16 encap gre6 del\")", "def test_lb_ip6_gre4(self):\n\n self.cli(0, \"lb vip 2001::/16 encap gre4\")\n for asid in self.ass:\n self.cli(0, \"lb as 2001::/16 10.0.0.%u\" % (asid))\n\n self.pg_add_stream(0, self.generatePackets(0))\n self.pg_enable_capture([0,1])\n self.pg_start()\n\n # Scapy fails parsing IPv6 over GRE.\n # This check is therefore disabled for now.\n #self.checkCapture(1, 0)\n\n for asid in self.ass:\n self.cli(0, \"lb as 2001::/16 10.0.0.%u del\" % (asid))\n self.cli(0, \"lb vip 2001::/16 encap gre4 del\")", "def test_lb_ip4_gre6(self):\n\n self.cli(0, \"lb vip 90.0.0.0/8 encap gre6\")\n for asid in self.ass:\n self.cli(0, \"lb as 90.0.0.0/8 2002::%u\" % (asid))\n\n self.pg_add_stream(0, self.generatePackets(1))\n self.pg_enable_capture([0,1])\n self.pg_start()\n\n # Scapy fails 
parsing GRE over IPv6.\n # This check is therefore disabled for now.\n # One can easily patch layers/inet6.py to fix the issue.\n #self.checkCapture(0, 1)\n\n for asid in self.ass:\n self.cli(0, \"lb as 90.0.0.0/8 2002::%u\" % (asid))\n self.cli(0, \"lb vip 90.0.0.0/8 encap gre6 del\")", "def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()", "def test_gre6(self):\n\n self.pg1.config_ip6()\n self.pg1.resolve_ndp()\n\n #\n # Create an L3 GRE tunnel.\n # - set it admin up\n # - assign an IP Address\n # - Add a route via the tunnel\n #\n gre_if = VppGreInterface(self, self.pg2.local_ip6, \"1002::1\")\n gre_if.add_vpp_config()\n gre_if.admin_up()\n gre_if.config_ip6()\n\n route_via_tun = VppIpRoute(\n self, \"4004::1\", 128, [VppRoutePath(\"0::0\", gre_if.sw_if_index)]\n )\n\n route_via_tun.add_vpp_config()\n\n #\n # Send a packet stream that is routed into the tunnel\n # - they are all dropped since the tunnel's destintation IP\n # is unresolved - or resolves via the default route - which\n # which is a drop.\n #\n tx = self.create_stream_ip6(self.pg2, \"5005::1\", \"4004::1\")\n self.send_and_assert_no_replies(\n self.pg2, tx, \"GRE packets forwarded without DIP resolved\"\n )\n\n #\n # Add a route that resolves the tunnel's destination\n #\n route_tun_dst = VppIpRoute(\n self,\n \"1002::1\",\n 128,\n [VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index)],\n )\n route_tun_dst.add_vpp_config()\n\n #\n # Send a packet stream that is routed into the tunnel\n # - packets are GRE encapped\n #\n tx = self.create_stream_ip6(self.pg2, \"5005::1\", \"4004::1\")\n rx = self.send_and_expect(self.pg2, tx, self.pg2)\n self.verify_tunneled_6o6(self.pg2, rx, tx, self.pg2.local_ip6, \"1002::1\")\n\n #\n # Test decap. 
decapped packets go out pg1\n #\n tx = self.create_tunnel_stream_6o6(\n self.pg2, \"1002::1\", self.pg2.local_ip6, \"2001::1\", self.pg1.remote_ip6\n )\n rx = self.send_and_expect(self.pg2, tx, self.pg1)\n\n #\n # RX'd packet is UDP over IPv6, test the GRE header is gone.\n #\n self.assertFalse(rx[0].haslayer(GRE))\n self.assertEqual(rx[0][IPv6].dst, self.pg1.remote_ip6)\n\n #\n # Send v4 over v6\n #\n route4_via_tun = VppIpRoute(\n self, \"1.1.1.1\", 32, [VppRoutePath(\"0.0.0.0\", gre_if.sw_if_index)]\n )\n route4_via_tun.add_vpp_config()\n\n tx = self.create_stream_ip4(self.pg0, \"1.1.1.2\", \"1.1.1.1\")\n rx = self.send_and_expect(self.pg0, tx, self.pg2)\n\n self.verify_tunneled_4o6(self.pg0, rx, tx, self.pg2.local_ip6, \"1002::1\")\n\n #\n # test case cleanup\n #\n route_tun_dst.remove_vpp_config()\n route_via_tun.remove_vpp_config()\n route4_via_tun.remove_vpp_config()\n gre_if.remove_vpp_config()\n\n self.pg2.unconfig_ip6()\n self.pg1.unconfig_ip6()", "def test_attach_detach_flaky_retry_success(self):\n num_ues = 2\n detach_type = [\n s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,\n s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value,\n ]\n wait_for_s1 = [True, False]\n self._s1ap_wrapper.configUEDevice(num_ues)\n\n for i in range(num_ues):\n req = self._s1ap_wrapper.ue_req\n print(\n \"************************* Running End to End attach for \",\n \"UE Id\",\n req.ue_id,\n )\n # Now actually complete the attach\n attach = self._s1ap_wrapper._s1_util.attach(\n req.ue_id,\n s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,\n s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,\n s1ap_types.ueAttachAccept_t,\n )\n addr = attach.esmInfo.pAddr.addrInfo\n default_ip = ipaddress.ip_address(bytes(addr[:4]))\n\n # Wait for EMM Information from MME\n self._s1ap_wrapper._s1_util.receive_emm_info()\n\n print(\"Waiting for 3 seconds for the flow rules creation\")\n time.sleep(3)\n # Verify if flow rules are created\n # 1 UL flow for default bearer\n num_ul_flows = 1\n dl_flow_rules = {default_ip: []}\n self._s1ap_wrapper.s1_util.verify_flow_rules(\n num_ul_flows,\n dl_flow_rules,\n )\n\n # Now detach the UE\n print(\n \"************************* Running UE detach for UE Id\",\n req.ue_id,\n )\n self._s1ap_wrapper.s1_util.detach(\n req.ue_id,\n detach_type[i],\n wait_for_s1[i],\n )\n\n print(\"Waiting for 5 seconds for the flow rules deletion\")\n time.sleep(5)\n # Verify that all UL/DL flows are deleted\n self._s1ap_wrapper.s1_util.verify_flow_rules_deletion()", "def test_fif6(self):\n # TODO this should be ideally in setUpClass, but then we hit a bug\n # with VppIpRoute incorrectly reporting it's present when it's not\n # so we need to manually remove the vpp config, thus we cannot have\n # it shared for multiple test cases\n self.tun_ip6 = \"1002::1\"\n\n self.gre6 = VppGreInterface(self, self.src_if.local_ip6, self.tun_ip6)\n self.gre6.add_vpp_config()\n self.gre6.admin_up()\n self.gre6.config_ip6()\n\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=self.gre6.sw_if_index, enable_ip6=True\n )\n\n self.route6 = VppIpRoute(\n self,\n self.tun_ip6,\n 128,\n [VppRoutePath(self.src_if.remote_ip6, self.src_if.sw_if_index)],\n )\n self.route6.add_vpp_config()\n\n self.reset_packet_infos()\n for i in range(test_packet_count):\n info = self.create_packet_info(self.src_if, self.dst_if)\n payload = self.info_to_payload(info)\n # Ethernet header here is only for size calculation, thus it\n # doesn't matter how it's initialized. 
This is to ensure that\n # reassembled packet is not > 9000 bytes, so that it's not dropped\n p = (\n Ether()\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / UDP(sport=1234, dport=5678)\n / Raw(payload)\n )\n size = self.packet_sizes[(i // 2) % len(self.packet_sizes)]\n self.extend_packet(p, size, self.padding)\n info.data = p[IPv6] # use only IPv6 part, without ethernet header\n\n fragments = [\n x\n for _, i in self._packet_infos.items()\n for x in fragment_rfc8200(i.data, i.index, 400)\n ]\n\n encapped_fragments = [\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IPv6(src=self.tun_ip6, dst=self.src_if.local_ip6)\n / GRE()\n / p\n for p in fragments\n ]\n\n fragmented_encapped_fragments = [\n x\n for p in encapped_fragments\n for x in (\n fragment_rfc8200(\n p, 2 * len(self._packet_infos) + p[IPv6ExtHdrFragment].id, 200\n )\n if IPv6ExtHdrFragment in p\n else [p]\n )\n ]\n\n self.src_if.add_stream(fragmented_encapped_fragments)\n\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n\n self.src_if.assert_nothing_captured()\n packets = self.dst_if.get_capture(len(self._packet_infos))\n self.verify_capture(packets, IPv6)\n\n # TODO remove gre vpp config by hand until VppIpRoute gets fixed\n # so that it's query_vpp_config() works as it should\n self.gre6.remove_vpp_config()", "def run_example_udpv6():\n config = {\n 'server_address': '10.8.254.65',\n 'wan_port': Device(interface='nontrunk-1',\n mac='00:ff:1f:00:00:01',\n iptype=6, slaac=True),\n 'cpe_port': Device(interface='trunk-1-19',\n iptype=6, dhcp=True,\n nat=True),\n 'traffic_profile': UdpTrafficProfile(interframegap=1000000, # ns\n frame_size=508 # bytes\n ),\n 'number_of_downstream_flows': 2,\n 'number_of_upstream_flows': 1,\n 'traffic_duration': datetime.timedelta(seconds=3)\n }\n\n with Example(**config) as example:\n results = example.run()\n return results", "def test_03_verify_upgraded_ipv6_network(self):\n\n self.createIpv4NetworkOffering(False)\n self.createIpv6NetworkOfferingForUpdate(False)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()", "def OSSupportsIPv6(self) -> bool:", "def test_ipv4v6_secondary_pdn_with_ded_bearer_multi_ue(self):\n num_ues = 4\n ue_ids = []\n default_ip = []\n sec_ip_ipv4 = []\n sec_ip_ipv6 = []\n\n # APN of the secondary PDN\n ims_apn = {\n \"apn_name\": \"ims\", # APN-name\n \"qci\": 5, # qci\n \"priority\": 15, # priority\n \"pre_cap\": 0, # preemption-capability\n \"pre_vul\": 0, # preemption-vulnerability\n \"mbr_ul\": 200000000, # MBR UL\n \"mbr_dl\": 100000000, # MBR DL\n \"pdn_type\": 2, # PDN Type 0-IPv4,1-IPv6,2-IPv4v6\n }\n # UL Flow description #1\n ulFlow1 = {\n \"ipv4_dst\": \"192.168.129.42\", # IPv4 destination address\n \"tcp_dst_port\": 5002, # TCP dest port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.UPLINK, # Direction\n }\n\n # UL Flow description #2\n ulFlow2 = {\n \"ipv4_dst\": \"192.168.129.42\", # IPv4 destination address\n \"tcp_dst_port\": 5001, # TCP dest port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.UPLINK, # Direction\n }\n\n # UL Flow description #3\n ulFlow3 = {\n \"ipv6_dst\": \"5e90:db7b:b18e::1556\", # IPv6 destination address\n 
\"tcp_dst_port\": 5003, # TCP dest port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.UPLINK, # Direction\n }\n\n # DL Flow description #1\n dlFlow1 = {\n \"ipv4_src\": \"192.168.129.42\", # IPv4 source address\n \"tcp_src_port\": 5001, # TCP source port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.DOWNLINK, # Direction\n }\n\n # DL Flow description #2\n dlFlow2 = {\n \"ipv4_src\": \"192.168.129.64\", # IPv4 source address\n \"tcp_src_port\": 5002, # TCP source port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.DOWNLINK, # Direction\n }\n\n # DL Flow description #3\n dlFlow3 = {\n \"ipv6_src\": \"6e31:1a95:1e7c::df1\", # IPv6 source address\n \"tcp_src_port\": 5003, # TCP source port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.DOWNLINK, # Direction\n }\n\n # Flow lists to be configured\n flow_list = [\n ulFlow1,\n ulFlow2,\n ulFlow3,\n dlFlow1,\n dlFlow2,\n dlFlow3,\n ]\n\n # QoS\n qos = {\n \"qci\": 1, # qci value [1 to 9]\n \"priority\": 1, # Range [0-255]\n \"max_req_bw_ul\": 10000000, # MAX bw Uplink\n \"max_req_bw_dl\": 15000000, # MAX bw Downlink\n \"gbr_ul\": 1000000, # GBR Uplink\n \"gbr_dl\": 2000000, # GBR Downlink\n \"arp_prio\": 1, # ARP priority\n \"pre_cap\": 1, # pre-emption capability\n \"pre_vul\": 1, # pre-emption vulnerability\n }\n\n policy_id = \"ims\"\n\n self._s1ap_wrapper.configUEDevice(num_ues)\n for i in range(num_ues):\n req = self._s1ap_wrapper.ue_req\n ue_id = req.ue_id\n\n apn_list = [ims_apn]\n self._s1ap_wrapper.configAPN(\n \"IMSI\" + \"\".join([str(i) for i in req.imsi]), apn_list,\n )\n print(\n \"*********************** Running End to End attach for UE id \",\n ue_id,\n )\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n # Attach\n attach = self._s1ap_wrapper.s1_util.attach(\n ue_id,\n s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,\n s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,\n s1ap_types.ueAttachAccept_t,\n )\n addr = attach.esmInfo.pAddr.addrInfo\n default_ip.append(ipaddress.ip_address(bytes(addr[:4])))\n ue_ids.append(ue_id)\n\n # Wait on EMM Information from MME\n self._s1ap_wrapper._s1_util.receive_emm_info()\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n self._s1ap_wrapper._ue_idx = 0\n for i in range(num_ues):\n req = self._s1ap_wrapper.ue_req\n ue_id = req.ue_id\n\n apn = \"ims\"\n # PDN Type 2 = IPv6, 3 = IPv4v6\n pdn_type = 3\n # Send PDN Connectivity Request\n self._s1ap_wrapper.sendPdnConnectivityReq(\n ue_id, apn, pdn_type=pdn_type,\n )\n # Receive PDN CONN RSP/Activate default EPS bearer context request\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value\n act_def_bearer_req = response.cast(s1ap_types.uePdnConRsp_t)\n\n addr = act_def_bearer_req.m.pdnInfo.pAddr.addrInfo\n sec_ip_ipv4.append(ipaddress.ip_address(bytes(addr[8:12])))\n\n print(\n \"********************** Sending Activate default EPS bearer \"\n \"context accept for APN-%s, UE id-%d\" % (apn, ue_id),\n )\n print(\n \"********************** Added default bearer for apn-%s,\"\n \" bearer id-%d, pdn type-%d\"\n % (\n apn,\n act_def_bearer_req.m.pdnInfo.epsBearerId,\n pdn_type,\n ),\n )\n\n # Receive Router Advertisement message\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_ROUTER_ADV_IND.value\n routerAdv = response.cast(s1ap_types.ueRouterAdv_t)\n print(\n 
\"******************* Received Router Advertisement for APN-%s\"\n \" ,bearer id-%d\" % (apn, routerAdv.bearerId),\n )\n\n ipv6_addr = \"\".join([chr(i) for i in routerAdv.ipv6Addr]).rstrip(\n \"\\x00\",\n )\n print(\"******* UE IPv6 address: \", ipv6_addr)\n sec_ip_ipv6.append(ipaddress.ip_address(ipv6_addr))\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n\n # Add dedicated bearer\n print(\"********************** Adding dedicated bearer to ims PDN\")\n print(\n \"********************** Sending RAR for IMSI\",\n \"\".join([str(i) for i in req.imsi]),\n )\n self._sessionManager_util.send_ReAuthRequest(\n \"IMSI\" + \"\".join([str(i) for i in req.imsi]),\n policy_id,\n flow_list,\n qos,\n )\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value\n act_ded_ber_req_ims_apn = response.cast(\n s1ap_types.UeActDedBearCtxtReq_t,\n )\n self._s1ap_wrapper.sendActDedicatedBearerAccept(\n req.ue_id, act_ded_ber_req_ims_apn.bearerId,\n )\n print(\n \"************* Added dedicated bearer\",\n act_ded_ber_req_ims_apn.bearerId,\n )\n\n print(\"***** Sleeping for 10 seconds\")\n time.sleep(10)\n # ipv4 default pdn + ipv4v6(ims) pdn +\n # dedicated bearer for ims pdn for 4 UEs\n num_ul_flows = 12\n for i in range(num_ues):\n dl_flow_rules = {\n default_ip[i]: [],\n sec_ip_ipv4[i]: [flow_list],\n sec_ip_ipv6[i]: [flow_list],\n }\n # Verify if flow rules are created\n self._s1ap_wrapper.s1_util.verify_flow_rules(\n num_ul_flows, dl_flow_rules,\n )\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n for ue in ue_ids:\n print(\n \"******************* Running UE detach (switch-off) for \",\n \"UE id \",\n ue,\n )\n # Now detach the UE\n self._s1ap_wrapper.s1_util.detach(\n ue, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False,\n )", "def test_add_autoassigned_ipv6(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv6\")\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::1\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv6\")\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::3\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::2\", retries=3)", "def test_mgre(self):\n\n for itf in self.pg_interfaces[3:]:\n #\n # one underlay nh for each overlay/tunnel peer\n #\n itf.generate_remote_hosts(4)\n itf.configure_ipv4_neighbors()\n\n #\n # Create an L3 GRE tunnel.\n # - set it admin up\n # - assign an IP Addres\n # - Add a route via the tunnel\n #\n gre_if = VppGreInterface(\n self,\n itf.local_ip4,\n \"0.0.0.0\",\n mode=(VppEnum.vl_api_tunnel_mode_t.TUNNEL_API_MODE_MP),\n )\n gre_if.add_vpp_config()\n gre_if.admin_up()\n gre_if.config_ip4()\n gre_if.generate_remote_hosts(4)\n\n self.logger.info(self.vapi.cli(\"sh adj\"))\n self.logger.info(self.vapi.cli(\"sh ip fib\"))\n\n #\n # ensure we don't match to the tunnel if the source address\n # is all zeros\n #\n tx = self.create_tunnel_stream_4o4(\n self.pg0,\n \"0.0.0.0\",\n itf.local_ip4,\n self.pg0.local_ip4,\n self.pg0.remote_ip4,\n )\n 
self.send_and_assert_no_replies(self.pg0, tx)\n\n #\n # for-each peer\n #\n for ii in range(1, 4):\n route_addr = \"4.4.4.%d\" % ii\n tx_e = self.create_stream_ip4(self.pg0, \"5.5.5.5\", route_addr)\n\n #\n # route traffic via the peer\n #\n route_via_tun = VppIpRoute(\n self,\n route_addr,\n 32,\n [VppRoutePath(gre_if._remote_hosts[ii].ip4, gre_if.sw_if_index)],\n )\n route_via_tun.add_vpp_config()\n\n # all packets dropped at this point\n rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n gre_if.admin_down()\n gre_if.admin_up()\n rx = self.send_and_assert_no_replies(self.pg0, tx_e)\n\n #\n # Add a TEIB entry resolves the peer\n #\n teib = VppTeib(\n self,\n gre_if,\n gre_if._remote_hosts[ii].ip4,\n itf._remote_hosts[ii].ip4,\n )\n teib.add_vpp_config()\n\n #\n # Send a packet stream that is routed into the tunnel\n # - packets are GRE encapped\n #\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n\n tx_i = self.create_tunnel_stream_4o4(\n self.pg0,\n itf._remote_hosts[ii].ip4,\n itf.local_ip4,\n self.pg0.local_ip4,\n self.pg0.remote_ip4,\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n #\n # delete and re-add the TEIB\n #\n teib.remove_vpp_config()\n self.send_and_assert_no_replies(self.pg0, tx_e)\n self.send_and_assert_no_replies(self.pg0, tx_i)\n\n teib.add_vpp_config()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n #\n # bounce the interface state and try packets again\n #\n gre_if.admin_down()\n gre_if.admin_up()\n rx = self.send_and_expect(self.pg0, tx_e, itf)\n self.verify_tunneled_4o4(\n self.pg0, rx, tx_e, itf.local_ip4, itf._remote_hosts[ii].ip4\n )\n rx = self.send_and_expect(self.pg0, tx_i, self.pg0)\n self.verify_decapped_4o4(self.pg0, rx, tx_i)\n\n gre_if.admin_down()\n gre_if.unconfig_ip4()", "def SupportsIPv6(self) -> bool:", "def ipv6_cmd(args):\n r = requete(\"Devices:get\")\n for i in r['status']:\n a = \"-\"\n if 'IPv6Address' in i:\n for j in i['IPv6Address']:\n if j['Scope'] != 'link':\n a = j['Address']\n b = \"-\"\n if 'IPAddress' in i: b = i['IPAddress']\n if a == \"-\": continue\n print(\"%4s %-32s %-5s %-16s %s\" % (i['Index'], i['Name'], i['Active'], b, a))", "def test_ipv6_in_net(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344/24\")\n assert test_ip.in_network(\"2001:0d00::/24\")\n assert test_ip.in_network(\"2001:0d00::/29\")", "def test_lb_ip4_gre4(self):\n\n return\n self.cli(0, \"lb vip 90.0.0.0/8 encap gre4\")\n for asid in self.ass:\n self.cli(0, \"lb as 90.0.0.0/8 10.0.0.%u\" % (asid))\n\n self.pg_add_stream(0, self.generatePackets(1))\n self.pg_enable_capture([0,1])\n self.pg_start()\n self.checkCapture(1, 1)\n\n for asid in self.ass:\n self.cli(0, \"lb as 90.0.0.0/8 10.0.0.%u del\" % (asid))\n self.cli(0, \"lb vip 90.0.0.0/8 encap gre4 del\")", "def CASE6( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=6,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=1,\n ipv6=1,\n description=\"Ping between all ipv4 and ipv6 hosts in the topology and check connectivity to external hosts\",\n checkExternalHost=True,\n linkFailure=False,\n countFlowsGroups=False )", "def 
test_duplicate_attach(self):\n # Ground work.\n self._s1ap_wrapper.configUEDevice(1)\n req = self._s1ap_wrapper.ue_req\n print(\"************************* Running duplicate attach test\")\n self._s1ap_wrapper._s1_util.attach(\n req.ue_id,\n s1ap_types.tfwCmd.UE_ATTACH_REQUEST,\n s1ap_types.tfwCmd.UE_AUTH_REQ_IND,\n s1ap_types.ueAttachComplete_t,\n )\n auth_res = s1ap_types.ueAuthResp_t()\n auth_res.ue_Id = req.ue_id\n sqn_recvd = s1ap_types.ueSqnRcvd_t()\n sqn_recvd.pres = False\n auth_res.sqnRcvd = sqn_recvd\n\n self._s1ap_wrapper._s1_util.issue_cmd(\n s1ap_types.tfwCmd.UE_AUTH_RESP,\n auth_res,\n )\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_SEC_MOD_CMD_IND.value\n\n sec_mode_complete = s1ap_types.ueSecModeComplete_t()\n sec_mode_complete.ue_Id = req.ue_id\n self._s1ap_wrapper._s1_util.issue_cmd(\n s1ap_types.tfwCmd.UE_SEC_MOD_COMPLETE,\n sec_mode_complete,\n )\n\n # Receive initial context setup and attach accept indication\n response = (\n self._s1ap_wrapper._s1_util\n .receive_initial_ctxt_setup_and_attach_accept()\n )\n attach_acc = response.cast(s1ap_types.ueAttachAccept_t)\n print(\n \"************************ Dropping received attach accept for UE Id:\",\n attach_acc.ue_Id,\n )\n\n while True:\n response = self._s1ap_wrapper.s1_util.get_response()\n if (\n response.msg_type\n == s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND.value\n ):\n attach_acc = response.cast(s1ap_types.ueAttachAccept_t)\n print(\n \"************************ Dropping received attach accept for UE Id:\",\n attach_acc.ue_Id,\n )\n continue\n\n assert response.msg_type == s1ap_types.tfwCmd.UE_CTX_REL_IND.value\n print(\n \"************************ Received UE context release indication\",\n )\n break\n\n print(\"************************ Running RE-Attach\")\n self._s1ap_wrapper._s1_util.attach(\n req.ue_id,\n s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,\n s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,\n s1ap_types.ueAttachAccept_t,\n )\n\n # Wait on EMM Information from MME\n self._s1ap_wrapper._s1_util.receive_emm_info()\n print(\"************************* Running UE detach\")\n # Now detach the UE\n self._s1ap_wrapper._s1_util.detach(\n req.ue_id,\n s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,\n )", "async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"fd00::eeb5:faff:fe84:b17d\",\n addresses=[\"fd00::eeb5:faff:fe84:b17d\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"invalid_host\"", "def test_ipv6_validation_success():\n assert is_ipv6('2001:db8::ff00:42:8329')", "def test_02_verify_ipv6_network_redundant(self):\n\n self.createIpv6NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n 
self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def test_add_autoassigned_pool_ipv6(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv6 addresses gives what we expect\n workloads = self._setup_env(host, count=2,\n ip=self.DEFAULT_IPV6_POOL)\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::1\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::\", retries=3)", "def conflictBetweenIPv4AndIPv6():\n s4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s6 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n try:\n s4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s4.bind((\"\", 0))\n s4.listen(1)\n usedport = s4.getsockname()[1]\n try:\n s6.bind((\"::\", usedport))\n except socket.error:\n return True\n else:\n return False\n finally:\n s4.close()\n s6.close()", "def test_recv_ipv6(self):\n self.sendto(self.teststr, ('::1', 1666, 0, 0), socket.AF_INET6)\n args, kwargs = self.mock.call_args\n (addr, _, _, _), message = args\n assert addr, message == ('::1', self.teststr)", "def add_ipv6(self, id_network_ipv6, id_equip, description):\n\n ip_map = dict()\n ip_map['id_network_ipv6'] = id_network_ipv6\n ip_map['description'] = description\n ip_map['id_equip'] = id_equip\n\n code, xml = self.submit({'ip': ip_map}, 'POST', 'ipv6/')\n\n return self.response(code, xml)", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "async def test_ipv6_configuration(\n ip6config_service: IP6ConfigService, dbus_session_bus: MessageBus\n):\n ip6 = IpConfiguration(\"/org/freedesktop/NetworkManager/IP6Config/1\", ip4=False)\n\n assert ip6.gateway is None\n assert ip6.nameservers is None\n\n await ip6.connect(dbus_session_bus)\n\n assert ip6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")\n assert ip6.nameservers == [\n IPv6Address(\"2001:1620:2777:1::10\"),\n IPv6Address(\"2001:1620:2777:2::20\"),\n ]\n\n ip6config_service.emit_properties_changed({\"Gateway\": \"2001:1620:2777:1::10\"})\n await ip6config_service.ping()\n assert ip6.gateway == IPv6Address(\"2001:1620:2777:1::10\")\n\n ip6config_service.emit_properties_changed({}, [\"Gateway\"])\n await ip6config_service.ping()\n await ip6config_service.ping()\n assert ip6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")", "def setUp(self):\n super().setUp()\n for intf in self.send_ifs:\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=intf.sw_if_index, enable_ip6=True\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10,\n is_ip6=1,\n )\n self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=1000,\n is_ip6=1,\n )" ]
[ "0.6983675", "0.6860099", "0.6674686", "0.6540398", "0.6405422", "0.63681173", "0.6198082", "0.61734575", "0.6095474", "0.6050625", "0.602617", "0.6024223", "0.59982115", "0.59627396", "0.59101856", "0.58795923", "0.5868089", "0.58631897", "0.5853766", "0.5821142", "0.57543546", "0.5753978", "0.5716748", "0.5708348", "0.56096166", "0.55903065", "0.55760086", "0.55744416", "0.55420554", "0.5537035" ]
0.83120114
0
Create a Pandas DataFrame out of a .yaml file.
def yaml_to_pandas(filename: str) -> Tuple[pd.DataFrame, Optional[str]]:
    # Read the yaml file
    with open(filename, 'r') as f:
        dict_ = yaml.load(f, Loader=yaml.SafeLoader)
    project = dict_.pop("__project__", None)

    # Convert the yaml dictionary into a dataframe
    data: Dict[str, Dict[Tuple[Hashable, Hashable], Any]] = {}
    for k1, v1 in dict_.items():
        for k2, v2 in v1['users'].items():
            data[k2] = {('info', k): v for k, v in v1.items() if k != 'users'}
            data[k2][NAME] = v2
            data[k2][PROJECT] = k1
    df = pd.DataFrame(data).T

    # Format, sort and return the dataframe
    df.index.name = 'username'
    df[SBU_REQUESTED] = df[SBU_REQUESTED].astype(float)
    df[TMP] = df.index
    df.sort_values(by=[PROJECT, TMP], inplace=True)
    df.sort_index(axis=1, inplace=True, ascending=False)
    del df[TMP]
    df[ACTIVE] = False
    validate_usernames(df)
    return df, project
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_to_dataframe(\n filename: Union[Path, str],\n marker: str = \"---\",\n csv_options: Optional[Dict[str, Any]] = None,\n yaml_options: Optional[Dict[str, Any]] = None,\n) -> Tuple[DataFrame, Dict[str, Any]]:\n if DataFrame is None:\n raise ModuleNotFoundError(\n \"Module pandas is not present. Install it to read data into DataFrame.\"\n )\n import pandas as pd\n\n yaml_options = yaml_options if yaml_options is not None else {}\n header, nlines, comment = read_header(filename, marker=marker, **yaml_options)\n\n options = csv_options.copy() if csv_options is not None else {}\n options[\"skiprows\"] = nlines\n options[\"comment\"] = comment[0] if len(comment) >= 1 else None\n return pd.read_csv(filename, **options), header", "def load_file(filename):\n with open(filename, 'rt') as f:\n d = json.load(f)\n return pd.DataFrame.from_records(d['dataset'])", "def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df", "def load_dataset(path: str) -> pd.DataFrame:\n with open(path) as f:\n data = []\n problems = 0\n for line_num, line in enumerate(f):\n try:\n # replace ' around keys/values with \"\n cleaned_line = re.sub(r\"(?<={|\\s)'|'(?=,|:|})\", '\"', line)\n # replace all \" in text with \\\"\n cleaned_line = re.sub(\n r\"(?<!{)(?<!,\\s|:\\s)\\\"(?!,|:|})\", '\\\\\"', cleaned_line\n )\n # replace all \\' with '\n cleaned_line = cleaned_line.replace(\"\\\\'\", \"'\")\n\n # removes rows where comments were incorrectly parsed as a key\n data_dict = json.loads(cleaned_line)\n for k in data_dict.keys():\n assert len(k) < 20\n\n data.append(data_dict)\n except Exception:\n problems += 1\n assess_problematic_entries(n_problems=problems, data=data)\n return pd.DataFrame(data)", "def read_source_sink_yaml(yaml_fn):\n with open(yaml_fn, 'r') as file:\n data = yaml.safe_load(file)\n if 'sources' in data.keys():\n df_sources = pd.DataFrame.from_dict(data['sources'],orient='index',\n columns=['x','y'])\n else:\n df_sources = pd.DataFrame() \n if 'sinks' in data.keys():\n df_sinks = pd.DataFrame.from_dict(data['sinks'],orient='index',\n columns=['x','y'])\n else:\n df_sinks = pd.DataFrame() \n return df_sources, df_sinks", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=5, exog_idx=[10, 2, 6, 7, 8])", "def load_data(file):\n if file == \"test\":\n file_path = '../data/day-4-test.txt'\n elif file == \"full\":\n file_path = '../data/day-4.txt'\n else:\n raise Exception('load_data() must take the input argument \"test\" or \"full\"')\n\n # read file\n with open(file_path) as f:\n lines = f.read().split(\"\\n\\n\")\n\n # turn into a dictionary, then a data frame\n f = lambda x: pd.DataFrame(list_to_dict(x.split()), index = [0])\n x = [f(x) for x in lines]\n return pd.concat(x, ignore_index=True, sort=True)", "def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, Loader=yaml.FullLoader)\n \n return traj_data", "def from_yaml(cls, path, sample_ids=None, only_landmark=True, **filter_kwargs):\n data_df, sample_meta_df, gene_meta_df = yaml_to_dataframes(\n path, sample_ids, only_landmark, **filter_kwargs\n )\n data = data_df.join(sample_meta_df)\n return cls(data, gene_meta_df, len(gene_meta_df))", "def get_file_df(filepath):\n dd = [json.loads(f) for f in open(filepath).readlines()]\n return 
pd.DataFrame(dd)", "def load():\n return load_pandas()", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def parser(self) -> 'Builder':\n\n # loop over datasets\n df_parts = []\n entries = defaultdict(dict)\n # for dataset in input_yaml.keys():\n for dataset in self.input_yaml['catalog']:\n # get a list of keys that are common to all files in the dataset\n entries['global'] = {}\n for g in dataset.keys():\n if 'data_sources' not in g and 'ensemble' not in g:\n entries['global'][g] = dataset[g]\n # loop over ensemble members, if they exist\n if 'ensemble' in dataset.keys():\n for member in dataset['ensemble']:\n glob_string = member.pop('glob_string')\n filelist = glob.glob(glob_string)\n for f in filelist:\n entries[f].update(member)\n # loop over all of the data_sources for the dataset, create a dataframe\n # for each data_source, append that dataframe to a list that will contain\n # the full dataframe (or catalog) based on everything in the yaml file.\n for stream_info in dataset['data_sources']:\n filelist = glob.glob(stream_info['glob_string'])\n stream_info.pop('glob_string')\n for f in filelist:\n entries[f].update(stream_info)\n\n partial_parser_netcdf = functools.partial(self._parser_netcdf, local_attrs=entries)\n self.builder = Builder(None, parser=partial_parser_netcdf, lazy=False)\n self.builder.filelist = [x for x in entries.keys() if x != 'global']\n df_parts.append(\n self.builder.build('path', 'variable')\n .df.set_index('path')\n .apply(lambda x: x.apply(pd.Series).stack())\n .reset_index()\n .drop('level_1', 1)\n )\n # create the combined dataframe from all of the data_sources and datasets from\n # the yaml file\n df = pd.concat(df_parts, sort=False)\n\n self.builder.df = df.sort_values(by=['path'])\n return self.builder", "def load_data(filepath):\n \n return pd.read_json(filepath)", "def load(file):\n return pq.read_table(file).to_pandas()", "def load_pickle(args):\n with open(args.pickle_name, 'rb') as fh:\n datum = pickle.load(fh)\n\n df = pd.DataFrame.from_dict(datum['labels'])\n\n return df", "def read(filename, replace_columns=True):\n f = open(filename)\n lines = f.readlines()\n f.close()\n\n # Extract column names from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Columns:'):\n columns = []\n odt_section = i # Should be removed after runs are split.\n for part in re.split('Oxs_|Anv_|Southampton_', line)[1:]:\n for char in [\"{\", \"}\", \" \", \"\\n\"]:\n part = part.replace(char, '')\n if replace_columns:\n if part in columns_dic.keys():\n columns.append(columns_dic[part])\n else:\n msg = \"Entry {} not in lookup table.\".format(part)\n raise ValueError(msg)\n else:\n columns.append(part)\n\n # Extract units from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Units:'):\n units = line.split()[2:]\n\n # Extract the data from the odt file.\n data = []\n for i, line in enumerate(lines[odt_section:]):\n if not line.startswith(\"#\"):\n data.append([float(number) for number in line.split()])\n\n df = pd.DataFrame(data, columns=columns)\n # next line is required to allow adding list-like attribute to pandas DataFrame\n # see https://github.com/pandas-dev/pandas/blob/2f9d4fbc7f289a48ed8b29f573675cd2e21b2c89/pandas/core/generic.py#L3631\n df._metadata.append('units')\n df.units = dict(zip(columns, units))\n return df", "def h5ToDf(filename):\n log.info(f\"Import data from: {filename}\")\n 
with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)", "def h5ToDf(filename):\n log.info(f\"Import data from: {filename}\")\n with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)", "def to_pandas(data: str) -> pandas.DataFrame:\n src = json.loads(data)\n if isinstance(src, list): # list of row dicts\n return pandas.DataFrame.from_records(src)\n if 'instances' in src: # TF serving's \"instances\" format\n return pandas.DataFrame.from_records(src['instances'])\n if 'inputs' in src: # TF serving's \"inputs\" format\n return pandas.DataFrame.from_dict(src['inputs'], orient='columns')\n # fallback to columns\n return pandas.DataFrame.from_dict(src, orient='columns')", "def yaml2csv(source_yaml,source_csv):\n with open(source_yaml, 'r') as file:\n source_sink = yaml.safe_load(file)\n potw = source_sink['sources']\n sites = potw.keys()\n sites = list(sites)\n a = [potw[k] for k in sites]\n a = np.array(a)\n x = a[:,0]\n y = a[:,1]\n df = pd.DataFrame({'sites': sites,'x':x,'y':y})\n df.to_csv(source_csv)", "def from_file(filename, decomposer=None, x=None):\n from six.moves.cPickle import load\n with open(filename, 'rb') as f:\n data = load(f)\n return DF.from_dict(data, decomposer, x)", "def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")", "def run(args):\n with open(args.config, \"r\") as f:\n config = yaml.load(f)\n\n how = config[\"load_data\"][\"how\"]\n\n if how == \"load_csvs\":\n df = load_csvs(**config[\"load_data\"][\"load_csvs\"])\n\n if args.save is not None:\n df.to_csv(args.save)", "def create_datastructure_from_yaml_file(self):\n ## loading the YAML file\n try:\n with open(self.source) as f:\n hbaseSchemaDic = yaml.load(f) \n except:\n msg = \"Error: the HBase substructure could not be created. File %s could not be loaded. Please check the syntax of the '.yml' file.\" % self.source \n raise createDataStructureException(msg)\n status = \"failed\" \n\n try:\n c = Connection(host = self.host, port = int(self.port))\n tbls = c.tables()\n tbls = [str(t) for t in tbls]\n ## check that none of the tables already exists \n for t in hbaseSchemaDic.keys():\n if t in tbls:\n msg = \"Error: the table %s already exists. If you use starbase in python you can drop the table by using \\n>>> from starbase import Connection\\n>>> c = Connection()\\n>>> t = c.table(%s)\\n>>> t.drop()\" % (t,t) \n print(msg)\n status = \"failed\"\n raise createDataStructureException(msg)\n\n ## if none of the table(s) do(es) not exist, let's create them(it) \n for t in hbaseSchemaDic.keys():\n columnFamilies = hbaseSchemaDic[t]['columnFamilies'].keys()\n tC = c.table(t)\n tC.create(*columnFamilies)\n status = \"succeeded\"\n \n except:\n msg = \"Error: the HBase substructure could not be created. 
Please check your connection parameters or the syntax in your '.yml' file.\"\n raise createDataStructureException(msg)\n status = \"failed\"\n\n return(status)", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def create_df(files_list=my_files):\n\n all_records = list()\n\n for file in files_list:\n all_records += zr_parser(path.join(my_dir, file))\n\n return pd.DataFrame(all_records)", "def load(filename):\n if filename.endswith(MULTI_TEST_FILE_NAME) or\\\n filename.endswith(SINGLE_TEST_FILE_NAME):\n column_names.remove('complexity')\n\n try:\n df = pd.read_csv(f\"{filename}\", delimiter='\\t', header=0,\n names=column_names, quoting=csv.QUOTE_NONE,\n encoding='utf-8')\n except pd.errors.ParserError:\n # sadly occurs in MWE mode\n df = pd.read_csv(f\"{filename}\", delimiter='\\t', header=0,\n names=column_names, quoting=csv.QUOTE_NONE,\n encoding='utf-8')\n return df", "def load_test_as_df(\n key: str, source_base_path: str = SOURCE_BASE_PATH) -> pd.DataFrame:\n\n return _load_target_and_source(key, source_base_path, 'test')" ]
[ "0.6223266", "0.6129303", "0.6100554", "0.60840815", "0.6082889", "0.5902778", "0.5898788", "0.5859711", "0.5854862", "0.5829673", "0.5819154", "0.58064884", "0.58064884", "0.5772481", "0.5768575", "0.57589835", "0.5717933", "0.5713953", "0.5706922", "0.5706922", "0.5701406", "0.56961644", "0.56856465", "0.56144786", "0.55959606", "0.55698055", "0.55465466", "0.55297303", "0.5526247", "0.54746914" ]
0.6970648
0
Validate that all users belonging to an account are available in the .yaml input file. Raises a KeyError if one or more usernames printed by the ``accinfo`` command are absent from df.
def validate_usernames(df: pd.DataFrame) -> None:
    # Parse the `accinfo` output for the users linked to this account
    _usage = check_output(['accinfo'], encoding='utf8')
    iterator = filter(None, _usage.splitlines())
    for i in iterator:
        if i == "# Users linked to this account":
            usage = np.array(list(iterator), dtype=np.str_)
            break
    else:
        raise ValueError("Failed to parse the passed .yaml file")

    # Compare the `accinfo` user list against the dataframe index in both directions
    bool_ar1 = np.isin(usage, df.index)
    bool_ar2 = np.isin(df.index, usage)

    name_diff = ""
    name_diff += "".join(f"\n- {name}" for name in usage[~bool_ar1])
    name_diff += "".join(f"\n+ {name}" for name in df.index[~bool_ar2].values)
    if name_diff:
        raise ValueError(f"User mismatch between .yaml file and `accinfo` output:{name_diff}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_user_data(user_data):\n return 'account_ids' in user_data and 'monthly_expenses' in user_data", "def load_users():\n try:\n # json file should be in the same file location as the function\n base_dir = os.path.dirname(__file__)\n abs_file = os.path.join(base_dir, 'users.json')\n\n with open(abs_file) as json_file:\n users = json.load(json_file, parse_float=Decimal)\n\n for user in users['users']:\n # Check if the user already exists in the table\n response = key_exists('email', user['email'])\n\n if not response:\n table.put_item(Item=user)\n\n except dynamodb_client.exceptions.ResourceNotFoundException:\n print(\"The table you are trying to query does not exist\")", "def validate_user_data(self, expected, actual):\n self.log.debug('actual: {}'.format(repr(actual)))\n for e in expected:\n found = False\n for act in actual:\n a = {'enabled': act.enabled, 'name': act.name,\n 'email': act.email, 'tenantId': act.tenantId,\n 'id': act.id}\n if e['name'] == a['name']:\n found = True\n ret = self._validate_dict_data(e, a)\n if ret:\n return \"unexpected user data - {}\".format(ret)\n if not found:\n return \"user {} does not exist\".format(e['name'])\n return ret", "def has_all_unique_users_names(value):\n names = [user.get(CONF_NAME) for user in value]\n if None in names and any(name is not None for name in names):\n raise vol.Invalid(\"user names of all users must be set if any is set\")\n if not all(name is None for name in names):\n has_unique_values(names)\n return value", "def _CheckUsers(self, all_users):\n summary = self.fd.GetSummary()\n self.assertItemsEqual([x.username for x in summary.users], all_users)\n\n users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]\n self.assertItemsEqual(users, all_users)\n self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)\n\n # Check kb users\n kbusers = [x.username for x in\n self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]\n self.assertItemsEqual(kbusers, all_users)", "def check_fields_in_dict(dictionary, fields, dictionary_name):\n for field in fields:\n if field not in dictionary:\n raise KafkaIotException(\"%s field(s) required but not found in %s: %s\"\n % (\", \".join(fields), dictionary_name, str(dictionary)))\n return True", "def check(self):\n missing = []\n for name in self.data[\"locations\"]:\n try:\n n = self.data[\"names\"][name]\n except KeyError:\n missing.append(name)\n if missing:\n raise RuntimeError(\"\\\"names\\\" list lacks:\\n \" + \"\\n \".join(missing))", "def check_entries():\n\tfor i, entry in enumerate(frame.entries):\n\t\tif i==0 or i==5:\n\t\t\tif entry.get().isalnum():\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tframe.entries[i].config(bg=ERROR_COLOR)\n\t\t\t\traise ValueError(\"user must be alphanumeric\")\n\t\telif i==(len(frame.entries)-1):\n\t\t\tif not entry.get().isdigit():\n\t\t\t\tframe.entries[i].config(bg=ERROR_COLOR)\n\t\t\t\traise ValueError(\"duration should be a positive digit\")\n\t\telse:\n\t\t\tif entry.get().isdigit():\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tframe.entries[i].config(bg=ERROR_COLOR)\n\t\t\t\traise ValueError(\"ip field \"+str(i+1)+\" type is incorrect\")\n\treturn True", "def test_get_users_from_invalid_yaml():\n with pytest.raises(ValueError):\n Users.from_yaml(file_path='{0}/yaml_input/invalid.yml'.format(os.path.dirname(os.path.abspath(__file__))))", "def _validate_num_profiles(self):\n for fp in [self.solar_fpath, self.wind_fpath]:\n with Resource(fp) as res:\n profile_dset_names = [\n n for n in res.dsets\n if self.__profile_reg_check.match(n)\n ]\n if 
not profile_dset_names:\n msg = (\"Did not find any data sets matching the regex: \"\n \"{!r} in {!r}. Please ensure that the profile data \"\n \"exists and that the data set is named correctly.\")\n e = msg.format(PROFILE_DSET_REGEX, fp)\n logger.error(e)\n raise FileInputError(e)\n elif len(profile_dset_names) > 1:\n msg = (\"Found more than one profile in {!r}: {}. \"\n \"This module is not intended for hybridization of \"\n \"multiple representative profiles. Please re-run \"\n \"on a single aggregated profile.\")\n e = msg.format(fp, profile_dset_names)\n logger.error(e)\n raise FileInputError(e)\n else:\n self.profile_dset_names += profile_dset_names", "def verify_metadata(df):\n # Check that mandatory column headings are present\n col_headings = df.columns.values\n requireds = ['Title', 'Authors', 'Categories', 'Item type', 'Keywords', 'Description', 'License', 'Data Sensitivity', 'RDR Project ID']\n result = all(elem in col_headings for elem in requireds)\n if not result:\n print('Error: You must supply all mandatory column headings')\n sys.exit()\n\n\n # Check that values exist for each of the mandatory fields\n for index, row in df.iterrows():\n if row['Title'] == '' or row['Title'] is None or row['Title'] is np.NaN:\n print(f\"Title is missing on row {index+1}\")\n sys.exit()\n if row['Authors'] == '' or row['Authors'] is None or row['Authors'] is np.NaN:\n print(f\"Authors is missing on row {index+1}\")\n sys.exit()\n if row['Categories'] == '' or row['Categories'] is None or row['Categories'] is np.NaN:\n print(f\"Categories is missing on row {index+1}\")\n sys.exit()\n if row['Item type'] == '' or row['Item type'] is None or row['Item type'] is np.NaN:\n print(f\"Item type is missing on row {index+1}\")\n sys.exit()\n if row['Keywords'] == '' or row['Keywords'] is None or row['Keywords'] is np.NaN:\n print(f\"Keywords is missing on row {index+1}\")\n sys.exit()\n if row['Description'] == '' or row['Description'] is None or row['Description'] is np.NAN:\n print(f\"Description is missing on row {index+1}\")\n sys.exit()\n if row['License'] == '' or row['License'] is None or row['License'] is np.NAN:\n print(f\"License is missing on row {index+1}\")\n sys.exit()\n if row['Data Sensitivity'] == '' or row['Data Sensitivity'] is None or row['Data Sensitivity'] is np.NAN:\n print(f\"Data Sensitivity is missing on row {index+1}\")\n sys.exit()\n if row['RDR Project ID'] == '' or row['RDR Project ID'] is None or row['RDR Project ID'] is np.NAN:\n print(f\"RDR Project ID is missing on row {index+1}\")\n sys.exit()", "def user_checks(users: Dict[str, User]) -> None:\n container_ids = []\n host_volumes = []\n ports = []\n for _, user in users.items():\n for _, container_values in user.user_cfg.items():\n container_ids.append(container_values.container_id)\n host_volumes += [v[0] for v in container_values.volumes]\n ports += [p[0] for p in container_values.ports]\n\n assert len(container_ids) == len(set(container_ids)), \\\n f\"ERR: has duplicate ids, {container_ids}\"\n assert len(ports) == len(set(ports)), \\\n f\"ERR: has duplicate ports, {ports}\"\n\n # mkdir for host_volumes if it doesn't exist\n for volume in host_volumes:\n if not os.path.exists(volume):\n print(f\"WARN: making {volume} because it didn't exist!\")\n os.makedirs(volume)", "def check_validation_results():\n with open('prep/datapackage_validation.json') as report_file:\n report = json.load(report_file)\n\n tasks = report['tasks']\n assert len(tasks) == 5\n\n for task in tasks:\n\n errors = task['errors']\n\n # as a 
first approximation, allow up to 300 errors on the appearances file\n # this is to account for a common foreign key exception caused by the source data\n if task['resource']['name'] == 'appearances':\n errors_threshold = 300\n # for the rest of the files do nor allow errors at all\n else:\n errors_threshold = 0\n\n if len(errors) > errors_threshold:\n print(f\">={len(errors)} rows did not pass validations!\")\n return False\n else:\n return True", "def check_fields(entry, fields):\n if entry is None:\n raise exceptions.BadInputError(\"empty entry\")\n for field in fields:\n if field not in entry:\n raise exceptions.BadInputError(f\"field {field} required and not found\")\n if entry[field] is None:\n # empty fields will be loaded in as None by pyyaml\n raise exceptions.BadInputError(f\"field {field} required not to be empty\")", "def validate_login(name,password):\n\t\n\t#Read the attendance excelsheet check if username and password matched\n\tdf_atten=pd.read_csv(\"datasrc/People.csv\")\n\t# 10006 ultbjxu\n\t\n\tif (df_atten.Username.astype(str).str.contains(name).any() and df_atten.Password.astype(str).str.contains(password).any()):\t\t\n\t\treturn True\n\telse: \n\t\treturn False", "def validate_keystone_users(self, client):\n u.log.debug('Checking keystone users...')\n base = [\n {'name': 'demoUser',\n 'enabled': True,\n 'id': u.not_null,\n 'email': '[email protected]'},\n {'name': 'admin',\n 'enabled': True,\n 'id': u.not_null,\n 'email': 'juju@localhost'},\n {'name': 'cinder_cinderv2',\n 'enabled': True,\n 'id': u.not_null,\n 'email': u'juju@localhost'}\n ]\n expected = []\n for user_info in base:\n if self.keystone_api_version == 2:\n user_info['tenantId'] = u.not_null\n else:\n user_info['default_project_id'] = u.not_null\n expected.append(user_info)\n actual = client.users.list()\n ret = u.validate_user_data(expected, actual,\n api_version=self.keystone_api_version)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def CheckAccount(self):\n \n if self.userName != '':\n res=self.helper.getAccounts(self.userName)\n if res != None:\n if res == []:\n return False\n else:\n return res\n else:\n return None", "def validate(self):\n\n # Start validation writing errors into dictionary/or json string?\n validation_errors = []\n\n # Check we have the required columns needed for upload to proceed.\n required_columns_missing = []\n for required_column in self.required_columns:\n if required_column not in self.df.columns:\n required_columns_missing.append(required_column)\n if len(required_columns_missing) > 0:\n error = ValidationEntry(\n subject_file=self.upload_file,\n key=\"required_columns_missing\",\n value=required_columns_missing,\n entry_type=\"FATAL\",\n validation_type=\"SYNTAX\",\n )\n error.save()\n validation_errors.append(error)\n self.upload_file.valid_syntax = False\n self.upload_file.save()\n\n # Check we have the expected number of columns.\n static_columns_missing = []\n for static_column in self.static_columns:\n if static_column not in self.df.columns:\n static_columns_missing.append(static_column)\n if len(static_columns_missing) > 0:\n error = ValidationEntry(\n subject_file=self.upload_file,\n key=\"static_columns_missing\",\n value=static_columns_missing,\n entry_type=\"ERROR\",\n validation_type=\"SYNTAX\",\n )\n error.save()\n validation_errors.append(error)\n self.upload_file.valid_syntax = False\n self.upload_file.save()\n\n # Check that all the info is for the same panel\n # It is dangerous to proceed otherwise as we will\n # mainly because of the 
parameters we dynamically\n # compose from the panel name.\n if \"Panel\" in self.df.columns:\n panels_in_data = self.df[\"Panel\"].unique().tolist()\n n_unique_panels_in_data = len(panels_in_data)\n if n_unique_panels_in_data != 1:\n error = ValidationEntry(\n subject_file=self.upload_file,\n key=\"unique_panel_error\",\n value=f\"Expected 1 unique value for panels in each record\"\n + f\". Got {n_unique_panels_in_data}: {panels_in_data}\",\n entry_type=\"FATAL\",\n validation_type=\"SYNTAX\",\n )\n error.save()\n validation_errors.append(error)\n self.upload_file.valid_syntax = False\n self.upload_file.save()\n\n # Check if the panel(s) are present in the Panel table\n panels_in_data_pk = []\n unknown_panels = []\n for panel in panels_in_data:\n try:\n panels_in_data_pk.append(Panel.objects.get(name=panel.upper()).id)\n except Panel.DoesNotExist as e:\n unknown_panels.append(panel)\n if len(unknown_panels) > 0:\n error = ValidationEntry(\n subject_file=self.upload_file,\n key=\"unknown_panel_error\",\n value=f\"The following panels are not in Panel table: {unknown_panels}\",\n entry_type=\"WARN\",\n validation_type=\"SYNTAX\",\n )\n error.save()\n validation_errors.append(error)\n\n else:\n # ToDo: Can we continue without unique panels?\n panels_in_data = []\n panels_in_data_pk = []\n\n if len(self.unregistered_parameters) > 0:\n error = ValidationEntry(\n subject_file=self.upload_file,\n key=\"unregistered_parameters\",\n value=self.unregistered_parameters,\n entry_type=\"WARN\",\n validation_type=\"SYNTAX\",\n )\n error.save()\n validation_errors.append(error)\n\n if len(self.unregistered_derived_parameters) > 0:\n error = ValidationEntry(\n subject_file=self.upload_file,\n key=\"unregistered_derived_parameters - will be added during upload\",\n value=self.unregistered_derived_parameters,\n entry_type=\"INFO\",\n validation_type=\"SYNTAX\",\n )\n error.save()\n validation_errors.append(error)\n\n # Check all fields needed for processed_sample table present\n\n # Check all clinical samples present in processed_sample table\n\n # Enter values into processed_sample, processed_sample,\n # numeric_value and text_parameter\n\n # Print out list of validation errors\n # print(\"Validation errors:\")\n return validation_errors", "def validate_missing_information(conf):\n failed = False\n\n for field, _ in REQUIRED_SETTINGS.items():\n if field not in conf:\n print 'error: missing configuration for \"{0}\"'.format(field)\n failed = True\n\n if failed:\n sys.exit(1)", "def check_has_accounts( # pylint: disable = no-self-use, no-self-argument\n cls,\n value: AccountsConfig,\n ) -> AccountsConfig:\n if not value:\n raise ValueError(\"No user accounts defined\")\n return value", "def validate(self):\n if self.tba_key is None:\n self.log.error(\n \"You are missing the TBA-Key field. Please check https://github.com/team4099/scouting-data-ingest#tba for more information.\"\n )\n return False\n\n self.check_internet_connection()\n\n if self.year is None:\n self.log.error(\n \"You are missing the Year field. Please add one in the style shown below.\"\n )\n year_example = \"\"\"\n {\n \"Year\": \"2020\"\n }\n \"\"\"\n console.print(Syntax(year_example, \"json\"))\n console.print(\n \"Reference https://github.com/team4099/scouting-data-ingest#configuration for more information.\"\n )\n return False\n\n if self.google_credentials is None:\n self.log.error(\n \"You are missing the Google-Credentials field. 
Please check https://github.com/team4099/scouting-data-ingest#google-service-account-credentials-file for more information.\"\n )\n return False\n elif not os.path.isfile(f\"config/{self.google_credentials}\"):\n self.log.error(\n \"The file listed in the Google-Credentials field does not exist in the config folder. Please place it inside the config folder.\"\n )\n return False\n else:\n try:\n gc = gspread.service_account(f\"./config/{self.google_credentials}\")\n except ValueError as e:\n self.log.error(\n \"The file listed in the Google-Credentials Field is improper. See below for details.\"\n )\n self.log.error(e)\n return False\n\n if self.spreadsheet is None:\n self.log.error(\n \"You are missing the Spreadsheet field. Please check https://github.com/team4099/scouting-data-ingest#spreadsheet for more information.\"\n )\n return False\n else:\n try:\n gc.open(f\"{self.spreadsheet}\").get_worksheet(0)\n except gspread.exceptions.SpreadsheetNotFound:\n self.log.error(\n \"The file listed in the Spreadsheets field has not been shared with the service account. Please make sure it is.\"\n )\n return False\n\n if self.db_user is None:\n self.log.error(\n \"You are missing the Database User field. Please check https://github.com/team4099/scouting-data-ingest#mysql for more information.\"\n )\n return False\n\n if self.db_pwd is None:\n self.log.error(\n \"You are missing the Database Password field. Please check https://github.com/team4099/scouting-data-ingest#mysql for more information.\"\n )\n return False\n\n try:\n create_engine(\n f\"mysql+pymysql://{self.db_user}:{self.db_pwd}@{self.db_host}/scouting\"\n )\n except pymysql.err.OperationalError:\n self.log.error(\n \"Your Database user name and/or password is not correct. Please verify them.\"\n )\n\n if self.event is None:\n self.log.error(\n \"You are missing the Event field. Please check https://github.com/team4099/scouting-data-ingest#event for more information.\"\n )\n return False\n\n if (\n requests.get(\n f\"https://www.thebluealliance.com/api/v3/event/{self.year}{self.event}\",\n headers={\"X-TBA-Auth-Key\": self.tba_key},\n ).status_code\n == 404\n ):\n self.log.error(\n \"The event listed in the TBA-Key field is not valid. Please ensure the event key and year are correct.\"\n )\n return False\n\n if self.simulation:\n if self.simulator_url is None:\n self.log.error(\n \"You are missing the Simulator URL field. Please check https://github.com/team4099/scouting-data-ingest#tba for more information.\"\n )\n return False\n\n try:\n simulator_status = requests.get(\n f\"{self.simulator_url}/matches\"\n ).status_code\n except (\n ConnectionRefusedError,\n urllib3.exceptions.NewConnectionError,\n requests.exceptions.ConnectionError,\n ):\n self.log.error(\n \"The simulator may not be running or it's at a different url than the one provided.\"\n )\n return False\n\n if simulator_status == 401:\n self.log.error(\n \"The simulator may not be running. Please make sure it is and that it is up-to-date.\"\n )\n return False\n\n if self.simulator_spreadsheet is None:\n self.log.error(\n \"You are missing the Simulator Spreadsheet field. Please check https://github.com/team4099/scouting-data-ingest#spreadsheet for more information.\"\n )\n return False\n else:\n try:\n gc.open(f\"{self.simulator_spreadsheet}\").get_worksheet(0)\n except gspread.exceptions.SpreadsheetNotFound:\n self.log.error(\n \"The file listed in the Simulator Spreadsheet field has not been shared with the service account. Please make sure it is. 
Please also make sure the name entered is correct.\"\n )\n return False\n\n return True", "def check_metadata(metadata_file, input_validate_dict):\n validated = True\n # Metedata.csv has the following columns:\n # crystal_name: must not be spaces or null and should contain the RealCrystalName\n # RealCrystalName: must not be spaces or null\n # smiles: must not be null\n # new_smiles: no specific validation\n # alternate_name: no specific validation\n # site_name: whole column should either be null or not null (no partial columns)\n # pdb_entry: no specific validation\n\n meta_dataframe = pd.read_csv(metadata_file)\n\n # File level checks.\n meta_sites = meta_dataframe['site_name']\n if meta_sites.isnull().values.all() or meta_sites.notnull().values.all():\n pass\n else:\n add_tset_warning(input_validate_dict, 'Metadata.csv',\n 'site_name column should either be completely filled or completely null', 0)\n validated = False\n\n meta_dataframe['crystal_name'] = meta_dataframe['crystal_name'].astype(str)\n meta_dataframe['RealCrystalName'] = meta_dataframe['RealCrystalName'].astype(str)\n meta_dataframe['smiles'] = meta_dataframe['smiles'].astype(str)\n\n # Loop through metadata doing basic checks on each row\n for idx, (_, row) in enumerate(meta_dataframe.iterrows()):\n validated, input_validate_dict = check_meatadata_row(validated, input_validate_dict, row, idx)\n\n return validated, input_validate_dict", "def _validate_user_fields(fields: dict):\n # Checks\n for k, v in fields.items():\n if k == \"username\":\n if len(v) > UserLimits.USERNAME_MAX_LENGTH or len(v) < UserLimits.USERNAME_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid username\")\n\n elif k == \"fullname\":\n if len(v) > UserLimits.FULLNAME_MAX_LENGTH or len(v) < UserLimits.USERNAME_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid full name\")\n\n elif k == \"email\":\n if not is_email(v) or len(v) > UserLimits.EMAIL_MAX_LENGTH or len(v) < UserLimits.EMAIL_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid email\")\n\n elif k == \"password\":\n if len(v) > UserLimits.PASSWORD_MAX_LENGTH or len(v) < UserLimits.PASSWORD_MIN_LENGTH:\n raise ForbiddenArgument(\"invalid password\")", "def _check_user_entry(user):\n if \"tenant_name\" in user:\n keys = set(user.keys())\n if keys == {\"username\", \"password\", \"tenant_name\",\n \"project_domain_name\", \"user_domain_name\"}:\n if (user[\"user_domain_name\"] == \"\"\n and user[\"project_domain_name\"] == \"\"):\n # it is credentials of keystone v2 and they were created\n # --fromenv\n del user[\"user_domain_name\"]\n del user[\"project_domain_name\"]\n return True\n else:\n # it looks like keystone v3 credentials\n user[\"project_name\"] = user.pop(\"tenant_name\")\n return True", "def validate_acl_data(self, acl_request_data):\n if not isinstance(acl_request_data, dict):\n raise exceptions.BadRequest(self.INVALID_ACL_ERROR)\n\n if acl_request_data:\n obj_roles = role.get_ac_roles_data_for(self.type)\n for acr, users in acl_request_data.items():\n if not isinstance(acr, (str, unicode)) or not isinstance(users, list):\n raise exceptions.BadRequest(self.INVALID_ACL_ERROR)\n if acr not in obj_roles:\n raise exceptions.BadRequest(\"Role '{}' does not exist\".format(acr))", "def _check_yaml(self, yaml):\n if type(yaml['datasets']) == dict:\n logging.error(\n \"[ERROR] \\\"datasets\\\" section of config file must be a list, not a dictionary...\" \n )\n sys.exit()", "def validate_user_request_dict(request_dict):\n if 'first_name' not in request_dict:\n return False\n if 'last_name' not in 
request_dict:\n return False\n if 'id' not in request_dict:\n return False\n if 'email' not in request_dict:\n return False\n return True", "def _clean_accounts(self, key):\n user_names = filter(None,\n (x.strip()\n for x in self.cleaned_data.get(key, '').split(',')))\n if len(user_names) > 1:\n raise forms.ValidationError('Only one user name is allowed.')\n elif not user_names:\n return None\n user_name = user_names[0]\n if '@' in user_name:\n acct = models.Account.get_account_for_email(user_name)\n else:\n acct = models.Account.get_account_for_nickname(user_name)\n if not acct:\n raise forms.ValidationError('Unknown user')\n return acct.user", "def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))", "def test_get_users_from_yaml():\n users = Users.from_yaml(file_path='{0}/yaml_input/basic.yml'.format(os.path.dirname(os.path.abspath(__file__))))\n assert isinstance(users, Users)\n assert isinstance(users[0], User)\n assert isinstance(users[0].uid, int)\n assert users[0].name == 'peter'\n assert users[0].home_dir == '/home/bigal'" ]
[ "0.5713202", "0.5615229", "0.56124175", "0.556478", "0.5467763", "0.5272703", "0.5202546", "0.52003455", "0.5149381", "0.5146254", "0.51337475", "0.5124031", "0.509678", "0.5084452", "0.5061947", "0.5024947", "0.5024677", "0.49984267", "0.49967858", "0.4986641", "0.49853197", "0.49746552", "0.49741736", "0.4967864", "0.49504223", "0.49067163", "0.484745", "0.4845094", "0.4836713", "0.48279634" ]
0.76862663
0
This is our handler for the menu item. Our inItemRef is the refcon we registered in our XPLMAppendMenuItem calls. It is either +1000 or -1000 depending on which menu item is picked.
def MyMenuHandlerCallback(self, inMenuRef, inItemRef):
    if (self.DataRef != 0):
        """
        We read the data ref, add the increment and set it again.
        This changes the nav frequency.
        """
        XPLMSetDatai(self.DataRef, XPLMGetDatai(self.DataRef) + inItemRef)
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_to_menu ( self, menu_item ):\r\n pass", "def on_menu_item(self, e):\n if e.Id == ids.RESTORE:\n wx.PostEvent(self.app.roster, ev.ShowRoster())\n elif e.Id == ids.HIDE:\n wx.PostEvent(self.app.roster, ev.HideRoster())\n elif e.Id == ids.EXIT:\n wx.PostEvent(self.app.roster, wx.MenuEvent(\n wx.wxEVT_COMMAND_MENU_SELECTED, wx.ID_EXIT))\n elif e.Id == ids.PREFERENCES:\n wx.PostEvent(self.app.roster, wx.MenuEvent(\n wx.wxEVT_COMMAND_MENU_SELECTED, ids.PREFERENCES))\n elif e.Id == ids.OFFLINE:\n wx.PostEvent(self.app, ev.ChangePresence(status='offline'))\n elif e.Id == ids.AWAY:\n wx.PostEvent(self.app, ev.ChangePresence(status='away'))\n elif e.Id == ids.ONLINE:\n wx.PostEvent(self.app, ev.ChangePresence(status='online'))", "def getMenuItemID(self):\r\n return self.eventID", "def request_context_menu(self, pos):\n pass", "def checkMenuItem(self):\r\n self.eventID, self.parameter, res = self.receiver.getMenuItem()\r\n\r\n return res", "def popUpMenuAdv(self, MenuList, MenuRequestingObject, MenuStartPoint, FunctionToBeInvoked, AdditionalArgument=[], popupOffset=QtCore.QPoint(0,0)):\r\n\r\n if type(MenuStartPoint)==type(QtCore.QPoint()):\r\n PopupPoint = MenuStartPoint\r\n else:\r\n PopupPoint = QtCore.QPoint(-3,-5)\r\n\r\n Rmnu = self.menuCreator(MenuList, self.CallingUI, AdditionalArgument, FunctionToBeInvoked)\r\n PopupPoint.setY(PopupPoint.y() + popupOffset.y())\r\n PopupPoint.setX(PopupPoint.x() + popupOffset.x())\r\n Rmnu.exec_(MenuRequestingObject.mapToGlobal(PopupPoint))\r\n del(Rmnu)", "def getMenuItem(self, event):\n return self.GetMenuBar().FindItemById(event.GetId())", "def on_selected_new_item(self, item):\n pass", "def request_context_menu(self, pos):\n super(ItemListView, self).request_context_menu(pos)\n self.get_selected()\n self.manage_actions()\n self.display_context_menu(pos)", "def bind_menu(self, menu, variable, options=[]):\n menu.bind(\"<<MenuSelect>>\", lambda event: self._menu_select(event, variable=variable, options=options), \"+\")", "def ListenToMenu(self, interruptMenuAfter=3):\n time.sleep(interruptMenuAfter)", "def popUpMenu(self, menuRequestingtObject, PopupPoint, menuListString, funcToInvoke, additionalArguments='', iconList = []):\r\n if menuListString == []:\r\n return 0;\r\n Rmnu = QtWidgets.QMenu(self.CallingUI)\r\n for i, itm in enumerate(menuListString):\r\n\r\n newmenuitem = QtWidgets.QAction(itm, self.CallingUI)\r\n #newmenuitem\r\n\r\n if len(itm)>1 and itm[0]=='|':\r\n itm = itm[1:len(itm)]\r\n newmenuitem.setEnabled(False)\r\n newmenuitem.setText(itm)\r\n #var = QtCore.QVariant()\r\n\r\n\r\n\r\n if itm != '':\r\n if len(iconList)>1 and len(iconList)>i:\r\n if iconList[i]!=None:\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(iconList[i]), QtGui.QIcon.Normal, QtGui.QIcon.On)\r\n newmenuitem.setIcon(icon)\r\n\r\n #self.CallingUI.connect(newmenuitem, QtCore.SIGNAL(\"triggered()\"), lambda passarg=(itm,i,additionalArguments,newmenuitem): funcToInvoke(passarg))\r\n newmenuitem.triggered.connect(lambda passarg=([itm,i,additionalArguments,newmenuitem]): funcToInvoke(passarg))\r\n newmenuitem.setData(PopupPoint)\r\n\r\n if itm=='':\r\n Rmnu.addSeparator()\r\n else:\r\n Rmnu.addAction(newmenuitem)\r\n\r\n\r\n PopupPoint.setY(PopupPoint.y())\r\n PopupPoint.setX(PopupPoint.x())\r\n Rmnu.exec_(menuRequestingtObject.mapToGlobal(PopupPoint))\r\n del(Rmnu)", "def Do(self):\n # type: (MenuContext) -> None\n raise NotImplementedError", "def endItem(self):\n return self.myEndItem", "def 
popUpMenu(callingClassObject,menuRequestingtObject,PopupPoint,menuListString,funcToInvoke,additionalArguments='',iconList = []):\r\n if menuListString == []:\r\n return 0;\r\n Rmnu = QtGui.QMenu(callingClassObject)\r\n for i, itm in enumerate(menuListString):\r\n\r\n newmenuitem = QtGui.QAction(itm, callingClassObject)\r\n\r\n if len(itm)>1 and itm[0]=='|':\r\n itm = itm[1:len(itm)]\r\n newmenuitem.setEnabled(False)\r\n newmenuitem.setText(itm)\r\n\r\n if itm != '':\r\n if len(iconList)>1 and len(iconList)>i:\r\n if iconList[i]<>None:\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(iconList[i]), QtGui.QIcon.Normal, QtGui.QIcon.On)\r\n newmenuitem.setIcon(icon)\r\n\r\n callingClassObject.connect(newmenuitem, QtCore.SIGNAL(\"triggered()\"), lambda passarg=(itm,i,additionalArguments,newmenuitem): funcToInvoke(passarg))\r\n\r\n if itm=='':\r\n Rmnu.addSeparator()\r\n else:\r\n Rmnu.addAction(newmenuitem)\r\n\r\n\r\n PopupPoint.setY(PopupPoint.y() + 30)\r\n PopupPoint.setX(PopupPoint.x() + 5)\r\n Rmnu.exec_(menuRequestingtObject.mapToGlobal(PopupPoint))\r\n del(Rmnu)", "def _handle_select_event(self):\n selected_item = self.item_list[self.item_cursor.cursor]\n if selected_item == \"CANCEL\":\n self.is_dead = True\n\n # You can't sell key items.\n elif selected_item.type == ItemTypes.KEY_ITEMS:\n self.do_what_response_menu = \\\n Dialogue(\"29\", self.player, self.player,\n replace=[selected_item.name.upper()], show_curs=False)\n\n # Create a sell event with the selected item.\n else:\n self.active_sell_event = SellHowMany(self.player,\n selected_item)", "def menu_handler(self):\n return self._menu_handler", "def add_menu_item(self, menu, id, string, helpstring, evthndlr, bitmap=None,\r\n pos=None):\r\n menu = self.frame.GetMenu(menu)\r\n item = wx.MenuItem(menu, id, string,helpstring)\r\n self.frame.Bind(wx.EVT_MENU, evthndlr, id=id)\r\n if bitmap is not None:\r\n item.SetBitmap(bitmap)\r\n if pos is None:\r\n pos = menu.GetMenuItemCount()\r\n menu.InsertItem(pos,item)", "def onRightClick(self, event): \n\t\tpt = event.GetPosition()\n\t\titem, flags = self.tree.HitTest(pt)\n\t\tif not item:\n\t\t\tLogging.info(\"No item to select\", kw = \"ui\")\n\t\t\treturn\n\t\tself.tree.SelectItem(item)\n\t\tself.selectedItem = item\n\t\tself.PopupMenu(self.menu, event.GetPosition())", "def menu_reference_in_blender(self, event=None):\n if self.app.children:\n self.app.childActive.reference_in_blender()", "def get_menu_item(menu_item_name):\n\n pass", "def onContextMenu(self, event):\n # Skipping the save state functionality for release 0.9.0\n # return\n pos = event.GetPosition()\n pos = self.ScreenToClient(pos)\n self.PopupMenu(self.popUpMenu, pos)", "def on_context_menu(self, event):\n self.declaration.context_menu_event()", "def ProcessEvent(self, event):\r\n \r\n if event.GetEventType() == wx.wxEVT_COMMAND_MENU_SELECTED:\r\n self._last_id = event.GetId()\r\n return True\r\n \r\n if self.GetNextHandler():\r\n return self.GetNextHandler().ProcessEvent(event)\r\n\r\n return False", "def ProcessEvent(self, event):\r\n \r\n if event.GetEventType() == wx.wxEVT_COMMAND_MENU_SELECTED:\r\n self._last_id = event.GetId()\r\n return True\r\n \r\n if self.GetNextHandler():\r\n return self.GetNextHandler().ProcessEvent(event)\r\n\r\n return False", "def draw_menu(self, items):\n\n self.count = len(items) - 1\n while True:\n if self.flag >= 1:\n if items[self.index] == str(BackButton.EXIT.name):\n print(\"exiting from system\")\n sys.exit()\n else:\n\n return items[self.index]\n\n for x in range(0, 
self.count+1):\n if x == self.index:\n print(Color.B_LightGray + Color.F_Black + items[x] + Base.END)\n elif x == self.count:\n print(Color.F_Red + items[x] + Base.END)\n else:\n print(items[x])\n with Listener(\n on_press=self.on_press\n ) as listener:\n listener.join()\n\n os.system('clear')\n\n # providing if statement in last to emulate do while loop\n if not always_true():\n break", "def in_game_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_in_game_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)", "def OnUpdateMenu(self, event):\n shell.ShellMenu.OnUpdateMenu(self, event)\n id = event.GetId()\n if id == ID_VERB:\n event.Check(self.verbose)\n elif id == ID_USE_CMD:\n event.Check(self.sim42interp == self.shell.interp)", "def AppendToMenu(self,menu,window,data):\r\n if isinstance(window,Tank):\r\n self.gTank = window\r\n self.selected = window.GetSelected()\r\n self.data = window.data\r\n self.title = window.data.title\r\n else:\r\n self.window = window\r\n self.data = data\r\n #--Generate self.id if necessary (i.e. usually)\r\n if not self.id: self.id = wx.NewId()\r\n wx.EVT_MENU(window,self.id,self.Execute)" ]
[ "0.61091536", "0.57265496", "0.55302376", "0.5491266", "0.5436538", "0.5421155", "0.54151934", "0.54015744", "0.53766143", "0.5230789", "0.5225447", "0.522308", "0.5221148", "0.5216817", "0.5182499", "0.5182356", "0.51636726", "0.5154699", "0.51006156", "0.5060227", "0.5059505", "0.50551236", "0.5003902", "0.5003796", "0.5003796", "0.49792162", "0.49684942", "0.49523538", "0.49452126", "0.49103895" ]
0.7293928
0
Normalize the batch data: use coordinates of the block centered at the origin. Input and output are BxNxC arrays.
def normalize_data(batch_data):
    B, N, C = batch_data.shape
    normal_data = np.zeros((B, N, C))
    for b in range(B):
        pc = batch_data[b]
        # Center each point cloud on its centroid
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        # Scale so the farthest point lies on the unit sphere
        m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
        pc = pc / m
        normal_data[b] = pc
    return normal_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(self):\n self._data /= self.norm()", "def normalise(self):\n for at in self.atoms:\n if at.x < 0. :\n at.x = self.coordx + at.x\n if at.y < 0. :\n at.y = self.coordy + at.y\n if at.z < 0. :\n at.z = self.coordz + at.z", "def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def normalize_data(self):\n\t\tfull_matrix = self.balance_clases()\n\t\ttexture_matrix = Normalizer().fit_transform(X=full_matrix[:,range(0,24)])\n\n\t\treturn texture_matrix", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def 
normalize_dataset(self):", "def normalize(self):\n total = 0.0\n for i in range(0,self.npoints):\n total+=self.y[i]*self._dx\n for i in range(0,self.npoints):\n self.y[i]/=total\n return", "def normalize(shape):\n s = shape\n matrix = Shape.get_matrix(s.get_vector())\n norm_x = math.sqrt(sum(matrix[:, 0] ** 2))\n norm_y = math.sqrt(sum(matrix[:, 1] ** 2))\n for pt in s.pts:\n pt.x /= norm_x\n pt.y /= norm_y\n return s", "def center_normalize(x):\n return (x - K.mean(x)) / K.std(x)", "def center_normalize(x):\n return (x - K.mean(x)) / K.std(x)", "def _normalize_coordinates(\n target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False\n) -> np.ndarray:\n old_height, old_width = original_size\n\n scale = target_size * 1.0 / max(old_height, old_width)\n new_height, new_width = old_height * scale, old_width * scale\n new_width = int(new_width + 0.5)\n new_height = int(new_height + 0.5)\n\n coords = deepcopy(coords).astype(float)\n\n if is_bounding_box:\n coords = coords.reshape(-1, 2, 2)\n\n coords[..., 0] = coords[..., 0] * (new_width / old_width)\n coords[..., 1] = coords[..., 1] * (new_height / old_height)\n\n if is_bounding_box:\n coords = coords.reshape(-1, 4)\n\n return coords", "def normalize(self) -> \"CharacterizationPixel\":\n return replace(\n self,\n data=self.data/self.norm,\n mean=self.mean/self.norm,\n norm=np.ones_like(self.norm),\n )", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def convert_batch_norm(g, op, block):\n\n ipt_name = op.input(\"X\")[0]\n scale_name = op.input(\"Scale\")[0]\n bias_name = op.input(\"Bias\")[0]\n mean_name = op.input(\"Mean\")[0]\n variance_name = op.input(\"Variance\")[0]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.batch_norm(\n g.get_node(ipt_name),\n g.get_node(scale_name),\n g.get_node(bias_name),\n g.get_node(mean_name),\n g.get_node(variance_name),\n epsilon=epsilon,\n )\n g.add_node(op.output(\"Y\")[0], out[0])", "def centerMeanAndNormalize(df):\n return minMax(df - df.mean(axis=0))", "def normalize_features(block, norm=1):\n for k in block:\n for b in block[k]:\n nrm = np.sqrt((block[k][b].reshape((block[k][b].shape[0],-1))**2).sum(axis=1).mean(axis=0))\n if nrm > 0.0:\n block[k][b] *= norm/nrm", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n 
return dataset" ]
[ "0.69384634", "0.68707067", "0.6639805", "0.6637574", "0.65870476", "0.647955", "0.644119", "0.63818425", "0.6363857", "0.626459", "0.62471074", "0.62398934", "0.62313163", "0.6193541", "0.6191717", "0.61898583", "0.618904", "0.61679614", "0.6146879", "0.61388755", "0.61388755", "0.6099847", "0.60972035", "0.6087413", "0.6067772", "0.606484", "0.6059595", "0.60560733", "0.60560733", "0.6035024" ]
0.7281029
0
Shuffle orders of points in each point cloud changes FPS behavior. Use the same shuffling idx for the entire batch.
def shuffle_points(batch_data): idx = np.arange(batch_data.shape[1]) np.random.shuffle(idx) return batch_data[:,idx,:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:, idx, :]", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def _shuffle_roidb_idx(self):\n self.perm = np.random.permutation(np.arange(self.num_images))\n self.cur = 0", "def shuffle_points(data):\n idx = np.arange(data.shape[1])\n np.random.shuffle(idx)\n return data[:, idx, :], idx", "def shuffle(self, idx=None):\n for i in [idx] if idx is not None else range(self.num_buckets):\n self.buckets[i] = self.buckets[i].sample(frac=1).reset_index(drop=True)\n self.cursor[i] = 0", "def _batch_shuffle_ddp(self, x):\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # random shuffle index\n idx_shuffle = torch.randperm(batch_size_all).cuda()\n\n # broadcast to all gpus\n torch.distributed.broadcast(idx_shuffle, src=0)\n\n # index for restoring\n idx_unshuffle = torch.argsort(idx_shuffle)\n\n # shuffled index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this], idx_unshuffle", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def _shuffle(self, reinit_indexes = False):\n print('Shuffling data...')\n # set seed for reproducibility\n #random.seed()\n # shuffle identities\n random.shuffle(self.identities)\n # shuffle images associated to each identity\n for identity in self.groundtruth_metadata.keys():\n random.shuffle(self.groundtruth_metadata[identity]['metadata'])\n if reinit_indexes:\n self.groundtruth_metadata[identity]['index'] = 0\n print('Finished shuffling data!')", "def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0", "def shuffle(self):\n self.edges = np.random.permutation(self.edges)\n self.batch_num = 0", "def shuffle_data(self):\n images = list(self.train_images)\n labels = list(self.train_labels)\n self.train_images = []\n self.train_labels = []\n\n # create list of permutated index and shuffle data accoding to list\n idx = np.random.permutation(len(labels))\n for i in idx:\n self.train_images.append(images[i])\n self.train_labels.append(labels[i])", "def _shuffle(self, r, idx_to_shuffle = None):\n n_features = r.shape[0]\n n_trials = r.shape[1]\n r_sh = r\n if idx_to_shuffle is None:\n idx_to_shuffle = range(n_features)\n for i in range(n_features):\n if i in idx_to_shuffle:\n r_sh[i,:] = r[i,random.permutation(range(n_trials))]\n return r_sh", "def shuffle_T(self):\n np.random.shuffle(self.T)", "def next_batch_shuffle(self, batch_size):\n\tperm0 = np.arange(self.TRAIN_SIZE)\n np.random.shuffle(perm0)\n self.data['x_train'], self.data['y_train'] = self.data['x_train'][perm0,:], self.data['y_train'][perm0,:]\n\treturn self.data['x_train'][:batch_size], self.data['y_train'][:batch_size]", "def shuffle(self):\n new_X = np.empty(self.X_data.shape, dtype=self.X_data.dtype)\n new_Y = np.empty(self.Y_data.shape, dtype=self.Y_data.dtype)\n perm = np.random.permutation(self.X_data.shape[0])\n for old_idx, new_idx in enumerate(perm):\n new_X[new_idx] = self.X_data[old_idx]\n new_Y[new_idx] = self.Y_data[old_idx]\n self.X_data = new_X\n self.Y_data = new_Y", "def 
shuffle(self):\n\t\t\trandom.seed(231)\n\t\t\trandom.shuffle(self.Ind)\n\t\t\tself.Ind = self.Ind[:int(len(self.Ind)/5)*5].reshape((self.cv_iters, -1))\n\t\t\t#index of valication set\n\t\t\tself.CVindex = 1\n\t\t\tself.Testindex = 0", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def shuffle(self):\n for edge_type in self.edge_types:\n for k in range(self.edge_types[edge_type]):\n self.train_edges[edge_type][k] = np.random.permutation(self.train_edges[edge_type][k])\n self.batch_num[self.edge_type2idx[edge_type[0], edge_type[1], k]] = 0\n self.current_edge_type_idx = 0\n self.freebatch_edge_types = list(range(self.num_edge_types))\n self.freebatch_edge_types.remove(self.edge_type2idx[0, 0, 0])\n self.freebatch_edge_types.remove(self.edge_type2idx[0, 1, 0])\n self.freebatch_edge_types.remove(self.edge_type2idx[1, 0, 0])\n self.iter = 0", "def shuffle(self):\n order = np.arange(len(self.data))\n np.random.seed(0xA5EED)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order", "def shuffle(self):\n self.x['train'], self.y['train'] = self._shuffle(\n self.x['train'],\n self.y['train']\n )", "def shuffle(self, random_state=None): \n if random_state is None:\n random_state = self.random_state\n perm_ids = random_state.permutation(self.n_examples)\n self.u = self.u[perm_ids]\n self.v = self.v[perm_ids]\n self.rating = self.rating[perm_ids]", "def on_epoch_end(self):\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)", "def on_epoch_end(self):\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]", "def _batch_shuffle_single_gpu(self, x):\n # random shuffle index\n idx_shuffle = torch.randperm(x.shape[0]).cuda()\n\n # index for restoring\n idx_unshuffle = torch.argsort(idx_shuffle)\n\n return x[idx_shuffle], idx_unshuffle", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]\n self.target_ids = self.target_ids[perm]", "def post_randomize(self):\n super(BatchRandomizer, self).post_randomize()\n self.batch_idx += 1", "def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.indices)", "def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.indexes)" ]
[ "0.72801876", "0.72373664", "0.6753397", "0.6724248", "0.65495604", "0.6484776", "0.6474384", "0.6474384", "0.6457585", "0.64205366", "0.6403728", "0.6389177", "0.63853645", "0.6350583", "0.6297481", "0.62960184", "0.62893575", "0.62821025", "0.6237439", "0.62186027", "0.6217808", "0.61386067", "0.61190194", "0.61190194", "0.61083096", "0.6106143", "0.6104674", "0.6084777", "0.60418606", "0.6036556" ]
0.725899
1
Randomly shift point cloud. Shift is per point cloud.
def shift_point_cloud(batch_data, shift_range=0.1): B, N, C = batch_data.shape shifts = np.random.uniform(-shift_range, shift_range, (B,3)) for batch_index in range(B): batch_data[batch_index,:,:] += shifts[batch_index,:] return batch_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift(self):\n \"\"\"\n shift cluster randomly within bounds of im\n \"\"\"\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.im_size - self.mid_pixel - r - 10\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n im_shift = np.roll(self.im,shift=y,axis=0)\n self.im = np.roll(im_shift,shift=x,axis=1)\n \n return", "def shift_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point = mutated_genome[index][2][point_index]\n newpoint = (point[0]+Xval,point[1]+Yval)\n mutated_genome[index][2][point_index] = newpoint", "def shift(self):\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.size - self.mid_pixel - r\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n image_shift = np.roll(self.image,shift=x,axis=0)\n self.image = np.roll(image_shift,shift=y,axis=1)\n \n return", "def mutate_point_trig(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_trig(mutated_genome,index)\n else: #seed == 1:\n shift_point_trig(mutated_genome,index)", "def shift_point_wline(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n point_index = random.randint(1,max(1,len(mutated_genome[index][2])-1))\n point = mutated_genome[index][2][point_index]\n newpoint = (point[0]+Xval,point[1]+Yval)\n mutated_genome[index][2][point_index] = newpoint", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def switch_points(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def shift_point_trig(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n triangle = mutated_genome[index][2]\n seed = random.randint(0,2)\n oldpoint = triangle[seed]\n newpoint = (oldpoint+Xval, newpoint+Yval)\n newtriangle = list(triangle)\n newtriangle[seed] = newpoint\n mutated_genome[index][2] = tuple(newtriangle)", "def random_transform(self, x, seed=None):\n # x is a single audio, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if not (self.zoom_range[0] == 1 and self.zoom_range[1] == 1):\n zx = np.random.uniform(self.zoom_range[0], self.zoom_range[1])\n input_length = x.shape[img_row_axis]\n x = resample(x, num=int(zx * x.shape[img_row_axis]), axis=img_row_axis)\n if x.shape[img_row_axis] >= input_length:\n x = x[:input_length]\n else:\n x = np.pad(x, ((0, input_length - x.shape[img_row_axis]), (0, 0)),\n 'constant', constant_values=(0, np.mean(x)))\n\n if shift:\n hx = np.random.uniform(-self.shift, self.shift)\n x = shift(x, (int(hx * 
x.shape[img_row_axis]), 0), mode=self.fill_mode, cval=self.cval)\n\n if self.roll_range:\n tx = np.random.uniform(-self.roll_range, self.roll_range)\n if self.roll_range < 1:\n tx *= x.shape[img_row_axis]\n x = np.roll(x, int(tx), axis=(img_row_axis))\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = np.flip(x, axis=img_row_axis)\n\n if (self.noise):\n if np.random.random() < 0.5:\n if self.noise[-1] == 'Uniform':\n x = x + np.random.uniform(self.noise[0], self.noise[1], size=x.shape)\n elif self.noise[-1] == 'Normal':\n x = x + np.random.normal(self.noise[0], self.noise[1], size=x.shape)\n\n if self.brightness_range is not None:\n x = random_brightness(x, self.brightness_range)\n\n return x", "def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def MovePoint(pos,points,s=0.5, k = 1000): \n \n x = [pos[0]] \n y = [pos[1]]\n rdm.seed(1) # comment out if you want randomness\n for i in range(k):\n n = len(points)\n r = int(rdm.random()*n)\n \n x.append(x[i] + (points[r][0] - x[i]) * s)\n y.append(y[i] + (points[r][1] - y[i]) * s)\n\n \n return(x,y)", "def _sample_epislon(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_epislon = self.epislon\n \n # modify the feature ownership matrix\n self.epislon = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.epislon = old_epislon", "def decrement_point(mutated_genome,index):\n point_index1 = random.randint(1,max(0,len(mutated_genome[index][2])-1))\n seed = random.randint(0,2)\n if seed == 0:\n point_index2 = point_index1 - 1\n elif seed == 1:\n point_index2 = random.randint(0, point_index1)\n else: #seed == 2:\n point_index2 = 0\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def mutate_point_circ(mutated_genome):\n seed = random.randint(0,3)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_circ(mutated_genome,index)\n elif seed == 1:\n shift_point_circ(mutated_genome,index)\n elif seed == 2:\n move_radius_circ(mutated_genome,index)\n else: #seed == 3:\n shift_radius_circ(mutated_genome,index)", "def random_shift(x, fraction):\n min_x, max_x = np.min(x), np.max(x)\n m = np.random.uniform(-fraction, fraction, size=x.shape) + 1\n return np.clip(x * m, min_x, max_x)", "def rotate_point_cloud(data):\n rotated_data = np.zeros(data.shape, dtype=np.float32)\n for k in xrange(data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = data[k, ...]\n rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def mutate_point_poly3(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)", "def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:, idx, :]", "def random_transform_extension(self, x, y, seed=None):\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n # use composition of homographies\n # to generate final transform that needs to be applied\n if self.rotation_range:\n theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)\n\n transform_matrix = None\n if theta != 0:\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n transform_matrix = rotation_matrix\n\n if tx != 0 or ty != 0:\n shift_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)\n\n if shear != 0:\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)\n\n if zx != 1 or zy != 1:\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)\n\n if transform_matrix is not None:\n hx, wx = x.shape[img_row_axis], x.shape[img_col_axis]\n hy, wy = y.shape[img_row_axis], y.shape[img_col_axis]\n transform_matrix_x = kimage.transform_matrix_offset_center(transform_matrix, hx, wx)\n transform_matrix_y = kimage.transform_matrix_offset_center(transform_matrix, hy, wy)\n x = apply_transform(x, transform_matrix_x, img_channel_axis,\n fill_mode=self.fill_mode, cval=self.cval)\n\n y = apply_transform(y, transform_matrix_y, img_channel_axis,\n fill_mode=self.fill_mode, cval=self.cval)\n\n if self.channel_shift_range != 0:\n x = image.random_channel_shift(x,\n self.channel_shift_range,\n img_channel_axis)\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = kimage.flip_axis(x, img_col_axis)\n y = kimage.flip_axis(y, img_col_axis)\n\n if self.vertical_flip:\n if 
np.random.random() < 0.5:\n x = kimage.flip_axis(x, img_row_axis)\n y = kimage.flip_axis(y, img_row_axis)\n return x, y", "def mutate_point_rect(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_rect(mutated_genome,index)\n else: #seed == 1:\n shift_point_rect(mutated_genome,index)", "def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:,idx,:]", "def increment_point(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-2))\n seed = random.randint(0,2)\n if seed == 0:\n point_index2 = point_index1 + 1\n elif seed == 1:\n point_index2 = random.randint(point_index1,max(0,len(mutated_genome[index][2])-1))\n else: #seed == 2:\n point_index2 = max(0,len(mutated_genome[index][2])-1)\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def move_point_trig(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n old_points = list(mutated_genome[index][2])\n old_points[random.randint(0,2)] = point\n mutated_genome[index][2] = tuple(old_points)", "def random_rotate(self):\r\n rotation = rand.randrange(0, 4, 1) # 0, 1, 2, 3\r\n flip = rand.randrange(0, 2, 1) # 0, 1\r\n new_seed = copy.deepcopy(self)\r\n # rotate by 90 degrees * rotation (0, 90, 180 270)\r\n new_seed.cells = np.rot90(new_seed.cells, rotation) \r\n if (flip == 1):\r\n # flip upside down\r\n new_seed.cells = np.flipud(new_seed.cells)\r\n new_seed.xspan = new_seed.cells.shape[0]\r\n new_seed.yspan = new_seed.cells.shape[1]\r\n return new_seed", "def random_transform(self, x):\n pass", "def rotate_point_cloud(batch_data):\r\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\r\n for k in range(batch_data.shape[0]):\r\n rotation_angle = np.random.uniform() * 2 * np.pi\r\n cosval = np.cos(rotation_angle)\r\n sinval = np.sin(rotation_angle)\r\n rotation_matrix = np.array([[cosval, 0, sinval],\r\n [0, 1, 0],\r\n [-sinval, 0, cosval]])\r\n shape_pc = batch_data[k, ...]\r\n rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\r\n return rotated_data", "def random_shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='nearest', cval=0., interpolation_order=1):\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n x = apply_affine_transform(x, tx=tx, ty=ty, channel_axis=channel_axis,\n fill_mode=fill_mode, cval=cval,\n order=interpolation_order)\n return x", "def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)", "def rotate_point_cloud(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in np.arange(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data" ]
[ "0.72441024", "0.6811636", "0.64971095", "0.61066484", "0.60589975", "0.5939156", "0.593459", "0.5932551", "0.5909121", "0.57994634", "0.57626474", "0.5751564", "0.57320094", "0.57160926", "0.5685124", "0.56692094", "0.56554824", "0.564319", "0.56338567", "0.5614417", "0.56072867", "0.559016", "0.5574786", "0.5557774", "0.554621", "0.5539712", "0.551367", "0.5493715", "0.5487462", "0.54678863" ]
0.7352118
0
Randomly scale the point cloud. Scale is per point cloud.
def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): B, N, C = batch_data.shape scales = np.random.uniform(scale_low, scale_high, B) for batch_index in range(B): batch_data[batch_index,:,:] *= scales[batch_index] return batch_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_point_cloud(batch_data, scale_ratio=0.8):\n B, N, C = batch_data.shape\n scale = np.random.uniform(scale_ratio,1/scale_ratio,(B,1,1))\n scaled_data = batch_data*scale\n return scaled_data", "def ScalePoints(points, sigma = 0.02):\n assert(points.shape[1]==3)\n\n scale = np.random.uniform(1-sigma, 1+sigma)\n scale_matrix = np.array([[scale, 0, 0],\n [0, scale, 0],\n [0, 0, scale]])\n scaled = np.dot(points, scale_matrix)\n\n return scaled", "def _random_scale(self, results):\n # For multi-scale training\n shuffle(self.img_scale)\n \n if self.multiscale_mode == 'range':\n scale, scale_idx = self.random_sample_ratio(\n self.img_scale[0], self.ratio_range, self.ratio_hr_lr)\n elif self.multiscale_mode == 'value':\n scale, scale_idx = self.random_select_ratio(self.img_scale[0], self.ratio_range, self.ratio_hr_lr)\n else:\n raise NotImplementedError\n\n results['scale'] = scale\n results['scale_idx'] = scale_idx", "def resample_particles(self):\n # make sure the distribution is normalized\n self.normalize_particles()\n\n newParticles = []\n for i in range(len(self.particle_cloud)):\n # resample the same # of particles\n choice = random_sample()\n # all the particle weights sum to 1\n csum = 0 # cumulative sum\n for particle in self.particle_cloud:\n csum += particle.w\n if csum >= choice:\n # if the random choice fell within the particle's weight\n newParticles.append(deepcopy(particle))\n break\n self.particle_cloud = newParticles", "def _random_scale(self, results):\n\n if self.ratio_range is not None:\n scale, scale_idx = self.random_sample_ratio(\n self.img_scale[0], self.ratio_range)\n elif len(self.img_scale) == 1:\n scale, scale_idx = self.img_scale[0], 0\n elif self.multiscale_mode == 'range':\n scale, scale_idx = self.random_sample(self.img_scale)\n elif self.multiscale_mode == 'value':\n scale, scale_idx = self.random_select(self.img_scale)\n else:\n raise NotImplementedError\n\n results['scale'] = scale\n results['scale_idx'] = scale_idx", "def _getScalesRand(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate(\n (_scales, sp.array([sp.sqrt(self.jitter[term_i])])))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales", "def _random_scale(self, waveform, prob):\n # Get random true or false\n prediction = self._random_true_false(prob=prob)\n\n # Apply random multiplication factor\n waveform = tf.cond(prediction, lambda: self._scale(waveform=waveform),\n lambda: self._do_nothing(waveform=waveform))\n\n return waveform", "def _scale(waveform):\n # Get random scale factor\n scale_factor = tf.random_uniform(shape=[], minval=0.5, maxval=2.5, dtype=tf.float32)\n\n return waveform * scale_factor", "def perturb_point(self, x, scale):\n x_samp = x + (scale / 2.0) * (np.random.rand(3) - 0.5)\n return x_samp", "def scale(self, points, inplace=True):\n points = np.array(points).astype(float)\n if inplace==False:\n points = points.copy()\n # if len(points.shape) == 1:\n # points = points[None,:]\n # if len(points.shape) != 2:\n # logger.error(\"cannot scale array of dimensions\".format(len(points.shape)))\n points -= self.origin\n points /= self.scale_factor\n return points", "def scale(self):\n return self.distribution.scale", "def scale(self,scale_by):\n x = self._x * scale_by\n y = self._y * scale_by\n return Point(x,y)", "def ScaleShape(shape, scale_x, scale_y):\n for i, pt in 
enumerate(shape.points):\n x, y = pt\n shape.points[i] = [scale_x * x, scale_y * y]", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def random_scale(imgs, DIFFICULTY):\n MULTIPLY_SCALES = [\n [1, 1],\n [0.9, 1.1],\n [0.85, 1.15],\n [0.8, 1.2],\n [0.75, 1.25],\n [0.7, 1.3],\n [0.65, 1.325],\n [0.6, 1.35],\n [0.55, 1.375],\n [0.50, 1.4],\n [0.48, 1.42],\n [0.46, 1.44],\n [0.44, 1.46],\n [0.42, 1.48],\n [0.4, 1.5],\n [0.35, 1.6],\n ]\n channels = imgs.shape[-1]\n scales = tf.gather(MULTIPLY_SCALES, DIFFICULTY)\n scales = tf.random.uniform([channels], minval=scales[0], maxval=scales[1])\n imgs = imgs * scales\n return imgs", "def set_training_random_scale_factors(self, scale_min, scale_max):\n # Select a random scale factor.\n random_scale_factor = tf.random_uniform([], scale_min, scale_max)\n scaled_size = tf.to_int32(random_scale_factor * self._output_size)\n\n # Recompute the accurate scale_factor using rounded scaled image size.\n height = tf.shape(self._image)[0]\n width = tf.shape(self._image)[1]\n max_image_size = tf.to_float(tf.maximum(height, width))\n image_scale = tf.to_float(scaled_size) / max_image_size\n\n # Select non-zero random offset (x, y) if scaled image is larger than\n # self._output_size.\n scaled_height = tf.to_int32(tf.to_float(height) * image_scale)\n scaled_width = tf.to_int32(tf.to_float(width) * image_scale)\n offset_y = tf.to_float(scaled_height - self._output_size)\n offset_x = tf.to_float(scaled_width - self._output_size)\n offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1)\n offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1)\n offset_y = tf.to_int32(offset_y)\n offset_x = tf.to_int32(offset_x)\n self._image_scale = image_scale\n self._scaled_height = scaled_height\n self._scaled_width = scaled_width\n self._crop_offset_x = offset_x\n self._crop_offset_y = offset_y", "def scale_data_point(self, data_point):\n \n data_point_scaled = pd.Series(self.scaler.transform(data_point[self.feature_names].to_numpy().reshape(1, -1)).ravel())\n data_point_scaled.name = data_point.name\n data_point_scaled.index = self.feature_names\n \n # Set any values > 1 to 1. 
This is only used in visualization.\n data_point_scaled = data_point_scaled.where(data_point_scaled <= 1.0, 1.0)\n #data_point_scaled.values = data_point_scaled.values.apply(> 1.0 else 1.0 for y in x])\n\n return data_point_scaled", "def mutate(self, size):\n rand = random.random()\n if rand <= 0.5:\n print u\"changing colour\"\n idx = random.randrange(0, 4)\n value = random.randrange(0, 256)\n colour = list(self.colour)\n colour[idx] = value\n self.colour = tuple(colour)\n else:\n print u\"changing point\"\n idx = random.randrange(0, len(self.points))\n point = generate_point(size[0], size[1])\n self.points[idx] = point", "def test_set_scale():\n data = io.create_sample_Dataset()\n tmp = data.piv.set_scale(1.0)\n assert np.allclose(tmp[\"x\"], data[\"x\"])\n\n tmp = data.copy()\n tmp.piv.set_scale(2.0)\n tmp_mean = tmp[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n data_mean = data[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n assert np.allclose(tmp_mean / data_mean, 2.0)", "def random_transform(self, x, seed=None):\n # x is a single audio, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if not (self.zoom_range[0] == 1 and self.zoom_range[1] == 1):\n zx = np.random.uniform(self.zoom_range[0], self.zoom_range[1])\n input_length = x.shape[img_row_axis]\n x = resample(x, num=int(zx * x.shape[img_row_axis]), axis=img_row_axis)\n if x.shape[img_row_axis] >= input_length:\n x = x[:input_length]\n else:\n x = np.pad(x, ((0, input_length - x.shape[img_row_axis]), (0, 0)),\n 'constant', constant_values=(0, np.mean(x)))\n\n if shift:\n hx = np.random.uniform(-self.shift, self.shift)\n x = shift(x, (int(hx * x.shape[img_row_axis]), 0), mode=self.fill_mode, cval=self.cval)\n\n if self.roll_range:\n tx = np.random.uniform(-self.roll_range, self.roll_range)\n if self.roll_range < 1:\n tx *= x.shape[img_row_axis]\n x = np.roll(x, int(tx), axis=(img_row_axis))\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = np.flip(x, axis=img_row_axis)\n\n if (self.noise):\n if np.random.random() < 0.5:\n if self.noise[-1] == 'Uniform':\n x = x + np.random.uniform(self.noise[0], self.noise[1], size=x.shape)\n elif self.noise[-1] == 'Normal':\n x = x + np.random.normal(self.noise[0], self.noise[1], size=x.shape)\n\n if self.brightness_range is not None:\n x = random_brightness(x, self.brightness_range)\n\n return x", "def scale_uniform(self, s: float):\n self.vertices = [v * s for v in self.vertices]\n return self", "def rescale(self, points, inplace=True):\n if inplace == False:\n points = points.copy()\n points *= self.scale_factor\n points += self.origin\n return points", "def random_subsample(cloud, sample_size, replace=False):\n\n # Handle small point clouds\n if cloud.shape[0] <= sample_size:\n warn(\"(code 1) Point cloud is already <= desired sample size. 
\" +\n \"No subsampling is performed.\")\n return cloud\n\n # Perform subsamping\n sample_indices = np.random.choice(np.arange(cloud.shape[0]),\n sample_size, replace=False)\n cloud_subs = cloud[sample_indices]\n\n # Return result\n return cloud_subs", "def scale(x, p=2, inplace=False):\n return x / np.linalg.norm(x, ord=p)", "def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)", "def scale(self, factor):\n new = self.copy()\n new.d.clear()\n\n for val, prob in self.items():\n new.set(val * factor, prob)\n return new", "def scale_point(point, centroid, scale):\n point = np.asarray(point)\n centroid = centroid[:2]\n vector = ((point - centroid)*scale) + centroid\n return vector", "def random_transform(self, x):\n pass", "def scale(self, size=128):\n scale_factor = size / max(self.voxels.shape)\n self.voxels = ndimage.zoom(self.voxels, scale_factor)\n self.point_position = self.point_position * scale_factor\n self.voxel_size = False # To ignore this\n \n return(self)" ]
[ "0.76311445", "0.6895509", "0.67543155", "0.64973104", "0.63981426", "0.62324494", "0.6162519", "0.6148883", "0.6085778", "0.60736156", "0.59713405", "0.5954016", "0.5944525", "0.5929756", "0.58707345", "0.5869925", "0.58671856", "0.58244807", "0.5817822", "0.57851666", "0.5758104", "0.5737039", "0.57343966", "0.5724114", "0.57097816", "0.56886965", "0.56619024", "0.56348586", "0.56319606", "0.5629389" ]
0.76930815
0
return list of id of unread emails
def get_unread_email_ids(gmail_client): response = gmail_client.users().messages().list(userId='me',q='is:unread').execute() if 'messages' in response: # messages key only exists if there are unread messages return [message['id'] for message in response['messages']] else: print("No unread messages...") return [] # still return a list since that's what caller expects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_unread_emails(self):\n try:\n query = 'is:unread from:[email protected]'\n page_token = None\n p_emails = []\n while True:\n request = self.service.users().messages().list(userId='me',\n q=query, pageToken=page_token)\n response = request.execute()\n if 'messages' not in response:\n break\n p_emails.extend(response['messages'])\n if 'nextPageToken' not in response:\n break\n page_token = response['nextPageToken']\n return p_emails\n\n except errors.HttpError as error:\n _utils.logger.error(f'An error occurred during unread email retrieval: ${error}')", "def get_mail_list(self) -> List[int]:\n response = self.IMAP.select(self.mailconfig.folderInbox)\n if response[0] != \"OK\":\n log.error(\"Error accessing Folder '%s': %s\" % (self.mailconfig.folderInbox, response[1][0].decode()))\n emailcount: int = int(response[1][0])\n if not emailcount > 0:\n return []\n log.info(\"%s email(s) in inbox\" % emailcount)\n\n response = self.IMAP.uid(\"search\", None, \"(ALL)\")\n if response[0] != \"OK\":\n log.error(\"Failed to retrieve mails from inbox: %s\" % response[1][0].decode())\n return []\n # TODO: Raise exception?\n indices: List[bytes] = response[1][0].split()\n return [int(x) for x in indices]", "def find_unread(mailbox):\n # Not checking here would make the case where there are messages quicker\n numunseen = findall(rb'\\d+', mailbox.status('Inbox', '(UNSEEN)')[1][0])\n if numunseen == [b'0']:\n return ['There are no unread messages']\n\n mailbox.select('inbox')\n res, data = mailbox.search(None, 'UNSEEN')\n if res != 'OK':\n raise RuntimeError('error in search call')\n unseen = data[0].split()\n unseen.reverse()\n messagecount = user.capitalize() + ' : {} new messages'.format(len(unseen))\n meslist = [get_from_subject(mesid, mailbox).strip() for mesid in unseen]\n return [messagecount] + meslist", "def util_unread(self):\n try:\n query = 'from:[email protected]'\n page_token = None\n p_emails = []\n while True:\n request = self.service.users().messages().list(userId='me',\n q=query, pageToken=page_token)\n response = request.execute()\n if 'messages' not in response:\n break\n p_emails.extend(response['messages'])\n if 'nextPageToken' not in response:\n break\n page_token = response['nextPageToken']\n self.service.users().messages().batchModify(userId='me', body={\n 'addLabelIds': ['UNREAD'],\n 'ids': [e['id'] for e in p_emails]\n }).execute()\n\n except errors.HttpError as error:\n _utils.logger.error(f'An error occurred: ${error}')", "def get_unread_email_data(gmail_client):\n unread_ids = get_unread_email_ids(gmail_client)\n\n for message_id in unread_ids:\n remove_unread_label = {'removeLabelIds': ['UNREAD']}\n gmail_client.users().messages().modify(userId='me', id=message_id, body=remove_unread_label).execute()\n\n message_data = gmail_client.users().messages().get(userId='me',id=message_id).execute()\n message_payload = message_data['payload']\n has_attachment = 0 < len([part for part in message_payload['parts'] if part['mimeType'] == 'image/jpeg'])\n \n message_headers = message_payload['headers']\n sender = [header['value'] for header in message_headers if header['name'] == 'Return-Path'][0]\n yield sender, has_attachment", "def _get_unread_emails(conn):\n pattern = re.compile('^\\d+\\ \\(ENVELOPE\\ \\(\"(.*?)\"\\ \"(.*?)\"\\ \\(\\(\"(.*?)\"\\ .*?\\ \"(.*?)\"\\ \"(.*?)\"\\)\\).*$')", "def get_unread_messages():\n mark_seen = request.args.get('mark_seen', True)\n unread_msg = g.driver.get_unread()\n\n if mark_seen:\n for msg in unread_msg:\n msg.chat.send_seen()\n\n return 
jsonify(unread_msg)", "def get_unread_count(username, password):\n obj = imaplib.IMAP4_SSL('imap.gmail.com', '993')\n obj.login(username, password)\n obj.select('Inbox')\n message_ids = obj.search(None, \"UNSEEN\")[1]\n list_of_split_strings = str(message_ids).split(\" \")\n unread = len(list_of_split_strings)\n # speak(str(unread))\n return unread", "def get_email_ids(conn, query='ALL'):\n if conn.state != \"SELECTED\":\n raise imaplib.IMAP4.error(\"Cannot search without selecting a folder\")\n\n rv, data = conn.uid('search', None, query)\n if rv != 'OK':\n print (\"Could not fetch email ids\") # for some reason...\n return []\n\n return data[0].split()", "def get_unread_indexes(self):\n for message in self.messages:\n if message[0] == False:\n return message", "def filter_unread(check_what, criteria, return_what):\n imap = imaplib.IMAP4_SSL(config[\"email\"][\"server\"])\n imap.login(config[\"email\"][\"user\"], config[\"email\"][\"pass\"])\n status, messages = imap.select(\"INBOX\")\n \n status, response = imap.search(None, '(UNSEEN)')\n unread_msg_nums = response[0].split()\n\n ret = [] \n for i in unread_msg_nums:\n parse_return = parse(imap, i, check_what, criteria, return_what)\n if parse_return is not None:\n ret.append(parse_return)\n set_unseen(imap, i)\n imap.close()\n imap.logout()\n\n return ret", "def get_unread_messages(self):\n self.chat.click()\n loaded_messages = self.__get_loaded_messages()\n for message in loaded_messages:\n try:\n if message.get_attribute(\"class\") == \"XFAMv focusable-list-item\":\n unread_index = loaded_messages.index(message)\n return loaded_messages[unread_index + 1:]\n except:\n continue\n return []", "def Get_Unread_Messages(service, userId):\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt=\"json\", q='is:unread has:attachment').execute()\n \n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n\n return message_list", "def get_unread_count(imap):\n status, messages = imap.select('Inbox')\n status, response = imap.uid('search', None, 'UNSEEN')\n unread_msg_nums = response[0].split()\n return len(unread_msg_nums)", "def unread(self):\n return self.filter(unread=True)", "def get_new_mails(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.select(\"Inbox\")\n\t\t\tif self.settings.no_remaining == '0' and self.settings.uidnext:\n\t\t\t\tif self.settings.uidnext == self.settings.newuidnext:\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\t#request all messages between last uidnext and new\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse, message = self.imap.uid('search', None, \"ALL\")\n\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list", "def get_unread_indexes(self):\n pass", "def get_email_ids(self):\n if self.emails is None or self.emails == '':\n return []\n email_ids = self.emails.replace(' ', '')\n return email_ids.split(',')", "def list_emails_ids(self, *criteria, mailbox=None) -> List[str]:\n ids = self.get_raw_emails_ids(*criteria, mailbox=mailbox)\n if ids is None:\n raise Exception(f'Cannot get the list of email in the mailbox {self._selected_mailbox}! 
No list of IDs is returned!')\n return self._search(ids)\n # ListEmailIds.reset()\n # ListEmailIds.parse(ids[0].decode())\n # return ListEmailIds.get_tokens_values()", "def filter_seen_messages(self, messages):\n seen_uids = set()\n for uid in messages:\n key = \"%s_%s_%s\" % (self.opt_pop3_server,\n self.opt_global_account[\"username\"], uid.split()[1])\n if self.helper.get_check_point(key) is not None:\n seen_uids.add(uid)\n new_uids = set(messages) - seen_uids\n self.helper.log_debug(\n 'filter_seen_messages: uids on pop3 %s' %\n set(messages))\n self.helper.log_debug(\n 'filter_seen_messages: uids in checkp %s' %\n seen_uids)\n self.helper.log_debug(\n 'filter_seen_messages: uids new %s' %\n new_uids)\n return new_uids", "def get_mailbox_uidls(mailbox):\r\n\r\n mbxfile = \"%s\\\\%s.mbx\" % (mailboxdir, mailbox)\r\n\r\n print \"Opening mbx: [%s]\" % mbxfile\r\n\r\n if not os.path.exists(mbxfile):\r\n return []\r\n\r\n fd = open(mbxfile)\r\n\r\n uidls=[]\r\n\r\n for line in fd.readlines():\r\n if line[0:7] == \"* UIDL:\":\r\n list = line.split(':')\r\n uidls.append( list[1].strip() )\r\n\r\n fd.close()\r\n\r\n return uidls\r\n\r\n \"\"\"This function returns a list of all of the uidl (unique id's) of\r\n all of the messages on the server \"\"\"", "def email_list(self) -> Sequence[str]:\n return pulumi.get(self, \"email_list\")", "def get_all_received_unread_messages(receiver_status):\n unread_message = [\n message for message in user_messages\n if message[\"receiver_status\"] == \"unread\"\n ]\n return unread_message", "def unread_count(self) -> dict[str, int]:\n return self.subreddit._reddit.get(API_PATH[\"modmail_unread_count\"])", "def get_emails(self):\n email_ids = self.get_email_ids()\n Email = get_email_class()\n return [email for email in Email.objects.filter(pk__in=email_ids)]", "def unseen_messages(self, mailbox):\n data = self._cmd(\n \"STATUS\", self._encode_mbox_name(mailbox), \"(UNSEEN)\")\n m = self.unseen_pattern.match(data[-1].decode())\n if m is None:\n return 0\n return int(m.group(1))", "def message_nums(request):\n if request.user.is_authenticated:\n return {'unread_nums': request.user.usermessage_set.filter(has_read=False).count()}\n else:\n return {}", "def get_ids(self):\n return self.redis.hkeys(self.feed_items)", "def get_raw_emails_ids(self, *criteria, mailbox=None) -> Union[List[bytes], None]:\n self._authenticated_or_die()\n if mailbox is not None:\n self.select_mailbox(mailbox)\n if self._selected_mailbox is None:\n raise Exception('In order to get the list of emails in a mailbox, you must select a mailbox first!')\n criteria = ['ALL'] if 0 == len(criteria) else criteria\n # noinspection PyUnusedLocal\n status: str\n status, ids = self._imap.search(None, *criteria)\n if 'OK' != status:\n return None\n return ids", "def get_new_messages(self):\n inbox = list(self.reddit.inbox.unread(limit=10))\n inbox.reverse()\n return inbox" ]
[ "0.7194147", "0.70722127", "0.70129013", "0.69660854", "0.6963227", "0.6954459", "0.69394183", "0.69240445", "0.691629", "0.68746626", "0.68267304", "0.67761564", "0.67230296", "0.6694524", "0.6601468", "0.6559419", "0.6427694", "0.6418745", "0.6417917", "0.63268363", "0.6284322", "0.624063", "0.6136783", "0.60811406", "0.6027288", "0.60116124", "0.5998925", "0.5953211", "0.59355164", "0.59333926" ]
0.8231231
0
Create a k cluster data set with required separation. For the purposes of validating a proof, generate each cluster center such that it is at least 4 delta away from any other cluster for some value of delta > 0.
def gen_k_centers(k, dim): delta = abs(np.random.normal(0.0, 5.0)) eps = 0.001 centers = [] for i in range(k): c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim)) if len(centers): c1 = centers[0] x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1 direction = x / np.linalg.norm(x) centers.append(c1 + 2.0 * i * delta * direction + eps) else: centers.append(c) return centers, delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def generate_k(data_set, k):\n centers = []\n dimensions = len(data_set[0])\n min_max = defaultdict(int)\n\n for point in data_set:\n for i in range(dimensions):\n val = point[i]\n min_key = 'min_{0}d'.format(i)\n max_key = 'max_{0}d'.format(i)\n if min_key not in min_max or val < min_max[min_key]:\n min_max[min_key] = val\n if max_key not in min_max 
or val > min_max[max_key]:\n min_max[max_key] = val\n\n for _k in range(k):\n rand_point = []\n for i in range(dimensions):\n min_val = min_max['min_{0}d'.format(i)]\n max_val = min_max['max_{0}d'.format(i)]\n \n rand_point.append(uniform(min_val, max_val))\n centers.append(rand_point)\n return centers", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def assign_k_clusters(data, centers):\n clusters = []\n center_data = np.take(data, centers, axis=0)\n best_center = np.argmax(center_data, axis=0)\n for i in range(len(centers)):\n inds = [ind for ind in np.where(best_center == i)[0]]\n clusters.append(inds)\n return clusters", "def generate_initial_centroids(k, data):\n centroids = []\n used_indexes = []\n while len(centroids) < k:\n random_index = random.randint(0, len(data) - 1)\n if random_index not in used_indexes:\n centroids.append(data[random_index])\n used_indexes.append(random_index)\n return centroids", "def kMeans(d, k):\n #First get the random centroids from the data\n newCentroids = getRandomCentroids(d, k)\n #newCentroids = [[-2.0, 1.0], [-2.0, -2.0], [2.0, 2.0], [0.0, 0.0]]\n\n #Get the clusters from these random centroids\n clusters = initiateCentroid(d, newCentroids, k)\n oldCentroids = []\n\n counter = 0\n #While the old centroids are not equal to the new ones\n while oldCentroids != newCentroids:\n #old is equal to new\n oldCentroids = newCentroids\n #Calculate the new centroids\n k, 
newCentroids = calcCentroids(d, clusters)\n #Calculate the new clusters\n clusters = initiateCentroid(d, newCentroids, k)\n #Count how many iterations\n counter += 1\n\n return counter, clusters", "def optimalK(data, nrefs=3, maxClusters=15):\r\n gaps = np.zeros((len(range(1, maxClusters)),))\r\n resultsdf = pd.DataFrame({'clusterCount':[], 'gap':[]})\r\n for gap_index, k in enumerate(range(1, maxClusters)):\r\n\r\n # Holder for reference dispersion results\r\n refDisps = np.zeros(nrefs)\r\n\r\n # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop\r\n for i in range(nrefs):\r\n\r\n # Create new random reference set\r\n randomReference = np.random.random_sample(size=data.shape)\r\n\r\n # Fit to it\r\n km = KMeans(k)\r\n km.fit(randomReference)\r\n\r\n refDisp = km.inertia_\r\n refDisps[i] = refDisp\r\n\r\n # Fit cluster to original data and create dispersion\r\n km = KMeans(k)\r\n km.fit(data)\r\n print(k)\r\n\r\n origDisp = km.inertia_\r\n\r\n # Calculate gap statistic\r\n gap = np.log(np.mean(refDisps)) - np.log(origDisp)\r\n\r\n # Assign this loop's gap statistic to gaps\r\n gaps[gap_index] = gap\r\n\r\n resultsdf = resultsdf.append({'clusterCount':k, 'gap':gap}, ignore_index=True)\r\n\r\n\r\n return (gaps.argmax() + 1, resultsdf) # Plus 1 because index of 0 means 1 cluster is optimal, index 2 = 3 clusters are optimal\r", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result", "def randCent(data,k):\n index = set()\n while 
len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids", "def __generate_central_nodes(self,k=3):\n if k < 3:\n k = 3\n \n self.__logger.info(\"CENTRAL_NODES: Try to seek {} nodes which are currently central\".format(k)) \n res = [n for n,_ in sorted(nx.betweenness_centrality(self.G).items(),key=itemgetter(1),reverse=True)[:4*k]]\n self.__logger.info(\"CENTRAL_NODES: Generated top {} central nodes (according to betweeness centrality)\".format(len(res)))\n \n self.__logger.info(\"CENTRAL_NODES: Sample {} items from the candidates as was requested\".format(k))\n tmp = list(res)\n random.shuffle(tmp)\n return tmp[0:k]", "def generate_clusters(df):\n\n df_size = df.shape[0]\n print(df_size)\n n_clusters = 0\n percent_min_pts = 0.105\n min_clusters = 3\n while (n_clusters != min_clusters):\n print(\"percent_min_pts\", percent_min_pts)\n min_cluster_pts = math.floor(df_size * percent_min_pts)\n print(\"min_cluster_pts\", min_cluster_pts)\n\n clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_pts)\n print(df.head())\n clusterer.fit(df)\n cluster_groups = {}\n labels = clusterer.labels_\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n n_clusters = len(set(labels))\n print(\"n_clusters\", n_clusters)\n multiplier = abs(n_clusters - min_clusters) * 0.001\n print(\"multiplier\", multiplier)\n if n_clusters > min_clusters:\n percent_min_pts += multiplier\n else:\n percent_min_pts -= multiplier\n print(\"percent_min_pts\", percent_min_pts)\n return labels", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def create_clusters(N, K):\n clusters = []\n centroids = create_points(N, K)\n for idx, centroid in enumerate(centroids):\n cluster = Cluster(centroid)\n cluster.label = _cluster_name(idx)\n clusters.append(cluster)\n return clusters", "def kmeans_cluster(\n cn,\n min_k=2,\n max_k=100,\n ):\n\n X = cn.T.values\n ks = range(min_k, max_k + 1)\n\n logging.info(f'trying with max k={max_k}')\n\n kmeans = []\n bics = []\n for k in ks:\n logging.info(f'trying with k={k}')\n model = sklearn.cluster.KMeans(n_clusters=k, init=\"k-means++\").fit(X)\n bic = compute_bic(model, X)\n kmeans.append(model)\n bics.append(bic)\n\n opt_k = np.array(bics).argmax()\n logging.info(f'selected k={opt_k}')\n\n model = kmeans[opt_k]\n\n embedding = umap.UMAP(\n n_neighbors=15,\n min_dist=0.1,\n n_components=2,\n random_state=42,\n metric='euclidean',\n 
).fit_transform(cn.fillna(0).values.T)\n\n clusters = pd.DataFrame({\n 'cell_id': cn.columns, 'cluster_id': model.labels_,\n 'umap1': embedding[:, 0], 'umap2': embedding[:, 1]\n })\n\n return clusters", "def cluster(self, k=3, max_iter=10):\n\n # create a set of k random clusters as seeds\n old_clusters = [None] * k # just a placeholder\n clusters = self.random_clusters(k)\n\n iter = 0\n while (iter < max_iter) and not (old_clusters == clusters):\n print \"iteration %d...\" % iter\n # assign new clusters to old clusters\n for i in xrange(0, k):\n old_clusters[i] = copy(clusters[i])\n clusters[i].documents = []\n\n # for each document\n for document in self.documents:\n\n # determine the cluster with the highest similarity\n similarities = [cosine_similarity(document, cluster) for cluster in old_clusters]\n max_index = array(similarities).argmax()\n\n # assign document to that cluster\n clusters[max_index].add(document)\n\n # update cluster means\n for cluster in clusters:\n cluster.update_centroid()\n \n iter += 1\n \n return clusters", "def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters", "def _create_constrained_dataset(centers, delta, size):\n dataset = []\n count = 0\n for i, c in enumerate(centers):\n for j in range(size):\n x = np.random.multivariate_normal(c, np.identity(np.size(c))) - c\n direction = x / np.linalg.norm(x)\n magnitude = np.random.uniform(0.0, 0.5 * delta)\n # magnitude = np.random.uniform(0.0, delta) # NOT DEL-SEPARATED\n dataset.append((c + magnitude * direction, i, count))\n count += 1\n return dataset", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return cluster_result", "def generate_clusterarray(k_cluster, cluster_size):\n result = np.zeros(k_cluster + 1, dtype = np.int32)\n 
for i in range(1,len(result)):\n result[i] = cluster_size\n return result", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def makeCluster(self):\n for i in range(self.k):\n #vector of length total users, pick random number 1-5\n self.centroids.append(np.random.uniform(low=1,high=5,size=len(self.user)))\n memberList = []\n self.membership.append(memberList)\n self.centroids = np.round(self.centroids)\n\n for movie in self.dictionary.keys():\n #Finds the index of the closest centroid\n closest = np.argmin(self.calculateDistance(self.dictionary[movie]))\n newVector = []\n newVector.append(movie)\n #Add the movie to the list of members of the closest centroid\n self.membership[closest].append(newVector)\n self.recalculateCentroid(self.membership[closest], closest)", "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center" ]
[ "0.7273148", "0.7161901", "0.71461064", "0.70277727", "0.6989481", "0.6961698", "0.69509083", "0.6936706", "0.69362247", "0.68879706", "0.68621993", "0.67435825", "0.6723467", "0.67071426", "0.67059225", "0.66942155", "0.66667116", "0.6627539", "0.6592454", "0.6569137", "0.6537801", "0.6528765", "0.6499644", "0.6492428", "0.647455", "0.6472392", "0.6444663", "0.6442534", "0.6439751", "0.64324623" ]
0.73497814
0
Create a delta-separated dataset. For each of the centers, draw size number of points. No two points may be farther than delta away from each other. Thus, to generate each point, choose a random direction and a random distance from the center (of up to 0.5 delta).
def _create_constrained_dataset(centers, delta, size): dataset = [] count = 0 for i, c in enumerate(centers): for j in range(size): x = np.random.multivariate_normal(c, np.identity(np.size(c))) - c direction = x / np.linalg.norm(x) magnitude = np.random.uniform(0.0, 0.5 * delta) # magnitude = np.random.uniform(0.0, delta) # NOT DEL-SEPARATED dataset.append((c + magnitude * direction, i, count)) count += 1 return dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_point_cloud(n:int, d:int = 2, seed=1234) -> np.ndarray:\n initial_seed = np.random.get_state()\n np.random.seed(seed)\n points = np.random.rand(n, d)\n np.random.set_state(initial_seed)\n return points", "def generate(self, n, d, seperation=4.0, pos_fraction=0.5):\n self.n = n\n self.d = d\n\n # 95% of data lies in a 2*sigma ball around the mean.\n # For unit variance normal, the 2*sigma ball mean the centers\n # should be at distance of 4 in the d dimensional space.\n mean1, mean2 = np.array([0.] * d), np.array([seperation / np.sqrt(d)] * d)\n cov = np.eye(d)\n\n # mean1, mean2 = np.random.random_sample((d,)), np.random.random_sample((d,)) + np.power(seperation , 1 / (2*d))\n dist_mean = np.linalg.norm(mean1 - mean2, ord=2)\n # print(dist_mean)\n # cov = np.eye(d)\n\n frac = int(1 / pos_fraction)\n\n cluster1 = np.random.multivariate_normal(mean1, cov, size=n // frac)\n cluster2 = np.random.multivariate_normal(mean2, cov, size=(n - n//frac))\n\n self.X = np.vstack((cluster1, cluster2))\n self.Y = np.array([0]*(n // frac) + [1]*(n - n // frac))\n\n # Make blobs\n # if True:\n # self.X, self.Y = make_blobs(n_samples=self.n, n_features=self.d, centers=2)", "def prepare_data_for_d(self):\n\n center_nodes = []\n neighbor_nodes = []\n labels = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n # self.graph[i] = [neighbors of i]\n pos = self.graph[i]\n neg, _ = self.sample(i, self.trees[i], len(pos), for_d=True)\n # print(\"tree_i_d: \", self.trees[i])\n # print(\"neg_samples: \", neg)\n # print(\"neg is: \", neg)\n if len(pos) != 0 and neg is not None:\n # positive samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(pos)\n labels.extend([1] * len(pos))\n\n # negative samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(neg)\n labels.extend([0] * len(neg))\n # print(\"cen: \", center_nodes)\n return center_nodes, neighbor_nodes, labels", "def generateData(numPoints,x,y):\n\tfor i in range(0,numPoints):\n\t\tif (i % 2 == 0):\n\t\t\tx.append(random.normalvariate(25, 15))\n\t\t\ty.append(random.normalvariate(25, 15))\n\t\t\t \n\t\t\t\n\t\telse:\n\t\t\tx.append(random.normalvariate(75, 15))\n\t\t\ty.append(random.normalvariate(75, 15))", "def random_verts2D_deviation(vertices, delta_verts2d_dev_range=[-0.01, 0.01]):\n batch_size = vertices.shape[0]\n num_verts = vertices.shape[1]\n device = vertices.device\n\n noisy_vertices = vertices.clone()\n\n l, h = delta_verts2d_dev_range\n delta_verts2d_dev = (h - l) * torch.rand(batch_size, num_verts, 2, device=device) + l\n noisy_vertices[:, :, :2] = noisy_vertices[:, :, :2] + delta_verts2d_dev\n\n return noisy_vertices", "def generate_samples(delta=1, n=32):\n X = np.zeros((n, NROW * NCOL))\n Y = np.zeros((n, NROW * NCOL))\n for i in range(n):\n x, y = generate_sample(delta)\n X[i, :] = x\n Y[i, :] = y\n return X, Y", "def random_projection_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points) and the offset from the origin\n hyperplane_offset = 0.0\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = data[left, d] 
- data[right, d]\n hyperplane_offset -= hyperplane_vector[d] * (\n data[left, d] + data[right, d]) / 2.0\n\n # For each point compute the margin (project into normal vector, add offset)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = hyperplane_offset\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right", "def ddds(cloud, sample_size, presample=None, processes=10):\n\n #--------------------------------------------------------------------------\n\n ### Prep\n\n # Handle small point clouds\n if cloud.shape[0] <= sample_size:\n warn(\"(code 1) Point cloud is already <= desired sample size. \" +\n \"No subsampling is performed.\")\n return cloud\n\n\n #--------------------------------------------------------------------------\n\n ### Compute per-landmark local densities\n\n # Subsample randomly (for speed/memory efficiency)\n if presample is not None:\n cloud_presubs = random_subsample(cloud, presample)\n else:\n cloud_presubs = np.copy(cloud)\n\n # Compute distance of each subsampled point to the closest other point\n # Note: `k=2` is necessary since `k=1` is the point itself.\n tree = cKDTree(cloud)\n NN_dists = tree.query(cloud_presubs, k=2, n_jobs=processes)[0][:,1]\n\n # Get the size of the local neighborhood\n # which is `alpha * median(smallest_distances)`,\n # where a good value for alpha is 5 according to SPADE\n alpha = 5\n NN_size = alpha * np.median(NN_dists)\n\n # Get the local density (LD) of each landmark\n # ...which is the number of other landmarks in its local neighborhood\n LDs = tree.query_ball_point(cloud, NN_size, n_jobs=processes) # Get indices\n LDs = np.vectorize(len)(LDs) # Count\n\n # Define the target density (TD)\n # Note: Good values according to SPADE: the 3rd or 5th percentile of LDs\n # Note: This effectively defines how strongly the data will be subsampled\n TD_percentile = 3\n TD = np.percentile(LDs, TD_percentile)\n\n\n #--------------------------------------------------------------------------\n\n ### Perform density-dependent subsampling\n\n # Create p(keep_lm) probability vector\n # Note: For each point i, the probability of keeping it is\n # { 1 if LD_i < TD\n # { TD / LD_i otherwise\n p_keep = TD / LDs\n p_keep[LDs<TD] = 1\n\n # Randomly decide if a landmark should be kept according to p(keep_lm)\n rand = np.random.uniform(size=cloud.shape[0])\n keep = p_keep >= rand\n\n # Index the lms to be kept\n cloud_ddds = cloud[keep,:]\n\n\n #--------------------------------------------------------------------------\n\n ### Further random downsampling\n\n # Note: This ensures that the downsampled cloud does not grow with the\n # input data and instead is of the specified 
sample_size or smaller.\n\n if cloud_ddds.shape[0] > sample_size:\n cloud_ddds = random_subsample(cloud_ddds, sample_size)\n\n #--------------------------------------------------------------------------\n\n ### Return result\n return cloud_ddds", "def crescent_data(num_data=200, seed=default_seed):\r\n np.random.seed(seed=seed)\r\n sqrt2 = np.sqrt(2)\r\n # Rotation matrix\r\n R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])\r\n # Scaling matrices\r\n scales = []\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append([[1, 0], [0, 3]])\r\n scales.append([[1, 0], [0, 3]])\r\n means = []\r\n means.append(np.array([4, 4]))\r\n means.append(np.array([0, 4]))\r\n means.append(np.array([-4, -4]))\r\n means.append(np.array([0, -4]))\r\n\r\n Xparts = []\r\n num_data_part = []\r\n num_data_total = 0\r\n for i in range(0, 4):\r\n num_data_part.append(round(((i + 1) * num_data) / 4.))\r\n num_data_part[i] -= num_data_total\r\n part = np.random.normal(size=(num_data_part[i], 2))\r\n part = np.dot(np.dot(part, scales[i]), R) + means[i]\r\n Xparts.append(part)\r\n num_data_total += num_data_part[i]\r\n X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))\r\n\r\n Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))\r\n return {'X':X, 'Y':Y, 'info': \"Two separate classes of data formed approximately in the shape of two crescents.\"}", "def generatePoints(centre: Point, radius: float, numPoints: int, jitterRatio: float = 0) -> List[Point]:\n def jitter() -> float:\n diamiter = radius * math.pi * 2\n jitterSize = jitterRatio * diamiter / numPoints\n return random.random() * 2 * jitterSize - jitterSize\n\n points: List[Point] = []\n angle_segment = math.pi * 2 / numPoints\n angle = 0\n\n while angle < math.pi * 2:\n point = (centre[0] + radius * math.cos(angle) + jitter(),\n centre[1] + radius * math.sin(angle) + jitter())\n points.append(point)\n angle += angle_segment\n\n return points", "def test_with_predefined_dist(self, seed):\n dim = Dimension(\"yolo\", dists.norm, 0.9)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert dists.norm.rvs(0.9) == samples[0]", "def private_centers(\n data, norm, epsilon, delta\n):\n sigma = np.sqrt(2 * np.log(1.25 / delta)) / epsilon\n n, d = data.shape\n return np.mean(data, 0) + norm / n * np.random.normal(0, sigma, d)", "def generate_triangle(seed, num_points=200):\n points = {\n 0: 750,\n 750: 0,\n 1500: 751\n }\n random.seed(seed)\n while len(points) < num_points:\n y_coord = (random.randrange(500) or 1) + 200\n x_coord = random.randrange(round(y_coord*4/3)) + round((500 - y_coord)*(3/4)) + 400\n if (not points.get(x_coord)) and (x_coord != 750):\n points[x_coord] = y_coord\n\n os.makedirs(os.path.join(DATA_DIR, seed), exist_ok=True)\n filepath = os.path.join(DATA_DIR, '{}/triangle.node'.format(seed))\n\n # creates the input nodes used by triangle to create delauney graph\n with open(filepath, 'w') as node_file:\n header = \"{} 2 0 0\\n\".format(len(points))\n node_file.write(header)\n i = 1\n for x_coord, y_coord in points.items():\n node_file.write(\" {} {} {}\\n\".format(i, x_coord, y_coord))\n i += 1\n node_file.close()\n\n call(['triangle', '-e', filepath])", "def generate_points(self, n_samples_array, std, center_x, center_y, xsize, ysize):\n\n # generate clusters of blobs\n generated_points_x, _ = make_blobs(\n n_samples=n_samples_array,\n center_box=center_x,\n cluster_std=std,\n n_features=1,\n )\n\n 
generated_points_y, _ = make_blobs(\n n_samples=n_samples_array,\n center_box=center_y,\n cluster_std=std,\n n_features=1,\n )\n\n # remove decimals\n generated_points_x = generated_points_x.astype(\"int\")\n generated_points_y = generated_points_y.astype(\"int\")\n\n ind_delete = np.logical_or.reduce(\n (\n generated_points_x < 0,\n generated_points_y < 0,\n generated_points_x > xsize - 1,\n generated_points_y > ysize - 1,\n ),\n )\n generated_points_y = np.delete(generated_points_y, ind_delete.reshape(ind_delete.shape[0]), axis=0)\n generated_points_x = np.delete(generated_points_x, ind_delete.reshape(ind_delete.shape[0]), axis=0)\n\n return generated_points_x, generated_points_y", "def simulate_graph(seed, cluster_sizes, del_factor, ins_factor):\n rand.seed(seed)\n cluster_boundaries = np.cumsum(cluster_sizes)\n print(\"#seed:\", seed)\n print(\"#deletion factor:\", del_factor)\n print(\"#insertion factor:\", ins_factor)\n optimal_costs = np.array([0])\n for c in range(0, len(cluster_sizes)-1):\n n_c = cluster_sizes[c+1]\n offset_c = cluster_boundaries[c]\n edges_c = generate_edges(n_c, offset_c)\n disturb_cluster(n_c, offset_c, edges_c, del_factor, optimal_costs)\n additional_edges(cluster_boundaries, ins_factor, optimal_costs)\n print(\"#optimal costs:\", optimal_costs)", "def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=self._min_num_context, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = self._num_pts_per_inst #self._x_data.get_shape().as_list()[0]\n num_total_points = num_target\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n\n # idx for x vals in target\n idxs = []\n # which instance to get y data from\n insts = []\n for i in range(self._batch_size):\n idxs.append( tf.random_shuffle(tf.range(self._num_pts_per_inst), seed=seed) )\n insts.append( tf.random_uniform(shape=[], minval=0, maxval=self._num_inst-1, dtype=tf.int32, seed=seed) )\n \n idxs = tf.stack(idxs)\n insts = tf.stack(insts)\n \n # batchsize x numtotalpoints x size (xsize or ysize)\n x_values = tf.stack([tf.expand_dims(tf.gather(self._x_uniq, idxs[tf.cast(i,tf.int32)][:tf.cast(num_total_points,tf.int32)]), axis=-1) for i in range(self._batch_size)])\n y_values = tf.stack([tf.expand_dims(tf.gather(self._y_data[insts[i]*self._num_pts_per_inst:(insts[i]+1)*self._num_pts_per_inst], idxs[i][:num_total_points]), axis=-1) for i in range(self._batch_size)])\n \n \n \n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx_ctxt = tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx_ctxt[:num_context], axis=1)\n context_y = tf.gather(y_values, idx_ctxt[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n \n context_x = tf.squeeze(context_x,-1)\n target_x = tf.squeeze(target_x,-1)\n\n 
context_y = tf.squeeze(context_y,-1)\n target_y= tf.squeeze(target_y,-1)\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def randomize_spaced_out_points(space, separation, n_points):\n #obj = poisson_disk.pds(space[0], space[1], space[2], separation, n_points)\n obj = naive_spaced_randomizer.naive_spaced_randomizer(space[0], space[1], space[2], separation, n_points)\n return obj.randomize_spaced_points()", "def make_datagens(data_points):\n data_gen_args = dict(\n rotation_range=10.,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.1,\n )\n\n x_datagen = keras.preprocessing.image.ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n **data_gen_args,\n )\n\n y_datagen = keras.preprocessing.image.ImageDataGenerator(\n featurewise_center=False,\n featurewise_std_normalization=False,\n **data_gen_args,\n )\n\n x_datagen.fit(utils.list_of_images_to_4d_array([dp.x for dp in data_points]))\n y_datagen.fit(utils.list_of_images_to_4d_array([dp.y for dp in data_points]))\n\n return [x_datagen, y_datagen]", "def _generate_random_points_in_plane(nvect, dparam, npts, eps=0.0):\n np.random.seed(12345)\n a, b, c = nvect / np.linalg.norm(nvect)\n x, y = np.random.rand(npts), np.random.rand(npts)\n z = (dparam - a * x - b * y) / c\n if eps > 0:\n z += np.random.normal(loc=0., scale=eps, size=npts)\n return np.column_stack((x, y, z))", "def random_linearly_separable_data(num_points, bounds):\n\n points = random_plane_points(num_points, bounds)\n weights = random_hyperplane(bounds)\n labels = np.sign(np.dot(np.column_stack([np.ones((num_points, 1)), \n points]), weights))\n return (points, labels, weights)", "def create_data(self, N, d):\n X_neg = np.random.randn(np.floor(N/2.0), d) + 2*np.ones((1, d))\n X_pos = np.random.randn(np.ceil(N/2.0), d) - 2*np.ones((1, d))\n X = np.vstack((X_neg, X_pos))\n\n Y_neg = np.zeros((np.floor(N/2.0), 1))\n Y_pos = np.ones((np.ceil(N/2.0), 1))\n Y = np.vstack((Y_neg, Y_pos))\n\n return Data(X, Y)", "def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = self._x_data.get_shape().as_list()[0]\n num_total_points = num_target\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n\n # idx for x vals in target\n idxs = []\n # which instance to get y data from\n insts = []\n for i in range(self._batch_size):\n idxs.append( tf.random_shuffle(tf.range(self._num_pts_per_inst), seed=seed) )\n insts.append( tf.random_uniform(shape=[], minval=0, maxval=self._num_inst-1, dtype=tf.int32, seed=seed) )\n \n idxs = tf.stack(idxs)\n insts = tf.stack(insts)\n \n # batchsize x numtotalpoints x size (xsize or ysize)\n x_values = tf.stack([tf.expand_dims(tf.gather(self._x_uniq, idxs[tf.cast(i,tf.int32)][:tf.cast(num_total_points,tf.int32)]), axis=-1) for i in range(self._batch_size)])\n y_values = tf.stack([tf.expand_dims(tf.gather(self._y_data[insts[i]*self._num_pts_per_inst:(insts[i]+1)*self._num_pts_per_inst], 
idxs[i][:num_total_points]), axis=-1) for i in range(self._batch_size)])\n \n \n \n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx_ctxt = tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx_ctxt[:num_context], axis=1)\n context_y = tf.gather(y_values, idx_ctxt[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n \n context_x = tf.squeeze(context_x,-1)\n target_x = tf.squeeze(target_x,-1)\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def generate_data(nb_samples):\n inputs = torch.empty(nb_samples, 2).uniform_(0, 1)\n center = Tensor([0.5, 0.5]).view(1, -1)\n distances = torch.norm((inputs - center).abs(), 2, 1)\n labels = (distances < 1 / math.sqrt(2 * math.pi)).type(LongTensor)\n return inputs.t(), labels", "def random():\n # Define lattice spacing as a multiple of the particle radius\n # using the formula a = 4 r/sqrt(3). Systems which are ordered\n # are probably mostly filled, so use a distribution which goes from\n # zero to one, but leaving 90% of them within 80% of the\n # maximum bcc packing. Lattice distortion values are empirically\n # useful between 0.01 and 0.7. Use an exponential distribution\n # in this range 'cuz its easy.\n radius = 10**np.random.uniform(1.3, 4)\n d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius*4/np.sqrt(3)/dnn_fraction\n pars = dict(\n #sld=1, sld_solvent=0, scale=1, background=1e-32,\n dnn=dnn,\n d_factor=d_factor,\n radius=radius,\n )\n return pars", "def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta", "def _random_points_3d(self, number_of_seeds, min_z, max_z):\n # Sanity check. 
We can't get more seeds than what's available in the bounds\n assert number_of_seeds <= self.cell_count\n\n found = {}\n while len(found) < number_of_seeds:\n pt = Point2D(random.randint(self._lower_left.x, self._upper_right.x),\n random.randint(self._lower_left.y, self._upper_right.y))\n if pt not in found: # make sure unique\n found[pt] = random.randint(min_z, max_z)\n return [Point3D(pt.x, pt.y, z) for pt, z in found.items()]", "def _getDistribution(\n self, targetPoints, minDudes, maxDudes, groupCount, maxLevel):\n\n maxIterations = 10+maxDudes*2\n\n def _getTotals(groups):\n totalPoints = 0\n totalDudes = 0\n for group in groups:\n for entry in group:\n dudes = entry[1]\n totalPoints += entry[0]*dudes\n totalDudes += dudes\n return totalPoints, totalDudes\n\n groups = []\n for g in range(groupCount):\n groups.append([])\n\n types = [1]\n if maxLevel > 1:\n types.append(2)\n if maxLevel > 2:\n types.append(3)\n if maxLevel > 3:\n types.append(4)\n\n for iteration in range(maxIterations):\n # see how much we're off our target by\n totalPoints, totalDudes = _getTotals(groups)\n diff = targetPoints - totalPoints\n dudesDiff = maxDudes - totalDudes\n # add an entry if one will fit\n value = types[random.randrange(len(types))]\n group = groups[random.randrange(len(groups))]\n if len(group) == 0:\n maxCount = random.randint(1, 6)\n else:\n maxCount = 2*random.randint(1, 3)\n maxCount = min(maxCount, dudesDiff)\n count = min(maxCount, diff/value)\n if count > 0:\n group.append((value, count))\n totalPoints += value*count\n totalDudes += count\n diff = targetPoints - totalPoints\n\n totalPoints, totalDudes = _getTotals(groups)\n full = (totalPoints >= targetPoints)\n\n if full:\n # every so often, delete a random entry just to\n # shake up our distribution\n if random.random() < 0.2 and iteration != maxIterations-1:\n entryCount = 0\n for group in groups:\n for entry in group:\n entryCount += 1\n if entryCount > 1:\n delEntry = random.randrange(entryCount)\n entryCount = 0\n for group in groups:\n for entry in group:\n if entryCount == delEntry:\n group.remove(entry)\n break\n entryCount += 1\n\n # if we don't have enough dudes, kill the group with\n # the biggest point value\n elif totalDudes < minDudes and iteration != maxIterations-1:\n biggestValue = 9999\n biggestEntry = None\n for group in groups:\n for entry in group:\n if entry[0] > biggestValue or biggestEntry is None:\n biggestValue = entry[0]\n biggestEntry = entry\n biggestEntryGroup = group\n if biggestEntry is not None:\n biggestEntryGroup.remove(biggestEntry)\n\n # if we've got too many dudes, kill the group with the\n # smallest point value\n elif totalDudes > maxDudes and iteration != maxIterations-1:\n smallestValue = 9999\n smallestEntry = None\n for group in groups:\n for entry in group:\n if entry[0] < smallestValue or smallestEntry is None:\n smallestValue = entry[0]\n smallestEntry = entry\n smallestEntryGroup = group\n smallestEntryGroup.remove(smallestEntry)\n\n # close enough.. 
we're done.\n else:\n if diff == 0:\n break\n\n return groups", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def create_data(self):\n\n print (f'Using {self.n_s} simulations for the training data to estimate cov')\n print (f'Using {self.n_p} simulations for the upper/lower training data')\n print (f'Number of splits, to increase number simulations: {self.n_train}')\n print (f'Adding noise to the derivative: {np.invert(self.noiseless_deriv)}')\n\n # Number of upper and lower simulations\n n_p = int(self.n_s * self.derivative_fraction)\n\n # set a seed to surpress the sample variance (EVEN FOR CENTRAL SIMULATIONS)\n seed = np.random.randint(1e6) \n # We should double-check to see if the sample variance if being surpressed\n\n # Perturb lower \n np.random.seed(seed)\n t_m = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n ,train = -self.delta_theta, flatten = self.flatten\n ,noiseless_deriv = self.noiseless_deriv) \n # Perturb higher \n np.random.seed(seed)\n t_p = self.generate_data(np.array([theta_fid for i in \n range(self.n_train * self.n_p)])\n ,train = self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n\n # Central\n np.random.seed(seed)\n t = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_s)])\n ,train = None, flatten = self.flatten)\n\n\n # derivative data\n t_d = (t_p - t_m) / (2. * self.delta_theta)\n\n # Save in a dict that the network takes\n data = {\"data\": t, \"data_d\": t_d}\n # for plotting purposes we save the upper/lower separately as well\n data[\"x_m\"], data[\"x_p\"] = t_m, t_p \n\n\n # Repeat the same story to generate test data\n print ('\\n')\n print (f'Using {self.n_s} simulations for the test data to estimate cov')\n print (f'Using {self.n_p_val} simulations for the upper/lower test data')\n print (f'Number of splits, to increase number simulations: {self.n_train_val}')\n print (f'Adding noise to the derivative: {np.invert(self.noiseless_deriv)}')\n print ('\\n')\n\n seed = np.random.randint(1e6)\n # Perturb lower \n np.random.seed(seed)\n tt_m = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n , train = -self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n # Perturb higher \n np.random.seed(seed)\n tt_p = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n , train = self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n # Central sim\n np.random.seed(seed)\n tt = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_s)])\n , train = None, flatten = self.flatten)\n \n # np.random.seed()\n \n # derivative data\n tt_d = (tt_p - tt_m) / (2. * self.delta_theta)\n\n data[\"validation_data\"] = tt \n data[\"validation_data_d\"] = tt_d\n\n # for plotting purposes we save the upper/lower separately\n data[\"x_m_test\"], data[\"x_p_test\"] = tt_m, tt_p \n\n return data", "def diamond(N, D, rng):\n samples = rng.randn(N, D)\n norm = np.sum(np.abs(samples), axis=1)\n return samples/norm[:,None]" ]
[ "0.6200814", "0.60125065", "0.6008298", "0.58964354", "0.5652766", "0.5627772", "0.56074226", "0.55708045", "0.55676484", "0.54426163", "0.54371774", "0.5432757", "0.54286873", "0.5412957", "0.53979236", "0.53931975", "0.53886104", "0.5388139", "0.5384613", "0.53822875", "0.538003", "0.537996", "0.53692085", "0.5352782", "0.5328673", "0.5315658", "0.5310943", "0.5294078", "0.527585", "0.52743083" ]
0.70498824
0
Create a 5x5 grid of cluster centers. Create 25 cluster centers on the grid I^{[0, 4] x [0, 4]}. Each center is a Gaussian with standard covariance.
def _5x5_grid_clusters(): return [mn(mean=np.array([i, j]), cov=np.array([[1.0, 0.0], [0.0, 1.0]])) for i in range(5) for j in range(5)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _5x5_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def _10x10_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def _5x5_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def _10x10_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def _2x3_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def _2x3_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def initialize_dom(img: np.ndarray):\n\n channels = img.shape[2]\n\n for cluster in range(numclusters):\n for channel in range(channels):\n cmin = np.amin(img[:,:,channel]) # channel's min\n cmax = np.amax(img[:,:,channel]) # channel's max\n current_cluster_centers[cluster, 0, channel] = np.random.uniform(cmin, cmax)\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = 
np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def get_random_centroids(data, k) :\r\n centroids = []\r\n columns = np.size(data, axis=1)\r\n ranges = []\r\n for i in range(columns) :\r\n ranges.append([np.min(data[:,i]), np.max(data[:,i])])\r\n \r\n for i in range(k) :\r\n centroid = []\r\n for span in ranges :\r\n centroid.append(np.random.uniform(span[0], span[1]))\r\n centroids.append(centroid)\r\n \r\n return np.matrix(centroids)", "def _random_standard_centers(n=100):\n generator = mn(mean=np.array([0, 0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]]))\n return [mn(mean=pt, cov=np.array([[1.0, 0.0], [0.0, 1.0]]))\n for pt in generator.rvs(size=n)]", "def plot_clustered_data(points, c_means, covs, test_name, image_num, gaussians):\n\t#colors = cm.rainbow(np.linspace(0, 1, gaussians))\n\tcolors = ['b', 'g', 'm', 'y', 'c', 'k']\n\n\tax = plt.gca()\n\t#for i in range(points.shape[1]):\n\t\t#plt.plot(points[:, i][0], points[:, i][1], \".\", color=\"r\", zorder=0)\n\tplt.plot(points[0], points[1], \".\", color=\"r\", zorder=0)\n\t\n\tfor i in range(gaussians):\n\t\tplt.plot(c_means[i][0], c_means[i][1], \".\", color=colors[i], zorder=1)\n\n\t\twidth, height, theta = cov_ellipse(points, covs[i], nstd=2)\n\t\tellipse = Ellipse(xy=(c_means[i][0], c_means[i][1]), width=width, \\\n\t\t\t\theight=height, angle=theta, edgecolor=colors[i], fc='None', lw=2,\n\t\t\t\t\t\tzorder=4)\n\n\t\tax.add_patch(ellipse)\n\t\n\tplt.savefig(\"./images/{0}/{1:08d}.png\".format(test_name, image_num))\n\tplt.close()", "def make_sample_clusters(n_clusters, n_points, n_features=2, std=2, seed=1, limits=(-10, 10)):\n points_per_cluster = n_points // n_clusters\n np.random.seed(seed=seed)\n centroids = []\n for _ in range(n_features):\n centroids.append(np.random.randint(limits[0], limits[1], size=n_clusters))\n\n centroids = np.array(zip(*centroids))\n\n points = []\n for centroid in centroids:\n rands = centroid + np.random.random((points_per_cluster, n_features)) * std\n points.append(rands)\n\n return np.array(points).reshape(-1, 2)", "def kmeans(boxes, k, dist=np.median,seed=1):\n rows = boxes.shape[0]\n distances = np.empty((rows, k)) ## N row x N cluster\n last_clusters = np.zeros((rows,))\n np.random.seed(seed)\n # initialize the cluster centers to be k items\n clusters = boxes[np.random.choice(rows, k, replace=False)]\n aveIOU=0.0\n while True:\n # 为每个点指定聚类的类别(如果这个点距离某类别最近,那么就指定它是这个类别)\n for icluster in range(k):\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n nearest_clusters = np.argmin(distances, axis=1)\n\n for i in range(rows ):\n aveIOU=aveIOU+1-distances[i,nearest_clusters[i]]\n aveIOU=aveIOU/rows\n\n\t# 如果聚类簇的中心位置基本不变了,那么迭代终止。\n if (last_clusters == nearest_clusters).all():\n break\n # 重新计算每个聚类簇的平均中心位置,并它作为聚类中心点\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters 
== cluster], axis=0)\n last_clusters = nearest_clusters\n\n return clusters,nearest_clusters,distances,aveIOU", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy", "def randCent(data,k):\n index = set()\n while len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids", "def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n 
edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def clusters_allocate_cells(self):\n for cluster in self.clusters:\n cluster.cells[:] = []\n for cell in self.block_proc:\n wdists = []\n for cluster in self.clusters:\n s = cluster.size\n d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n (cell.z-cluster.z)**2 )\n d = numpy.sqrt(d)\n c = self.c\n # TODO: choose a better distance function below\n r = d*(c+(1-c)*numpy.exp(-s/d))\n r = numpy.clip(r,0,r)\n wdists.append(r)\n self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def init_cluster_centroids(x, number_of_clusters):\n return x[np.random.choice(x.shape[0], number_of_clusters, replace=False), :]", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]" ]
[ "0.76212066", "0.7217054", "0.71860254", "0.69142705", "0.68518704", "0.68461156", "0.66278195", "0.66259587", "0.6578559", "0.63746804", "0.63681006", "0.63251126", "0.6259267", "0.62513924", "0.6211149", "0.62032765", "0.61599225", "0.6159027", "0.6151934", "0.6151934", "0.60959435", "0.6091121", "0.60782045", "0.6071997", "0.6064495", "0.6054118", "0.60459524", "0.6036025", "0.603172", "0.6025404" ]
0.8075378
0
Create a 5x5 grid of cluster centers. Create 25 cluster centers on the grid I^{[0, 4] x [0, 4]}. Each center is a Gaussian with standard covariance.
def _5x5_grid_clusters_spread(): return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0], [0.0, 1.0]])) for i in range(5) for j in range(5)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _5x5_grid_clusters():\n return [mn(mean=np.array([i, j]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def _10x10_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def _5x5_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def _10x10_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def _2x3_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def _2x3_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def initialize_dom(img: np.ndarray):\n\n channels = img.shape[2]\n\n for cluster in range(numclusters):\n for channel in range(channels):\n cmin = np.amin(img[:,:,channel]) # channel's min\n cmax = np.amax(img[:,:,channel]) # channel's max\n current_cluster_centers[cluster, 0, channel] = np.random.uniform(cmin, cmax)\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], 
point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def get_random_centroids(data, k) :\r\n centroids = []\r\n columns = np.size(data, axis=1)\r\n ranges = []\r\n for i in range(columns) :\r\n ranges.append([np.min(data[:,i]), np.max(data[:,i])])\r\n \r\n for i in range(k) :\r\n centroid = []\r\n for span in ranges :\r\n centroid.append(np.random.uniform(span[0], span[1]))\r\n centroids.append(centroid)\r\n \r\n return np.matrix(centroids)", "def _random_standard_centers(n=100):\n generator = mn(mean=np.array([0, 0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]]))\n return [mn(mean=pt, cov=np.array([[1.0, 0.0], [0.0, 1.0]]))\n for pt in generator.rvs(size=n)]", "def plot_clustered_data(points, c_means, covs, test_name, image_num, gaussians):\n\t#colors = cm.rainbow(np.linspace(0, 1, gaussians))\n\tcolors = ['b', 'g', 'm', 'y', 'c', 'k']\n\n\tax = plt.gca()\n\t#for i in range(points.shape[1]):\n\t\t#plt.plot(points[:, i][0], points[:, i][1], \".\", color=\"r\", zorder=0)\n\tplt.plot(points[0], points[1], \".\", color=\"r\", zorder=0)\n\t\n\tfor i in range(gaussians):\n\t\tplt.plot(c_means[i][0], c_means[i][1], \".\", color=colors[i], zorder=1)\n\n\t\twidth, height, theta = cov_ellipse(points, covs[i], nstd=2)\n\t\tellipse = Ellipse(xy=(c_means[i][0], c_means[i][1]), width=width, \\\n\t\t\t\theight=height, angle=theta, edgecolor=colors[i], fc='None', lw=2,\n\t\t\t\t\t\tzorder=4)\n\n\t\tax.add_patch(ellipse)\n\t\n\tplt.savefig(\"./images/{0}/{1:08d}.png\".format(test_name, image_num))\n\tplt.close()", "def make_sample_clusters(n_clusters, n_points, n_features=2, std=2, seed=1, limits=(-10, 10)):\n points_per_cluster = n_points // n_clusters\n np.random.seed(seed=seed)\n centroids = []\n for _ in range(n_features):\n centroids.append(np.random.randint(limits[0], limits[1], size=n_clusters))\n\n centroids = np.array(zip(*centroids))\n\n points = []\n for centroid in centroids:\n rands = centroid + np.random.random((points_per_cluster, n_features)) * std\n points.append(rands)\n\n return np.array(points).reshape(-1, 2)", "def kmeans(boxes, k, dist=np.median,seed=1):\n rows = boxes.shape[0]\n distances = np.empty((rows, k)) ## N row x N cluster\n last_clusters = np.zeros((rows,))\n np.random.seed(seed)\n # initialize the cluster centers to be k items\n clusters = boxes[np.random.choice(rows, k, replace=False)]\n aveIOU=0.0\n while True:\n # 为每个点指定聚类的类别(如果这个点距离某类别最近,那么就指定它是这个类别)\n for icluster in range(k):\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n nearest_clusters = np.argmin(distances, axis=1)\n\n for i in range(rows ):\n aveIOU=aveIOU+1-distances[i,nearest_clusters[i]]\n aveIOU=aveIOU/rows\n\n\t# 如果聚类簇的中心位置基本不变了,那么迭代终止。\n if (last_clusters == nearest_clusters).all():\n break\n # 重新计算每个聚类簇的平均中心位置,并它作为聚类中心点\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n last_clusters = 
nearest_clusters\n\n return clusters,nearest_clusters,distances,aveIOU", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy", "def randCent(data,k):\n index = set()\n while len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids", "def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, 
label='Cluster ' + str(k))", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def clusters_allocate_cells(self):\n for cluster in self.clusters:\n cluster.cells[:] = []\n for cell in self.block_proc:\n wdists = []\n for cluster in self.clusters:\n s = cluster.size\n d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n (cell.z-cluster.z)**2 )\n d = numpy.sqrt(d)\n c = self.c\n # TODO: choose a better distance function below\n r = d*(c+(1-c)*numpy.exp(-s/d))\n r = numpy.clip(r,0,r)\n wdists.append(r)\n self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def init_cluster_centroids(x, number_of_clusters):\n return x[np.random.choice(x.shape[0], number_of_clusters, replace=False), :]", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]" ]
[ "0.8075657", "0.7216367", "0.7186302", "0.691356", "0.68508494", "0.6845249", "0.66261995", "0.6625447", "0.6577691", "0.6374363", "0.63676125", "0.6322932", "0.625821", "0.6250707", "0.6211498", "0.6203698", "0.6160035", "0.6157637", "0.6151316", "0.6151316", "0.6094953", "0.609167", "0.6078563", "0.6072399", "0.60630804", "0.60531735", "0.60448426", "0.6035322", "0.60305756", "0.60231733" ]
0.762155
1
Create random cluster centers. Create n cluster centers randomly. Each cluster center is a draw from a gaussian distribution centered at (0,0) with standard covariance.
def _random_standard_centers(n=100): generator = mn(mean=np.array([0, 0]), cov=np.array([[1.0, 0.0], [0.0, 1.0]])) return [mn(mean=pt, cov=np.array([[1.0, 0.0], [0.0, 1.0]])) for pt in generator.rvs(size=n)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_sample_clusters(n_clusters, n_points, n_features=2, std=2, seed=1, limits=(-10, 10)):\n points_per_cluster = n_points // n_clusters\n np.random.seed(seed=seed)\n centroids = []\n for _ in range(n_features):\n centroids.append(np.random.randint(limits[0], limits[1], size=n_clusters))\n\n centroids = np.array(zip(*centroids))\n\n points = []\n for centroid in centroids:\n rands = centroid + np.random.random((points_per_cluster, n_features)) * std\n points.append(rands)\n\n return np.array(points).reshape(-1, 2)", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def init_cluster_centroids(x, number_of_clusters):\n return x[np.random.choice(x.shape[0], number_of_clusters, replace=False), :]", "def randCent(data,k):\n index = set()\n while len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids", "def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def gen_random_clusters(num_clusters):\n clusters = []\n\n for _ in xrange(num_clusters):\n clusters.append(alg_cluster.Cluster(\n set(),\n random.randint(-1, 1),\n random.randint(-1, 1),\n 0,\n 0\n ))\n return clusters", "def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta", "def get_random_centroids(data, k) :\r\n centroids = []\r\n columns = np.size(data, axis=1)\r\n ranges = []\r\n for i in range(columns) :\r\n ranges.append([np.min(data[:,i]), np.max(data[:,i])])\r\n \r\n for i in range(k) :\r\n centroid = []\r\n for span in ranges :\r\n centroid.append(np.random.uniform(span[0], span[1]))\r\n centroids.append(centroid)\r\n \r\n return np.matrix(centroids)", "def gen_random_clusters(num_clusters):\n\t\tcluster_list = []\n\t\tlower_bound = -1\n\t\tupper_bound = 1\n\t\trange_width = upper_bound - lower_bound\n\t\tfor idx in range(num_clusters):\n\t\t\tx_val = random.random() * range_width + lower_bound\n\t\t\ty_val = random.random() * range_width + lower_bound\n\t\t\tcluster_list.append(Cluster(set([]), x_val, y_val, 0, 0))\n\t\treturn cluster_list", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() 
for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def clusterize(x, n):\n if n == 1:\n return np.zeros(x.shape[0])\n elif n > 1:\n kmeans = KMeans(n_clusters=n).fit(x)\n return kmeans.predict(x)\n else:\n raise ValueError(\"set n_spheres > 0\")", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def generate_initial_centroids(k, data):\n centroids = []\n used_indexes = []\n while len(centroids) < k:\n random_index = random.randint(0, len(data) - 1)\n if random_index not in used_indexes:\n centroids.append(data[random_index])\n used_indexes.append(random_index)\n return centroids", "def gen_random_clusters(num_clusters):\n cluster_list = []\n\n for county_id in range(num_clusters):\n cluster_list.append(alg_cluster.Cluster(set([str(county_id)]), 10.0*random.random(), 10.0*random.random(), random.random(), random.random()))\n return cluster_list", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def getRandomCentroids(data_points, k):\n return random.sample(data_points, k)", "def init_centroids(self, points , k):\n centroids = points.copy()\n numpy.random.shuffle(centroids)\n return centroids[0:k,:]", "def creaCent(data,k):\n\n Cen = random.sample(data[:,:].tolist(),k=k)\n return np.asarray(Cen)", "def setup_rand_gmm(n_components=3, n_features=10,\n clust_mean_std=2.0,\n cluster_std=1.0,\n cov_how='diag',\n weights_how='uniform',\n random_state=None):\n\n rng = check_random_state(random_state)\n\n ##########################\n # generate cluster means #\n ##########################\n\n # cluster means are generated from a spherical gaussian\n center_cov = np.eye(n_features) * clust_mean_std ** 2\n means = rng.multivariate_normal(mean=np.zeros(n_features),\n cov=center_cov,\n size=n_components)\n\n ################################\n # generate cluster covariances #\n ################################\n\n if cov_how.lower() == 'diag':\n # diagonal covariances\n covariances = np.array([np.eye(n_features) * cluster_std ** 2\n for _ in range(n_components)])\n elif cov_how.lower() == 'wishart':\n raise NotImplementedError # TODO-FEAT: 
implement this!\n else:\n raise ValueError(\"cov_how must be one of ['diag', 'wishart']\"\n \"not {}\".format(cov_how))\n\n ############################\n # generate cluster weights #\n ############################\n if weights_how.lower() == 'uniform':\n weights = rng.dirichlet(np.ones(n_components))\n elif weights_how.lower() == 'random':\n weights = np.ones(n_components) / n_components\n else:\n raise ValueError(\"weights_how must be one of ['random', 'uniform']\"\n \"not {}\".format(weights_how))\n\n return means, covariances, weights", "def initialize_dom(img: np.ndarray):\n\n channels = img.shape[2]\n\n for cluster in range(numclusters):\n for channel in range(channels):\n cmin = np.amin(img[:,:,channel]) # channel's min\n cmax = np.amax(img[:,:,channel]) # channel's max\n current_cluster_centers[cluster, 0, channel] = np.random.uniform(cmin, cmax)\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. * np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def _init_centroid(self, seed: int):\n random.seed(seed)\n self.centroid_info = dict()\n self.cluster_result = dict()\n self.centroid_stable_flag = dict()\n for key_index, chosen_value in enumerate(\n random.sample(self.list_data, self.n_cluster)):\n self.centroid_info.setdefault(\"c\" + str(key_index), float(chosen_value))\n self.cluster_result.setdefault(\"c\" + str(key_index), list())\n self.centroid_stable_flag.setdefault(\"c\" + str(key_index), False)", "def _5x5_grid_clusters():\n return [mn(mean=np.array([i, j]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]", "def init_centroids(X,K):\n c = random.sample(list(X),K)\n return c", "def initialize_pos(img: np.ndarray):\n\n h, w = img.shape[0:2]\n\n for cluster in range(numclusters):\n i = np.random.randint(h) # row index\n j = np.random.randint(w) # col index\n current_cluster_centers[cluster, 0, :] = img[i, j, :]\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random 
clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy" ]
[ "0.76822317", "0.76259226", "0.73836875", "0.7335838", "0.7241687", "0.7094557", "0.7092686", "0.7072594", "0.7042424", "0.6994134", "0.6977371", "0.69499964", "0.6929814", "0.6919069", "0.6892269", "0.6817309", "0.6758312", "0.6726237", "0.6718225", "0.66974187", "0.6696951", "0.6625178", "0.662123", "0.6589652", "0.6539657", "0.64829224", "0.6467993", "0.64054203", "0.63689655", "0.6366787" ]
0.7926905
0
Returns the path for user-specific blender scripts for all major platforms
def getScriptsPath(blenderversion): if sys.platform == 'linux': scriptspath = os.path.normpath( os.path.expanduser('~/.config/blender/{0}/scripts'.format(blenderversion)) ) elif sys.platform == 'darwin': scriptspath = os.path.normpath( os.path.expanduser( '~/Library/Application Support/Blender/{0}/scripts'.format(blenderversion) ) ) elif sys.platform == 'win32': scriptspath = os.path.normpath( os.path.expanduser( '~/AppData/Roaming/Blender Foundation/Blender/{0}/scripts'.format(blenderversion) ) ) else: scriptspath = 'ERROR: {0} not supported,'.format(sys.platform) return scriptspath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBlenderConfigPath(blenderversion):\n if sys.platform == 'linux':\n scriptspath = os.path.normpath(\n os.path.expanduser('~/.config/blender/{0}/config'.format(blenderversion))\n )\n elif sys.platform == 'darwin':\n scriptspath = os.path.normpath(\n os.path.expanduser(\n '~/Library/Application Support/Blender/{0}/config'.format(blenderversion)\n )\n )\n elif sys.platform == 'win32':\n scriptspath = os.path.normpath(\n os.path.expanduser(\n '~/AppData/Roaming/Blender Foundation/Blender/{0}/config'.format(blenderversion)\n )\n )\n else:\n scriptspath = 'ERROR: {0} not supported,'.format(sys.platform)\n return scriptspath", "def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)", "def environmentImagesPath():\n # A recursion counter to make sure that the loop ends.\n count = 0\n # Get the path to the Blender executable.\n filePath = os.path.dirname(bpy.app.binary_path)\n # Find the lowest path level which contains Blender.\n while \"blender\" not in os.path.basename(filePath).lower():\n filePath = os.path.dirname(filePath)\n if not filePath or count == 20:\n break\n count += 1\n\n # Search all subpaths for the datafiles folder. Based on this folder\n # the path can be completed.\n for dirPath, dirs, fileList in os.walk(filePath):\n if os.path.basename(dirPath) == \"datafiles\":\n return os.path.join(os.path.join(dirPath, \"studiolights\"), \"world\")", "def scripts_folder(self):\n # Copy the script\n if qgis_version() < 21600:\n return ScriptUtils.scriptsFolder()\n else:\n return ScriptUtils.defaultScriptsFolder()", "def bundle_path(self, app):\n return (\n self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )", "def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts", "def scriptpath(self, code):\n if self.url.path.endswith('/api.php'):\n return removesuffix(self.url.path, '/api.php')\n\n # AutoFamily refers to the variable set below, not the function\n # but the reference must be given here\n return super(AutoFamily, self).scriptpath(code)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def scriptpath(self, code) -> str:\n return ''", "def PyHiew_GetScriptFileName(script):\r\n return '%s\\\\%s.py' % (PYHIEW_PATH, script)", "def _spdr_engine_location():\n return os.path.realpath(__file__).rpartition('/')[0]", "def get_hookscript_path ( self ):\n return self.hook_script_fspath", "def get_executable(self) -> str:\n ...", "def _getCodeFolder(self):\n if getattr(sys, 'frozen', False):\n # we are running in a bundle (frozen)\n bundle_dir = sys._MEIPASS\n else:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\n return bundle_dir", "def get_script_directory():\n return os.path.dirname(__file__)", "def _blink_base(self):\n module_path = self._filesystem.path_to_module(self.__module__)\n tools_index = module_path.rfind('tools')\n assert tools_index != -1, 'could not find location of this checkout from %s' % module_path\n return self._filesystem.normpath(module_path[0:tools_index - 1])", "def generate_js_dir():\n\n return pkg_resources.resource_filename('linkedin.mobster.har.visualization.js', 
None)", "def _jupyter_nbextension_paths():\n return [{\n \"section\": \"tree\",\n \"dest\": \"nbsysinfo\",\n \"src\": \"static\",\n \"require\": \"nbsysinfo/main\"\n }]", "def scripts_dir(self):\n\n return os.path.join(self.path, 'Scripts' if sys.platform == 'win32'\n else 'bin')", "def scriptpath(self, code):\n return '' if code == 'en' else ('/' + code)", "def getRootPath():\n return '/'.join(__file__.split('/')[:-4]) # Path of this file with pagebot/__init__.py(c) removed.", "def get_vendor_bundle_path() -> str:\n vendor_bundle_directory = os.path.join(os.path.dirname(__file__), \"dist\", \"js\")\n file_list_with_full_path = []\n for f in os.listdir(vendor_bundle_directory):\n file_path = os.path.join(vendor_bundle_directory, f)\n if os.path.isfile(file_path):\n if os.path.splitext(file_path)[-1].endswith(\"js\"):\n if os.path.splitext(f)[0].startswith(\"chunk-vendors\"):\n file_list_with_full_path.append(os.path.abspath(file_path))\n return file_list_with_full_path[0]", "def get_exe_path(exe):\n for type_, path in get_possible_paths():\n full_path = os.path.join(path, exe)\n if os.path.exists(full_path):\n if type_ == 'bundled':\n bundled_warning()\n return full_path\n return None", "def _getRunMonoScript(self):\n\t\treturn os.path.join(self.getEngineRoot(), 'Engine', 'Build', 'BatchFiles', self.getPlatformIdentifier(), 'RunMono.sh')", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def _jupyter_bundlerextension_paths():\n return [{\n 'name': 'jorts_bundler',\n 'label': 'Human Readable Report (.pdf)',\n 'module_name': 'jorts',\n 'group': 'download'\n }]", "def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"", "def getBaseURL():\n return getQualifiedURL(getScriptname())", "def rliPath():\r\n if isWindows():\r\n homeDir = win32api.GetShortPathName(os.path.expanduser('~'))\r\n return os.path.join(homeDir, 'AppData', 'Roaming', 'GRASS7', 'r.li')\r\n else:\r\n return os.path.join(os.path.expanduser(\"~\"), '.grass7', 'r.li')" ]
[ "0.6769609", "0.62371445", "0.60271466", "0.590203", "0.5760948", "0.5753", "0.57450694", "0.57419586", "0.57419586", "0.5724369", "0.5693562", "0.5689543", "0.56795627", "0.5673507", "0.5627154", "0.5591305", "0.55765843", "0.55733895", "0.55558956", "0.5554302", "0.55541027", "0.55451024", "0.55251455", "0.5520799", "0.551155", "0.55054486", "0.54604656", "0.5459248", "0.545899", "0.54285264" ]
0.72838265
0
Returns the path for configuration data for all major platforms
def getConfigPath(): if sys.platform == 'linux': configpath = os.path.normpath(os.path.expanduser('~/.config/phobos')) elif sys.platform == 'darwin': configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos')) elif sys.platform == 'win32': configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos')) else: configpath = 'ERROR: {0} not supported,'.format(sys.platform) return configpath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def platform_config_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Application Support'\n elif WIN32: # nocover\n dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def configPath(self):\n return os.path.dirname(__file__)", "def platform_data_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_DATA_HOME', '~/.local/share')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Application Support'\n elif WIN32: # nocover\n dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None", "def _get_config_filename():\n return 'pylidc.conf' if sys.platform.startswith('win') else '.pylidcrc'", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def get_data_path(name):\n js = open('config.json').read()\n data = json.loads(js)\n return os.path.expanduser(data[name]['data_path'])", "def cfg_path(self):\n return self._cfg_path", "def path_config_docker(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_EXT)", "def config_data_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_CONFIG", "def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def get_paths():\n paths = {}\n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n if os.path.exists(\"/usr/local/share/opencpn\"):\n paths[\"systemdir\"] = \"/usr/local/share/opencpn\"\n else:\n paths[\"systemdir\"] = \"/usr/share/opencpn\"\n paths[\"systemdir\"] += \"/plugins-metadata\"\n paths[\"destfile\"] = os.path.expanduser(\"~/.opencpn/ocpn-plugins.xml\")\n paths[\"userdir\"] = os.path.expanduser(\"~/.opencpn/plugins-metadata\")\n elif sys.platform == \"darwin\":\n paths[\"systemdir\"] = os.path.expanduser(\n \"~/Desktop/OpenCPN.app/Contents/SharedSupport/ocpn-plugins.xml\")\n paths[\"userdir\"] = os.path.expanduser(\n \"~/Library/Preferences/opencpn/plugins-metadata\")\n 
paths[\"destfile\"] = os.path.expanduser(\n \"~/Library/Preferences/opencpn/ocpn-plugins.xml\")\n elif sys.platform == \"win32\":\n paths[\"systemdir\"] = r\"\\Program Files (x86)\\OpenCPN\\plugins-metadata\"\n if \"LOCALAPPDATA\" in os.environ:\n appdata = os.environ[\"LOCALAPPDATA\"]\n else:\n appdata = r\"\\ProgramData\\opencpn\"\n paths[\"userdir\"] = os.path.join(appdata, \"plugins-metadata\")\n paths[\"destfile\"] = os.path.join(appdata, \"ocpn-plugins.xml\")\n return paths", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def system_conf_dir(self):\n return buildconfig.SPD_CONF_PATH", "def data_dir():\n return _config.datadir", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def get_global_config_path():\n\n return \"/etc/dapsenv/dapsenv.conf\"", "def appdata_dir():\r\n if platform.system() == \"Windows\":\r\n return os.path.join(os.environ[\"APPDATA\"], \"Electrum\")\r\n elif platform.system() == \"Linux\":\r\n return os.path.join(sys.prefix, \"share\", \"electrum\")\r\n elif (platform.system() == \"Darwin\" or\r\n platform.system() == \"DragonFly\" or\r\n\t platform.system() == \"NetBSD\"):\r\n return \"/Library/Application Support/Electrum\"\r\n else:\r\n raise Exception(\"Unknown system\")", "def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def get_config_dir() -> str:\n # Get the system app configuration standard location\n if 'APPDATA' in os.environ:\n return os.environ['APPDATA']\n elif 'XDG_CONFIG_HOME' in os.environ:\n return os.environ['XDG_CONFIG_HOME']\n else:\n return os.path.join(os.environ['HOME'], '.config')", "def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]" ]
[ "0.7201866", "0.67362136", "0.6644315", "0.66377807", "0.6636949", "0.66022515", "0.65694153", "0.65639627", "0.65615714", "0.64916116", "0.64902836", "0.64741963", "0.6470176", "0.6469606", "0.6384029", "0.6373276", "0.63566935", "0.63498926", "0.63385725", "0.6334437", "0.6327228", "0.6315316", "0.6294301", "0.62942564", "0.62763315", "0.6261929", "0.62580097", "0.62559307", "0.62549204", "0.62334144" ]
0.72742504
0
Returns the configuration path for user-specific blender data.
def getBlenderConfigPath(blenderversion): if sys.platform == 'linux': scriptspath = os.path.normpath( os.path.expanduser('~/.config/blender/{0}/config'.format(blenderversion)) ) elif sys.platform == 'darwin': scriptspath = os.path.normpath( os.path.expanduser( '~/Library/Application Support/Blender/{0}/config'.format(blenderversion) ) ) elif sys.platform == 'win32': scriptspath = os.path.normpath( os.path.expanduser( '~/AppData/Roaming/Blender Foundation/Blender/{0}/config'.format(blenderversion) ) ) else: scriptspath = 'ERROR: {0} not supported,'.format(sys.platform) return scriptspath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfg_path(self):\n return self._cfg_path", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def configPath(self):\n return os.path.dirname(__file__)", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def get_data_path(name):\n js = open('config.json').read()\n data = json.loads(js)\n return os.path.expanduser(data[name]['data_path'])", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def config_data_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_CONFIG", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def config_dir(self) -> Path:\n return self._config_dir", "def get_dataset_config_path(dataset_dir: str) -> str:\n return os.path.join(dataset_dir, DATASET_CONFIG_NAME)", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None", "def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def config_abex_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / ABEX_CONFIG", "def cfgPath( *args ):\n return '/'.join( [str( k ) for k in args] )", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def get_user_data_path():\n current_directory = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(current_directory, 'emergency_fund_info.json')", "def path_config_docker(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_EXT)", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def peers_full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.peers_filename)", "def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file", "def configDir():\n return os.path.join(os.environ['HARNESSEDJOBSDIR'], 'config', getSiteName())", "def get_user_config_dir(options):\n return '/root/.spinnaker'", "def 
get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 'config.txt'))\n return scs_config_file" ]
[ "0.72044575", "0.6985214", "0.6869317", "0.68644977", "0.67701125", "0.6757046", "0.67386943", "0.6731083", "0.66863096", "0.66134137", "0.6526606", "0.650571", "0.65033567", "0.65007305", "0.64274645", "0.6427291", "0.64223856", "0.64209414", "0.6402593", "0.6392462", "0.63479346", "0.6329114", "0.6321647", "0.6248544", "0.6234235", "0.62329954", "0.6181251", "0.6164528", "0.6162415", "0.61398965" ]
0.7370779
0
Calculate the area of each grid cell for a user-provided grid cell resolution. Area is in square meters, but resolution is given in decimal degrees.
def do_grid (resolution): # Calculations needs to be in radians lats = np.deg2rad(np.arange(-57,84, resolution)) r_sq = 6371000**2 n_lats = int(360./resolution) area = r_sq*np.ones(n_lats)[:, None]*np.deg2rad(resolution)*( np.sin(lats[1:]) - np.sin(lats[:-1])) return area.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid_area(ulon,ulat):\n R = 6371. * 1000. # radius of Earth in meters\n dlon = N.diff(ulon)\n dlat = N.diff(ulat)\n dx = N.outer(deg2rad * R * N.cos(deg2rad * ulat),dlon) # dx (meters)\n dy = 60. * 1852. * dlat # dy (meters)\n area = (dx[1:] + dx[:-1]) / 2. * dy[:,N.newaxis] # area of grid cells\n return area", "def area(self):\n num_rows = self.row_end - self.row_start\n num_cols = self.col_end - self.col_start\n area = num_rows*num_cols\n return area", "def area(self, by_spec=False):\n if not isinstance(self.ref_cell, Cell):\n return dict() if by_spec else 0\n if self.magnification is None:\n factor = self.columns * self.rows\n else:\n factor = self.columns * self.rows * self.magnification ** 2\n if by_spec:\n cell_area = self.ref_cell.area(True)\n for kk in cell_area.keys():\n cell_area[kk] *= factor\n return cell_area\n else:\n return self.ref_cell.area() * factor", "def area(self, by_spec=False):\n if not isinstance(self.ref_cell, Cell):\n return dict() if by_spec else 0\n if self.magnification is None:\n factor = self.columns * self.rows\n else:\n factor = self.columns * self.rows * self.magnification**2\n if by_spec:\n cell_area = self.ref_cell.area(True)\n for kk in cell_area.keys():\n cell_area[kk] *= factor\n return cell_area\n else:\n return self.ref_cell.area() * factor", "def area(self, by_spec=False):\n if by_spec:\n cell_area = {}\n for element in self.elements:\n element_area = element.area(True)\n for ll in element_area.keys():\n if ll in cell_area:\n cell_area[ll] += element_area[ll]\n else:\n cell_area[ll] = element_area[ll]\n else:\n cell_area = 0\n for element in self.elements:\n cell_area += element.area()\n return cell_area", "def grid_area(cs_grid=None,cs_res=None):\n # Calculate area on a cubed sphere\n if cs_res is None:\n cs_res = cs_grid['lon_b'].shape[-1] - 1\n elif cs_grid is None:\n cs_grid = cubedsphere.csgrid_GMAO(cs_res)\n elif cs_grid is not None and cs_res is not None:\n assert cs_res == cs_grid['lon_b'].shape[-1], 'Routine grid_area received inconsistent inputs' \n cs_area = np.zeros((6,cs_res,cs_res))\n cs_area[0,:,:] = face_area(cs_grid['lon_b'][0,:,:],cs_grid['lat_b'][0,:,:])\n for i_face in range(1,6):\n cs_area[i_face,:,:] = cs_area[0,:,:].copy()\n return cs_area", "def area_grid(self, dtype=np.float32):\n if self.rotation > 0:\n raise NotImplementedError(\n \"area_grid has not yet been implemented for rotated grids.\"\n )\n if self.crs.is_geographic:\n data = gis_utils.reggrid_area(self.ycoords.values, self.xcoords.values)\n elif self.crs.is_projected:\n ucf = rasterio.crs.CRS.from_user_input(self.crs).linear_units_factor[1]\n data = np.full(self.shape, abs(self.res[0] * self.res[0]) * ucf**2)\n da_area = xr.DataArray(\n data=data.astype(dtype), coords=self.coords, dims=self.dims\n )\n da_area.raster.set_nodata(0)\n da_area.raster.set_crs(self.crs)\n da_area.attrs.update(unit=\"m2\")\n return da_area.rename(\"area\")", "def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])", "def calculatearea(self):\r\n return self.width * self.height", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, 
y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def compute_grid_area(ds, check_total=True):\n \n radius_earth = 6.37122e6 # m, radius of Earth\n area_earth = 4.0 * np.pi * radius_earth**2 # area of earth [m^2]e\n \n def infer_lon_name(ds):\n lon_names = ['longitude', 'lon']\n for n in lon_names:\n if n in ds:\n return n\n raise ValueError('could not determine lon name') \n \n def infer_lat_name(ds):\n lat_names = ['latitude', 'lat']\n for n in lat_names:\n if n in ds:\n return n\n raise ValueError('could not determine lat name') \n\n lon_name = infer_lon_name(ds) \n lat_name = infer_lat_name(ds) \n \n weights = lat_weights_regular_grid(ds[lat_name])\n area = weights + 0.0 * ds[lon_name] # add 'lon' dimension\n area = (area_earth / area.sum(dim=(lat_name, lon_name))) * area\n \n if check_total:\n np.testing.assert_approx_equal(np.sum(area), area_earth)\n \n return xr.DataArray(area, dims=(lat_name, lon_name), attrs={'units': 'm^2', 'long_name': 'area'})", "def area(self):\n return numpy.prod(\n numpy.meshgrid(*self.binwidths, indexing='ij'), axis=0)", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def area(self, by_spec=False):\n if not isinstance(self.ref_cell, Cell):\n return dict() if by_spec else 0\n if self.magnification is None:\n return self.ref_cell.area(by_spec)\n else:\n if by_spec:\n factor = self.magnification ** 2\n cell_area = self.ref_cell.area(True)\n for kk in cell_area.keys():\n cell_area[kk] *= factor\n return cell_area\n else:\n return self.ref_cell.area() * self.magnification ** 2", "def area(self, by_spec=False):\n if not isinstance(self.ref_cell, Cell):\n return dict() if by_spec else 0\n if self.magnification is None:\n return self.ref_cell.area(by_spec)\n else:\n if by_spec:\n factor = self.magnification**2\n cell_area = self.ref_cell.area(True)\n for kk in cell_area.keys():\n cell_area[kk] *= factor\n return cell_area\n else:\n return self.ref_cell.area() * self.magnification**2", "def area(self, by_spec=False):\n if by_spec:\n cell_area = {}\n for element in itertools.chain(self.polygons, self.paths, self.references):\n element_area = element.area(True)\n for ll in element_area.keys():\n if ll in cell_area:\n cell_area[ll] += element_area[ll]\n else:\n cell_area[ll] = element_area[ll]\n else:\n cell_area = 0\n for element in itertools.chain(self.polygons, self.paths, self.references):\n cell_area += element.area()\n return cell_area", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def area(width, height):\n return width * height", "def area(self):\n area = self.__width * self.__height\n return area", "def calculate_area(building, pixel_size=1):\n return len(building.points) * (pixel_size**2)", "def area(self):\n return(self.__width * self.__height)", "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def area(self):\n return (self.width * self.height)", "def area(self):\n\t\treturn self.width * self.height", "def compute_area(boxes):\n area = (boxes[:, 2] - 
boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n return area", "def area(self):\n area = self.__size * self.__size\n return(area)", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area", "def area(self):\n area = self.__size * self.__size\n return area", "def area(self):\n return (self.__width * self.__height)" ]
[ "0.6939898", "0.6827993", "0.6783789", "0.6774796", "0.6750549", "0.67404014", "0.6659824", "0.6657721", "0.66374826", "0.66325015", "0.6627994", "0.6614619", "0.65675646", "0.653298", "0.6518739", "0.64830256", "0.6461933", "0.6324823", "0.63124484", "0.6302911", "0.6300791", "0.62962526", "0.6277283", "0.62381583", "0.62356514", "0.6232399", "0.6226022", "0.6225922", "0.6208417", "0.620803" ]
0.743558
0
Integrate dy/dt = rhs_func from t=0 to t=num_days with y(0) = y0. Returns a list of state vectors, one for each day.
def integrate(rhs_func, y0, num_days, iterations_per_day): out = [y0.clone()] y = y0.clone() t = 0 dt = 1 / iterations_per_day for day in range(num_days): for it in range(iterations_per_day): y += dt * rhs_func(y, t) t += dt # y, t = RK4_step(rhs_func,y,t,dt) out.append(y.clone()) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integrateTransients(self, numdays=500):\n tend = numdays * 24.0\n\n r = sp.integrate.solve_ivp(self.derv, (0, tend), [0.7, 0.0, 0.0], t_eval=[tend], method='Radau')\n results_trans = np.transpose(r.y)\n\n return (results_trans[-1, :])", "def integrate_explicit(self, y, derivative, min_dt=0.05):\n\n n_component = len(y)\n n_time = len(self.target_times)\n self.soln_array = numpy.zeros((n_time, n_component))\n\n time = self.target_times[0]\n self.soln_array[0, :] = y\n for i_time, new_time in enumerate(self.target_times):\n if i_time == 0:\n continue\n while time < new_time:\n f = derivative(y, time)\n old_time = time\n time = time + min_dt\n dt = min_dt\n if time > new_time:\n dt = new_time - old_time\n time = new_time\n for i in range(n_component):\n y[i] = y[i] + dt * f[i]\n # hack to avoid errors due to time-step\n if y[i] < 0.:\n y[i] = 0.\n self.soln_array[i_time, :] = y", "def solve(self,n_days = 100,init_state = None,start_date = None,d = 1):\n\n # If init state is not given we use I0\n if init_state is None:\n assert self.start_state is not None\n init_state = int(self.I0)\n\n # Transform init_state into state object\n init_state = self.make_state(init_state)\n\n # Safety checks\n tol = 2\n assert hasattr(self,\"compartments\")\n assert len(init_state) == len(self.compartments)\n # assert hasattr(self,\"N\")\n # assert np.abs(init_state.sum() - self.N) < tol,f\"Init state {init_state.values} does not sum to total population {self.N}\"\n assert n_days > self.offset\n \n # Grid of time points (in days)\n # Take offset into account\n offset = self.offset\n t = np.linspace(0, n_days - offset, (n_days - offset +1)*d)\n\n # Integrate the model equations over the time grid, t.\n states = odeint(self.derivative, init_state, t)\n\n # Converts to DataFrame and then to custom object\n states = pd.DataFrame(states,columns = self.compartments)\n if d > 1: \n states.index = states.index / d\n\n # Add offset into account\n if offset > 0:\n states.index = range(offset,n_days + 1)\n states = states.reindex(range(0,n_days + 1))\n states = states.fillna(method = \"bfill\")\n elif offset < 0:\n states.index = [x + offset for x in states.index]\n\n # Convert to custom object\n states = CompartmentStates(states)\n states.build_aggregates(self.states)\n\n # If start date is given, convert to dates\n if self.start_date is not None:\n start_date = self.start_date\n if start_date is not None:\n index = pd.to_datetime(start_date) + pd.TimedeltaIndex(states.index,unit = \"D\")\n states.index = index\n \n return states", "def integrate(self, y0, t0, tend, N=None, abstol=1e-5, reltol=1e-5):\n from ode45 import ode45\n vfun = lambda t,y: self.rhs.Applyf(y)\n vslot = (t0, tend)\n vinit = y0\n t,Y,stats = ode45(vfun,vslot,vinit,abstol=abstol,reltol=reltol,stats=True)\n self.stats = stats\n return t,Y", "def integrate(self, residuals, y0, ydot0, t_eval, events=None):\n raise NotImplementedError", "def compute_integrals(self):\n\n integrals = self.dat.data.reshape(-1, self.nvdofs).T.dot(self.cell_volumes)\n integrals_lift = Function(self._function_space)\n integrals_lift.dat.data[:] = np.tile(integrals,(self.nhdofs,))\n\n return integrals_lift", "def deterministic_integration(self):\n y0 = self.initial_conditions\n t = self.t\n sol = odeint(self.a, y0, t)\n return sol", "def solve_integral(integrand, y):\n solnarr = np.empty(len(y))\n for i in range(len(y)):\n yy = y[i]\n soln = quad(integrand, 0, np.inf, args=(yy))\n solnarr[i] = soln[0]\n return solnarr", "def integrate(self, x0, t0, tend, N=100):\n h = 
np.double(tend-t0)/N\n t = np.zeros((N+1,1)); t[0]=t0\n x = x0.copy(); y = [x0.copy()]\n for i in xrange(N):\n g = self.rhs.Applyg(x) # evaluate vector g(x)\n A = lambda v: self.rhs.ApplyDf(x,v) # ----------- TODO: test this after implementing procedural A*x support\n x = self.matexp(A,x,h) + h*self.phi1(A,g,h)\n y.append(x)\n t[i+1] = t[i]+h\n return t,np.array(y)", "def monkey_ode_int(self, function, y, time, args, Dfun, tfirst, **kwargs):\n assert all(y == self._state), 'unexpected state before integration'\n assert time == [0, self._tau], 'unexpected time steps for integration'\n assert args == (self.args,), 'unexpected arguments'\n assert tfirst\n return np.array([[0, 2], self._state])", "def step_1(f: Callable[..., float], x: float, y: np.array, params: Tuple,\\\n h: float) -> np.array:\n\n # Initialize the output vector.\n n = len(y)\n y_int = np.zeros(n)\n\n # Find dym/dx using the given function, then use it to compute dym-1/dx.\n y_int[0] = f(x, y, *params) * h\n\n # Starting with dym-1/dx, compute the other values down to y/dx.\n for i in range(1, n):\n y_int[i] = y[n-i] * h\n\n # Reverse the output vector so y/dx is on top.\n y_int = np.flipud(y_int)\n\n return y_int", "def scipy_integrate(func, X0, args, IRK_times, N=0):\n V0 = 0.7 # we fix the volatge initial condition\n t_span = [0.0, args.h * N]\n t_sim = np.array([t_span[0]])\n for k in range(1, N +1):\n temp = (k - 1) * args.h + IRK_times * args.h\n t_sim = np.vstack((t_sim, temp))\n t_next = np.array([k * args.h])\n t_sim = np.vstack((t_sim, t_next))\n del temp, t_next\n sol = solve_ivp(func, t_span, [X0[0], X0[1], X0[2], X0[3], V0], method=args.method, t_eval=t_sim.reshape(-1,))\n y_test = sol.y\n return t_sim[1:,:], y_test[:, 1:]", "def run_simulation(self, init_x: float, init_y: float, eval_times: np.ndarray) -> np.array:\n if isinstance(eval_times, np.ndarray):\n pass\n elif isinstance(eval_times, (list, tuple)):\n eval_times = np.array(eval_times, dtype=np.float64)\n else:\n raise TypeError('eval_times has to be a 1D numpy array, or a list/tuple of type that'\n ' can be cast to float')\n assert eval_times.ndim == 1\n init_state = np.array((init_x, init_y))\n sol = scipy.integrate.solve_ivp(self.system_dynamics_func, t_span=(0, eval_times[-1]),\n y0=init_state, vectorized=True, t_eval=eval_times, rtol=self.rtol)\n return sol.y.T", "def scipy_integrate(func, X0, args, N=0):\n V0 = 0.7 # we fix the volatge initial condition\n t_span = [0.0, args.h * N]\n t_sim = np.linspace(0.0, args.h * N, N+1)\n sol = solve_ivp(func, t_span, [X0[0], X0[1], X0[2], X0[3], V0], method=args.method, t_eval=t_sim.reshape(-1,))\n y_test = sol.y\n t_sim = t_sim.reshape(-1, 1)\n return t_sim[1:,:], y_test[:, 1:]", "def _run(self, step_n, y0_dict, population):\n tstart, dt, tend = 0, 1, step_n\n variables = self._model.VARIABLES[:]\n initials = [y0_dict[var] for var in variables]\n sol = solve_ivp(\n fun=self._model(population=population, **self._param_dict),\n t_span=[tstart, tend],\n y0=np.array(initials, dtype=np.int64),\n t_eval=np.arange(tstart, tend + dt, dt),\n dense_output=False\n )\n y_df = pd.DataFrame(data=sol[\"y\"].T.copy(), columns=variables)\n return y_df.round().astype(np.int64)", "def forecast(days):\n transition = np.array([[.7, .6], [.3, .4]])\n state = 0\n record = []\n for day in xrange(days):\n state = np.random.binomial(1, transition[1, state])\n record.append(state)\n return record", "def dx(self, t):\n \n if not self.sys.a <= t <= self.sys.b:\n self.log_warning(\"Time point 't' has to be in (a,b)\")\n arr = None\n 
else:\n arr = np.array([self.dx_fnc[xx](t) for xx in self.sys.states])\n \n return arr", "def integrateModelData(self, timespan, initial):\n dt = 0.01\n self.ts = np.arange(timespan[0], timespan[1], dt)\n r = sp.integrate.solve_ivp(self.derv, (timespan[0], timespan[-1]), initial, t_eval=self.ts, method='Radau')\n self.results = np.transpose(r.y)", "def integrate_trapezoid_col(fxdx_col, dx_col, init_val):\n assert len(fxdx_col) == len(dx_col)\n fxdx_l = fxdx_col.tolist()\n dx_l = dx_col.tolist()\n prev_val = init_val\n y = [init_val]*len(dx_col)\n prev_fx = 0\n for i in xrange(len(dx_col)):\n y_val = prev_val + float(dx_l[i]) * (fxdx_l[i] + prev_fx) / 2\n y[i] = y_val\n prev_val = y_val\n prev_fx = fxdx_l[i]\n \n return np.array(y)", "def simulate_scipy_dt(\n self,\n y0: phase_space.PhaseSpace,\n t0: utils.FloatArray,\n dt: utils.FloatArray,\n num_steps: int,\n params: utils.Params,\n ivp_kwargs=None,\n **kwargs: Any\n ) -> phase_space.PhaseSpace:\n t_eval = utils.dt_to_t_eval(t0, dt, num_steps)\n return self.simulate_scipy(y0, t0, t_eval, params, ivp_kwargs, **kwargs)", "def integrate_discrete_time_stochastic(self, y):\n self.compartments = self.convert_list_to_compartments(y)\n for label in self.compartments:\n self.compartments[label] = int(self.compartments[label])\n\n n_compartment = len(y)\n n_time = len(self.target_times)\n self.soln_array = numpy.zeros((n_time, n_compartment))\n\n time = self.target_times[0]\n self.soln_array[0, :] = y\n\n for i_time, new_time in enumerate(self.target_times):\n\n if i_time == 0:\n continue\n\n dt = new_time - self.target_times[i_time - 1]\n\n self.time = time\n self.calculate_vars()\n self.calculate_events()\n\n for event in self.events:\n from_label, to_label, rate = event\n\n mean = rate * dt\n delta_population = numpy.random.poisson(mean, 1)[0]\n\n if from_label and to_label:\n if delta_population > self.compartments[from_label]:\n delta_population = self.compartments[from_label]\n self.compartments[from_label] -= delta_population\n self.compartments[to_label] += delta_population\n elif to_label is None:\n # death\n if delta_population > self.compartments[from_label]:\n delta_population = self.compartments[from_label]\n self.compartments[from_label] -= delta_population\n elif from_label is None:\n # birth\n self.compartments[to_label] += delta_population\n\n self.checks()\n\n time += dt\n\n if i_time < n_time:\n y = self.convert_compartments_to_list(self.compartments)\n self.soln_array[i_time, :] = y", "def accumulate(self, days: int, dt: float, plot=True):\r\n self.floatCheck([days, dt])\r\n self.negValCheck([days, dt])\r\n t = np.linspace(0, days, int(days / dt) + 1)\r\n S, E, I, R = self._simulate(days, dt)\r\n # create a numpy array that will hold all of the values\r\n cases = np.zeros(len(I))\r\n # add up the total infected and removed at given time to account for everyone with the virus\r\n for i in range(len(I)):\r\n cases[i] = I[i] + R[i]\r\n # create a dictionary that holds the data for easy conversion to dataframe\r\n data1 = {\r\n \"Days\": t,\r\n \"Susceptible\": S,\r\n \"Exposed\": E,\r\n \"Infected\": I,\r\n \"Removed\": R,\r\n \"Total Cases\": cases,\r\n }\r\n # create the column labels\r\n labels = [\r\n \"Days\",\r\n \"Susceptible\",\r\n \"Exposed\",\r\n \"Infected\",\r\n \"Removed\",\r\n \"Total Cases\",\r\n ]\r\n # convert to dataframe\r\n df = pd.DataFrame(data=data1, columns=labels)\r\n if plot:\r\n # do some plotting\r\n df.plot(x=\"Days\", y=[\"Total Cases\"])\r\n plt.xlabel(\"Days\")\r\n plt.ylabel(\"Total Cases\")\r\n 
plt.show()\r\n # return dataframe\r\n return df", "def integrate(self, x, dx):\n return self.State.integrate(x, dx)", "def solve_differential_equation(f_derivatives, initial, oldest=120):\n bunch = solve_ivp(f_derivatives, t_span=(0, oldest), y0=initial, vectorized=True, dense_output=True)\n return bunch.sol", "def one_step(func, y0, rng):\n return scipy.integrate.odeint(func, y0, rng)[-1]", "def integrate_continuous_time_stochastic(self, y):\n\n self.compartments = self.convert_list_to_compartments(y)\n for label in self.compartments:\n self.compartments[label] = int(self.compartments[label])\n\n n_compartment = len(y)\n n_time = len(self.target_times)\n self.soln_array = numpy.zeros((n_time, n_compartment))\n\n time = self.target_times[0]\n self.soln_array[0, :] = y\n n_sample = 0\n for i_time, new_time in enumerate(self.target_times):\n\n if i_time == 0:\n continue\n\n while time < new_time:\n self.time = time\n self.calculate_vars()\n self.calculate_events()\n\n if len(self.events) == 0:\n # equilibrium reached, no more changes, so go\n # to end of interval\n dt = new_time - time\n else:\n event_rates = [event[2] for event in self.events]\n i_event = pick_event(event_rates)\n\n total_rate = sum(event_rates)\n dt = old_div(-math.log(random.random()), total_rate)\n\n from_label, to_label, rate = self.events[i_event]\n if from_label and to_label:\n self.compartments[from_label] -= 1\n self.compartments[to_label] += 1\n elif to_label is None:\n # death\n self.compartments[from_label] -= 1\n elif from_label is None:\n # birth\n self.compartments[to_label] += 1\n\n self.checks()\n time += dt\n n_sample += 1\n\n if i_time < n_time:\n y = self.convert_compartments_to_list(self.compartments)\n self.soln_array[i_time, :] = y", "def rhs(y, t, l, m, g):\n # Unpack the states so you can use the variable names in the\n # sympy.physics.mechanics equations\n q1 = y[0]\n q2 = y[1]\n u1 = y[2]\n u2 = y[3]\n # or you can make use of python's tuple unpacking for a one liner\n # q1, q2, u1, u2 = y\n\n # Initialize a vector for the derivatives.\n dydt = zeros((len(y)))\n\n # Compute the derivatives, these are pasted in from the\n # sympy.physics.mechanics results.\n dydt[0] = u1\n dydt[1] = u2\n dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -\n g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -\n l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +\n l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -\n l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +\n 2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))\n dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -\n l**2*m*(-sin(q1)*cos(q2) +\n sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -\n l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -\n sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))\n - l**2*m)\n\n # Return the derivatives.\n return dydt", "def solve(self):\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just ode element. 
When we put (par_est), the parenteses won't\n indicate a typle\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]", "def solve_model():\n from scipy.integrate import ode\n # Initialise constants and state variables\n (init_states, constants) = initConsts()\n\n # Set timespan to solve over\n voi = linspace(0, 100, 5000)\n\n # Construct ODE object to solve\n r = ode(computeRates)\n r.set_integrator('vode', method='bdf', atol=1e-06, rtol=1e-06, max_step=1)\n r.set_initial_value(init_states, voi[0])\n r.set_f_params(constants)\n\n # Solve model\n states = array([[0.0] * len(voi)] * sizeStates)\n states[:,0] = init_states\n for (i,t) in enumerate(voi[1:]):\n if r.successful():\n r.integrate(t)\n states[:,i+1] = r.y\n else:\n break\n\n # Compute algebraic variables\n algebraic = computeAlgebraic(constants, states, voi)\n return (voi, states, algebraic)", "def init_state(y_k, y_k_prev, dt):\n return np.array([y_k[0], y_k[2] * np.cos(np.deg2rad(y_k[3])),\n y_k[1], y_k[2] * np.sin(np.deg2rad(y_k[3]))])" ]
[ "0.650054", "0.5741845", "0.5683879", "0.54373115", "0.54229563", "0.53735167", "0.5365727", "0.5364101", "0.53461295", "0.5233571", "0.5200717", "0.518434", "0.5144111", "0.513495", "0.51227826", "0.5083833", "0.5077746", "0.5073881", "0.5071325", "0.504791", "0.5043892", "0.50426126", "0.5038165", "0.5030269", "0.5017669", "0.49971277", "0.4994388", "0.49873644", "0.49743128", "0.4956439" ]
0.78576934
0
This function takes a SOM or SO and goes through the individual spectra adjusting the bin contents by either multiplying or dividing by the bin widths or the bin centers taken from the individual spectra.
def fix_bin_contents(obj, **kwargs): import hlr_utils # set up for working through data (result, res_descr) = hlr_utils.empty_result(obj) o_descr = hlr_utils.get_descr(obj) # Setup keyword arguments try: scale = kwargs["scale"] except KeyError: scale = False try: width = kwargs["width"] except KeyError: width = True try: units = kwargs["units"] except KeyError: units = "microsecond" # Primary axis for transformation. If a SO is passed, the function, will # assume the axis for transformation is at the 0 position if o_descr == "SOM": axis_pos = hlr_utils.one_d_units(obj, units) else: axis_pos = 0 result = hlr_utils.copy_som_attr(result, res_descr, obj, o_descr) # iterate through the values import array_manip import utils for i in xrange(hlr_utils.get_length(obj)): val = hlr_utils.get_value(obj, i, o_descr, "y") err2 = hlr_utils.get_err2(obj, i, o_descr, "y") axis = hlr_utils.get_value(obj, i, o_descr, "x", axis_pos) axis_err2 = hlr_utils.get_err2(obj, i, o_descr, "x", axis_pos) map_so = hlr_utils.get_map_so(obj, None, i) if width: (bin_const, bin_const_err2) = utils.calc_bin_widths(axis, axis_err2) else: (bin_const, bin_const_err2) = utils.calc_bin_centers(axis, axis_err2) if scale: value = array_manip.mult_ncerr(val, err2, bin_const, bin_const_err2) else: value = array_manip.div_ncerr(val, err2, bin_const, bin_const_err2) hlr_utils.result_insert(result, res_descr, value, map_so, "y") return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binspecdat( wavelength, flux, fluxerr=[], binwidth=10, sigclip=0, sumerrs=False,\n wstart=0, wend=0 ):\n\n w,f = wavelength, flux\n wbinned, fbinned = [], []\n wbin,fbin,dfbin = np.array([]), np.array([]), np.array([])\n dw, df = [], []\n if wstart : istart = np.where( w>wstart )[0][0]\n else : istart = 0\n if wend : iend = np.where( w<wend )[0][-1]\n else : iend = len(w)\n w0 = w[istart]\n for i in range(istart,iend):\n fullbin = False\n if wend and w[i]>wend : break\n if w[i]>w0+binwidth :\n # determine the mean value in this bin\n w0 = w[i]\n igoodval = []\n if sigclip :\n # use sigma clipping to reject outliers\n igoodval = isigclip( fbin, sigclip )\n if len(igoodval) :\n wbinval = np.mean( wbin[igoodval] )\n fbinval = np.mean( fbin[igoodval] )\n dwbinval = (wbin[igoodval].max() - wbin[igoodval].min())/2.\n #dwbinval = (wbin.max() - wbin.min())/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( fbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval2 = np.mean( dfbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( fbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n\n fullbin = True\n # note: if the binning is not successful, we continue building the bin\n else :\n # use a straight median\n wbinval = np.median( wbin )\n fbinval = np.median( fbin )\n dwbinval = (wbin[-1]-wbin[0])/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( fbin )/np.sqrt(len(fbin)-2)\n dfbinval2 = np.mean( dfbin )\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( fbin ) / np.sqrt(max(1,len(fbin)))\n fullbin = True\n\n if fullbin :\n wbinned.append( wbinval )\n fbinned.append( fbinval )\n dw.append( dwbinval )\n df.append( dfbinval )\n\n # start a new bin\n wbin,fbin,dfbin = np.array([]), np.array([]), np.array([])\n\n # add a new data point to the bin\n wbin = np.append( wbin, w[i] )\n fbin = np.append( fbin, f[i] )\n if len(fluxerr):\n dfbin = np.append( dfbin, fluxerr[i] )\n else : dfbin = np.append( dfbin, 0 )\n\n return( np.array( wbinned ), np.array(dw), np.array(fbinned), np.array(df) )", "def binning(S, bands):\n B = np.zeros((S.shape[0], len(bands)), dtype=S.dtype)\n for i, b in enumerate(bands):\n B[:, i] = np.mean(S[:, b[0] : b[1]], axis=1)\n\n return B", "def binspecdattomatch( wavelength, flux, wavetomatch, fluxerr=[], sigclip=0,\n sumerrs=False ):\n w,f = wavelength, flux\n if len(fluxerr):\n df = fluxerr\n else :\n df=np.zeros(len(f))\n\n wavetomatch = np.asarray(wavetomatch)\n wavetomatch_halfbinwidth = np.diff(wavetomatch)/2.\n lastbinlow = wavetomatch[-1] - wavetomatch_halfbinwidth[-1]\n lastbinhigh = wavetomatch[-1] + wavetomatch_halfbinwidth[-1]\n wavebinedges = np.append( wavetomatch[:-1]-wavetomatch_halfbinwidth,\n np.array([lastbinlow,lastbinhigh]))\n\n wbinned, dwbinned, fbinned, dfbinned = [], [], [], []\n for i in range(len(wavebinedges)-1):\n wavebinmin=wavebinedges[i]\n wavebinmax=wavebinedges[i+1]\n iinbin = np.where((w>=wavebinmin)&(w<wavebinmax))\n\n winbin = w[iinbin]\n finbin = f[iinbin]\n dfinbin = df[iinbin]\n\n if sigclip :\n # use sigma clipping to reject outliers\n igoodval = isigclip( finbin, sigclip )\n if len(igoodval) :\n wbinval = np.mean( winbin[igoodval] )\n fbinval = np.mean( finbin[igoodval] )\n 
dwbinval = (winbin[igoodval].max() - winbin[igoodval].min())/2.\n #dwbinval = (wbin.max() - wbin.min())/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( finbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval2 = np.mean( dfinbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( finbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n else :\n # use a straight median\n wbinval = np.median( winbin )\n fbinval = np.median( finbin )\n dwbinval = (winbin[-1]-winbin[0])/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( finbin )/np.sqrt(len(finbin)-2)\n dfbinval2 = np.mean( dfinbin )\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( finbin ) / np.sqrt(max(1,len(finbin)))\n\n wbinned.append( wbinval )\n fbinned.append( fbinval )\n dwbinned.append( dwbinval )\n dfbinned.append( dfbinval )\n\n return( np.array( wbinned ), np.array(dwbinned), np.array(fbinned), np.array(dfbinned) )", "def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n diff = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def obj_s2n_wave(s2n_dict, wv_bins, flux_bins, otype, outfile=None, ax=None):\n logs = get_logger()\n nwv = wv_bins.size\n nfx = flux_bins.size\n s2n_sum = np.zeros((nwv-1,nfx-1))\n s2n_N = np.zeros((nwv-1,nfx-1)).astype(int)\n # Loop on exposures+wedges (can do just once if these are identical for each)\n for jj, wave in enumerate(s2n_dict['waves']):\n w_i = np.digitize(wave, wv_bins) - 1\n m_i = np.digitize(s2n_dict['fluxes'][jj], flux_bins) - 1\n mmm = []\n for ll in range(nfx-1): # Only need to do once\n mmm.append(m_i == ll)\n #\n for kk in range(nwv-1):\n all_s2n = s2n_dict['s2n'][jj][:,w_i==kk]\n for ll in range(nfx-1):\n if np.any(mmm[ll]):\n s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])\n s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]\n\n sty_otype = get_sty_otype()\n\n # Plot\n if ax is None:\n fig = plt.figure(figsize=(6, 6.0))\n ax= plt.gca()\n # Title\n fig.suptitle('{:s}: Summary'.format(sty_otype[otype]['lbl']),\n fontsize='large')\n\n # Plot em up\n wv_cen = (wv_bins + np.roll(wv_bins,-1))/2.\n lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]\n mxy = 1e-9\n for ss in range(nfx-1):\n if np.sum(s2n_N[:,ss]) == 0:\n continue\n lbl = 'MAG = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])\n ax.plot(wv_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],\n label=lbl, color=sty_otype[otype]['color'])\n mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))\n\n ax.set_xlabel('Wavelength (Ang)')\n #ax.set_xlim(-ylim, ylim)\n ax.set_ylabel('Mean S/N per Ang in bins of 20A')\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_ylim(0.1, mxy*1.1)\n\n legend = plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,\n handletextpad=0.3, fontsize='medium', numpoints=1)\n\n # Finish\n plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)\n plt.subplots_adjust(top=0.92)\n if outfile is not None:\n plt.savefig(outfile, dpi=600)\n print(\"Wrote: {:s}\".format(outfile))", "def 
specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n bfspec = self.dedisperse(dmbin)[tbin].mean(axis=0).real\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def wav2(bins, weis):\n\n mean = np.average(bins, weights=weis)\n var = np.average((bins-mean)**2, weights=weis)\n ##\n sum1 = weis.sum()\n sum2 = (weis**2).sum()\n rms = np.sqrt(sum1*var/(sum1-sum2/sum1))\n\n return mean, rms", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def bin_spectra(l, cl, binning_file, lmax, type, spectra=None, mbb_inv=None, binned_mcm=True):\n\n bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax)\n n_bins = len(bin_hi)\n\n # the alm2cl return cl starting at l = 0, we use spectra from l = 2\n # this is due in particular to the fact that the mcm is computed only for l>=2\n \n l = np.arange(2, lmax)\n if spectra is None: cl = cl[l]\n else: cl = {f: cl[f][l] for f in spectra}\n \n if type == \"Dl\": fac = (l * (l + 1) / (2 * np.pi))\n elif type == \"Cl\": fac = l * 0 + 1\n\n # we have the option to deconvolve the l-by-l mode coupling matrix\n if (mbb_inv is not None) & (binned_mcm == False):\n l, cl = deconvolve_mode_coupling_matrix(l, cl, mbb_inv, spectra)\n \n # Now the binning part\n if spectra is None:\n ps = np.zeros(n_bins)\n for ibin in range(n_bins):\n loc = np.where((l >= bin_lo[ibin]) & (l <= bin_hi[ibin]))\n ps[ibin] = (cl[loc] * fac[loc]).mean()\n else:\n vec = []\n for f in spectra:\n binned_power = np.zeros(n_bins)\n for ibin in range(n_bins):\n loc = np.where((l >= bin_lo[ibin]) & (l <= bin_hi[ibin]))\n binned_power[ibin] = (cl[f][loc] * fac[loc]).mean()\n vec = np.append(vec, binned_power)\n ps = vec2spec_dict(n_bins, vec, spectra)\n\n # we have the option to deconvolve the binned mode coupling matrix\n if (mbb_inv is not None) & (binned_mcm == True):\n lb, ps = deconvolve_mode_coupling_matrix(lb, ps, mbb_inv, spectra)\n return lb, ps", "def binned(spectra, freqs, kbins):\n\n # Work also with a single spectrum\n if len(spectra.shape) == 1:\n spectra = spectra.reshape(-1, *spectra.shape)\n\n digitized = np.digitize(freqs, kbins)\n return np.array([\n spectra[:, digitized == i].mean() for i in range(1, len(kbins))\n ])", "def set_tone_bins(self, bins, nsamp, amps=None, load=True, normfact=None, phases=None, preset_norm=True):\n if self.BYTES_PER_SAMPLE * nsamp > self.MEMORY_SIZE_BYTES:\n message = \"Requested tone size ({:d} bytes) exceeds available memory ({:d} bytes)\"\n raise ValueError(message.format(self.BYTES_PER_SAMPLE * nsamp, self.MEMORY_SIZE_BYTES))\n if bins.ndim == 1:\n bins.shape = (1, bins.shape[0])\n nwaves = bins.shape[0]\n spec = np.zeros((nwaves, nsamp), dtype='complex')\n self.tone_bins = bins.copy()\n self.tone_nsamp = nsamp\n #this is to make sure phases are correct shape since we are reusing phases\n if phases is None or phases.shape[0] != bins.shape[1]:\n phases = np.random.random(bins.shape[1]) * 2 * np.pi\n self.phases = phases.copy()\n if amps is None:\n amps = 1.0\n self.amps = amps\n for k in range(nwaves):\n spec[k, bins[k, :]] = amps * np.exp(1j * phases)\n wave = np.fft.ifft(spec, axis=1)\n if preset_norm:\n self.wavenorm = calc_wavenorm(bins.shape[1], nsamp)\n else:\n self.wavenorm = np.abs(wave).max()\n if normfact is not None:\n wn = (2.0 / normfact) * len(bins) / float(nsamp)\n print \"ratio of 
current wavenorm to optimal:\", self.wavenorm / wn\n self.wavenorm = wn\n q_rwave = np.round((wave.real / self.wavenorm) * (2 ** 15 - 1024)).astype('>i2')\n q_iwave = np.round((wave.imag / self.wavenorm) * (2 ** 15 - 1024)).astype('>i2')\n q_iwave = np.roll(q_iwave, self.iq_delay, axis=1)\n q_rwave.shape = (q_rwave.shape[0] * q_rwave.shape[1],)\n q_iwave.shape = (q_iwave.shape[0] * q_iwave.shape[1],)\n self.q_rwave = q_rwave\n self.q_iwave = q_iwave\n if load:\n self.load_waveforms(q_rwave,q_iwave)\n self.save_state()", "def calc_spectra(stream, data_type):\n \n import numpy as np\n from mtspec import mtspec\n from scipy import interpolate\n from scipy.stats import binned_statistic \n\n # Read in file \n tr = stream[0]\n data = tr.data\n delta = tr.stats.delta\n samprate = tr.stats.sampling_rate\n npts = tr.stats.npts\n \n # Determine nyquist frequency\n nyquist = 0.5 * samprate\n \n\n # Calc spectra amplitudes and frequencies \n # Switched number of tapers from 7 to 5. Decreases computation time and\n # results are similar\n amp_squared, freq = mtspec(data, delta=delta, time_bandwidth=4, \n number_of_tapers=5, nfft=npts, quadratic=True)\n \n # Convert from power spectra to amplitude spectra\n amp = np.sqrt(amp_squared)\n \n # Use scipy interpolate function to fill in data in missing bins\n f = interpolate.interp1d(freq, amp)\n freq_new = np.arange(np.min(freq), np.max(freq), 0.0001)\n amp_new = f(freq_new)\n\n # Remove certain frequencies that are too low or high. \n indexes = []\n \n for i, val in enumerate(freq_new):\n \n # Remove frequencies below 1/2 length of record\n if val <= 1/(delta*npts*0.5) :\n indexes.append(i)\n \n # Remove frequencies above 10 Hz for sm data because of the way it was processed \n elif val > 10 and data_type == 'sm':\n indexes.append(i)\n\n # Remove frequencies above nyquist frequency for disp data\n # (it's already removed in the previous step for sm data)\n elif val > nyquist and data_type == 'disp': \n indexes.append(i)\n \n # Remove any duplicate indexes\n indexes = np.unique(indexes)\n freq_new = np.delete(freq_new,indexes)\n amp_new = np.delete(amp_new,indexes) \n \n # Set up bins\n if data_type == 'sm':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 10 Hz because after that the sm data is unusable due to how it was\n # processed. 
\n bins = np.logspace(np.log10(0.004), np.log10(10), num=21)\n \n elif data_type == 'disp':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 0.5 Hz because that is the nyquist frequency .\n bins = np.logspace(np.log10(0.004), np.log10(0.5), num=21)\n \n bin_means, bin_edges, binnumber = binned_statistic(freq_new,\n amp_new,\n statistic='mean',\n bins=bins)\n \n # for i in range(len(bin_means)):\n # bin_means[i] = 10**bin_means[i]\n \n \n return(bin_means, freq, amp)", "def bin_by_npixels(self, npix):\n\n disp = self.dispersion\n dbins = disp[1:] - disp[:-1]\n bin_boundary = disp[:-1] + 0.5 * dbins\n\n lbins = bin_boundary[:-1]\n rbins = bin_boundary[1:]\n mbins = disp[1:-1]\n dbins = rbins - lbins\n flux = self.flux[1:-1]\n flux_err = self.flux_err[1:-1]\n num_bins = len(mbins)\n\n num_new_bins = int((num_bins - (num_bins % npix)) / npix)\n\n new_wave = np.zeros(num_new_bins)\n new_flux = np.zeros(num_new_bins)\n new_flux_err = np.zeros(num_new_bins)\n\n for idx in range(num_new_bins):\n\n _new_flux = 0\n _new_flux_err = 0\n _new_dbin = 0\n\n for jdx in range(npix):\n _new_flux += flux[idx * npix + jdx] * dbins[idx * npix + jdx]\n _new_dbin += dbins[idx * npix + jdx]\n _new_flux_err += (flux_err[idx * npix + jdx] * dbins[\n idx * npix + jdx]) ** 2\n\n rbin = rbins[npix * idx + npix - 1]\n lbin = lbins[npix * idx]\n _new_wave = (rbin - lbin) * 0.5 + lbin\n\n new_wave[idx] = _new_wave\n new_flux[idx] = _new_flux / _new_dbin\n new_flux_err[idx] = np.sqrt(_new_flux_err) / _new_dbin\n\n return SpecOneD(dispersion=new_wave, flux=new_flux,\n flux_err=new_flux_err, unit='f_lam')", "def bin_spectra(self, spectra):\n\t\tspectra = np.asarray(spectra)\n\t\tlmax = spectra.shape[-1] - 1\n\t\tif lmax < self.lmax:\n\t\t\traise ValueError('The input spectra do not have enough l.')\n\n\t\treturn np.dot(self.P_bl, spectra[..., :self.lmax+1])", "def coadd_spectra(spec_list_fits, out_name, scale_spectra=True,\r\n use_ratios=False, ratio_range=[4200, 4300], \r\n one_side=True):\r\n\r\n spec_list_txt = [f.replace('fits', 'txt') for f in spec_list_fits]\r\n\r\n # first spectrum in the list is always the reference spectrum\r\n hdr = pyfits.getheader(spec_list_fits[0])\r\n #mjd = hdr['MJD']\r\n #date_obs = hdr['DATE-OBS']\r\n #epoch = hdr['EPOCH']\r\n #observat = hdr['OBSERVAT']\r\n exptime = hdr['EXPTIME']\r\n seeing = hdr['FWHM']\r\n # save some keywords\r\n keys = ['OBJECT', 'OBSERVER', 'DICHROIC', 'APERTURE', 'LAMPS', 'UTSHUT', 'OBSLST', 'RA', 'DEC', 'HOURANG', 'HA', 'TELFOCUS', 'CASSPA', 'PARALLAC', 'CCDTEMP', 'ANGLE', 'GRATING', 'AIRMASS']\r\n #mjd_blue = hdr['MJD']\r\n exptime_blue = hdr['EXPTIME']\r\n hdr_save = {}\r\n for key in keys:\r\n hdr_save[key] = hdr[key]\r\n verr = np.float(hdr['VERR'])**2\r\n spec_ref = np.genfromtxt(spec_list_txt[0], names='wave, flux', \r\n dtype='f4, f4')\r\n err_ref = np.genfromtxt(spec_list_txt[0].replace('spec', 'err'), \r\n names='wave, flux', dtype='f4, f4')\r\n wave = spec_ref['wave']\r\n spec_ref = spec_ref['flux'].view(np.ma.masked_array)\r\n err_ref = err_ref['flux'].view(np.ma.masked_array)\r\n\r\n\r\n # err_ref['flux'] = np.where(err_ref['flux'] <= 0, 1, err_ref['flux']) # reset bad error values to 1\r\n # boolean array: mask out invalid regions so average excludes zeros\r\n bad_err = err_ref <= 0\r\n spec_ref[bad_err] = np.ma.masked\r\n err_ref[bad_err] = np.ma.masked\r\n\r\n\r\n # spectra and their errors will be stored here\r\n spectra = np.ma.zeros((spec_ref.size, 
len(spec_list_fits)), dtype='f4')\r\n spectra_err = np.ma.zeros((spec_ref.size, len(spec_list_fits)), dtype='f4')\r\n\r\n spectra[:, 0] = spec_ref\r\n spectra_err[:, 0] = err_ref\r\n\r\n ratio = [1]\r\n\r\n for i, fname in enumerate(spec_list_fits[1:]):\r\n fname_txt = spec_list_txt[i+1]\r\n hdr = pyfits.getheader(fname)\r\n exptime += hdr['EXPTIME']\r\n seeing += hdr['FWHM']\r\n verr += np.float(hdr['VERR'])**2\r\n spec = np.genfromtxt(fname_txt, names='wave, flux', dtype='f4, f4')\r\n err = np.genfromtxt(fname_txt.replace('spec', 'err'), \r\n names='wave, flux', dtype='f4, f4')\r\n spec = spec['flux'].view(np.ma.masked_array)\r\n err = err['flux'].view(np.ma.masked_array)\r\n # reset bad error values to 1\r\n # err['flux'] = np.where(err['flux'] <= 0, 1, err['flux']) \r\n bad_err = err <= 0\r\n spec[bad_err] = np.ma.masked\r\n err[bad_err] = np.ma.masked\r\n\r\n spectra[:, i+1] = spec\r\n spectra_err[:, i+1] = err\r\n if scale_spectra:\r\n if use_ratios:\r\n # use the specified region to determine te ratio of spectra\r\n good = np.where((spec > ratio_range[0]) & \r\n (spec < ratio_range[1]))\r\n ratio.append(np.median(spec_ref[good]/spec[good]))\r\n else:\r\n spec_good_err = err > 0\r\n # identify overlap between sides\r\n wgd = (err_ref > 0) & (err > 0)\r\n\r\n ratio.append(match_spectra_leastsq(spec[wgd], \r\n spec_ref[wgd], err[wgd], \r\n err_ref[wgd]))\r\n\r\n \r\n\r\n spec_avg, sum_weights = np.average(spectra*ratio, weights=1./(spectra_err*ratio)**2, axis=1, returned=True)\r\n spec_err = 1./np.sqrt(sum_weights)\r\n # output coadded spectra and uncertainties\r\n f = open('%s.spec.txt' % out_name, 'w')\r\n g = open('%s.err.txt' % out_name, 'w')\r\n h = open('%s.snr.txt' % out_name, 'w')\r\n # add some header keywords\r\n for key in hdr_save.keys():\r\n f.write('# %s = %s\\n' % (key, hdr_save[key]))\r\n if one_side:\r\n # exposure time and velocity error are only well-defined for\r\n # data combined from a single side\r\n f.write('# FWHM = %.2f\\n' % float(seeing/len(spec_list_fits)))\r\n f.write('# VERR = %.2f\\n' % np.sqrt(verr))\r\n #f.write('# MJD = %.6f\\n' % (mjd + exptime/(2.*60.*60.*24.)))\r\n else:\r\n # when combining sides, use the MJD and EXPTIME from the combined blue side\r\n f.write('# EXPTIME = %.0f\\n' % exptime_blue)\r\n #f.write('# MJD = %.6f\\n' % mjd_blue)\r\n\r\n for x, y, z in zip(wave, spec_avg, spec_err):\r\n f.write('%.3f %.5g\\n' % (x, y))\r\n g.write('%.3f %.5g\\n' % (x, z))\r\n h.write('%.3f %.5g\\n' % (x, y/z))\r\n f.close()\r\n g.close()\r\n h.close()\r\n # save as 1D IRAF FITS files\r\n iraf.delete('%s.spec.fits' % out_name, verify=\"no\")\r\n iraf.delete('%s.err.fits' % out_name, verify=\"no\")\r\n iraf.delete('%s.snr.fits' % out_name, verify=\"no\")\r\n iraf.rspectext('%s.spec.txt' % out_name, '%s.spec.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n iraf.rspectext('%s.err.txt' % out_name, '%s.err.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n iraf.rspectext('%s.snr.txt' % out_name, '%s.snr.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n # add keywords\r\n f = pyfits.open('%s.spec.fits' % out_name)\r\n for key in hdr_save.keys():\r\n #f[0].header.update(key, hdr_save[key])\r\n f[0].header[key]= hdr_save[key]\r\n #f[0].header.update('DATE-OBS', date_obs)\r\n #f[0].header.update('OBSERVAT', observat)\r\n #f[0].header.update('EPOCH', epoch)\r\n #f[0].header['DATE-OBS']= date_obs\r\n #f[0].header['OBSERVAT']= observat\r\n #f[0].header['EPOCH']= epoch\r\n if one_side:\r\n # 
exposure time and velocity error are only well-defined for\r\n # data combined from a single side\r\n #f[0].header.update('EXPTIME', exptime)\r\n #f[0].header.update('FWHM', seeing/len(spec_list_fits))\r\n #f[0].header.update('VERR', '%.2f' % np.sqrt(verr), 'Uncertainty in km/s')\r\n f[0].header['EXPTIME']= exptime\r\n f[0].header['FWHM']= seeing/len(spec_list_fits)\r\n f[0].header['VERR']= '%.2f' %np.sqrt(verr)\r\n #mjd += exptime/(2.*60.*60.*24.)\r\n else:\r\n # when combining sides, use the EXPTIME from the combined blue side\r\n #f[0].header.update('EXPTIME', exptime_blue)\r\n f[0].header['EXPTIME']= exptime_blue\r\n #del f[0].header['VERR'] #DaveC\r\n #f[0].header.update('MJD', np.round(mjd, decimals=6))\r\n #f[0].header['MJD']= np.round(mjd, decimals=6)\r\n\r\n f.writeto('%s.spec.fits' % out_name, clobber=True)\r\n f.close()", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def update_binwise_positions(cnarr, segments=None, variants=None):\n cnarr = cnarr.copy()\n if segments:\n segments = segments.copy()\n seg_chroms = set(segments.chromosome.unique())\n if variants:\n variants = variants.copy()\n var_chroms = set(variants.chromosome.unique())\n\n # ENH: look into pandas groupby innards to get group indices\n for chrom in cnarr.chromosome.unique():\n # Enumerate bins, starting from 0\n # NB: plotted points will be at +0.5 offsets\n c_idx = cnarr.chromosome == chrom\n c_bins = cnarr[c_idx] # .copy()\n if segments and chrom in seg_chroms:\n # Match segment boundaries to enumerated bins\n c_seg_idx = (segments.chromosome == chrom).values\n seg_starts = np.searchsorted(\n c_bins.start.values, segments.start.values[c_seg_idx]\n )\n seg_ends = np.r_[seg_starts[1:], len(c_bins)]\n segments.data.loc[c_seg_idx, \"start\"] = seg_starts\n segments.data.loc[c_seg_idx, \"end\"] = seg_ends\n\n if variants and chrom in var_chroms:\n # Match variant positions to enumerated bins, and\n # add fractional increments to multiple variants within 1 bin\n c_varr_idx = (variants.chromosome == chrom).values\n c_varr_df = variants.data[c_varr_idx]\n # Get binwise start indices of the variants\n v_starts = np.searchsorted(c_bins.start.values, c_varr_df.start.values)\n # Overwrite runs of repeats with fractional increments,\n # adding the 
cumulative fraction to each repeat\n for idx, size in list(get_repeat_slices(v_starts)):\n v_starts[idx] += np.arange(size) / size\n variant_sizes = c_varr_df.end - c_varr_df.start\n variants.data.loc[c_varr_idx, \"start\"] = v_starts\n variants.data.loc[c_varr_idx, \"end\"] = v_starts + variant_sizes\n\n c_starts = np.arange(len(c_bins)) # c_idx.sum())\n c_ends = np.arange(1, len(c_bins) + 1)\n cnarr.data.loc[c_idx, \"start\"] = c_starts\n cnarr.data.loc[c_idx, \"end\"] = c_ends\n\n return cnarr, segments, variants", "def get_individual_manipulated_feature_centered(record,sensor,bins=100):\r\n \r\n #accesses the record's motion sensor\r\n ana=Analysis()\r\n ana.processRecord(record) \r\n motion=MotionProfileV2.extract(record)\r\n m = motion.vpsInDistance.toArray(sensor)\r\n \r\n #initializes variables\r\n my_range = np.linspace(-1,25,bins)\r\n d = np.zeros((len(my_range),1))\r\n prev=0\r\n index=0\r\n \r\n #iterates through the linspace vector\r\n for i in range(0,len(my_range)): \r\n cp=np.zeros((len(m),2))\r\n count=0\r\n \r\n #makes a copy of the values that fall within the given bin\r\n for j in range(0,len(m)):\r\n if m[j][0]+ ((25-record.motion.vehicleLength)/2-m[0][0]) >= my_range[i] and m[j][0]+ ((25-record.motion.vehicleLength)/2-m[0][0]) <= my_range[i+1]:\r\n cp[count][0]=m[j][0] + ((25-record.motion.vehicleLength)/2-m[0][0])\r\n cp[count][1]=m[j][1]\r\n count+=1\r\n\r\n #if there ARE changes within the bin (sensor switches from 0 or 1)\r\n if cp[0][0] != 0:\r\n \r\n #if there is ONLY ONE switch within the bin\r\n if cp[1][0] == 0:\r\n \r\n #if the sensor switches from 1 to 0\r\n if prev == 1:\r\n #finds the area\r\n d[index] = 1 - ((my_range[i+1] - cp[0][0])/(my_range[i+1]-my_range[i]))\r\n #increments the index and updates 'prev' accordingly\r\n index+=1\r\n prev=cp[0][1]\r\n \r\n #if the sensor switches from 0 to 1 \r\n else:\r\n #finds the are\r\n d[index] = ((my_range[i+1] - cp[0][0])/(my_range[i+1]-my_range[i]))\r\n #increments the index and updates 'prev' accordingly\r\n index+=1\r\n prev=cp[0][1]\r\n \r\n #if there are MORE than one switch within the bin \r\n else:\r\n value=0 \r\n #if the sensor switches from 1 to 0 then back any number of times\r\n if cp[0][1] == 1:\r\n #iterates through the copied matrix\r\n for j in range(0,len(cp),2):\r\n \r\n #finds the cumulative area\r\n if j+1<len(cp):\r\n if cp[j+1][0] == 0 and cp[j][0] != 0:\r\n value += my_range[i+1]-cp[j][0]\r\n prev=cp[j][1]\r\n else:\r\n value += cp[j+1][0] - cp[j][0]\r\n \r\n #adds the total area within the bin to the vector \r\n d[index] = value/(my_range[i+1]-my_range[i])\r\n index+=1\r\n \r\n #if the sensor switches from 0 to 1 then back any number of times \r\n else: \r\n #iterates through the copied matrix\r\n for j in range(0,len(cp),2):\r\n \r\n #finds the cumulative area\r\n if j+1<len(cp):\r\n if j == 0:\r\n value += cp[j][0] - my_range[i]\r\n prev=cp[j][1]\r\n elif cp[j][0] == 0 and cp[j-1][0] != 0:\r\n value += my_range[i+1]-cp[j-1][0]\r\n prev=cp[j-1][1]\r\n else:\r\n value += cp[j][0] - cp[j-1][0]\r\n \r\n #adds the total area within the bin to the vector \r\n d[index] = value/(my_range[i+1]-my_range[i])\r\n index+=1\r\n \r\n #if there ARE NOT changes within the bin (sensor stays either 0 or 1)\r\n elif cp[0][0] == 0:\r\n \r\n #changes the 'prev' variable accordingly and increments the index \r\n if prev == 0:\r\n d[index] = 0\r\n index+=1\r\n elif prev == 1:\r\n d[index] = 1\r\n index+=1\r\n \r\n #returns the individual sensor feature vector\r\n return(d)", "def wsi_patch_splitting(wsi_path, 
patch_dir, patch_size=299, save_size=299,\n wsi_ext=\"tiff\", save_ext=\"png\",\n pyramid_flag=True, overlap_flag=True, level=0):\n\n if pyramid_flag == False:\n try:\n img = io.imread(wsi_path)\n if img.dtype == \"uint16\":\n img = (img / 256.0).astype(np.uint8)\n elif img.dtype == \"uint8\":\n pass\n else:\n raise Exception(\"Unknow imge data type\")\n except:\n print(\"Cannot handle {}\".format(wsi_path))\n else:\n wsi_header = openslide.OpenSlide(wsi_path)\n img = wsi_header.read_region(location=(0, 0), level=level,\n size=wsi_header.level_dimensions[level])\n img = np.asarray(img)[:,:,:-1]\n\n coors_arr = wsi_coor_splitting(wsi_h=img.shape[0], wsi_w=img.shape[1],\n length=patch_size, overlap_flag=overlap_flag)\n filename = os.path.splitext(os.path.basename(wsi_path))[0]\n for coor in coors_arr:\n h_start, w_start = coor[0], coor[1]\n cur_patch = img[h_start:h_start+patch_size, w_start:w_start+patch_size, :]\n if patch_size != save_size:\n save_patch = transform.resize(cur_patch, (save_size, save_size))\n save_patch = (save_patch * 255.0).astype(np.uint8)\n else:\n save_patch = cur_patch\n\n patch_name = \"{}_{}.{}\".format(filename, str(uuid.uuid4())[:8], save_ext)\n patch_filepath = os.path.join(patch_dir, patch_name)\n io.imsave(patch_filepath, save_patch)", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def rebin_spectra(source):\n # Unpack arguments using the 'source' label; all are assumed to be in km/s\n v_lo, v_hi, new_dv = velocity_ranges[source]\n # Load data cube\n data_filename = os.path.abspath(data_filepaths[source])\n cube = SpectralCube.read(data_filename)\n # Check units\n try:\n # See if the cube can be converted to Kelvins easily\n cube = cube.to(u.K)\n except:\n # Check if it looks like a temperature\n old_bunit = cube.header['BUNIT']\n if \"K (Ta*)\" in old_bunit:\n cube._unit = u.K\n print(f\"Data unit {cube.unit} assigned, based on the header BUNIT {old_bunit}.\")\n else:\n # Don't bother trying to fix it, leave it alone\n print(f\"Data units <{cube._unit}> aren't equivalent to Kelvins, leaving them alone\")\n # Get current channel width (np.diff should return an array of all the same value, so np.mean is overkill but it doesn't matter)\n old_dv = np.mean(np.diff(cube.spectral_axis))\n # Construct a box filter to average the channels\n # Filter width is number of channels; if rebinning from 0.1 km/s to 1 km/s, filter is 10 channels\n # Need to add km/s units to new_dv (old_dv already has units)\n filter_width = np.abs(((new_dv*u.km/u.s) / old_dv).decompose().to_value())\n # Round to nearest integer\n filter_width = np.around(filter_width, 0)\n # Make filter using astropy.convolution.Box1DKernel\n filter = Box1DKernel(filter_width)\n # Define the new spectral axis using the inclusive limits and the new channel width\n new_spectral_axis = np.arange(v_lo, v_hi+new_dv, new_dv) * u.km/u.s\n\n # Do the computationally intensive work\n print(\"Starting spectral smooth\")\n cube = cube.spectral_smooth(filter)\n print(\"Finished spectral smooth. 
Starting spectral rebin.\")\n cube = cube.spectral_interpolate(new_spectral_axis)\n print(\"Finished spectral rebin.\")\n # Create savename with \"rebin\" and the channel width inserted before the filetype suffix\n save_filename = data_filename.replace(\".fits\", f\".rebin{new_dv:d}kms.fits\")\n cube.write(save_filename, format='fits')", "def get_info_soma(stack, adjust_factor=1):\n\n threshold = np.mean(stack) + adjust_factor * np.std(stack)\n bimg = (stack > threshold).astype('int') \n \n dt = skfmm.distance(bimg, dx=1.1) # Boundary DT\n\n radius = dt.max()\n centroid = np.asarray(np.unravel_index(dt.argmax(), dt.shape))\n\n ballvolume = np.zeros(bimg.shape)\n ballvolume[centroid[0], centroid[1], centroid[2]] = 1\n stt = scipy.ndimage.morphology.generate_binary_structure(3, 1)\n for i in range(np.ceil(radius * 2.5).astype(int)):\n ballvolume = scipy.ndimage.binary_dilation(ballvolume, structure=stt)\n\n mask_3d = np.logical_and(ballvolume, bimg)\n\n mask_xy = mask_3d.sum(2)\n mask_xy[mask_xy !=0] = 1\n mask_xy = np.ma.masked_array(mask_xy, ~mask_3d.any(2))\n \n mask_xz = mask_3d.sum(1)\n mask_xz[mask_xz !=0] = 1\n mask_xz = np.ma.masked_array(mask_xz, ~mask_3d.any(1))\n \n mask_yz = mask_3d.sum(0)\n mask_yz[mask_yz !=0] = 1\n mask_yz = np.ma.masked_array(mask_yz, ~mask_3d.any(0))\n \n soma = {'centroid': centroid, \n 'radius': radius, \n 'mask_xy': mask_xy,\n 'mask_xz': mask_xz,\n 'mask_yz': mask_yz,\n 'mask_3d': mask_3d}\n\n return soma", "def chisqdata_bs_fft(Obsdata, Prior, fft_pad_frac=1):\n biarr = Obsdata.bispectra(mode=\"all\", count=\"min\")\n uv1 = np.hstack((biarr['u1'].reshape(-1,1), biarr['v1'].reshape(-1,1)))\n uv2 = np.hstack((biarr['u2'].reshape(-1,1), biarr['v2'].reshape(-1,1)))\n uv3 = np.hstack((biarr['u3'].reshape(-1,1), biarr['v3'].reshape(-1,1)))\n bi = biarr['bispec']\n sigma = biarr['sigmab']\n\n npad = fft_pad_frac * np.max((Prior.xdim, Prior.ydim))\n\n im_info = (Prior.xdim, Prior.ydim, npad, Prior.psize, Prior.pulse)\n\n A = (im_info, [uv1, uv2, uv3]) \n\n return (bi, sigma, A)", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def divide_octaves(self):\n octaves = []\n n_bins = 5\n cur_bin = 0\n cur_octave = 0\n while True:\n octaves.append(0)\n for i in range(n_bins):\n if cur_bin >= len(self.rel_mags):\n self.octaves = octaves\n return\n octaves[cur_octave] += self.rel_mags[cur_bin]\n cur_bin += 1\n n_bins *= 2\n cur_octave += 1", "def rebin(flux, ivar, w_grid):\n new_grid, w = regrid(w_grid)\n\n fl_iv = flux * ivar\n\n # len(flux) will give number of spectra,\n # len(new_grid) will give number of output bins\n flux_out = np.zeros((len(flux), nbins))\n ivar_out = np.zeros_like(flux_out)\n\n # These lines are necessary for SDSS spectra. For DESI\n # spectra nothing will change here, since the entire DESI grid is contained\n # within the QuasarNET one, but for BOSS/eBOSS the grid can extend out\n # past the QuasarNET grid and give negative bin values. I have tests that\n # confirm this still works on DESI data, don't worry.\n fl_iv = fl_iv[:, w]\n new_grid = new_grid[w]\n ivar_temp = ivar[:, w]\n\n for i in range(len(flux)):\n c = np.bincount(new_grid, weights=fl_iv[i, :])\n flux_out[i, :len(c)] += c\n c = np.bincount(new_grid, weights=ivar_temp[i, :])\n ivar_out[i, :len(c)] += c\n\n return flux_out, ivar_out", "def specmod(self, tbin, bgwindow=4):\n\n diff = self.tracksub(tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def rebin(Data, width, mean=False, by_nbins=False) :\n \n # Input tells us whether to use mean or median.\n if mean :\n method = ma.mean\n else :\n method = ma.median\n\n if by_nbins :\n width = int(width)\n if width <= 1 :\n raise ValueError(\"Invalid number of bins to average\")\n # Get new axis parameters.\n new_cdelt = width*Data.field['CDELT1']\n nbins = int(sp.ceil(float(Data.dims[-1])/width))\n new_centre = nbins//2 + 1\n Data.calc_freq()\n Data.field['CRVAL1'] = Data.freq[int((new_centre+0.5)*width)]\n # Case where evenly divisable (much more efficient).\n if Data.dims[-1] % width == 0:\n new_data = Data.data\n new_data.shape = Data.data.shape[:-1] + (nbins, width)\n new_data = method(new_data, -1)\n else :\n # Allowcate memory for Data array.\n new_data = ma.empty(Data.dims[:3] + (nbins,))\n # Loop over new bins and rebin.\n for ii in xrange(nbins) :\n new_data[:,:,:,ii] = method(\n Data.data[:,:,:,ii*width:(ii+1)*width],3)\n Data.set_data(new_data)\n else :\n # Convert to Hertz.\n width = width*1.0e6\n new_cdelt = width * sp.sign(Data.field['CDELT1'])\n # Figure out some basics.\n Data.calc_freq()\n freq = sp.array(Data.freq)\n # Extra bit on the bandwidth is because frequency labels are channel \n # centre.\n 
bandwidth = abs(freq[-1] - freq[0]) + abs(Data.field['CDELT1'])\n nbins = int(bandwidth//width)\n new_centre = int((Data.field['CRPIX1']-1)\n * abs(Data.field['CDELT1'])/width)\n new_dims = Data.dims[0:-1] + (nbins, )\n # Get old data and allowcate memory for new data.\n old_data = ma.array(Data.data, copy=True)\n Data.set_data(ma.zeros(new_dims))\n new_freq = Data.field['CRVAL1'] + new_cdelt*(sp.arange(nbins)\n - new_centre)\n for ii in range(1,nbins-1) :\n inds = (sp.logical_and(\n abs(freq - new_freq[ii]) <= abs(freq - new_freq[ii+1]),\n abs(freq - new_freq[ii]) < abs(freq - new_freq[ii-1])))\n subdata = (old_data[:,:,:,inds])\n Data.data[:,:,:,ii] = method(subdata, 3)\n # Above loop breaks for end points... deal with them.\n inds, = sp.where(abs(freq - new_freq[0]) <= abs(freq - new_freq[1]))\n subdata = old_data[:,:,:,inds]\n Data.data[:,:,:,0] = method(subdata, 3)\n inds, = sp.where(abs(freq-new_freq[nbins-1])\n < abs(freq-new_freq[nbins-2]))\n subdata = old_data[:,:,:,inds]\n Data.data[:,:,:,nbins-1] = method(subdata, 3)\n Data.freq = new_freq\n Data.field['CRPIX1'] = sp.array(new_centre + 1, dtype=int)\n Data.field['CDELT1'] = sp.array(new_cdelt, dtype=float)", "def scale(structure):\n from numpy.linalg import det\n if \"O\" in [atom.type for atom in structure]: spvol = 8.5**3/4e0\n elif \"Se\" in [atom.type for atom in structure]: spvol = 9.5**3/4e0\n elif \"Te\" in [atom.type for atom in structure]: spvol = 10.5**3/4e0\n else: raise ValueError(\"unknown atom.type: %s\" % (atom.type,))\n\n nfu = float(len(structure)/7)*0.5 # 0.5 because 2 f.u. in spinel unit-cell.\n vol = det(structure.cell)\n return (nfu * spvol / vol)**(1e0/3e0)", "def calc_spindle_buffer_means(self):\n \n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_buffer_aggregates = {}\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles_wbuffer[chan]:\n # set the base df\n agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])\n rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))\n # join on the index for each spindle\n for x in range(1, len(self.spindles_wbuffer[chan])):\n mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])\n spindle_buffer_aggregates[chan] = mean_df\n \n print('Calculating statistics...')\n # create a new multiindex dataframe for calculations\n calcs = ['mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n spindle_buffer_means = pd.DataFrame(columns=columns)\n \n # fill the dataframe\n for chan in spindle_buffer_aggregates.keys():\n spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)\n spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)\n spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)\n \n self.spindle_buffer_aggregates = spindle_buffer_aggregates\n self.spindle_buffer_means = spindle_buffer_means\n print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statisics stored in obj.spindle_buffer_means dataframe.')" ]
[ "0.5740556", "0.5733101", "0.57284594", "0.5622216", "0.55848444", "0.5580762", "0.5569622", "0.55688787", "0.5552011", "0.5551858", "0.5550779", "0.5461542", "0.5432613", "0.5347965", "0.528338", "0.52451", "0.5242152", "0.5237068", "0.5206165", "0.52003783", "0.5179254", "0.51637715", "0.51377076", "0.5135213", "0.51323944", "0.51257646", "0.51044863", "0.51043344", "0.51016754", "0.50947064" ]
0.63741106
0
Returns a feature vector for features given a certain task, model and similarity strategy
def _get_features(task, features, model, similarity_strategy=None): X = [] langs = analysis_utils.get_langs_for_task(task) for feature in features: if feature != "size": # this is a nested array X_feature = analysis_utils.load_lang2vec_vectors(task=task, features=feature) if X_feature is None: #continue return None if similarity_strategy != "-": # We start with similarities to english X_feature = [[sim] for sim in analysis_utils.compute_similarities_of_lang_vecs(X_feature, strategy=similarity_strategy)] elif feature == "size" and model == "xlmr": # this is an array, we put it in a list X_feature = [[size] for size in analysis_utils.xlmr_input_corpus_sizes(langs)] elif feature == "size" and model == "mbert": X_feature = [[size] for size in analysis_utils.mbert_input_corpus_sizes(langs)] else: raise ValueError() # we now have a feature vector for a single feature or feature set if len(X) == 0: X = np.array(X_feature) else: X = np.concatenate((X,np.array(X_feature)), axis=1) if len(X) == 0: return None return np.array(X, dtype=float)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features(self, img, tasks):\n ensemble_probs = []\n\n model_iterable = self.tasks2models[tasks]\n ensemble_results = []\n for model in model_iterable():\n individual_feats = model.module.features2(img)\n ensemble_results.append(individual_feats)\n\n return torch.stack(ensemble_results)", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def model_fn(features,labels,mode,params):\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec", "def get_feature_vector(user_id: str, session: str) -> DataFrame:\n\n #Find the time windows during which the reader is doing the desired task\n activity_data = read_file(user_id, session, 'Activity.csv')\n task_number = mode(activity_data['TaskID'])\n task_name = task_names[(task_number - 1) % len(task_names)]\n tap_windows = get_tap_events(user_id, session)\n data = get_user_session_data(user_id, session)\n add_magnitude_columns(data)\n add_columns_for_taps(data, tap_windows)\n 
mark_tap_start_and_end(data, delta_in_ms = 200)\n\n column_names = get_feature_names()\n\n #A feature vector for each tap, to be filled in subsequently:\n featureVectors = pd.DataFrame(columns = column_names)\n\n for tap_file in tap_file_names:\n tap_feature = tap_file_to_feature_name[tap_file]\n print(tap_feature)\n window_start_indices = data[data[tap_feature] == 4].index\n window_end_indices = data[data[tap_feature] == 5].index\n if len(window_start_indices) == 0:\n continue\n \n for i in range(len(window_start_indices)):\n start, end = window_start_indices[i], window_end_indices[i]\n window_of_interest = data[start : end + 1]\n features = feature_list(user_id, session, tap_feature, task_name, window_of_interest)\n if features != None:\n featureVectors.loc[featureVectors.shape[0]] = features\n \n return featureVectors", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\t\ttf.logging.info(\"*** Features ***\")\n\t\tfor name in sorted(features.keys()):\n\t\t\ttf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n\t\tinput_ids=[]\n\t\tinput_mask=[]\n\t\tsegment_ids=[]\n\t\tmask_lm_info = []\n\t\tif is_training:\n\t\t\tinput_ids = [features[\"rewrite_query_ids\"], features[\"doc0_ids\"], features[\"doc1_ids\"], features[\"raw_query_ids\"]]\n\t\t\tinput_mask = [features[\"rewrite_query_mask\"], features[\"doc0_mask\"], features[\"doc1_mask\"], features[\"raw_query_mask\"]]\n\t\t\tsegment_ids = [features[\"rewrite_query_segment_ids\"], features[\"doc0_segment_ids\"], features[\"doc1_segment_ids\"], features[\"raw_query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_rewrite_query_mask\"], features[\"effective_doc0_mask\"], features[\"effective_doc1_mask\"], features[\"effective_raw_query_mask\"]]\n\t\telif is_eval:\n\t\t\tinput_ids = [features[\"query_ids\"], features[\"docx_ids\"], 0, features[\"query_ids\"]]\n\t\t\tinput_mask = [features[\"query_mask\"], features[\"docx_mask\"], 0, features[\"query_mask\"]]\n\t\t\tsegment_ids = [features[\"query_segment_ids\"], features[\"docx_segment_ids\"], 0, features[\"query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_query_mask\"], features[\"effective_docx_mask\"], 0, features[\"effective_query_mask\"]]\n\t\telif is_output:\n\t\t\tinput_ids=[features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"]]\n\t\t\tinput_mask = [features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"]]\n\t\t\tsegment_ids = [features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"]]\n\n\n\n\t\tlabel = features[\"label\"]\n\n\n\t\ttf.logging.info(\"Create model\")\n\t\tif (is_training) or (is_eval):\n\t\t\t(total_loss, score, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\t\telif is_output:\n\t\t\t(pooling_emb, emb, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, 
dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\n\t\ttf.logging.info(\"Finish create model\")\n\t\ttvars = tf.trainable_variables()\n\n\t\tscaffold_fn = None\n\t\tif init_checkpoint:\n\t\t\t(assignment_map, initialized_variable_names)= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\t\t\t(assignment_map1, initialized_variable_names1) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, 'Student/', 'query_reformulator/')\n\t\t\tassignment_maps = [assignment_map, assignment_map1]\n\t\t\tinitialized_variable_names.update(initialized_variable_names1)\n\n\t\t\ttf.logging.info(\"**** Assignment Map ****\")\n\t\t\tif use_tpu:\n\t\t\t\tdef tpu_scaffold():\n\t\t\t\t\tfor assignment_map in assignment_maps:\n\t\t\t\t\t tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\t\t\t\treturn tf.train.Scaffold()\n\n\t\t\t\tscaffold_fn = tpu_scaffold\n\t\t\telse:\n\t\t\t\ttf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\ttf.logging.info(\"**** Trainable Variables ****\")\n\n\t\tfor var in tvars:\n\t\t\tinit_string = \"\"\n\t\t\tif var.name in initialized_variable_names:\n\t\t\t\tinit_string = \", *INIT_FROM_CKPT*\"\n\t\t\ttf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n\t\t\t\t\t\t\tinit_string)\n\n\t\toutput_spec = None\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\ttrain_op = optimization.create_optimizer(\n\t\t\t\t\t\ttotal_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, train_model)\n\n\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\tloss=total_loss,\n\t\t\t\t\t\ttrain_op=train_op,\n\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tif is_output:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"docid\": features['docid'],\n\t\t\t\t\t\t\t\t\t\"pooling_emb\":pooling_emb,\n\t\t\t\t\t\t\t\t\t\"emb\":emb,\n\t\t\t\t\t\t\t\t\t\"doc_length\":doc_length,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\t\t\telif is_eval:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"log_probs\": score,\n\t\t\t\t\t\t\t\t\t\"label_ids\": label,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\t\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n\t\treturn output_spec", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. 
\n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def get_selected_features(dataset_features, model):\r\n model = SelectFromModel(model, prefit=True)\r\n feature_bool_mask = model.get_support()\r\n selected_features = dataset_features.columns[feature_bool_mask]\r\n transformed_dataset = pd.DataFrame(model.transform(dataset_features), columns=dataset_features.columns[feature_bool_mask], index=dataset_features.index)\r\n return selected_features, transformed_dataset", "def compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, \n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn", "def get_model_feature(\n model,\n batch_x\n):\n features = model.get_feature(batch_x, training=False)\n return features", "def transform(self, graph, instances):\n check_is_fitted(self, ['model_'])\n\n feature_vectors = []\n for instance in instances:\n feature_vectors.append(self.model_.wv.get_vector(str(instance)))\n return feature_vectors", "def 
transform(self, graph, instances):\n check_is_fitted(self, [\"model_\"])\n\n feature_vectors = []\n for instance in instances:\n feature_vectors.append(self.model_.wv.get_vector(str(instance)))\n return feature_vectors", "def get_sift_features_vectorized(\n image_bw: np.ndarray,\n X: np.ndarray,\n Y: np.ndarray\n) -> np.ndarray:\n\n ###########################################################################\n # TODO: YOUR CODE HERE #\n ###########################################################################\n\n raise NotImplementedError('La función `get_SIFT_features_vectorized` debe implementarse en' +\n '`part4_sift_descriptor.py`')\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return fvs", "def getSentenceFeature(tokens, wordVectors, sentence):\n # Implement computation for the sentence features given a sentence. \n \n # Inputs: \n # - tokens: a dictionary that maps words to their indices in \n # the word vector list \n # - wordVectors: word vectors (each row) for all tokens \n # - sentence: a list of words in the sentence of interest \n\n # Output: \n # - sentVector: feature vector for the sentence \n\n sentence_vectors = [wordVectors[tokens[word]] for word in sentence]\n\n return sum(sentence_vectors) * 1.0 / len(sentence_vectors)", "def svm():", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif 
max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def generate_features(paraphrases, pos2index, prep2index, model, wv, word2index, UNK):\n features = []\n for (w1, w2), curr_paraphrases in tqdm.tqdm(paraphrases.items()):\n features.append([extract_paraphrase_features(w1, w2, paraphrase, pos2index, prep2index,\n model, wv, word2index, UNK)\n for paraphrase in curr_paraphrases.keys()])\n\n scores = [list(curr_paraphrases.values()) for curr_paraphrases in paraphrases.values()]\n return features, scores", "def _extract_features(self, ti, tf):\n makedir(self.featdir)\n\n # number of windows in feature request\n Nw = int(np.floor(((tf-ti)/self.dt)/(self.iw-self.io)))\n\n # features to compute\n cfp = ComprehensiveFCParameters()\n if self.compute_only_features:\n cfp = dict([(k, cfp[k]) for k in cfp.keys() if k in self.compute_only_features])\n else:\n # drop features if relevant\n _ = [cfp.pop(df) for df in self.drop_features if df in list(cfp.keys())]\n\n # check if feature matrix already exists and what it contains\n if os.path.isfile(self.featfile):\n t = pd.to_datetime(pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], usecols=['time'], infer_datetime_format=True).index.values)\n ti0,tf0 = t[0],t[-1]\n Nw0 = len(t)\n hds = pd.read_csv(self.featfile, index_col=0, nrows=1)\n hds = list(set([hd.split('__')[1] for hd in hds]))\n\n # option 1, expand rows\n pad_left = int((ti0-ti)/self.dto)# if ti < ti0 else 0\n pad_right = int(((ti+(Nw-1)*self.dto)-tf0)/self.dto)# if tf > tf0 else 0\n i0 = abs(pad_left) if pad_left<0 else 0\n i1 = Nw0 + max([pad_left,0]) + pad_right\n \n # option 2, expand columns\n existing_cols = set(hds) # these features already calculated, in file\n new_cols = set(cfp.keys()) - existing_cols # these features to be added\n more_cols = bool(new_cols)\n all_cols = existing_cols|new_cols\n cfp = ComprehensiveFCParameters()\n cfp = dict([(k, cfp[k]) for k in cfp.keys() if k in all_cols])\n\n # option 3, expand both\n if any([more_cols, pad_left > 0, pad_right > 0]) and self.update_feature_matrix:\n fm = pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], infer_datetime_format=True)\n if more_cols:\n # expand columns now\n df0, wd = self._construct_windows(Nw0, ti0)\n cfp0 = ComprehensiveFCParameters()\n cfp0 = dict([(k, cfp0[k]) for k in cfp0.keys() if k in new_cols])\n fm2 = extract_features(df0, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp0, impute_function=impute)\n fm2.index = pd.Series(wd)\n \n fm = pd.concat([fm,fm2], axis=1, sort=False)\n\n # check if updates required because training period expanded\n # expanded earlier\n if pad_left > 0:\n df, wd = self._construct_windows(Nw, ti, i1=pad_left)\n fm2 = extract_features(df, column_id='id', 
n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm2.index = pd.Series(wd)\n fm = pd.concat([fm2,fm], sort=False)\n # expanded later\n if pad_right > 0:\n df, wd = self._construct_windows(Nw, ti, i0=Nw - pad_right)\n fm2 = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm2.index = pd.Series(wd)\n fm = pd.concat([fm,fm2], sort=False)\n \n # write updated file output\n fm.to_csv(self.featfile, index=True, index_label='time')\n # trim output\n fm = fm.iloc[i0:i1] \n else:\n # read relevant part of matrix\n fm = pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], infer_datetime_format=True, header=0, skiprows=range(1,i0+1), nrows=i1-i0)\n else:\n # create feature matrix from scratch \n df, wd = self._construct_windows(Nw, ti)\n fm = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm.index = pd.Series(wd)\n fm.to_csv(self.featfile, index=True, index_label='time')\n \n ys = pd.DataFrame(self._get_label(fm.index.values), columns=['label'], index=fm.index)\n return fm, ys", "def __tf_idf_feature_extraction(self):\n print('=' * 80)\n print(\"TF-IDF Feature Extraction\")\n t0 = time()\n vectorizer = TfidfVectorizer()\n vec_train = vectorizer.fit_transform(self.train.text)\n vec_test = vectorizer.transform(self.test.text)\n duration = time() - t0\n print(\"DONE!!!!! total time: %fs\" % duration)\n print('=' * 80)\n return vec_train, vec_test", "def feature_finder(model):\n \n features = model.steps[0][1].get_feature_names()\n feat_values = model[1].coef_\n\n c = {'features' : features}\n feats = pd.DataFrame(data = c)\n feats['values'] = feat_values[0]\n\n sorted_feats = feats.sort_values(by='values')\n return sorted_feats", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, 
model_based_test_features_selected, model_based_mask", "def get_feature_vec(board, players):\n token_dict = {i: PROBABILITIES[i]*36 for i in PROBABILITIES}\n res_types = [ResourceType.FOREST, ResourceType.ORE, ResourceType.BRICK, ResourceType.SHEEP, ResourceType.WHEAT]\n hexes = board.hexes()\n feature_data = np.zeros(24)\n feature_data[:4] = [player.vp() for player in players]\n for i, player in enumerate(players):\n for node in player.settlement_nodes():\n tiles = board.get_adj_tile_ids_to_node(node)\n for tile in tiles:\n hexnode = hexes[tile]\n if hexnode.resource() in res_types:\n feature_data[i*5 + res_types.index(hexnode.resource())] = token_dict[hexnode.token()]\n return feature_data", "def get_feature_vector(self, board):\n return self.hot_one(board)\n # return self.get_tesauro_feature_vector(self, board)", "def test_sim(vec_x, vec_y, feature_list, func):\n feature_map_x = create_feature_map(vec_x, feature_list)\n feature_map_y = create_feature_map(vec_y, feature_list)\n\n if func == 0:\n return cosine_similarity(feature_map_x, feature_map_y)\n\n return minmax(feature_map_x, feature_map_y)", "def fashion_similarity(input_txt, features, keys):\n feature_index = keys.index(input_txt)\n input_vector = features[feature_index]\n\n scores = [similarity_function(input_vector, partner) for partner in features]\n return scores", "def extract_feature_vectors(model, data_loader, parameters, features_file_path):\n feature_vectors, label_vectors = [], []\n\n # Set model to evaluation mode\n model.eval()\n\n # Show progress bar while iterating over mini-batches\n with tqdm(total=len(data_loader)) as progress_bar:\n for i, (X_batch, Y_batch) in enumerate(data_loader):\n\n # Dimensions of the input Tensor\n batch_size, channels, height, width = X_batch.size()\n\n # If GPU available, enable CUDA on data\n if parameters.cuda:\n X_batch = X_batch.cuda()\n Y_batch = Y_batch.cuda()\n\n # Wrap the input tensor in a Torch Variable\n X_batch_variable = Variable(X_batch, volatile=True)\n\n # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features\n Y_predicted, features = model(X_batch_variable)\n\n # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to\n # CPU, and convert it to a NumPy array\n features_numpy = features.data.cpu().numpy()\n\n # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array\n Y_numpy = Y_batch.cpu().numpy()\n\n # For each example in the batch, record its features and labels\n for j in range(batch_size):\n feature_vectors.append(features_numpy[j,:])\n label_vectors.append(Y_numpy[j,:])\n\n progress_bar.update()\n\n utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. 
Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec" ]
[ "0.6269173", "0.60586506", "0.6025307", "0.60061306", "0.5941046", "0.58661604", "0.58194387", "0.5813026", "0.58017576", "0.5770882", "0.5751222", "0.572277", "0.56830263", "0.56766546", "0.56654996", "0.5664544", "0.5647656", "0.5646675", "0.56205535", "0.55783176", "0.55477726", "0.5537335", "0.55251634", "0.5520518", "0.5499311", "0.5493598", "0.54884404", "0.5456015", "0.54522705", "0.54483765" ]
0.76417094
0
Check that using an in filter with an empty list provided as input returns no objects.
def test_in_filter_with_empty_list(query):
    Pet.objects.create(name="Brutus", age=12)
    Pet.objects.create(name="Mimi", age=8)
    Pet.objects.create(name="Picotin", age=5)

    schema = Schema(query=query)

    query = """
    query {
        pets (name_In: []) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """
    result = schema.execute(query)
    assert not result.errors
    assert len(result.data["pets"]["edges"]) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def empty_filter(item, *args, **kwargs):\n return True", "def is_empty(self):\n return not list(self._filtered_items)", "def is_empty(self):\n\n return self.some().map(lambda b: not b)", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100", "def test_no_op(self):\n request = RequestFactory().get('/?tags=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertNotIn('tags__slug__in', filter.qs.filters)", "def EmptyTarget(self):\n return not self.objects", "def is_empty(self):\n return self.list.length == 0", "def is_empty(self):\n return self.list.length == 0", "def is_list_empty(list):\n if not list:\n return True\n else:\n return False", "def is_empty_record(*args):\n return not any([arg for arg in args])", "def test_apply_filter_none(app):\n with app.app_context():\n users = User.query\n users = apply_filter(users, User, {})\n assert users.whereclause is None", "def test_all_of_empty_list(env):\n evt = env.all_of([])\n assert evt.triggered", "def is_empty(self):\n # TODO: Check if empty\n return self.list == []", "def empty_list(input_list):\n for item in input_list:\n if not isinstance(item, list) or not empty_list(item):\n return False\n return True", "def is_empty(self):\n if len(self.list) == 0:\n return True\n return False", "def is_not_empty(obj):\n # type: (any) -> bool\n return obj is not None and len(obj) > 0", "def _filter_empty(lst):\n return [cell for cell in lst if cell is not Sudoku.EMPTY_CELL]", "def is_empty(self):\n return self.items == []", "def empty(self):\n if len(self.list_x) == 0:\n return True\n else:\n return False", "def is_empty(self):\n return self.list.is_empty()", "def test_any_of_empty_list(env):\n evt = env.any_of([])\n assert evt.triggered", "def test_for_empty_list(self):\n emptylist = []\n self.assertEqual(self.place.amenity_ids, emptylist)", "def test_simplelistfilter_with_none_returning_lookups(self):\n modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0]\n self.assertEqual(len(filterspec), 0)", "def empty(self):\n return not self.any()", "def isEmpty(self):\n return len(self.worklist) == 0", "def _check_entity_lists_nonempty(self) -> None:\n\n for entity_list in self.all_entity_lists:\n if len(entity_list) < 1:\n raise ValueError(f\"{entity_list.name} is empty. Nothing to launch.\")", "def its_empty(self) -> bool:\n return self.items == []", "def is_empty(self):\n return self._items == []", "def isnt(oin, env, pred_name: YPredName, arg: Any=None):\n return (not env.check_predicate(obj, pred_name, arg) for obj in oin)" ]
[ "0.6915782", "0.68769324", "0.6442358", "0.64264005", "0.6415718", "0.63710684", "0.62980837", "0.6274831", "0.6274831", "0.62719107", "0.6263292", "0.62174505", "0.61688304", "0.61556524", "0.6136472", "0.6135395", "0.61337715", "0.6123703", "0.61021626", "0.60987234", "0.6057721", "0.605258", "0.6025997", "0.60198694", "0.6016721", "0.60145247", "0.60135716", "0.6012003", "0.6010229", "0.5998165" ]
0.70211446
0
Test in filter on a choice field not using an enum (Film.genre).
def test_choice_in_filter_without_enum(query):
    john_doe = Reporter.objects.create(
        first_name="John", last_name="Doe", email="[email protected]"
    )
    jean_bon = Reporter.objects.create(
        first_name="Jean", last_name="Bon", email="[email protected]"
    )

    documentary_film = Film.objects.create(genre="do")
    documentary_film.reporters.add(john_doe)
    action_film = Film.objects.create(genre="ac")
    action_film.reporters.add(john_doe)
    other_film = Film.objects.create(genre="ot")
    other_film.reporters.add(john_doe)
    other_film.reporters.add(jean_bon)

    schema = Schema(query=query)

    query = """
    query {
        films (genre_In: ["do", "ac"]) {
            edges {
                node {
                    genre
                    reporters {
                        edges {
                            node {
                                lastName
                            }
                        }
                    }
                }
            }
        }
    }
    """
    result = schema.execute(query)
    assert not result.errors
    assert result.data["films"]["edges"] == [
        {
            "node": {
                "genre": "do",
                "reporters": {"edges": [{"node": {"lastName": "Doe"}}]},
            }
        },
        {
            "node": {
                "genre": "ac",
                "reporters": {"edges": [{"node": {"lastName": "Doe"}}]},
            }
        },
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True", "def filter_generation_type(self, what):\n return self.form.set_value('generation type', what)", "def test_set_gender_if_other():\n\n # GIVEN Gender.OTHER as input\n gender: Gender = Gender.OTHER\n\n # WHEN running \"set_gender_if_other\"\n validated_gender: str = set_gender_if_other(gender)\n\n # THEN the returned gender should be PlinkGender.UNKNOWN\n assert validated_gender == PlinkGender.UNKNOWN", "def test_gender_sex(self):\n gender_field = self.record.find('field[@name=\\'gender\\']')\n sex_field = self.record.find('field[@name=\\'sex\\']')\n self.assertIn(gender_field.text, self.gender_sex,\n 'Gender not in selection')\n self.assertIn(sex_field.text, self.gender_sex,\n 'Sex not in selection')\n self.assertEqual(gender_field.text, sex_field.text,\n 'Gender and Sex are not the same')", "def testInvalidVariant(self):\n def action(field_class):\n if field_class is not message_types.DateTimeField:\n self.assertRaises(messages.InvalidVariantError,\n field_class,\n 1,\n variant=messages.Variant.ENUM)\n self.ActionOnAllFieldClasses(action)", "def itemFilterType(*args, text: Union[AnyStr, bool]=\"\", type: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_choicesfieldlistfilter_has_none_choice(self):\n\n class BookmarkChoicesAdmin(ModelAdmin):\n list_display = [\"none_or_null\"]\n list_filter = [\"none_or_null\"]\n\n modeladmin = BookmarkChoicesAdmin(Bookmark, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n choices = list(filterspec.choices(changelist))\n self.assertEqual(choices[-1][\"display\"], \"None\")\n self.assertEqual(choices[-1][\"query_string\"], \"?none_or_null__isnull=True\")", "def filter_feature(feature, typ, value):\n return value is None or feature.__getattribute__(typ) == value", "def get_genre(self) -> Optional[str]:\n return self.genre", "def clean(self, value):\n if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE:\n return None\n return super().clean(value)", "def _is_valid_entity_type(self, entity_type):\n return entity_type in [\"artist\", \"song\", \"genre\"]", "def test_choices_from_facets(self):\n fake_facets = {\n \"doctype\": {\"foo\": 1, \"bar\": 2, \"baz\": 3},\n \"has_transcription\": {\"true\": 3, \"false\": 3},\n }\n form = DocumentSearchForm()\n # call the method to configure choices based on facets\n form.set_choices_from_facets(fake_facets)\n # test doctype facets (FacetChoiceField)\n for choice in form.fields[\"doctype\"].widget.choices:\n # choice is index id, label\n choice_label = choice[1]\n assert isinstance(choice_label, str)\n assert \"<span>\" in choice_label\n # test has_transcription facet (BooleanFacetField)\n bool_label = form.fields[\"has_transcription\"].label\n assert isinstance(bool_label, str)\n assert \"3</span>\" in bool_label", "def test_radioselect_field():", "def create_genre_group_val(row):\n if 'pop' in row['genre']:\n return 'pop'\n elif 'hip hop' in row['genre']:\n return 'hip hop'\n else:\n return 'other genre'", "def test_set_gender_if_provided():\n\n # GIVEN a gender which is not Gender.OTHER as input\n gender: PlinkGender = PlinkGender.FEMALE\n\n # WHEN running \"set_gender_if_other\"\n validated_gender: str = 
set_gender_if_other(gender)\n\n # THEN the returned string should not have been altered\n assert validated_gender == gender", "def filter_chants_by_genre(chants, include=[], exclude=[], logger=None):\n genres = chants['genre_id'].unique().tolist()\n if len(include) == 0:\n include = [genre for genre in genres if genre not in exclude]\n has_right_genre = chants['genre_id'].isin(include)\n return chants[has_right_genre]", "def set_genre(self, genre: str) -> None:\n self.genre = genre", "def test_choice(self):\n elt = random.choice(self.liste)\n self.assertIn(elt, self.liste)", "def test_choice(self):\n elt = random.choice(self.liste)\n self.assertIn(elt, self.liste)", "def addGenre(self, genre):\n if isinstance(genre, Genre):\n pass\n elif isinstance(genre, str):\n try:\n genre_name = genre[:genre.index('(')].strip().lower().replace(' ', '_')\n genre_id = int(genre[genre.index('(')+1 : genre.index(')')])\n except:\n print(\"Error: incorrectly formatted genre {}. Ignoring.\".format(genre))\n return\n genre = Genre(genre_name = genre_name, genre_id = genre_id)\n if genre not in self.anime_genres:\n self.anime_genres.append(genre)", "def genre_choices(request):\n choices = GENRES\n diction = {}\n li = []\n for data in choices:\n li.append(data[0])\n diction['GENRE_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)#, safe=False)", "def dichotomy(filter):\n assert \"bool\" == filter.name\n assert 1 == filter.minimum_should_match\n return filter.should", "def filterVarForWizard(self, v):\n return v.isMeasurement()", "def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()", "def test_render_none(self):\n self.check_html(\n self.widget(choices=((\"\", \"Unknown\"),) + self.beatles),\n \"beatles\",\n None,\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"\">Unknown</option>\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def filter_variant(self, x):\n return True", "def validare(film):\n\n erori = []\n if not film.get_fid():\n erori.append(\"Trebuie introdus un ID!\")\n if not type(film.get_fid()) is int or film.get_fid() <= 0:\n erori.append(\"ID-ul filmului trebuie sa fie un numar intreg mai mare sau egal decat 1!\")\n if not film.get_titlu():\n erori.append(\"Trebuie introdus un titlu!\")\n if not film.get_desc():\n erori.append(\"Trebuie introdusa o descriere!\")\n if not film.get_gen():\n erori.append(\"Trebuie introdus un gen!\")\n\n if len(erori) > 0:\n raise exceptie_film(erori)", "def test_should_choice_convert_string():\n assert_conversion(forms.ChoiceField, Int)", "def get_genre(self, gen: str) -> Genre:\n self.logging.log(15, f\"getting genre: {gen}\")\n return self.sess.query(Genre).filter(Genre.genre == gen).one()", "def validate_available_choice(enum, to_value):\n if to_value is not None and not to_value in dict(enum.choices()).keys():\n raise InvalidStatusOperationError(_(u'Select a valid choice. %(value)s is not one of the available choices.') % {'value': to_value})" ]
[ "0.54397243", "0.53808635", "0.5250568", "0.5232108", "0.52033514", "0.51955026", "0.51942164", "0.5174951", "0.5173452", "0.5173218", "0.516942", "0.5157201", "0.5134928", "0.50926733", "0.50899726", "0.5082016", "0.50662625", "0.5031735", "0.5031735", "0.5029781", "0.5016171", "0.50025684", "0.50006235", "0.49957833", "0.49901298", "0.49737254", "0.49551833", "0.49417758", "0.49294367", "0.49247545" ]
0.6587683
0